1 //===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This implements routines for translating from LLVM IR into SelectionDAG IR.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "SelectionDAGBuilder.h"
14 #include "SDNodeDbgValue.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/BitVector.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallPtrSet.h"
20 #include "llvm/ADT/SmallSet.h"
21 #include "llvm/ADT/StringRef.h"
22 #include "llvm/ADT/Twine.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/Analysis/BranchProbabilityInfo.h"
25 #include "llvm/Analysis/ConstantFolding.h"
26 #include "llvm/Analysis/Loads.h"
27 #include "llvm/Analysis/MemoryLocation.h"
28 #include "llvm/Analysis/TargetLibraryInfo.h"
29 #include "llvm/Analysis/ValueTracking.h"
30 #include "llvm/Analysis/VectorUtils.h"
31 #include "llvm/CodeGen/Analysis.h"
32 #include "llvm/CodeGen/AssignmentTrackingAnalysis.h"
33 #include "llvm/CodeGen/CodeGenCommonISel.h"
34 #include "llvm/CodeGen/FunctionLoweringInfo.h"
35 #include "llvm/CodeGen/GCMetadata.h"
36 #include "llvm/CodeGen/ISDOpcodes.h"
37 #include "llvm/CodeGen/MachineBasicBlock.h"
38 #include "llvm/CodeGen/MachineFrameInfo.h"
39 #include "llvm/CodeGen/MachineFunction.h"
40 #include "llvm/CodeGen/MachineInstrBuilder.h"
41 #include "llvm/CodeGen/MachineInstrBundleIterator.h"
42 #include "llvm/CodeGen/MachineMemOperand.h"
43 #include "llvm/CodeGen/MachineModuleInfo.h"
44 #include "llvm/CodeGen/MachineOperand.h"
45 #include "llvm/CodeGen/MachineRegisterInfo.h"
46 #include "llvm/CodeGen/RuntimeLibcalls.h"
47 #include "llvm/CodeGen/SelectionDAG.h"
48 #include "llvm/CodeGen/SelectionDAGTargetInfo.h"
49 #include "llvm/CodeGen/StackMaps.h"
50 #include "llvm/CodeGen/SwiftErrorValueTracking.h"
51 #include "llvm/CodeGen/TargetFrameLowering.h"
52 #include "llvm/CodeGen/TargetInstrInfo.h"
53 #include "llvm/CodeGen/TargetOpcodes.h"
54 #include "llvm/CodeGen/TargetRegisterInfo.h"
55 #include "llvm/CodeGen/TargetSubtargetInfo.h"
56 #include "llvm/CodeGen/WinEHFuncInfo.h"
57 #include "llvm/IR/Argument.h"
58 #include "llvm/IR/Attributes.h"
59 #include "llvm/IR/BasicBlock.h"
60 #include "llvm/IR/CFG.h"
61 #include "llvm/IR/CallingConv.h"
62 #include "llvm/IR/Constant.h"
63 #include "llvm/IR/ConstantRange.h"
64 #include "llvm/IR/Constants.h"
65 #include "llvm/IR/DataLayout.h"
66 #include "llvm/IR/DebugInfo.h"
67 #include "llvm/IR/DebugInfoMetadata.h"
68 #include "llvm/IR/DerivedTypes.h"
69 #include "llvm/IR/DiagnosticInfo.h"
70 #include "llvm/IR/EHPersonalities.h"
71 #include "llvm/IR/Function.h"
72 #include "llvm/IR/GetElementPtrTypeIterator.h"
73 #include "llvm/IR/InlineAsm.h"
74 #include "llvm/IR/InstrTypes.h"
75 #include "llvm/IR/Instructions.h"
76 #include "llvm/IR/IntrinsicInst.h"
77 #include "llvm/IR/Intrinsics.h"
78 #include "llvm/IR/IntrinsicsAArch64.h"
79 #include "llvm/IR/IntrinsicsWebAssembly.h"
80 #include "llvm/IR/LLVMContext.h"
81 #include "llvm/IR/Metadata.h"
82 #include "llvm/IR/Module.h"
83 #include "llvm/IR/Operator.h"
84 #include "llvm/IR/PatternMatch.h"
85 #include "llvm/IR/Statepoint.h"
86 #include "llvm/IR/Type.h"
87 #include "llvm/IR/User.h"
88 #include "llvm/IR/Value.h"
89 #include "llvm/MC/MCContext.h"
90 #include "llvm/Support/AtomicOrdering.h"
91 #include "llvm/Support/Casting.h"
92 #include "llvm/Support/CommandLine.h"
93 #include "llvm/Support/Compiler.h"
94 #include "llvm/Support/Debug.h"
95 #include "llvm/Support/MathExtras.h"
96 #include "llvm/Support/raw_ostream.h"
97 #include "llvm/Target/TargetIntrinsicInfo.h"
98 #include "llvm/Target/TargetMachine.h"
99 #include "llvm/Target/TargetOptions.h"
100 #include "llvm/TargetParser/Triple.h"
101 #include "llvm/Transforms/Utils/Local.h"
102 #include <cstddef>
103 #include <iterator>
104 #include <limits>
105 #include <optional>
106 #include <tuple>
107 
108 using namespace llvm;
109 using namespace PatternMatch;
110 using namespace SwitchCG;
111 
112 #define DEBUG_TYPE "isel"
113 
114 /// LimitFloatPrecision - Generate low-precision inline sequences for
115 /// some float libcalls (6, 8 or 12 bits).
116 static unsigned LimitFloatPrecision;
117 
118 static cl::opt<bool>
119     InsertAssertAlign("insert-assert-align", cl::init(true),
120                       cl::desc("Insert the experimental `assertalign` node."),
121                       cl::ReallyHidden);
122 
123 static cl::opt<unsigned, true>
124     LimitFPPrecision("limit-float-precision",
125                      cl::desc("Generate low-precision inline sequences "
126                               "for some float libcalls"),
127                      cl::location(LimitFloatPrecision), cl::Hidden,
128                      cl::init(0));
129 
130 static cl::opt<unsigned> SwitchPeelThreshold(
131     "switch-peel-threshold", cl::Hidden, cl::init(66),
132     cl::desc("Set the case probability threshold for peeling the case from a "
133              "switch statement. A value greater than 100 will disable this "
134              "optimization"));
135 
136 // Limit the width of DAG chains. This is important in general to prevent
137 // DAG-based analysis from blowing up. For example, alias analysis and
138 // load clustering may not complete in reasonable time. It is difficult to
139 // recognize and avoid this situation within each individual analysis, and
140 // future analyses are likely to have the same behavior. Limiting DAG width is
141 // the safe approach and will be especially important with global DAGs.
142 //
143 // MaxParallelChains default is arbitrarily high to avoid affecting
144 // optimization, but could be lowered to improve compile time. Any ld-ld-st-st
145 // sequence over this should have been converted to llvm.memcpy by the
146 // frontend. It is easy to induce this behavior with .ll code such as:
147 // %buffer = alloca [4096 x i8]
148 // %data = load [4096 x i8], ptr %argPtr
149 // store [4096 x i8] %data, ptr %buffer
150 static const unsigned MaxParallelChains = 64;
151 
152 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
153                                       const SDValue *Parts, unsigned NumParts,
154                                       MVT PartVT, EVT ValueVT, const Value *V,
155                                       std::optional<CallingConv::ID> CC);
156 
157 /// getCopyFromParts - Create a value that contains the specified legal parts
158 /// combined into the value they represent.  If the parts combine to a type
159 /// larger than ValueVT then AssertOp can be used to specify whether the extra
160 /// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
161 /// (ISD::AssertSext).
162 static SDValue
163 getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
164                  unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V,
165                  std::optional<CallingConv::ID> CC = std::nullopt,
166                  std::optional<ISD::NodeType> AssertOp = std::nullopt) {
167   // Let the target assemble the parts if it wants to
168   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
169   if (SDValue Val = TLI.joinRegisterPartsIntoValue(DAG, DL, Parts, NumParts,
170                                                    PartVT, ValueVT, CC))
171     return Val;
172 
173   if (ValueVT.isVector())
174     return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
175                                   CC);
176 
177   assert(NumParts > 0 && "No parts to assemble!");
178   SDValue Val = Parts[0];
179 
180   if (NumParts > 1) {
181     // Assemble the value from multiple parts.
182     if (ValueVT.isInteger()) {
183       unsigned PartBits = PartVT.getSizeInBits();
184       unsigned ValueBits = ValueVT.getSizeInBits();
185 
186       // Assemble the power of 2 part.
187       unsigned RoundParts = llvm::bit_floor(NumParts);
188       unsigned RoundBits = PartBits * RoundParts;
189       EVT RoundVT = RoundBits == ValueBits ?
190         ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
191       SDValue Lo, Hi;
192 
193       EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
194 
195       if (RoundParts > 2) {
196         Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
197                               PartVT, HalfVT, V);
198         Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
199                               RoundParts / 2, PartVT, HalfVT, V);
200       } else {
201         Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
202         Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
203       }
204 
205       if (DAG.getDataLayout().isBigEndian())
206         std::swap(Lo, Hi);
207 
208       Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);
209 
210       if (RoundParts < NumParts) {
211         // Assemble the trailing non-power-of-2 part.
212         unsigned OddParts = NumParts - RoundParts;
213         EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
214         Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
215                               OddVT, V, CC);
216 
217         // Combine the round and odd parts.
218         Lo = Val;
219         if (DAG.getDataLayout().isBigEndian())
220           std::swap(Lo, Hi);
221         EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
222         Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
223         Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
224                          DAG.getConstant(Lo.getValueSizeInBits(), DL,
225                                          TLI.getShiftAmountTy(
226                                              TotalVT, DAG.getDataLayout())));
227         Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
228         Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
229       }
230     } else if (PartVT.isFloatingPoint()) {
231       // FP split into multiple FP parts (for ppcf128)
232       assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
233              "Unexpected split");
234       SDValue Lo, Hi;
235       Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
236       Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
237       if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
238         std::swap(Lo, Hi);
239       Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
240     } else {
241       // FP split into integer parts (soft fp)
242       assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
243              !PartVT.isVector() && "Unexpected split");
244       EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
245       Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC);
246     }
247   }
248 
249   // There is now one part, held in Val.  Correct it to match ValueVT.
250   // PartEVT is the type of the register class that holds the value.
251   // ValueVT is the type of the inline asm operation.
252   EVT PartEVT = Val.getValueType();
253 
254   if (PartEVT == ValueVT)
255     return Val;
256 
257   if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
258       ValueVT.bitsLT(PartEVT)) {
259     // For an FP value in an integer part, we need to truncate to the right
260     // width first.
261     PartEVT = EVT::getIntegerVT(*DAG.getContext(),  ValueVT.getSizeInBits());
262     Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
263   }
264 
265   // Handle types that have the same size.
266   if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
267     return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
268 
269   // Handle types with different sizes.
270   if (PartEVT.isInteger() && ValueVT.isInteger()) {
271     if (ValueVT.bitsLT(PartEVT)) {
272       // For a truncate, see if we have any information to
273       // indicate whether the truncated bits will always be
274       // zero or sign-extended.
275       if (AssertOp)
276         Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
277                           DAG.getValueType(ValueVT));
278       return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
279     }
280     return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
281   }
282 
283   if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
284     // FP_ROUND's are always exact here.
285     if (ValueVT.bitsLT(Val.getValueType()))
286       return DAG.getNode(
287           ISD::FP_ROUND, DL, ValueVT, Val,
288           DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));
289 
290     return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
291   }
292 
293   // Handle MMX to a narrower integer type by bitcasting MMX to integer and
294   // then truncating.
295   if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
296       ValueVT.bitsLT(PartEVT)) {
297     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
298     return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
299   }
300 
301   report_fatal_error("Unknown mismatch in getCopyFromParts!");
302 }
303 
304 static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
305                                               const Twine &ErrMsg) {
306   const Instruction *I = dyn_cast_or_null<Instruction>(V);
307   if (!V)
308     return Ctx.emitError(ErrMsg);
309 
310   const char *AsmError = ", possible invalid constraint for vector type";
311   if (const CallInst *CI = dyn_cast<CallInst>(I))
312     if (CI->isInlineAsm())
313       return Ctx.emitError(I, ErrMsg + AsmError);
314 
315   return Ctx.emitError(I, ErrMsg);
316 }
317 
318 /// getCopyFromPartsVector - Create a value that contains the specified legal
319 /// parts combined into the value they represent.  If the parts combine to a
320 /// type larger than ValueVT then AssertOp can be used to specify whether the
321 /// extra bits are known to be zero (ISD::AssertZext) or sign extended from
322 /// ValueVT (ISD::AssertSext).
323 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
324                                       const SDValue *Parts, unsigned NumParts,
325                                       MVT PartVT, EVT ValueVT, const Value *V,
326                                       std::optional<CallingConv::ID> CallConv) {
327   assert(ValueVT.isVector() && "Not a vector value");
328   assert(NumParts > 0 && "No parts to assemble!");
329   const bool IsABIRegCopy = CallConv.has_value();
330 
331   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
332   SDValue Val = Parts[0];
333 
334   // Handle a multi-element vector.
335   if (NumParts > 1) {
336     EVT IntermediateVT;
337     MVT RegisterVT;
338     unsigned NumIntermediates;
339     unsigned NumRegs;
340 
341     if (IsABIRegCopy) {
342       NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
343           *DAG.getContext(), *CallConv, ValueVT, IntermediateVT,
344           NumIntermediates, RegisterVT);
345     } else {
346       NumRegs =
347           TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
348                                      NumIntermediates, RegisterVT);
349     }
350 
351     assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
352     NumParts = NumRegs; // Silence a compiler warning.
353     assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
354     assert(RegisterVT.getSizeInBits() ==
355            Parts[0].getSimpleValueType().getSizeInBits() &&
356            "Part type sizes don't match!");
357 
358     // Assemble the parts into intermediate operands.
359     SmallVector<SDValue, 8> Ops(NumIntermediates);
360     if (NumIntermediates == NumParts) {
361       // If the register was not expanded, truncate or copy the value,
362       // as appropriate.
363       for (unsigned i = 0; i != NumParts; ++i)
364         Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
365                                   PartVT, IntermediateVT, V, CallConv);
366     } else if (NumParts > 0) {
367       // If the intermediate type was expanded, build the intermediate
368       // operands from the parts.
369       assert(NumParts % NumIntermediates == 0 &&
370              "Must expand into a divisible number of parts!");
371       unsigned Factor = NumParts / NumIntermediates;
372       for (unsigned i = 0; i != NumIntermediates; ++i)
373         Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
374                                   PartVT, IntermediateVT, V, CallConv);
375     }
376 
377     // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
378     // intermediate operands.
379     EVT BuiltVectorTy =
380         IntermediateVT.isVector()
381             ? EVT::getVectorVT(
382                   *DAG.getContext(), IntermediateVT.getScalarType(),
383                   IntermediateVT.getVectorElementCount() * NumParts)
384             : EVT::getVectorVT(*DAG.getContext(),
385                                IntermediateVT.getScalarType(),
386                                NumIntermediates);
387     Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
388                                                 : ISD::BUILD_VECTOR,
389                       DL, BuiltVectorTy, Ops);
390   }
391 
392   // There is now one part, held in Val.  Correct it to match ValueVT.
393   EVT PartEVT = Val.getValueType();
394 
395   if (PartEVT == ValueVT)
396     return Val;
397 
398   if (PartEVT.isVector()) {
399     // Vector/Vector bitcast.
400     if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
401       return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
402 
403     // If the parts vector has more elements than the value vector, then we
404     // have a vector widening case (e.g. <2 x float> -> <4 x float>).
405     // Extract the elements we want.
406     if (PartEVT.getVectorElementCount() != ValueVT.getVectorElementCount()) {
407       assert((PartEVT.getVectorElementCount().getKnownMinValue() >
408               ValueVT.getVectorElementCount().getKnownMinValue()) &&
409              (PartEVT.getVectorElementCount().isScalable() ==
410               ValueVT.getVectorElementCount().isScalable()) &&
411              "Cannot narrow, it would be a lossy transformation");
412       PartEVT =
413           EVT::getVectorVT(*DAG.getContext(), PartEVT.getVectorElementType(),
414                            ValueVT.getVectorElementCount());
415       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, PartEVT, Val,
416                         DAG.getVectorIdxConstant(0, DL));
417       if (PartEVT == ValueVT)
418         return Val;
419       if (PartEVT.isInteger() && ValueVT.isFloatingPoint())
420         return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
421 
422       // Vector/Vector bitcast (e.g. <2 x bfloat> -> <2 x half>).
423       if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
424         return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
425     }
426 
427     // Promoted vector extract
428     return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
429   }
430 
431   // Trivial bitcast if the types are the same size and the destination
432   // vector type is legal.
433   if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
434       TLI.isTypeLegal(ValueVT))
435     return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
436 
437   if (ValueVT.getVectorNumElements() != 1) {
438      // Certain ABIs require that vectors are passed as integers. For vectors
439      // that are the same size, this is an obvious bitcast.
440      if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
441        return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
442      } else if (ValueVT.bitsLT(PartEVT)) {
443        const uint64_t ValueSize = ValueVT.getFixedSizeInBits();
444        EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
445        // Drop the extra bits.
446        Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
447        return DAG.getBitcast(ValueVT, Val);
448      }
449 
450      diagnosePossiblyInvalidConstraint(
451          *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
452      return DAG.getUNDEF(ValueVT);
453   }
454 
455   // Handle cases such as i8 -> <1 x i1>
456   EVT ValueSVT = ValueVT.getVectorElementType();
457   if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT) {
458     unsigned ValueSize = ValueSVT.getSizeInBits();
459     if (ValueSize == PartEVT.getSizeInBits()) {
460       Val = DAG.getNode(ISD::BITCAST, DL, ValueSVT, Val);
461     } else if (ValueSVT.isFloatingPoint() && PartEVT.isInteger()) {
462       // It's possible a scalar floating point type gets softened to integer and
463       // then promoted to a larger integer. If PartEVT is the larger integer
464       // we need to truncate it and then bitcast to the FP type.
465       assert(ValueSVT.bitsLT(PartEVT) && "Unexpected types");
466       EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
467       Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
468       Val = DAG.getBitcast(ValueSVT, Val);
469     } else {
470       Val = ValueVT.isFloatingPoint()
471                 ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
472                 : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
473     }
474   }
475 
476   return DAG.getBuildVector(ValueVT, DL, Val);
477 }
478 
479 static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
480                                  SDValue Val, SDValue *Parts, unsigned NumParts,
481                                  MVT PartVT, const Value *V,
482                                  std::optional<CallingConv::ID> CallConv);
483 
484 /// getCopyToParts - Create a series of nodes that contain the specified value
485 /// split into legal parts.  If the parts contain more bits than Val, then, for
486 /// integers, ExtendKind can be used to specify how to generate the extra bits.
487 static void
488 getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
489                unsigned NumParts, MVT PartVT, const Value *V,
490                std::optional<CallingConv::ID> CallConv = std::nullopt,
491                ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
492   // Let the target split the parts if it wants to
493   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
494   if (TLI.splitValueIntoRegisterParts(DAG, DL, Val, Parts, NumParts, PartVT,
495                                       CallConv))
496     return;
497   EVT ValueVT = Val.getValueType();
498 
499   // Handle the vector case separately.
500   if (ValueVT.isVector())
501     return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
502                                 CallConv);
503 
504   unsigned OrigNumParts = NumParts;
505   assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
506          "Copying to an illegal type!");
507 
508   if (NumParts == 0)
509     return;
510 
511   assert(!ValueVT.isVector() && "Vector case handled elsewhere");
512   EVT PartEVT = PartVT;
513   if (PartEVT == ValueVT) {
514     assert(NumParts == 1 && "No-op copy with multiple parts!");
515     Parts[0] = Val;
516     return;
517   }
518 
519   unsigned PartBits = PartVT.getSizeInBits();
520   if (NumParts * PartBits > ValueVT.getSizeInBits()) {
521     // If the parts cover more bits than the value has, promote the value.
522     if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
523       assert(NumParts == 1 && "Do not know what to promote to!");
524       Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
525     } else {
526       if (ValueVT.isFloatingPoint()) {
527         // FP values need to be bitcast, then extended if they are being put
528         // into a larger container.
529         ValueVT = EVT::getIntegerVT(*DAG.getContext(),  ValueVT.getSizeInBits());
530         Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
531       }
532       assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
533              ValueVT.isInteger() &&
534              "Unknown mismatch!");
535       ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
536       Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
537       if (PartVT == MVT::x86mmx)
538         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
539     }
540   } else if (PartBits == ValueVT.getSizeInBits()) {
541     // Different types of the same size.
542     assert(NumParts == 1 && PartEVT != ValueVT);
543     Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
544   } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
545     // If the parts cover fewer bits than the value has, truncate the value.
546     assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
547            ValueVT.isInteger() &&
548            "Unknown mismatch!");
549     ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
550     Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
551     if (PartVT == MVT::x86mmx)
552       Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
553   }
554 
555   // The value may have changed - recompute ValueVT.
556   ValueVT = Val.getValueType();
557   assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
558          "Failed to tile the value with PartVT!");
559 
560   if (NumParts == 1) {
561     if (PartEVT != ValueVT) {
562       diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
563                                         "scalar-to-vector conversion failed");
564       Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
565     }
566 
567     Parts[0] = Val;
568     return;
569   }
570 
571   // Expand the value into multiple parts.
572   if (NumParts & (NumParts - 1)) {
573     // The number of parts is not a power of 2.  Split off and copy the tail.
574     assert(PartVT.isInteger() && ValueVT.isInteger() &&
575            "Do not know what to expand to!");
576     unsigned RoundParts = llvm::bit_floor(NumParts);
577     unsigned RoundBits = RoundParts * PartBits;
578     unsigned OddParts = NumParts - RoundParts;
579     SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
580       DAG.getShiftAmountConstant(RoundBits, ValueVT, DL));
581 
582     getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
583                    CallConv);
584 
585     if (DAG.getDataLayout().isBigEndian())
586       // The odd parts were reversed by getCopyToParts - unreverse them.
587       std::reverse(Parts + RoundParts, Parts + NumParts);
588 
589     NumParts = RoundParts;
590     ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
591     Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
592   }
593 
594   // The number of parts is a power of 2.  Repeatedly bisect the value using
595   // EXTRACT_ELEMENT.
596   Parts[0] = DAG.getNode(ISD::BITCAST, DL,
597                          EVT::getIntegerVT(*DAG.getContext(),
598                                            ValueVT.getSizeInBits()),
599                          Val);
600 
601   for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
602     for (unsigned i = 0; i < NumParts; i += StepSize) {
603       unsigned ThisBits = StepSize * PartBits / 2;
604       EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
605       SDValue &Part0 = Parts[i];
606       SDValue &Part1 = Parts[i+StepSize/2];
607 
608       Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
609                           ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
610       Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
611                           ThisVT, Part0, DAG.getIntPtrConstant(0, DL));
612 
613       if (ThisBits == PartBits && ThisVT != PartVT) {
614         Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
615         Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
616       }
617     }
618   }
619 
620   if (DAG.getDataLayout().isBigEndian())
621     std::reverse(Parts, Parts + OrigNumParts);
622 }
623 
624 static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val,
625                                      const SDLoc &DL, EVT PartVT) {
626   if (!PartVT.isVector())
627     return SDValue();
628 
629   EVT ValueVT = Val.getValueType();
630   EVT PartEVT = PartVT.getVectorElementType();
631   EVT ValueEVT = ValueVT.getVectorElementType();
632   ElementCount PartNumElts = PartVT.getVectorElementCount();
633   ElementCount ValueNumElts = ValueVT.getVectorElementCount();
634 
635   // We only support widening vectors with equivalent element types and
636   // fixed/scalable properties. If a target needs to widen a fixed-length type
637   // to a scalable one, it should be possible to use INSERT_SUBVECTOR below.
638   if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
639       PartNumElts.isScalable() != ValueNumElts.isScalable())
640     return SDValue();
641 
642   // Handle bf16 specially, because some targets share its ABI with fp16.
643   if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
644     assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
645            "Cannot widen to illegal type");
646     Val = DAG.getNode(ISD::BITCAST, DL,
647                       ValueVT.changeVectorElementType(MVT::f16), Val);
648   } else if (PartEVT != ValueEVT) {
649     return SDValue();
650   }
651 
652   // Widening a scalable vector to another scalable vector is done by inserting
653   // the vector into a larger undef one.
654   if (PartNumElts.isScalable())
655     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
656                        Val, DAG.getVectorIdxConstant(0, DL));
657 
658   // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
659   // undef elements.
660   SmallVector<SDValue, 16> Ops;
661   DAG.ExtractVectorElements(Val, Ops);
662   SDValue EltUndef = DAG.getUNDEF(PartEVT);
663   Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
664 
665   // FIXME: Use CONCAT for 2x -> 4x.
666   return DAG.getBuildVector(PartVT, DL, Ops);
667 }
668 
669 /// getCopyToPartsVector - Create a series of nodes that contain the specified
670 /// value split into legal parts.
671 static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
672                                  SDValue Val, SDValue *Parts, unsigned NumParts,
673                                  MVT PartVT, const Value *V,
674                                  std::optional<CallingConv::ID> CallConv) {
675   EVT ValueVT = Val.getValueType();
676   assert(ValueVT.isVector() && "Not a vector");
677   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
678   const bool IsABIRegCopy = CallConv.has_value();
679 
680   if (NumParts == 1) {
681     EVT PartEVT = PartVT;
682     if (PartEVT == ValueVT) {
683       // Nothing to do.
684     } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
685       // Bitconvert vector->vector case.
686       Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
687     } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
688       Val = Widened;
689     } else if (PartVT.isVector() &&
690                PartEVT.getVectorElementType().bitsGE(
691                    ValueVT.getVectorElementType()) &&
692                PartEVT.getVectorElementCount() ==
693                    ValueVT.getVectorElementCount()) {
694 
695       // Promoted vector extract
696       Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
697     } else if (PartEVT.isVector() &&
698                PartEVT.getVectorElementType() !=
699                    ValueVT.getVectorElementType() &&
700                TLI.getTypeAction(*DAG.getContext(), ValueVT) ==
701                    TargetLowering::TypeWidenVector) {
702       // Combination of widening and promotion.
703       EVT WidenVT =
704           EVT::getVectorVT(*DAG.getContext(), ValueVT.getVectorElementType(),
705                            PartVT.getVectorElementCount());
706       SDValue Widened = widenVectorToPartType(DAG, Val, DL, WidenVT);
707       Val = DAG.getAnyExtOrTrunc(Widened, DL, PartVT);
708     } else {
709       // Don't extract an integer from a float vector. This can happen if the
710       // FP type gets softened to integer and then promoted. The promotion
711       // prevents it from being picked up by the earlier bitcast case.
712       if (ValueVT.getVectorElementCount().isScalar() &&
713           (!ValueVT.isFloatingPoint() || !PartVT.isInteger())) {
714         Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
715                           DAG.getVectorIdxConstant(0, DL));
716       } else {
717         uint64_t ValueSize = ValueVT.getFixedSizeInBits();
718         assert(PartVT.getFixedSizeInBits() > ValueSize &&
719                "lossy conversion of vector to scalar type");
720         EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
721         Val = DAG.getBitcast(IntermediateType, Val);
722         Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
723       }
724     }
725 
726     assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
727     Parts[0] = Val;
728     return;
729   }
730 
731   // Handle a multi-element vector.
732   EVT IntermediateVT;
733   MVT RegisterVT;
734   unsigned NumIntermediates;
735   unsigned NumRegs;
736   if (IsABIRegCopy) {
737     NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
738         *DAG.getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
739         RegisterVT);
740   } else {
741     NumRegs =
742         TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
743                                    NumIntermediates, RegisterVT);
744   }
745 
746   assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
747   NumParts = NumRegs; // Silence a compiler warning.
748   assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
749 
750   assert(IntermediateVT.isScalableVector() == ValueVT.isScalableVector() &&
751          "Mixing scalable and fixed vectors when copying in parts");
752 
753   std::optional<ElementCount> DestEltCnt;
754 
755   if (IntermediateVT.isVector())
756     DestEltCnt = IntermediateVT.getVectorElementCount() * NumIntermediates;
757   else
758     DestEltCnt = ElementCount::getFixed(NumIntermediates);
759 
760   EVT BuiltVectorTy = EVT::getVectorVT(
761       *DAG.getContext(), IntermediateVT.getScalarType(), *DestEltCnt);
762 
763   if (ValueVT == BuiltVectorTy) {
764     // Nothing to do.
765   } else if (ValueVT.getSizeInBits() == BuiltVectorTy.getSizeInBits()) {
766     // Bitconvert vector->vector case.
767     Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
768   } else {
769     if (BuiltVectorTy.getVectorElementType().bitsGT(
770             ValueVT.getVectorElementType())) {
771       // Integer promotion.
772       ValueVT = EVT::getVectorVT(*DAG.getContext(),
773                                  BuiltVectorTy.getVectorElementType(),
774                                  ValueVT.getVectorElementCount());
775       Val = DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
776     }
777 
778     if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy)) {
779       Val = Widened;
780     }
781   }
782 
783   assert(Val.getValueType() == BuiltVectorTy && "Unexpected vector value type");
784 
785   // Split the vector into intermediate operands.
786   SmallVector<SDValue, 8> Ops(NumIntermediates);
787   for (unsigned i = 0; i != NumIntermediates; ++i) {
788     if (IntermediateVT.isVector()) {
789       // This does something sensible for scalable vectors - see the
790       // definition of EXTRACT_SUBVECTOR for further details.
791       unsigned IntermediateNumElts = IntermediateVT.getVectorMinNumElements();
792       Ops[i] =
793           DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
794                       DAG.getVectorIdxConstant(i * IntermediateNumElts, DL));
795     } else {
796       Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
797                            DAG.getVectorIdxConstant(i, DL));
798     }
799   }
800 
801   // Split the intermediate operands into legal parts.
802   if (NumParts == NumIntermediates) {
803     // If the register was not expanded, promote or copy the value,
804     // as appropriate.
805     for (unsigned i = 0; i != NumParts; ++i)
806       getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
807   } else if (NumParts > 0) {
808     // If the intermediate type was expanded, split each value into
809     // legal parts.
810     assert(NumIntermediates != 0 && "division by zero");
811     assert(NumParts % NumIntermediates == 0 &&
812            "Must expand into a divisible number of parts!");
813     unsigned Factor = NumParts / NumIntermediates;
814     for (unsigned i = 0; i != NumIntermediates; ++i)
815       getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
816                      CallConv);
817   }
818 }
819 
820 RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
821                            EVT valuevt, std::optional<CallingConv::ID> CC)
822     : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
823       RegCount(1, regs.size()), CallConv(CC) {}
824 
825 RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
826                            const DataLayout &DL, unsigned Reg, Type *Ty,
827                            std::optional<CallingConv::ID> CC) {
828   ComputeValueVTs(TLI, DL, Ty, ValueVTs);
829 
830   CallConv = CC;
831 
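  // For each value type, record the consecutive registers assigned to it,
  // using the calling convention's register count and type when this copy is
  // ABI mangled.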
832   for (EVT ValueVT : ValueVTs) {
833     unsigned NumRegs =
834         isABIMangled()
835             ? TLI.getNumRegistersForCallingConv(Context, *CC, ValueVT)
836             : TLI.getNumRegisters(Context, ValueVT);
837     MVT RegisterVT =
838         isABIMangled()
839             ? TLI.getRegisterTypeForCallingConv(Context, *CC, ValueVT)
840             : TLI.getRegisterType(Context, ValueVT);
841     for (unsigned i = 0; i != NumRegs; ++i)
842       Regs.push_back(Reg + i);
843     RegVTs.push_back(RegisterVT);
844     RegCount.push_back(NumRegs);
845     Reg += NumRegs;
846   }
847 }
848 
849 SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
850                                       FunctionLoweringInfo &FuncInfo,
851                                       const SDLoc &dl, SDValue &Chain,
852                                       SDValue *Glue, const Value *V) const {
853   // A Value with type {} or [0 x %t] needs no registers.
854   if (ValueVTs.empty())
855     return SDValue();
856 
857   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
858 
859   // Assemble the legal parts into the final values.
860   SmallVector<SDValue, 4> Values(ValueVTs.size());
861   SmallVector<SDValue, 8> Parts;
862   for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
863     // Copy the legal parts from the registers.
864     EVT ValueVT = ValueVTs[Value];
865     unsigned NumRegs = RegCount[Value];
866     MVT RegisterVT = isABIMangled()
867                          ? TLI.getRegisterTypeForCallingConv(
868                                *DAG.getContext(), *CallConv, RegVTs[Value])
869                          : RegVTs[Value];
870 
871     Parts.resize(NumRegs);
872     for (unsigned i = 0; i != NumRegs; ++i) {
873       SDValue P;
874       if (!Glue) {
875         P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
876       } else {
877         P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Glue);
878         *Glue = P.getValue(2);
879       }
880 
881       Chain = P.getValue(1);
882       Parts[i] = P;
883 
884       // If the source register was virtual and if we know something about it,
885       // add an assert node.
886       if (!Register::isVirtualRegister(Regs[Part + i]) ||
887           !RegisterVT.isInteger())
888         continue;
889 
890       const FunctionLoweringInfo::LiveOutInfo *LOI =
891         FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
892       if (!LOI)
893         continue;
894 
895       unsigned RegSize = RegisterVT.getScalarSizeInBits();
896       unsigned NumSignBits = LOI->NumSignBits;
897       unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();
898 
899       if (NumZeroBits == RegSize) {
900         // The current value is a zero.
901         // Explicitly express that as it would be easier for
902         // optimizations to kick in.
903         Parts[i] = DAG.getConstant(0, dl, RegisterVT);
904         continue;
905       }
906 
907       // FIXME: We capture more information than the dag can represent.  For
908       // now, just use the tightest assertzext/assertsext possible.
909       bool isSExt;
910       EVT FromVT(MVT::Other);
911       if (NumZeroBits) {
912         FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
913         isSExt = false;
914       } else if (NumSignBits > 1) {
915         FromVT =
916             EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
917         isSExt = true;
918       } else {
919         continue;
920       }
921       // Add an assertion node.
922       assert(FromVT != MVT::Other);
923       Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
924                              RegisterVT, P, DAG.getValueType(FromVT));
925     }
926 
927     Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
928                                      RegisterVT, ValueVT, V, CallConv);
929     Part += NumRegs;
930     Parts.clear();
931   }
932 
933   return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
934 }
935 
936 void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
937                                  const SDLoc &dl, SDValue &Chain, SDValue *Glue,
938                                  const Value *V,
939                                  ISD::NodeType PreferredExtendType) const {
940   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
941   ISD::NodeType ExtendKind = PreferredExtendType;
942 
943   // Get the list of the value's legal parts.
944   unsigned NumRegs = Regs.size();
945   SmallVector<SDValue, 8> Parts(NumRegs);
946   for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
947     unsigned NumParts = RegCount[Value];
948 
949     MVT RegisterVT = isABIMangled()
950                          ? TLI.getRegisterTypeForCallingConv(
951                                *DAG.getContext(), *CallConv, RegVTs[Value])
952                          : RegVTs[Value];
953 
954     if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
955       ExtendKind = ISD::ZERO_EXTEND;
956 
957     getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
958                    NumParts, RegisterVT, V, CallConv, ExtendKind);
959     Part += NumParts;
960   }
961 
962   // Copy the parts into the registers.
963   SmallVector<SDValue, 8> Chains(NumRegs);
964   for (unsigned i = 0; i != NumRegs; ++i) {
965     SDValue Part;
966     if (!Glue) {
967       Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
968     } else {
969       Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Glue);
970       *Glue = Part.getValue(1);
971     }
972 
973     Chains[i] = Part.getValue(0);
974   }
975 
976   if (NumRegs == 1 || Glue)
977     // If NumRegs > 1 && Glue is used then the use of the last CopyToReg is
978     // flagged to it. That is, the CopyToReg nodes and the user are considered
979     // a single scheduling unit. If we create a TokenFactor and return it as
980     // chain, then the TokenFactor is both a predecessor (operand) of the
981     // user as well as a successor (the TF operands are flagged to the user).
982     // c1, f1 = CopyToReg
983     // c2, f2 = CopyToReg
984     // c3     = TokenFactor c1, c2
985     // ...
986     //        = op c3, ..., f2
987     Chain = Chains[NumRegs-1];
988   else
989     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
990 }
991 
992 void RegsForValue::AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching,
993                                         unsigned MatchingIdx, const SDLoc &dl,
994                                         SelectionDAG &DAG,
995                                         std::vector<SDValue> &Ops) const {
996   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
997 
998   InlineAsm::Flag Flag(Code, Regs.size());
999   if (HasMatching)
1000     Flag.setMatchingOp(MatchingIdx);
1001   else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
1002     // Put the register class of the virtual registers in the flag word.  That
1003     // way, later passes can recompute register class constraints for inline
1004     // assembly as well as normal instructions.
1005     // Don't do this for tied operands that can use the regclass information
1006     // from the def.
1007     const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
1008     const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
1009     Flag.setRegClass(RC->getID());
1010   }
1011 
1012   SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
1013   Ops.push_back(Res);
1014 
1015   if (Code == InlineAsm::Kind::Clobber) {
1016     // Clobbers should always have a 1:1 mapping with registers, and may
1017     // reference registers that have illegal (e.g. vector) types. Hence, we
1018     // shouldn't try to apply any sort of splitting logic to them.
1019     assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
1020            "No 1:1 mapping from clobbers to regs?");
1021     Register SP = TLI.getStackPointerRegisterToSaveRestore();
1022     (void)SP;
1023     for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
1024       Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
1025       assert(
1026           (Regs[I] != SP ||
1027            DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
1028           "If we clobbered the stack pointer, MFI should know about it.");
1029     }
1030     return;
1031   }
1032 
1033   for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
1034     MVT RegisterVT = RegVTs[Value];
1035     unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value],
1036                                            RegisterVT);
1037     for (unsigned i = 0; i != NumRegs; ++i) {
1038       assert(Reg < Regs.size() && "Mismatch in # registers expected");
1039       unsigned TheReg = Regs[Reg++];
1040       Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
1041     }
1042   }
1043 }
1044 
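// Return one (register, size-in-bits) pair for each register in Regs, in
// order.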
1045 SmallVector<std::pair<unsigned, TypeSize>, 4>
1046 RegsForValue::getRegsAndSizes() const {
1047   SmallVector<std::pair<unsigned, TypeSize>, 4> OutVec;
1048   unsigned I = 0;
1049   for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
1050     unsigned RegCount = std::get<0>(CountAndVT);
1051     MVT RegisterVT = std::get<1>(CountAndVT);
1052     TypeSize RegisterSize = RegisterVT.getSizeInBits();
1053     for (unsigned E = I + RegCount; I != E; ++I)
1054       OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
1055   }
1056   return OutVec;
1057 }
1058 
1059 void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
1060                                AssumptionCache *ac,
1061                                const TargetLibraryInfo *li) {
1062   AA = aa;
1063   AC = ac;
1064   GFI = gfi;
1065   LibInfo = li;
1066   Context = DAG.getContext();
1067   LPadToCallSiteMap.clear();
1068   SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
1069   AssignmentTrackingEnabled = isAssignmentTrackingEnabled(
1070       *DAG.getMachineFunction().getFunction().getParent());
1071 }
1072 
1073 void SelectionDAGBuilder::clear() {
1074   NodeMap.clear();
1075   UnusedArgNodeMap.clear();
1076   PendingLoads.clear();
1077   PendingExports.clear();
1078   PendingConstrainedFP.clear();
1079   PendingConstrainedFPStrict.clear();
1080   CurInst = nullptr;
1081   HasTailCall = false;
1082   SDNodeOrder = LowestSDNodeOrder;
1083   StatepointLowering.clear();
1084 }
1085 
1086 void SelectionDAGBuilder::clearDanglingDebugInfo() {
1087   DanglingDebugInfoMap.clear();
1088 }
1089 
1090 // Update DAG root to include dependencies on Pending chains.
1091 SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
1092   SDValue Root = DAG.getRoot();
1093 
1094   if (Pending.empty())
1095     return Root;
1096 
1097   // Add the current root to Pending, unless we already indirectly
1098   // depend on it.
1099   if (Root.getOpcode() != ISD::EntryToken) {
1100     unsigned i = 0, e = Pending.size();
1101     for (; i != e; ++i) {
1102       assert(Pending[i].getNode()->getNumOperands() > 1);
1103       if (Pending[i].getNode()->getOperand(0) == Root)
1104         break;  // Don't add the root if we already indirectly depend on it.
1105     }
1106 
1107     if (i == e)
1108       Pending.push_back(Root);
1109   }
1110 
1111   if (Pending.size() == 1)
1112     Root = Pending[0];
1113   else
1114     Root = DAG.getTokenFactor(getCurSDLoc(), Pending);
1115 
1116   DAG.setRoot(Root);
1117   Pending.clear();
1118   return Root;
1119 }
1120 
1121 SDValue SelectionDAGBuilder::getMemoryRoot() {
1122   return updateRoot(PendingLoads);
1123 }
1124 
1125 SDValue SelectionDAGBuilder::getRoot() {
1126   // Chain up all pending constrained intrinsics together with all
1127   // pending loads, by simply appending them to PendingLoads and
1128   // then calling getMemoryRoot().
1129   PendingLoads.reserve(PendingLoads.size() +
1130                        PendingConstrainedFP.size() +
1131                        PendingConstrainedFPStrict.size());
1132   PendingLoads.append(PendingConstrainedFP.begin(),
1133                       PendingConstrainedFP.end());
1134   PendingLoads.append(PendingConstrainedFPStrict.begin(),
1135                       PendingConstrainedFPStrict.end());
1136   PendingConstrainedFP.clear();
1137   PendingConstrainedFPStrict.clear();
1138   return getMemoryRoot();
1139 }
1140 
1141 SDValue SelectionDAGBuilder::getControlRoot() {
1142   // We need to emit pending fpexcept.strict constrained intrinsics,
1143   // so append them to the PendingExports list.
1144   PendingExports.append(PendingConstrainedFPStrict.begin(),
1145                         PendingConstrainedFPStrict.end());
1146   PendingConstrainedFPStrict.clear();
1147   return updateRoot(PendingExports);
1148 }
1149 
1150 void SelectionDAGBuilder::visit(const Instruction &I) {
1151   // Set up outgoing PHI node register values before emitting the terminator.
1152   if (I.isTerminator()) {
1153     HandlePHINodesInSuccessorBlocks(I.getParent());
1154   }
1155 
1156   // Add SDDbgValue nodes for any var locs here. Do so before updating
1157   // SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}.
1158   if (FunctionVarLocs const *FnVarLocs = DAG.getFunctionVarLocs()) {
1161     for (auto It = FnVarLocs->locs_begin(&I), End = FnVarLocs->locs_end(&I);
1162          It != End; ++It) {
1163       auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1164       dropDanglingDebugInfo(Var, It->Expr);
1165       if (It->Values.isKillLocation(It->Expr)) {
1166         handleKillDebugValue(Var, It->Expr, It->DL, SDNodeOrder);
1167         continue;
1168       }
1169       SmallVector<Value *> Values(It->Values.location_ops());
1170       if (!handleDebugValue(Values, Var, It->Expr, It->DL, SDNodeOrder,
1171                             It->Values.hasArgList()))
1172         addDanglingDebugInfo(It, SDNodeOrder);
1173     }
1174   }
1175 
1176   // Increase the SDNodeOrder if dealing with a non-debug instruction.
1177   if (!isa<DbgInfoIntrinsic>(I))
1178     ++SDNodeOrder;
1179 
1180   CurInst = &I;
1181 
1182   // Set inserted listener only if required.
1183   bool NodeInserted = false;
1184   std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1185   MDNode *PCSectionsMD = I.getMetadata(LLVMContext::MD_pcsections);
1186   if (PCSectionsMD) {
1187     InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1188         DAG, [&](SDNode *) { NodeInserted = true; });
1189   }
1190 
1191   visit(I.getOpcode(), I);
1192 
1193   if (!I.isTerminator() && !HasTailCall &&
1194       !isa<GCStatepointInst>(I)) // statepoints handle their exports internally
1195     CopyToExportRegsIfNeeded(&I);
1196 
1197   // Handle metadata.
1198   if (PCSectionsMD) {
1199     auto It = NodeMap.find(&I);
1200     if (It != NodeMap.end()) {
1201       DAG.addPCSections(It->second.getNode(), PCSectionsMD);
1202     } else if (NodeInserted) {
1203       // This should not happen; if it does, don't let it go unnoticed so we can
1204       // fix it. Relevant visit*() function is probably missing a setValue().
1205       errs() << "warning: losing !pcsections metadata ["
1206              << I.getModule()->getName() << "]\n";
1207       LLVM_DEBUG(I.dump());
1208       assert(false);
1209     }
1210   }
1211 
1212   CurInst = nullptr;
1213 }
1214 
1215 void SelectionDAGBuilder::visitPHI(const PHINode &) {
1216   llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
1217 }
1218 
1219 void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
1220   // Note: this doesn't use InstVisitor, because it has to work with
1221   // ConstantExpr's in addition to instructions.
1222   switch (Opcode) {
1223   default: llvm_unreachable("Unknown instruction type encountered!");
1224     // Build the switch statement using the Instruction.def file.
1225 #define HANDLE_INST(NUM, OPCODE, CLASS) \
1226     case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1227 #include "llvm/IR/Instruction.def"
1228   }
1229 }
1230 
1231 static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG,
1232                                             DILocalVariable *Variable,
1233                                             DebugLoc DL, unsigned Order,
1234                                             RawLocationWrapper Values,
1235                                             DIExpression *Expression) {
1236   if (!Values.hasArgList())
1237     return false;
1238   // For variadic dbg_values we will now insert an undef.
1239   // FIXME: We can potentially recover these!
1240   SmallVector<SDDbgOperand, 2> Locs;
1241   for (const Value *V : Values.location_ops()) {
1242     auto *Undef = UndefValue::get(V->getType());
1243     Locs.push_back(SDDbgOperand::fromConst(Undef));
1244   }
1245   SDDbgValue *SDV = DAG.getDbgValueList(Variable, Expression, Locs, {},
1246                                         /*IsIndirect=*/false, DL, Order,
1247                                         /*IsVariadic=*/true);
1248   DAG.AddDbgValue(SDV, /*isParameter=*/false);
1249   return true;
1250 }
1251 
1252 void SelectionDAGBuilder::addDanglingDebugInfo(const VarLocInfo *VarLoc,
1253                                                unsigned Order) {
1254   if (!handleDanglingVariadicDebugInfo(
1255           DAG,
1256           const_cast<DILocalVariable *>(DAG.getFunctionVarLocs()
1257                                             ->getVariable(VarLoc->VariableID)
1258                                             .getVariable()),
1259           VarLoc->DL, Order, VarLoc->Values, VarLoc->Expr)) {
1260     DanglingDebugInfoMap[VarLoc->Values.getVariableLocationOp(0)].emplace_back(
1261         VarLoc, Order);
1262   }
1263 }
1264 
1265 void SelectionDAGBuilder::addDanglingDebugInfo(const DbgValueInst *DI,
1266                                                unsigned Order) {
1267   // We treat variadic dbg_values differently at this stage.
1268   if (!handleDanglingVariadicDebugInfo(
1269           DAG, DI->getVariable(), DI->getDebugLoc(), Order,
1270           DI->getWrappedLocation(), DI->getExpression())) {
1271     // TODO: Dangling debug info will eventually either be resolved or produce
1272     // an Undef DBG_VALUE. However in the resolution case, a gap may appear
1273     // between the original dbg.value location and its resolved DBG_VALUE,
1274     // which we should ideally fill with an extra Undef DBG_VALUE.
1275     assert(DI->getNumVariableLocationOps() == 1 &&
1276            "DbgValueInst without an ArgList should have a single location "
1277            "operand.");
1278     DanglingDebugInfoMap[DI->getValue(0)].emplace_back(DI, Order);
1279   }
1280 }
1281 
1282 void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
1283                                                 const DIExpression *Expr) {
1284   auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1285     DIVariable *DanglingVariable = DDI.getVariable(DAG.getFunctionVarLocs());
1286     DIExpression *DanglingExpr = DDI.getExpression();
1287     if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
1288       LLVM_DEBUG(dbgs() << "Dropping dangling debug info for " << printDDI(DDI)
1289                         << "\n");
1290       return true;
1291     }
1292     return false;
1293   };
1294 
1295   for (auto &DDIMI : DanglingDebugInfoMap) {
1296     DanglingDebugInfoVector &DDIV = DDIMI.second;
1297 
1298     // If debug info is to be dropped, run it through final checks to see
1299     // whether it can be salvaged.
1300     for (auto &DDI : DDIV)
1301       if (isMatchingDbgValue(DDI))
1302         salvageUnresolvedDbgValue(DDI);
1303 
1304     erase_if(DDIV, isMatchingDbgValue);
1305   }
1306 }
1307 
1308 // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
1309 // generate the debug data structures now that we've seen its definition.
1310 void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
1311                                                    SDValue Val) {
1312   auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1313   if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1314     return;
1315 
1316   DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1317   for (auto &DDI : DDIV) {
1318     DebugLoc DL = DDI.getDebugLoc();
1319     unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
1320     unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1321     DILocalVariable *Variable = DDI.getVariable(DAG.getFunctionVarLocs());
1322     DIExpression *Expr = DDI.getExpression();
1323     assert(Variable->isValidLocationForIntrinsic(DL) &&
1324            "Expected inlined-at fields to agree");
1325     SDDbgValue *SDV;
1326     if (Val.getNode()) {
1327       // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
1328       // FuncArgumentDbgValue (it would be hoisted to the function entry, and if
1329       // we couldn't resolve it directly when examining the DbgValue intrinsic
1330       // in the first place we should not be more successful here). Unless we
1331       // have some test case that proves this to be correct we should avoid
1332       // calling EmitFuncArgumentDbgValue here.
1333       if (!EmitFuncArgumentDbgValue(V, Variable, Expr, DL,
1334                                     FuncArgumentDbgValueKind::Value, Val)) {
1335         LLVM_DEBUG(dbgs() << "Resolve dangling debug info for " << printDDI(DDI)
1336                           << "\n");
1337         LLVM_DEBUG(dbgs() << "  By mapping to:\n    "; Val.dump());
1338         // Increase the SDNodeOrder for the DbgValue here to make sure it is
1339         // inserted after the definition of Val when emitting the instructions
1340         // after ISel. An alternative could be to teach
1341         // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
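             // Illustrative example (not from the original comment): if the
             // dangling dbg.value was recorded with order 3 but Val's node has
             // IROrder 7, the DBG_VALUE below is created with order
             // max(3, 7) = 7 so that it is emitted after Val's definition.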
1342         LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
1343                    << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
1344                    << ValSDNodeOrder << "\n");
1345         SDV = getDbgValue(Val, Variable, Expr, DL,
1346                           std::max(DbgSDNodeOrder, ValSDNodeOrder));
1347         DAG.AddDbgValue(SDV, false);
1348       } else
1349         LLVM_DEBUG(dbgs() << "Resolved dangling debug info for "
1350                           << printDDI(DDI) << " in EmitFuncArgumentDbgValue\n");
1351     } else {
1352       LLVM_DEBUG(dbgs() << "Dropping debug info for " << printDDI(DDI) << "\n");
1353       auto Undef = UndefValue::get(V->getType());
1354       auto SDV =
1355           DAG.getConstantDbgValue(Variable, Expr, Undef, DL, DbgSDNodeOrder);
1356       DAG.AddDbgValue(SDV, false);
1357     }
1358   }
1359   DDIV.clear();
1360 }
1361 
1362 void SelectionDAGBuilder::salvageUnresolvedDbgValue(DanglingDebugInfo &DDI) {
1363   // TODO: For the variadic implementation, instead of only checking the fail
1364   // state of `handleDebugValue`, we need to know specifically which values were
1365   // invalid, so that we attempt to salvage only those values when processing
1366   // a DIArgList.
1367   Value *V = DDI.getVariableLocationOp(0);
1368   Value *OrigV = V;
1369   DILocalVariable *Var = DDI.getVariable(DAG.getFunctionVarLocs());
1370   DIExpression *Expr = DDI.getExpression();
1371   DebugLoc DL = DDI.getDebugLoc();
1372   unsigned SDOrder = DDI.getSDNodeOrder();
1373 
1374   // Currently we consider only dbg.value intrinsics -- we tell the salvager
1375   // that DW_OP_stack_value is desired.
1376   bool StackValue = true;
1377 
1378   // Can this Value be encoded without any further work?
1379   if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false))
1380     return;
1381 
1382   // Attempt to salvage back through as many instructions as possible. Bail if
1383   // a non-instruction is seen, such as a constant expression or global
1384   // variable. FIXME: Further work could recover those too.
1385   while (isa<Instruction>(V)) {
1386     Instruction &VAsInst = *cast<Instruction>(V);
1387     // Temporary "0", awaiting real implementation.
1388     SmallVector<uint64_t, 16> Ops;
1389     SmallVector<Value *, 4> AdditionalValues;
1390     V = salvageDebugInfoImpl(VAsInst, Expr->getNumLocationOperands(), Ops,
1391                              AdditionalValues);
1392     // If we cannot salvage any further, and haven't yet found a suitable debug
1393     // expression, bail out.
1394     if (!V)
1395       break;
1396 
1397     // TODO: If AdditionalValues isn't empty, then the salvage can only be
1398     // represented with a DBG_VALUE_LIST, so we give up. When we have support
1399     // here for variadic dbg_values, remove that condition.
1400     if (!AdditionalValues.empty())
1401       break;
1402 
1403     // New value and expr now represent this debuginfo.
1404     Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, StackValue);
1405 
1406     // Some kind of simplification occurred: check whether the operand of the
1407     // salvaged debug expression can be encoded in this DAG.
1408     if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false)) {
1409       LLVM_DEBUG(
1410           dbgs() << "Salvaged debug location info for:\n  " << *Var << "\n"
1411                  << *OrigV << "\nBy stripping back to:\n  " << *V << "\n");
1412       return;
1413     }
1414   }
1415 
1416   // This was the final opportunity to salvage this debug information, and it
1417   // couldn't be done. Place an undef DBG_VALUE at this location to terminate
1418   // any earlier variable location.
1419   assert(OrigV && "V shouldn't be null");
1420   auto *Undef = UndefValue::get(OrigV->getType());
1421   auto *SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
1422   DAG.AddDbgValue(SDV, false);
1423   LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n  " << printDDI(DDI)
1424                     << "\n");
1425 }
1426 
1427 void SelectionDAGBuilder::handleKillDebugValue(DILocalVariable *Var,
1428                                                DIExpression *Expr,
1429                                                DebugLoc DbgLoc,
1430                                                unsigned Order) {
1431   Value *Poison = PoisonValue::get(Type::getInt1Ty(*Context));
1432   DIExpression *NewExpr =
1433       const_cast<DIExpression *>(DIExpression::convertToUndefExpression(Expr));
1434   handleDebugValue(Poison, Var, NewExpr, DbgLoc, Order,
1435                    /*IsVariadic*/ false);
1436 }
1437 
1438 bool SelectionDAGBuilder::handleDebugValue(ArrayRef<const Value *> Values,
1439                                            DILocalVariable *Var,
1440                                            DIExpression *Expr, DebugLoc DbgLoc,
1441                                            unsigned Order, bool IsVariadic) {
1442   if (Values.empty())
1443     return true;
1444   SmallVector<SDDbgOperand> LocationOps;
1445   SmallVector<SDNode *> Dependencies;
1446   for (const Value *V : Values) {
1447     // Constant value.
1448     if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
1449         isa<ConstantPointerNull>(V)) {
1450       LocationOps.emplace_back(SDDbgOperand::fromConst(V));
1451       continue;
1452     }
1453 
1454     // Look through IntToPtr constants.
1455     if (auto *CE = dyn_cast<ConstantExpr>(V))
1456       if (CE->getOpcode() == Instruction::IntToPtr) {
1457         LocationOps.emplace_back(SDDbgOperand::fromConst(CE->getOperand(0)));
1458         continue;
1459       }
1460 
1461     // If the Value is a frame index, we can create a FrameIndex debug value
1462     // without relying on the DAG at all.
1463     if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1464       auto SI = FuncInfo.StaticAllocaMap.find(AI);
1465       if (SI != FuncInfo.StaticAllocaMap.end()) {
1466         LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(SI->second));
1467         continue;
1468       }
1469     }
1470 
1471     // Do not use getValue() in here; we don't want to generate code at
1472     // this point if it hasn't been done yet.
1473     SDValue N = NodeMap[V];
1474     if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
1475       N = UnusedArgNodeMap[V];
1476     if (N.getNode()) {
1477       // Only emit func arg dbg value for non-variadic dbg.values for now.
1478       if (!IsVariadic &&
1479           EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1480                                    FuncArgumentDbgValueKind::Value, N))
1481         return true;
1482       if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
1483         // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can
1484         // describe stack slot locations.
1485         //
1486         // Consider "int x = 0; int *px = &x;". There are two kinds of
1487         // interesting debug values here after optimization:
1488         //
1489         //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
1490         //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
1491         //
1492         // Both describe the direct values of their associated variables.
1493         Dependencies.push_back(N.getNode());
1494         LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(FISDN->getIndex()));
1495         continue;
1496       }
1497       LocationOps.emplace_back(
1498           SDDbgOperand::fromNode(N.getNode(), N.getResNo()));
1499       continue;
1500     }
1501 
1502     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1503     // Special rules apply for the first dbg.values of parameter variables in a
1504     // function. Identify them by the fact that they reference Argument Values,
1505     // that the variable is a parameter, and that it is a parameter of the
1506     // current function. We need to let them dangle until they get an SDNode.
1507     bool IsParamOfFunc =
1508         isa<Argument>(V) && Var->isParameter() && !DbgLoc.getInlinedAt();
1509     if (IsParamOfFunc)
1510       return false;
1511 
1512     // The value is not used in this block yet (or it would have an SDNode).
1513     // We still want the value to appear for the user if possible -- if it has
1514     // an associated VReg, we can refer to that instead.
1515     auto VMI = FuncInfo.ValueMap.find(V);
1516     if (VMI != FuncInfo.ValueMap.end()) {
1517       unsigned Reg = VMI->second;
1518       // If this is a PHI node, it may be split up into several MI PHI nodes
1519       // (in FunctionLoweringInfo::set).
1520       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
1521                        V->getType(), std::nullopt);
1522       if (RFV.occupiesMultipleRegs()) {
1523         // FIXME: We could potentially support variadic dbg_values here.
1524         if (IsVariadic)
1525           return false;
1526         unsigned Offset = 0;
1527         unsigned BitsToDescribe = 0;
1528         if (auto VarSize = Var->getSizeInBits())
1529           BitsToDescribe = *VarSize;
1530         if (auto Fragment = Expr->getFragmentInfo())
1531           BitsToDescribe = Fragment->SizeInBits;
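             // Illustrative note (not from the original source): a 64-bit
             // variable spread over two 32-bit registers is described by two
             // fragments covering bits [0,32) and [32,64); if only 40 bits
             // need describing, the second fragment is trimmed to 8 bits by
             // the FragmentSize computation below.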
1532         for (const auto &RegAndSize : RFV.getRegsAndSizes()) {
1533           // Bail out if all bits are described already.
1534           if (Offset >= BitsToDescribe)
1535             break;
1536           // TODO: handle scalable vectors.
1537           unsigned RegisterSize = RegAndSize.second;
1538           unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
1539                                       ? BitsToDescribe - Offset
1540                                       : RegisterSize;
1541           auto FragmentExpr = DIExpression::createFragmentExpression(
1542               Expr, Offset, FragmentSize);
1543           if (!FragmentExpr)
1544             continue;
1545           SDDbgValue *SDV = DAG.getVRegDbgValue(
1546               Var, *FragmentExpr, RegAndSize.first, false, DbgLoc, SDNodeOrder);
1547           DAG.AddDbgValue(SDV, false);
1548           Offset += RegisterSize;
1549         }
1550         return true;
1551       }
1552       // We can use simple vreg locations for variadic dbg_values as well.
1553       LocationOps.emplace_back(SDDbgOperand::fromVReg(Reg));
1554       continue;
1555     }
1556     // We failed to create an SDDbgOperand for V.
1557     return false;
1558   }
1559 
1560   // We have created an SDDbgOperand for each Value in Values.
1561   // Should use Order instead of SDNodeOrder?
1562   assert(!LocationOps.empty());
1563   SDDbgValue *SDV = DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1564                                         /*IsIndirect=*/false, DbgLoc,
1565                                         SDNodeOrder, IsVariadic);
1566   DAG.AddDbgValue(SDV, /*isParameter=*/false);
1567   return true;
1568 }
1569 
1570 void SelectionDAGBuilder::resolveOrClearDbgInfo() {
1571   // Try to fixup any remaining dangling debug info -- and drop it if we can't.
1572   for (auto &Pair : DanglingDebugInfoMap)
1573     for (auto &DDI : Pair.second)
1574       salvageUnresolvedDbgValue(DDI);
1575   clearDanglingDebugInfo();
1576 }
1577 
1578 /// getCopyFromRegs - If there was a virtual register allocated for the value V,
1579 /// emit CopyFromReg of the specified type Ty. Return empty SDValue() otherwise.
1580 SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
1581   DenseMap<const Value *, Register>::iterator It = FuncInfo.ValueMap.find(V);
1582   SDValue Result;
1583 
1584   if (It != FuncInfo.ValueMap.end()) {
1585     Register InReg = It->second;
1586 
1587     RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
1588                      DAG.getDataLayout(), InReg, Ty,
1589                      std::nullopt); // This is not an ABI copy.
1590     SDValue Chain = DAG.getEntryNode();
1591     Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
1592                                  V);
1593     resolveDanglingDebugInfo(V, Result);
1594   }
1595 
1596   return Result;
1597 }
1598 
1599 /// getValue - Return an SDValue for the given Value.
1600 SDValue SelectionDAGBuilder::getValue(const Value *V) {
1601   // If we already have an SDValue for this value, use it. It's important
1602   // to do this first, so that we don't create a CopyFromReg if we already
1603   // have a regular SDValue.
1604   SDValue &N = NodeMap[V];
1605   if (N.getNode()) return N;
1606 
1607   // If there's a virtual register allocated and initialized for this
1608   // value, use it.
1609   if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1610     return copyFromReg;
1611 
1612   // Otherwise create a new SDValue and remember it.
1613   SDValue Val = getValueImpl(V);
1614   NodeMap[V] = Val;
1615   resolveDanglingDebugInfo(V, Val);
1616   return Val;
1617 }
1618 
1619 /// getNonRegisterValue - Return an SDValue for the given Value, but
1620 /// don't look in FuncInfo.ValueMap for a virtual register.
1621 SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1622   // If we already have an SDValue for this value, use it.
1623   SDValue &N = NodeMap[V];
1624   if (N.getNode()) {
1625     if (isIntOrFPConstant(N)) {
1626       // Remove the debug location from the node as the node is about to be used
1627       // in a location which may differ from the original debug location.  This
1628       // is relevant to Constant and ConstantFP nodes because they can appear
1629       // as constant expressions inside PHI nodes.
1630       N->setDebugLoc(DebugLoc());
1631     }
1632     return N;
1633   }
1634 
1635   // Otherwise create a new SDValue and remember it.
1636   SDValue Val = getValueImpl(V);
1637   NodeMap[V] = Val;
1638   resolveDanglingDebugInfo(V, Val);
1639   return Val;
1640 }
1641 
1642 /// getValueImpl - Helper function for getValue and getNonRegisterValue.
1643 /// Create an SDValue for the given value.
1644 SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1645   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1646 
1647   if (const Constant *C = dyn_cast<Constant>(V)) {
1648     EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1649 
1650     if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1651       return DAG.getConstant(*CI, getCurSDLoc(), VT);
1652 
1653     if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1654       return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1655 
1656     if (isa<ConstantPointerNull>(C)) {
1657       unsigned AS = V->getType()->getPointerAddressSpace();
1658       return DAG.getConstant(0, getCurSDLoc(),
1659                              TLI.getPointerTy(DAG.getDataLayout(), AS));
1660     }
1661 
1662     if (match(C, m_VScale()))
1663       return DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1));
1664 
1665     if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1666       return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1667 
1668     if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1669       return DAG.getUNDEF(VT);
1670 
1671     if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1672       visit(CE->getOpcode(), *CE);
1673       SDValue N1 = NodeMap[V];
1674       assert(N1.getNode() && "visit didn't populate the NodeMap!");
1675       return N1;
1676     }
1677 
1678     if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1679       SmallVector<SDValue, 4> Constants;
1680       for (const Use &U : C->operands()) {
1681         SDNode *Val = getValue(U).getNode();
1682         // If the operand is an empty aggregate, there are no values.
1683         if (!Val) continue;
1684         // Add each leaf value from the operand to the Constants list
1685         // to form a flattened list of all the values.
1686         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1687           Constants.push_back(SDValue(Val, i));
1688       }
1689 
1690       return DAG.getMergeValues(Constants, getCurSDLoc());
1691     }
1692 
1693     if (const ConstantDataSequential *CDS =
1694           dyn_cast<ConstantDataSequential>(C)) {
1695       SmallVector<SDValue, 4> Ops;
1696       for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1697         SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1698         // Add each leaf value from the element to the Ops list
1699         // to form a flattened list of all the values.
1700         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1701           Ops.push_back(SDValue(Val, i));
1702       }
1703 
1704       if (isa<ArrayType>(CDS->getType()))
1705         return DAG.getMergeValues(Ops, getCurSDLoc());
1706       return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1707     }
1708 
1709     if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1710       assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1711              "Unknown struct or array constant!");
1712 
1713       SmallVector<EVT, 4> ValueVTs;
1714       ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1715       unsigned NumElts = ValueVTs.size();
1716       if (NumElts == 0)
1717         return SDValue(); // empty struct
1718       SmallVector<SDValue, 4> Constants(NumElts);
1719       for (unsigned i = 0; i != NumElts; ++i) {
1720         EVT EltVT = ValueVTs[i];
1721         if (isa<UndefValue>(C))
1722           Constants[i] = DAG.getUNDEF(EltVT);
1723         else if (EltVT.isFloatingPoint())
1724           Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1725         else
1726           Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1727       }
1728 
1729       return DAG.getMergeValues(Constants, getCurSDLoc());
1730     }
1731 
1732     if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1733       return DAG.getBlockAddress(BA, VT);
1734 
1735     if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(C))
1736       return getValue(Equiv->getGlobalValue());
1737 
1738     if (const auto *NC = dyn_cast<NoCFIValue>(C))
1739       return getValue(NC->getGlobalValue());
1740 
1741     if (VT == MVT::aarch64svcount) {
1742       assert(C->isNullValue() && "Can only zero this target type!");
1743       return DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT,
1744                          DAG.getConstant(0, getCurSDLoc(), MVT::nxv16i1));
1745     }
1746 
1747     VectorType *VecTy = cast<VectorType>(V->getType());
1748 
1749     // Now that we know the number and type of the elements, get that number of
1750     // elements into the Ops array based on what kind of constant it is.
1751     if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1752       SmallVector<SDValue, 16> Ops;
1753       unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1754       for (unsigned i = 0; i != NumElements; ++i)
1755         Ops.push_back(getValue(CV->getOperand(i)));
1756 
1757       return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1758     }
1759 
1760     if (isa<ConstantAggregateZero>(C)) {
1761       EVT EltVT =
1762           TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1763 
1764       SDValue Op;
1765       if (EltVT.isFloatingPoint())
1766         Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1767       else
1768         Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1769 
1770       return NodeMap[V] = DAG.getSplat(VT, getCurSDLoc(), Op);
1771     }
1772 
1773     llvm_unreachable("Unknown vector constant");
1774   }
1775 
1776   // If this is a static alloca, generate it as the frameindex instead of
1777   // computation.
1778   if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1779     DenseMap<const AllocaInst*, int>::iterator SI =
1780       FuncInfo.StaticAllocaMap.find(AI);
1781     if (SI != FuncInfo.StaticAllocaMap.end())
1782       return DAG.getFrameIndex(
1783           SI->second, TLI.getValueType(DAG.getDataLayout(), AI->getType()));
1784   }
1785 
1786   // If this is an instruction which fast-isel has deferred, select it now.
1787   if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1788     Register InReg = FuncInfo.InitializeRegForValue(Inst);
1789 
1790     RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1791                      Inst->getType(), std::nullopt);
1792     SDValue Chain = DAG.getEntryNode();
1793     return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1794   }
1795 
1796   if (const MetadataAsValue *MD = dyn_cast<MetadataAsValue>(V))
1797     return DAG.getMDNode(cast<MDNode>(MD->getMetadata()));
1798 
1799   if (const auto *BB = dyn_cast<BasicBlock>(V))
1800     return DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
1801 
1802   llvm_unreachable("Can't get register for value!");
1803 }
1804 
1805 void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
1806   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1807   bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
1808   bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
1809   bool IsSEH = isAsynchronousEHPersonality(Pers);
1810   MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
1811   if (!IsSEH)
1812     CatchPadMBB->setIsEHScopeEntry();
1813   // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
1814   if (IsMSVCCXX || IsCoreCLR)
1815     CatchPadMBB->setIsEHFuncletEntry();
1816 }
1817 
1818 void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
1819   // Update machine-CFG edge.
1820   MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
1821   FuncInfo.MBB->addSuccessor(TargetMBB);
1822   TargetMBB->setIsEHCatchretTarget(true);
1823   DAG.getMachineFunction().setHasEHCatchret(true);
1824 
1825   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1826   bool IsSEH = isAsynchronousEHPersonality(Pers);
1827   if (IsSEH) {
1828     // If this is not a fall-through branch or optimizations are switched off,
1829     // emit the branch.
1830     if (TargetMBB != NextBlock(FuncInfo.MBB) ||
1831         TM.getOptLevel() == CodeGenOptLevel::None)
1832       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
1833                               getControlRoot(), DAG.getBasicBlock(TargetMBB)));
1834     return;
1835   }
1836 
1837   // Figure out the funclet membership for the catchret's successor.
1838   // This will be used by the FuncletLayout pass to determine how to order the
1839   // BB's.
1840   // A 'catchret' returns to the outer scope's color.
1841   Value *ParentPad = I.getCatchSwitchParentPad();
1842   const BasicBlock *SuccessorColor;
1843   if (isa<ConstantTokenNone>(ParentPad))
1844     SuccessorColor = &FuncInfo.Fn->getEntryBlock();
1845   else
1846     SuccessorColor = cast<Instruction>(ParentPad)->getParent();
1847   assert(SuccessorColor && "No parent funclet for catchret!");
1848   MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
1849   assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
1850 
1851   // Create the terminator node.
1852   SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
1853                             getControlRoot(), DAG.getBasicBlock(TargetMBB),
1854                             DAG.getBasicBlock(SuccessorColorMBB));
1855   DAG.setRoot(Ret);
1856 }
1857 
1858 void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
1859   // Don't emit any special code for the cleanuppad instruction. It just marks
1860   // the start of an EH scope/funclet.
1861   FuncInfo.MBB->setIsEHScopeEntry();
1862   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1863   if (Pers != EHPersonality::Wasm_CXX) {
1864     FuncInfo.MBB->setIsEHFuncletEntry();
1865     FuncInfo.MBB->setIsCleanupFuncletEntry();
1866   }
1867 }
1868 
1869 // In wasm EH, even though a catchpad may not catch an exception if a tag does
1870 // not match, it is OK to add only the first unwind destination catchpad to the
1871 // successors, because there will be at least one invoke instruction within the
1872 // catch scope that points to the next unwind destination, if one exists, so
1873 // CFGSort cannot mess up the BB sorting order.
1874 // (All catchpads with 'catch (type)' clauses have a 'llvm.rethrow' intrinsic
1875 // call within them, and catchpads consisting only of 'catch (...)' have a
1876 // '__cxa_end_catch' call within them, both of which generate invokes in case
1877 // the next unwind destination exists, i.e., the next unwind destination is not
1878 // the caller.)
1879 //
1880 // Having at most one EH pad successor is also simpler and helps later
1881 // transformations.
1882 //
1883 // For example,
1884 // current:
1885 //   invoke void @foo to ... unwind label %catch.dispatch
1886 // catch.dispatch:
1887 //   %0 = catchswitch within ... [label %catch.start] unwind label %next
1888 // catch.start:
1889 //   ...
1890 //   ... in this BB or some other child BB dominated by this BB there will be an
1891 //   invoke that points to 'next' BB as an unwind destination
1892 //
1893 // next: ; We don't need to add this to 'current' BB's successor
1894 //   ...
1895 static void findWasmUnwindDestinations(
1896     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1897     BranchProbability Prob,
1898     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1899         &UnwindDests) {
1900   while (EHPadBB) {
1901     const Instruction *Pad = EHPadBB->getFirstNonPHI();
1902     if (isa<CleanupPadInst>(Pad)) {
1903       // Stop on cleanup pads.
1904       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1905       UnwindDests.back().first->setIsEHScopeEntry();
1906       break;
1907     } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1908       // Add the catchpad handlers to the possible destinations. We don't
1909       // continue to the unwind destination of the catchswitch for wasm.
1910       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1911         UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1912         UnwindDests.back().first->setIsEHScopeEntry();
1913       }
1914       break;
1915     } else {
1916       continue;
1917     }
1918   }
1919 }
1920 
1921 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
1922 /// many places it could ultimately go. In the IR, we have a single unwind
1923 /// destination, but in the machine CFG, we enumerate all the possible blocks.
1924 /// This function skips over imaginary basic blocks that hold catchswitch
1925 /// instructions, and finds all the "real" machine
1926 /// basic block destinations. As those destinations may not be successors of
1927 /// EHPadBB, here we also calculate the edge probability to those destinations.
1928 /// The passed-in Prob is the edge probability to EHPadBB.
1929 static void findUnwindDestinations(
1930     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1931     BranchProbability Prob,
1932     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1933         &UnwindDests) {
1934   EHPersonality Personality =
1935     classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1936   bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
1937   bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
1938   bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
1939   bool IsSEH = isAsynchronousEHPersonality(Personality);
1940 
1941   if (IsWasmCXX) {
1942     findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests);
1943     assert(UnwindDests.size() <= 1 &&
1944            "There should be at most one unwind destination for wasm");
1945     return;
1946   }
1947 
1948   while (EHPadBB) {
1949     const Instruction *Pad = EHPadBB->getFirstNonPHI();
1950     BasicBlock *NewEHPadBB = nullptr;
1951     if (isa<LandingPadInst>(Pad)) {
1952       // Stop on landingpads. They are not funclets.
1953       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1954       break;
1955     } else if (isa<CleanupPadInst>(Pad)) {
1956       // Stop on cleanup pads. Cleanups are always funclet entries for all known
1957       // personalities.
1958       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1959       UnwindDests.back().first->setIsEHScopeEntry();
1960       UnwindDests.back().first->setIsEHFuncletEntry();
1961       break;
1962     } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1963       // Add the catchpad handlers to the possible destinations.
1964       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1965         UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1966         // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
1967         if (IsMSVCCXX || IsCoreCLR)
1968           UnwindDests.back().first->setIsEHFuncletEntry();
1969         if (!IsSEH)
1970           UnwindDests.back().first->setIsEHScopeEntry();
1971       }
1972       NewEHPadBB = CatchSwitch->getUnwindDest();
1973     } else {
1974       continue;
1975     }
1976 
1977     BranchProbabilityInfo *BPI = FuncInfo.BPI;
1978     if (BPI && NewEHPadBB)
1979       Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
1980     EHPadBB = NewEHPadBB;
1981   }
1982 }
1983 
1984 void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
1985   // Update successor info.
1986   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
1987   auto UnwindDest = I.getUnwindDest();
1988   BranchProbabilityInfo *BPI = FuncInfo.BPI;
1989   BranchProbability UnwindDestProb =
1990       (BPI && UnwindDest)
1991           ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
1992           : BranchProbability::getZero();
1993   findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
1994   for (auto &UnwindDest : UnwindDests) {
1995     UnwindDest.first->setIsEHPad();
1996     addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
1997   }
1998   FuncInfo.MBB->normalizeSuccProbs();
1999 
2000   // Create the terminator node.
2001   SDValue Ret =
2002       DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
2003   DAG.setRoot(Ret);
2004 }
2005 
2006 void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
2007   report_fatal_error("visitCatchSwitch not yet implemented!");
2008 }
2009 
2010 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
2011   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2012   auto &DL = DAG.getDataLayout();
2013   SDValue Chain = getControlRoot();
2014   SmallVector<ISD::OutputArg, 8> Outs;
2015   SmallVector<SDValue, 8> OutVals;
2016 
2017   // Calls to @llvm.experimental.deoptimize don't generate a return value, so
2018   // lower
2019   //
2020   //   %val = call <ty> @llvm.experimental.deoptimize()
2021   //   ret <ty> %val
2022   //
2023   // differently.
2024   if (I.getParent()->getTerminatingDeoptimizeCall()) {
2025     LowerDeoptimizingReturn();
2026     return;
2027   }
2028 
2029   if (!FuncInfo.CanLowerReturn) {
2030     unsigned DemoteReg = FuncInfo.DemoteRegister;
2031     const Function *F = I.getParent()->getParent();
2032 
2033     // Emit a store of the return value through the virtual register.
2034     // Leave Outs empty so that LowerReturn won't try to load return
2035     // registers the usual way.
2036     SmallVector<EVT, 1> PtrValueVTs;
2037     ComputeValueVTs(TLI, DL,
2038                     PointerType::get(F->getContext(),
2039                                      DAG.getDataLayout().getAllocaAddrSpace()),
2040                     PtrValueVTs);
2041 
2042     SDValue RetPtr =
2043         DAG.getCopyFromReg(Chain, getCurSDLoc(), DemoteReg, PtrValueVTs[0]);
2044     SDValue RetOp = getValue(I.getOperand(0));
2045 
2046     SmallVector<EVT, 4> ValueVTs, MemVTs;
2047     SmallVector<uint64_t, 4> Offsets;
2048     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
2049                     &Offsets, 0);
2050     unsigned NumValues = ValueVTs.size();
2051 
2052     SmallVector<SDValue, 4> Chains(NumValues);
2053     Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType());
2054     for (unsigned i = 0; i != NumValues; ++i) {
2055       // An aggregate return value cannot wrap around the address space, so
2056       // offsets to its parts don't wrap either.
2057       SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr,
2058                                            TypeSize::Fixed(Offsets[i]));
2059 
2060       SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
2061       if (MemVTs[i] != ValueVTs[i])
2062         Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]);
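           // Illustrative example (not from the original source): under a
           // typical DataLayout an {i32, i32} return value has parts at
           // Offsets 0 and 4, so with a hypothetical BaseAlign of 8 the
           // commonAlignment call below yields store alignments of 8 and 4.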
2063       Chains[i] = DAG.getStore(
2064           Chain, getCurSDLoc(), Val,
2065           // FIXME: better loc info would be nice.
2066           Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()),
2067           commonAlignment(BaseAlign, Offsets[i]));
2068     }
2069 
2070     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
2071                         MVT::Other, Chains);
2072   } else if (I.getNumOperands() != 0) {
2073     SmallVector<EVT, 4> ValueVTs;
2074     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
2075     unsigned NumValues = ValueVTs.size();
2076     if (NumValues) {
2077       SDValue RetOp = getValue(I.getOperand(0));
2078 
2079       const Function *F = I.getParent()->getParent();
2080 
2081       bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
2082           I.getOperand(0)->getType(), F->getCallingConv(),
2083           /*IsVarArg*/ false, DL);
2084 
2085       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
2086       if (F->getAttributes().hasRetAttr(Attribute::SExt))
2087         ExtendKind = ISD::SIGN_EXTEND;
2088       else if (F->getAttributes().hasRetAttr(Attribute::ZExt))
2089         ExtendKind = ISD::ZERO_EXTEND;
2090 
2091       LLVMContext &Context = F->getContext();
2092       bool RetInReg = F->getAttributes().hasRetAttr(Attribute::InReg);
2093 
2094       for (unsigned j = 0; j != NumValues; ++j) {
2095         EVT VT = ValueVTs[j];
2096 
2097         if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
2098           VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
2099 
2100         CallingConv::ID CC = F->getCallingConv();
2101 
2102         unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
2103         MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
2104         SmallVector<SDValue, 4> Parts(NumParts);
2105         getCopyToParts(DAG, getCurSDLoc(),
2106                        SDValue(RetOp.getNode(), RetOp.getResNo() + j),
2107                        &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
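             // Illustrative example (not from the original source): returning
             // an i64 on a target whose widest legal integer type is i32 gives
             // NumParts == 2 and PartVT == MVT::i32, so this single IR value
             // contributes two Outs/OutVals entries in the loop below.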
2108 
2109         // 'inreg' on the function refers to the return value.
2110         ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2111         if (RetInReg)
2112           Flags.setInReg();
2113 
2114         if (I.getOperand(0)->getType()->isPointerTy()) {
2115           Flags.setPointer();
2116           Flags.setPointerAddrSpace(
2117               cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
2118         }
2119 
2120         if (NeedsRegBlock) {
2121           Flags.setInConsecutiveRegs();
2122           if (j == NumValues - 1)
2123             Flags.setInConsecutiveRegsLast();
2124         }
2125 
2126         // Propagate extension type if any
2127         if (ExtendKind == ISD::SIGN_EXTEND)
2128           Flags.setSExt();
2129         else if (ExtendKind == ISD::ZERO_EXTEND)
2130           Flags.setZExt();
2131 
2132         for (unsigned i = 0; i < NumParts; ++i) {
2133           Outs.push_back(ISD::OutputArg(Flags,
2134                                         Parts[i].getValueType().getSimpleVT(),
2135                                         VT, /*isfixed=*/true, 0, 0));
2136           OutVals.push_back(Parts[i]);
2137         }
2138       }
2139     }
2140   }
2141 
2142   // Push the swifterror virtual register as the last element of Outs. This
2143   // makes sure the swifterror virtual register will be returned in the
2144   // swifterror physical register.
2145   const Function *F = I.getParent()->getParent();
2146   if (TLI.supportSwiftError() &&
2147       F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2148     assert(SwiftError.getFunctionArg() && "Need a swift error argument");
2149     ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2150     Flags.setSwiftError();
2151     Outs.push_back(ISD::OutputArg(
2152         Flags, /*vt=*/TLI.getPointerTy(DL), /*argvt=*/EVT(TLI.getPointerTy(DL)),
2153         /*isfixed=*/true, /*origidx=*/1, /*partOffs=*/0));
2154     // Create SDNode for the swifterror virtual register.
2155     OutVals.push_back(
2156         DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
2157                             &I, FuncInfo.MBB, SwiftError.getFunctionArg()),
2158                         EVT(TLI.getPointerTy(DL))));
2159   }
2160 
2161   bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
2162   CallingConv::ID CallConv =
2163     DAG.getMachineFunction().getFunction().getCallingConv();
2164   Chain = DAG.getTargetLoweringInfo().LowerReturn(
2165       Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
2166 
2167   // Verify that the target's LowerReturn behaved as expected.
2168   assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
2169          "LowerReturn didn't return a valid chain!");
2170 
2171   // Update the DAG with the new chain value resulting from return lowering.
2172   DAG.setRoot(Chain);
2173 }
2174 
2175 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
2176 /// created for it, emit nodes to copy the value into the virtual
2177 /// registers.
2178 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
2179   // Skip empty types
2180   if (V->getType()->isEmptyTy())
2181     return;
2182 
2183   DenseMap<const Value *, Register>::iterator VMI = FuncInfo.ValueMap.find(V);
2184   if (VMI != FuncInfo.ValueMap.end()) {
2185     assert((!V->use_empty() || isa<CallBrInst>(V)) &&
2186            "Unused value assigned virtual registers!");
2187     CopyValueToVirtualRegister(V, VMI->second);
2188   }
2189 }
2190 
2191 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
2192 /// the current basic block, add it to ValueMap now so that we'll get a
2193 /// CopyTo/FromReg.
2194 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
2195   // No need to export constants.
2196   if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
2197 
2198   // Already exported?
2199   if (FuncInfo.isExportedInst(V)) return;
2200 
2201   Register Reg = FuncInfo.InitializeRegForValue(V);
2202   CopyValueToVirtualRegister(V, Reg);
2203 }
2204 
2205 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
2206                                                      const BasicBlock *FromBB) {
2207   // The operands of the setcc have to be in this block.  We don't know
2208   // how to export them from some other block.
2209   if (const Instruction *VI = dyn_cast<Instruction>(V)) {
2210     // Can export from current BB.
2211     if (VI->getParent() == FromBB)
2212       return true;
2213 
2214     // Is already exported, noop.
2215     return FuncInfo.isExportedInst(V);
2216   }
2217 
2218   // If this is an argument, we can export it if the BB is the entry block or
2219   // if it is already exported.
2220   if (isa<Argument>(V)) {
2221     if (FromBB->isEntryBlock())
2222       return true;
2223 
2224     // Otherwise, can only export this if it is already exported.
2225     return FuncInfo.isExportedInst(V);
2226   }
2227 
2228   // Otherwise, constants can always be exported.
2229   return true;
2230 }
2231 
2232 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
2233 BranchProbability
2234 SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
2235                                         const MachineBasicBlock *Dst) const {
2236   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2237   const BasicBlock *SrcBB = Src->getBasicBlock();
2238   const BasicBlock *DstBB = Dst->getBasicBlock();
2239   if (!BPI) {
2240     // If BPI is not available, set the default probability as 1 / N, where N is
2241     // the number of successors.
2242     auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
2243     return BranchProbability(1, SuccSize);
2244   }
2245   return BPI->getEdgeProbability(SrcBB, DstBB);
2246 }
2247 
2248 void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
2249                                                MachineBasicBlock *Dst,
2250                                                BranchProbability Prob) {
2251   if (!FuncInfo.BPI)
2252     Src->addSuccessorWithoutProb(Dst);
2253   else {
2254     if (Prob.isUnknown())
2255       Prob = getEdgeProbability(Src, Dst);
2256     Src->addSuccessor(Dst, Prob);
2257   }
2258 }
2259 
2260 static bool InBlock(const Value *V, const BasicBlock *BB) {
2261   if (const Instruction *I = dyn_cast<Instruction>(V))
2262     return I->getParent() == BB;
2263   return true;
2264 }
2265 
2266 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
2267 /// This function emits a branch and is used at the leaves of an OR or an
2268 /// AND operator tree.
2269 void
2270 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
2271                                                   MachineBasicBlock *TBB,
2272                                                   MachineBasicBlock *FBB,
2273                                                   MachineBasicBlock *CurBB,
2274                                                   MachineBasicBlock *SwitchBB,
2275                                                   BranchProbability TProb,
2276                                                   BranchProbability FProb,
2277                                                   bool InvertCond) {
2278   const BasicBlock *BB = CurBB->getBasicBlock();
2279 
2280   // If the leaf of the tree is a comparison, merge the condition into
2281   // the caseblock.
2282   if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
2283     // The operands of the cmp have to be in this block.  We don't know
2284     // how to export them from some other block.  If this is the first block
2285     // of the sequence, no exporting is needed.
2286     if (CurBB == SwitchBB ||
2287         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
2288          isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
2289       ISD::CondCode Condition;
2290       if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
2291         ICmpInst::Predicate Pred =
2292             InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2293         Condition = getICmpCondCode(Pred);
2294       } else {
2295         const FCmpInst *FC = cast<FCmpInst>(Cond);
2296         FCmpInst::Predicate Pred =
2297             InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2298         Condition = getFCmpCondCode(Pred);
2299         if (TM.Options.NoNaNsFPMath)
2300           Condition = getFCmpCodeWithoutNaN(Condition);
2301       }
2302 
2303       CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
2304                    TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2305       SL->SwitchCases.push_back(CB);
2306       return;
2307     }
2308   }
2309 
2310   // Create a CaseBlock record representing this branch.
2311   ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
2312   CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
2313                nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2314   SL->SwitchCases.push_back(CB);
2315 }
2316 
2317 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
2318                                                MachineBasicBlock *TBB,
2319                                                MachineBasicBlock *FBB,
2320                                                MachineBasicBlock *CurBB,
2321                                                MachineBasicBlock *SwitchBB,
2322                                                Instruction::BinaryOps Opc,
2323                                                BranchProbability TProb,
2324                                                BranchProbability FProb,
2325                                                bool InvertCond) {
2326   // Skip over nodes that are not part of the tree and remember to invert the
2327   // op and operands at the next level.
2328   Value *NotCond;
2329   if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
2330       InBlock(NotCond, CurBB->getBasicBlock())) {
2331     FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
2332                          !InvertCond);
2333     return;
2334   }
2335 
2336   const Instruction *BOp = dyn_cast<Instruction>(Cond);
2337   const Value *BOpOp0, *BOpOp1;
2338   // Compute the effective opcode for Cond, taking into account whether it needs
2339   // to be inverted, e.g.
2340   //   and (not (or A, B)), C
2341   // gets lowered as
2342   //   and (and (not A, not B), C)
2343   Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
2344   if (BOp) {
2345     BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
2346                ? Instruction::And
2347                : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
2348                       ? Instruction::Or
2349                       : (Instruction::BinaryOps)0);
2350     if (InvertCond) {
2351       if (BOpc == Instruction::And)
2352         BOpc = Instruction::Or;
2353       else if (BOpc == Instruction::Or)
2354         BOpc = Instruction::And;
2355     }
2356   }
2357 
2358   // If this node is not part of the or/and tree, emit it as a branch.
2359   // Note that all nodes in the tree should have same opcode.
2360   bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
2361   if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
2362       !InBlock(BOpOp0, CurBB->getBasicBlock()) ||
2363       !InBlock(BOpOp1, CurBB->getBasicBlock())) {
2364     EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
2365                                  TProb, FProb, InvertCond);
2366     return;
2367   }
2368 
2369   //  Create TmpBB after CurBB.
2370   MachineFunction::iterator BBI(CurBB);
2371   MachineFunction &MF = DAG.getMachineFunction();
2372   MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
2373   CurBB->getParent()->insert(++BBI, TmpBB);
2374 
2375   if (Opc == Instruction::Or) {
2376     // Codegen X | Y as:
2377     // BB1:
2378     //   jmp_if_X TBB
2379     //   jmp TmpBB
2380     // TmpBB:
2381     //   jmp_if_Y TBB
2382     //   jmp FBB
2383     //
2384 
2385     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2386     // The requirement is that
2387     //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
2388     //     = TrueProb for original BB.
2389     // Assuming the original probabilities are A and B, one choice is to set
2390     // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
2391     // A/(1+B) and 2B/(1+B). This choice assumes that
2392     //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
2393     // Another choice is to assume TrueProb for BB1 equals to TrueProb for
2394     // TmpBB, but the math is more complicated.
2395 
2396     auto NewTrueProb = TProb / 2;
2397     auto NewFalseProb = TProb / 2 + FProb;
2398     // Emit the LHS condition.
2399     FindMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
2400                          NewFalseProb, InvertCond);
2401 
2402     // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
2403     SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
2404     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2405     // Emit the RHS condition into TmpBB.
2406     FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2407                          Probs[1], InvertCond);
2408   } else {
2409     assert(Opc == Instruction::And && "Unknown merge op!");
2410     // Codegen X & Y as:
2411     // BB1:
2412     //   jmp_if_X TmpBB
2413     //   jmp FBB
2414     // TmpBB:
2415     //   jmp_if_Y TBB
2416     //   jmp FBB
2417     //
2418     //  This requires creation of TmpBB after CurBB.
2419 
2420     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2421     // The requirement is that
2422     //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
2423     //     = FalseProb for original BB.
2424     // Assuming the original probabilities are A and B, one choice is to set
2425     // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
2426     // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
2427     // TrueProb for BB1 * FalseProb for TmpBB.
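         // Worked example (illustrative, not from the original comment): with
         // A = 1/4 and B = 3/4, BB1 gets probabilities 5/8 and 3/8, TmpBB gets
         // 2A/(1+A) = 2/5 and B/(1+A) = 3/5, and the requirement holds since
         // 3/8 + 5/8 * 3/5 = 3/4 = B.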
2428 
2429     auto NewTrueProb = TProb + FProb / 2;
2430     auto NewFalseProb = FProb / 2;
2431     // Emit the LHS condition.
2432     FindMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
2433                          NewFalseProb, InvertCond);
2434 
2435     // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
2436     SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
2437     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2438     // Emit the RHS condition into TmpBB.
2439     FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2440                          Probs[1], InvertCond);
2441   }
2442 }
2443 
2444 /// If the set of cases should be emitted as a series of branches, return true.
2445 /// If we should emit this as a bunch of and/or'd together conditions, return
2446 /// false.
2447 bool
2448 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
2449   if (Cases.size() != 2) return true;
2450 
2451   // If this is two comparisons of the same values or'd or and'd together, they
2452   // will get folded into a single comparison, so don't emit two blocks.
2453   if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2454        Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2455       (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2456        Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2457     return false;
2458   }
2459 
2460   // Handle: (X != null) | (Y != null) --> (X|Y) != 0
2461   // Handle: (X == null) & (Y == null) --> (X|Y) == 0
2462   if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2463       Cases[0].CC == Cases[1].CC &&
2464       isa<Constant>(Cases[0].CmpRHS) &&
2465       cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2466     if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2467       return false;
2468     if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2469       return false;
2470   }
2471 
2472   return true;
2473 }
2474 
2475 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
2476   MachineBasicBlock *BrMBB = FuncInfo.MBB;
2477 
2478   // Update machine-CFG edges.
2479   MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
2480 
2481   if (I.isUnconditional()) {
2482     // Update machine-CFG edges.
2483     BrMBB->addSuccessor(Succ0MBB);
2484 
2485     // If this is not a fall-through branch or optimizations are switched off,
2486     // emit the branch.
2487     if (Succ0MBB != NextBlock(BrMBB) ||
2488         TM.getOptLevel() == CodeGenOptLevel::None) {
2489       auto Br = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
2490                             getControlRoot(), DAG.getBasicBlock(Succ0MBB));
2491       setValue(&I, Br);
2492       DAG.setRoot(Br);
2493     }
2494 
2495     return;
2496   }
2497 
2498   // If this condition is one of the special cases we handle, do special stuff
2499   // now.
2500   const Value *CondVal = I.getCondition();
2501   MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
2502 
2503   // If this is a series of conditions that are or'd or and'd together, emit
2504   // this as a sequence of branches instead of setcc's with and/or operations.
2505   // As long as jumps are not expensive (exceptions for multi-use logic ops,
2506   // unpredictable branches, and vector extracts because those jumps are likely
2507   // expensive for any target), this should improve performance.
2508   // For example, instead of something like:
2509   //     cmp A, B
2510   //     C = seteq
2511   //     cmp D, E
2512   //     F = setle
2513   //     or C, F
2514   //     jnz foo
2515   // Emit:
2516   //     cmp A, B
2517   //     je foo
2518   //     cmp D, E
2519   //     jle foo
2520   const Instruction *BOp = dyn_cast<Instruction>(CondVal);
2521   if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2522       BOp->hasOneUse() && !I.hasMetadata(LLVMContext::MD_unpredictable)) {
2523     Value *Vec;
2524     const Value *BOp0, *BOp1;
2525     Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
2526     if (match(BOp, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
2527       Opcode = Instruction::And;
2528     else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
2529       Opcode = Instruction::Or;
2530 
2531     if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
2532                     match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
2533       FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, Opcode,
2534                            getEdgeProbability(BrMBB, Succ0MBB),
2535                            getEdgeProbability(BrMBB, Succ1MBB),
2536                            /*InvertCond=*/false);
2537       // If the compares in later blocks need to use values not currently
2538       // exported from this block, export them now.  This block should always
2539       // be the first entry.
2540       assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
2541 
2542       // Allow some cases to be rejected.
2543       if (ShouldEmitAsBranches(SL->SwitchCases)) {
2544         for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
2545           ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
2546           ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
2547         }
2548 
2549         // Emit the branch for this block.
2550         visitSwitchCase(SL->SwitchCases[0], BrMBB);
2551         SL->SwitchCases.erase(SL->SwitchCases.begin());
2552         return;
2553       }
2554 
2555       // Okay, we decided not to do this; remove any inserted MBBs and clear
2556       // SwitchCases.
2557       for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
2558         FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);
2559 
2560       SL->SwitchCases.clear();
2561     }
2562   }
2563 
2564   // Create a CaseBlock record representing this branch.
2565   CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
2566                nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());
2567 
2568   // Use visitSwitchCase to actually insert the fast branch sequence for this
2569   // cond branch.
2570   visitSwitchCase(CB, BrMBB);
2571 }
2572 
2573 /// visitSwitchCase - Emits the necessary code to represent a single node in
2574 /// the binary search tree resulting from lowering a switch instruction.
2575 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
2576                                           MachineBasicBlock *SwitchBB) {
2577   SDValue Cond;
2578   SDValue CondLHS = getValue(CB.CmpLHS);
2579   SDLoc dl = CB.DL;
2580 
2581   if (CB.CC == ISD::SETTRUE) {
2582     // Branch or fall through to TrueBB.
2583     addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2584     SwitchBB->normalizeSuccProbs();
2585     if (CB.TrueBB != NextBlock(SwitchBB)) {
2586       DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
2587                               DAG.getBasicBlock(CB.TrueBB)));
2588     }
2589     return;
2590   }
2591 
2592   auto &TLI = DAG.getTargetLoweringInfo();
2593   EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());
2594 
2595   // Build the setcc now.
2596   if (!CB.CmpMHS) {
2597     // Fold "(X == true)" to X and "(X == false)" to !X to
2598     // handle common cases produced by branch lowering.
2599     if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
2600         CB.CC == ISD::SETEQ)
2601       Cond = CondLHS;
2602     else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
2603              CB.CC == ISD::SETEQ) {
2604       SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
2605       Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
2606     } else {
2607       SDValue CondRHS = getValue(CB.CmpRHS);
2608 
2609       // If a pointer's DAG type is larger than its memory type then the DAG
2610       // values are zero-extended. This breaks signed comparisons so truncate
2611       // back to the underlying type before doing the compare.
2612       if (CondLHS.getValueType() != MemVT) {
2613         CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
2614         CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
2615       }
2616       Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
2617     }
2618   } else {
2619     assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
2620 
2621     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2622     const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2623 
2624     SDValue CmpOp = getValue(CB.CmpMHS);
2625     EVT VT = CmpOp.getValueType();
2626 
2627     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2628       Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2629                           ISD::SETLE);
2630     } else {
2631       SDValue SUB = DAG.getNode(ISD::SUB, dl,
2632                                 VT, CmpOp, DAG.getConstant(Low, dl, VT));
2633       Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2634                           DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2635     }
2636   }
2637 
2638   // Update successor info
2639   addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2640   // TrueBB and FalseBB are always different unless the incoming IR is
2641   // degenerate. This only happens when running llc on weird IR.
2642   if (CB.TrueBB != CB.FalseBB)
2643     addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2644   SwitchBB->normalizeSuccProbs();
2645 
2646   // If the lhs block is the next block, invert the condition so that we can
2647   // fall through to the lhs instead of the rhs block.
2648   if (CB.TrueBB == NextBlock(SwitchBB)) {
2649     std::swap(CB.TrueBB, CB.FalseBB);
2650     SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2651     Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2652   }
2653 
2654   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2655                                MVT::Other, getControlRoot(), Cond,
2656                                DAG.getBasicBlock(CB.TrueBB));
2657 
2658   setValue(CurInst, BrCond);
2659 
2660   // Insert the false branch. Do this even if it's a fall through branch,
2661   // this makes it easier to do DAG optimizations which require inverting
2662   // the branch condition.
2663   BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2664                        DAG.getBasicBlock(CB.FalseBB));
2665 
2666   DAG.setRoot(BrCond);
2667 }
2668 
2669 /// visitJumpTable - Emit JumpTable node in the current MBB
2670 void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
2671   // Emit the code for the jump table
2672   assert(JT.Reg != -1U && "Should lower JT Header first!");
2673   EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2674   SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
2675                                      JT.Reg, PTy);
2676   SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
2677   SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
2678                                     MVT::Other, Index.getValue(1),
2679                                     Table, Index);
2680   DAG.setRoot(BrJumpTable);
2681 }
2682 
2683 /// visitJumpTableHeader - This function emits the necessary code to produce an
2684 /// index into the JumpTable from the value being switched on.
2685 void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
2686                                                JumpTableHeader &JTH,
2687                                                MachineBasicBlock *SwitchBB) {
2688   SDLoc dl = getCurSDLoc();
2689 
2690   // Subtract the lowest switch case value from the value being switched on.
2691   SDValue SwitchOp = getValue(JTH.SValue);
2692   EVT VT = SwitchOp.getValueType();
2693   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2694                             DAG.getConstant(JTH.First, dl, VT));
2695 
2696   // The SDNode we just created, which holds the value being switched on minus
2697   // the smallest case value, needs to be copied to a virtual register so it
2698   // can be used as an index into the jump table in a subsequent basic block.
2699   // This value may be smaller or larger than the target's pointer type, and
2700   // may therefore require extension or truncation.
2701   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2702   SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
2703 
2704   unsigned JumpTableReg =
2705       FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
2706   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
2707                                     JumpTableReg, SwitchOp);
2708   JT.Reg = JumpTableReg;
2709 
2710   if (!JTH.FallthroughUnreachable) {
2711     // Emit the range check for the jump table, and branch to the default block
2712     // for the switch statement if the value being switched on exceeds the
2713     // largest case in the switch.
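         // That is, the check emitted below is:
         //   if ((SValue - JTH.First) u> (JTH.Last - JTH.First)) goto JT.Default;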
2714     SDValue CMP = DAG.getSetCC(
2715         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2716                                    Sub.getValueType()),
2717         Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
2718 
2719     SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2720                                  MVT::Other, CopyTo, CMP,
2721                                  DAG.getBasicBlock(JT.Default));
2722 
2723     // Avoid emitting unnecessary branches to the next block.
2724     if (JT.MBB != NextBlock(SwitchBB))
2725       BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2726                            DAG.getBasicBlock(JT.MBB));
2727 
2728     DAG.setRoot(BrCond);
2729   } else {
2730     // Avoid emitting unnecessary branches to the next block.
2731     if (JT.MBB != NextBlock(SwitchBB))
2732       DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
2733                               DAG.getBasicBlock(JT.MBB)));
2734     else
2735       DAG.setRoot(CopyTo);
2736   }
2737 }
2738 
2739 /// Create a LOAD_STACK_GUARD node, and let it carry the target-specific global
2740 /// variable if one exists.
2741 static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
2742                                  SDValue &Chain) {
2743   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2744   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2745   EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2746   MachineFunction &MF = DAG.getMachineFunction();
2747   Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
2748   MachineSDNode *Node =
2749       DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
2750   if (Global) {
2751     MachinePointerInfo MPInfo(Global);
2752     auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
2753                  MachineMemOperand::MODereferenceable;
2754     MachineMemOperand *MemRef = MF.getMachineMemOperand(
2755         MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlign(PtrTy));
2756     DAG.setNodeMemRefs(Node, {MemRef});
2757   }
2758   if (PtrTy != PtrMemTy)
2759     return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy);
2760   return SDValue(Node, 0);
2761 }
2762 
2763 /// Codegen a new tail for a stack protector check ParentMBB which has had its
2764 /// tail spliced into a stack protector check success bb.
2765 ///
2766 /// For a high level explanation of how this fits into the stack protector
2767 /// generation see the comment on the declaration of class
2768 /// StackProtectorDescriptor.
2769 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
2770                                                   MachineBasicBlock *ParentBB) {
2771 
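       // The emitted code loads the value saved in the stack protector slot and
       // then either passes it to the target-provided guard check function, or
       // loads the canonical guard value itself and compares the two, branching
       // to the failure block on a mismatch.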
2772   // First create the loads to the guard/stack slot for the comparison.
2773   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2774   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2775   EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2776 
2777   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
2778   int FI = MFI.getStackProtectorIndex();
2779 
2780   SDValue Guard;
2781   SDLoc dl = getCurSDLoc();
2782   SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
2783   const Module &M = *ParentBB->getParent()->getFunction().getParent();
2784   Align Align =
2785       DAG.getDataLayout().getPrefTypeAlign(PointerType::get(M.getContext(), 0));
2786 
2787   // Generate code to load the content of the guard slot.
2788   SDValue GuardVal = DAG.getLoad(
2789       PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
2790       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
2791       MachineMemOperand::MOVolatile);
2792 
2793   if (TLI.useStackGuardXorFP())
2794     GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
2795 
2796   // Retrieve the guard check function; nullptr means the check is inlined.
2797   if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
2798     // The target provides a guard check function to validate the guard value.
2799     // Generate a call to that function with the content of the guard slot as
2800     // argument.
2801     FunctionType *FnTy = GuardCheckFn->getFunctionType();
2802     assert(FnTy->getNumParams() == 1 && "Invalid function signature");
2803 
2804     TargetLowering::ArgListTy Args;
2805     TargetLowering::ArgListEntry Entry;
2806     Entry.Node = GuardVal;
2807     Entry.Ty = FnTy->getParamType(0);
2808     if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
2809       Entry.IsInReg = true;
2810     Args.push_back(Entry);
2811 
2812     TargetLowering::CallLoweringInfo CLI(DAG);
2813     CLI.setDebugLoc(getCurSDLoc())
2814         .setChain(DAG.getEntryNode())
2815         .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
2816                    getValue(GuardCheckFn), std::move(Args));
2817 
2818     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
2819     DAG.setRoot(Result.second);
2820     return;
2821   }
2822 
2823   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
2824   // Otherwise, emit a volatile load to retrieve the stack guard value.
2825   SDValue Chain = DAG.getEntryNode();
2826   if (TLI.useLoadStackGuardNode()) {
2827     Guard = getLoadStackGuard(DAG, dl, Chain);
2828   } else {
2829     const Value *IRGuard = TLI.getSDagStackGuard(M);
2830     SDValue GuardPtr = getValue(IRGuard);
2831 
2832     Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
2833                         MachinePointerInfo(IRGuard, 0), Align,
2834                         MachineMemOperand::MOVolatile);
2835   }
2836 
2837   // Perform the comparison via a setcc node.
2838   SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
2839                                                         *DAG.getContext(),
2840                                                         Guard.getValueType()),
2841                              Guard, GuardVal, ISD::SETNE);
2842 
2843   // If the guard and stack slot values differ, branch to the failure MBB.
2844   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2845                                MVT::Other, GuardVal.getOperand(0),
2846                                Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
2847   // Otherwise branch to success MBB.
2848   SDValue Br = DAG.getNode(ISD::BR, dl,
2849                            MVT::Other, BrCond,
2850                            DAG.getBasicBlock(SPD.getSuccessMBB()));
2851 
2852   DAG.setRoot(Br);
2853 }
2854 
2855 /// Codegen the failure basic block for a stack protector check.
2856 ///
2857 /// A failure stack protector machine basic block consists simply of a call to
2858 /// __stack_chk_fail().
2859 ///
2860 /// For a high level explanation of how this fits into the stack protector
2861 /// generation see the comment on the declaration of class
2862 /// StackProtectorDescriptor.
2863 void
2864 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
2865   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2866   TargetLowering::MakeLibCallOptions CallOptions;
2867   CallOptions.setDiscardResult(true);
2868   SDValue Chain =
2869       TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
2870                       std::nullopt, CallOptions, getCurSDLoc())
2871           .second;
2872   // On PS4/PS5, the "return address" must still be within the calling
2873   // function, even if it's at the very end, so emit an explicit TRAP here.
2874   // Passing 'true' for doesNotReturn above won't generate the trap for us.
2875   if (TM.getTargetTriple().isPS())
2876     Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
2877   // WebAssembly needs an unreachable instruction after a non-returning call,
2878   // because the function return type can be different from __stack_chk_fail's
2879   // return type (void).
2880   if (TM.getTargetTriple().isWasm())
2881     Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
2882 
2883   DAG.setRoot(Chain);
2884 }
2885 
2886 /// visitBitTestHeader - This function emits the necessary code to produce a
2887 /// value suitable for "bit tests".
2888 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
2889                                              MachineBasicBlock *SwitchBB) {
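       // Subtract the minimum case value from the value being switched on, copy
       // the result into a virtual register for the per-case bit tests, and
       // (unless falling through to the default is known unreachable) emit the
       // range check that branches to the default block.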
2890   SDLoc dl = getCurSDLoc();
2891 
2892   // Subtract the minimum value.
2893   SDValue SwitchOp = getValue(B.SValue);
2894   EVT VT = SwitchOp.getValueType();
2895   SDValue RangeSub =
2896       DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT));
2897 
2898   // Determine the type of the test operands.
2899   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2900   bool UsePtrType = false;
2901   if (!TLI.isTypeLegal(VT)) {
2902     UsePtrType = true;
2903   } else {
2904     for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
2905       if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
2906         // Switch table case ranges are encoded into a series of masks.
2907         // Just use the pointer type; it's guaranteed to fit.
2908         UsePtrType = true;
2909         break;
2910       }
2911   }
2912   SDValue Sub = RangeSub;
2913   if (UsePtrType) {
2914     VT = TLI.getPointerTy(DAG.getDataLayout());
2915     Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
2916   }
2917 
2918   B.RegVT = VT.getSimpleVT();
2919   B.Reg = FuncInfo.CreateReg(B.RegVT);
2920   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
2921 
2922   MachineBasicBlock* MBB = B.Cases[0].ThisBB;
2923 
2924   if (!B.FallthroughUnreachable)
2925     addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
2926   addSuccessorWithProb(SwitchBB, MBB, B.Prob);
2927   SwitchBB->normalizeSuccProbs();
2928 
2929   SDValue Root = CopyTo;
2930   if (!B.FallthroughUnreachable) {
2931     // Conditional branch to the default block.
2932     SDValue RangeCmp = DAG.getSetCC(dl,
2933         TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2934                                RangeSub.getValueType()),
2935         RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()),
2936         ISD::SETUGT);
2937 
2938     Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
2939                        DAG.getBasicBlock(B.Default));
2940   }
2941 
2942   // Avoid emitting unnecessary branches to the next block.
2943   if (MBB != NextBlock(SwitchBB))
2944     Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB));
2945 
2946   DAG.setRoot(Root);
2947 }
2948 
2949 /// visitBitTestCase - This function produces one "bit test".
2950 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
2951                                            MachineBasicBlock* NextMBB,
2952                                            BranchProbability BranchProbToNext,
2953                                            unsigned Reg,
2954                                            BitTestCase &B,
2955                                            MachineBasicBlock *SwitchBB) {
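       // visitBitTestHeader copied the biased switch value (SValue - First) into
       // Reg. Test whether the bit at position Reg is set in B.Mask (the general
       // case emits ((1 << Reg) & B.Mask) != 0) and branch to B.TargetBB if so.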
2956   SDLoc dl = getCurSDLoc();
2957   MVT VT = BB.RegVT;
2958   SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
2959   SDValue Cmp;
2960   unsigned PopCount = llvm::popcount(B.Mask);
2961   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2962   if (PopCount == 1) {
2963     // Testing for a single bit; just compare the shift count with what it
2964     // would need to be to shift a 1 bit in that position.
2965     Cmp = DAG.getSetCC(
2966         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2967         ShiftOp, DAG.getConstant(llvm::countr_zero(B.Mask), dl, VT),
2968         ISD::SETEQ);
2969   } else if (PopCount == BB.Range) {
2970     // There is only one zero bit in the range; test for it directly.
2971     Cmp = DAG.getSetCC(
2972         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2973         ShiftOp, DAG.getConstant(llvm::countr_one(B.Mask), dl, VT), ISD::SETNE);
2974   } else {
2975     // Make desired shift
2976     SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
2977                                     DAG.getConstant(1, dl, VT), ShiftOp);
2978 
2979     // Emit bit tests and jumps
2980     SDValue AndOp = DAG.getNode(ISD::AND, dl,
2981                                 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
2982     Cmp = DAG.getSetCC(
2983         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2984         AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
2985   }
2986 
2987   // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
2988   addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
2989   // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
2990   addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
2991   // The sum of B.ExtraProb and BranchProbToNext is not guaranteed to be one,
2992   // as they are relative probabilities (and thus work more like weights), and
2993   // hence we need to normalize them so that they sum to one.
2994   SwitchBB->normalizeSuccProbs();
2995 
2996   SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
2997                               MVT::Other, getControlRoot(),
2998                               Cmp, DAG.getBasicBlock(B.TargetBB));
2999 
3000   // Avoid emitting unnecessary branches to the next block.
3001   if (NextMBB != NextBlock(SwitchBB))
3002     BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
3003                         DAG.getBasicBlock(NextMBB));
3004 
3005   DAG.setRoot(BrAnd);
3006 }
3007 
3008 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
3009   MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
3010 
3011   // Retrieve successors. Look through artificial IR level blocks like
3012   // catchswitch for successors.
3013   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
3014   const BasicBlock *EHPadBB = I.getSuccessor(1);
3015   MachineBasicBlock *EHPadMBB = FuncInfo.MBBMap[EHPadBB];
3016 
3017   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
3018   // have to do anything here to lower funclet bundles.
3019   assert(!I.hasOperandBundlesOtherThan(
3020              {LLVMContext::OB_deopt, LLVMContext::OB_gc_transition,
3021               LLVMContext::OB_gc_live, LLVMContext::OB_funclet,
3022               LLVMContext::OB_cfguardtarget,
3023               LLVMContext::OB_clang_arc_attachedcall}) &&
3024          "Cannot lower invokes with arbitrary operand bundles yet!");
3025 
3026   const Value *Callee(I.getCalledOperand());
3027   const Function *Fn = dyn_cast<Function>(Callee);
3028   if (isa<InlineAsm>(Callee))
3029     visitInlineAsm(I, EHPadBB);
3030   else if (Fn && Fn->isIntrinsic()) {
3031     switch (Fn->getIntrinsicID()) {
3032     default:
3033       llvm_unreachable("Cannot invoke this intrinsic");
3034     case Intrinsic::donothing:
3035       // Ignore invokes to @llvm.donothing: jump directly to the next BB.
3036     case Intrinsic::seh_try_begin:
3037     case Intrinsic::seh_scope_begin:
3038     case Intrinsic::seh_try_end:
3039     case Intrinsic::seh_scope_end:
3040       if (EHPadMBB)
3041         // Mark the EH pad as address-taken: it is referenced by the EH table,
3042         // so the destructor funclet is not removed by optimizations.
3043         EHPadMBB->setMachineBlockAddressTaken();
3044       break;
3045     case Intrinsic::experimental_patchpoint_void:
3046     case Intrinsic::experimental_patchpoint_i64:
3047       visitPatchpoint(I, EHPadBB);
3048       break;
3049     case Intrinsic::experimental_gc_statepoint:
3050       LowerStatepoint(cast<GCStatepointInst>(I), EHPadBB);
3051       break;
3052     case Intrinsic::wasm_rethrow: {
3053       // This is usually done in visitTargetIntrinsic, but this intrinsic is
3054       // special because it can be invoked, so we manually lower it to a DAG
3055       // node here.
3056       SmallVector<SDValue, 8> Ops;
3057       Ops.push_back(getRoot()); // inchain
3058       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3059       Ops.push_back(
3060           DAG.getTargetConstant(Intrinsic::wasm_rethrow, getCurSDLoc(),
3061                                 TLI.getPointerTy(DAG.getDataLayout())));
3062       SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
3063       DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
3064       break;
3065     }
3066     }
3067   } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
3068     // Currently we do not lower any intrinsic calls with deopt operand bundles.
3069     // Eventually we will support lowering the @llvm.experimental.deoptimize
3070     // intrinsic, and right now there are no plans to support other intrinsics
3071     // with deopt state.
3072     LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
3073   } else {
3074     LowerCallTo(I, getValue(Callee), false, false, EHPadBB);
3075   }
3076 
3077   // If the value of the invoke is used outside of its defining block, make it
3078   // available as a virtual register.
3079   // We already took care of the exported value for the statepoint instruction
3080   // during the call to LowerStatepoint.
3081   if (!isa<GCStatepointInst>(I)) {
3082     CopyToExportRegsIfNeeded(&I);
3083   }
3084 
3085   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
3086   BranchProbabilityInfo *BPI = FuncInfo.BPI;
3087   BranchProbability EHPadBBProb =
3088       BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
3089           : BranchProbability::getZero();
3090   findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
3091 
3092   // Update successor info.
3093   addSuccessorWithProb(InvokeMBB, Return);
3094   for (auto &UnwindDest : UnwindDests) {
3095     UnwindDest.first->setIsEHPad();
3096     addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3097   }
3098   InvokeMBB->normalizeSuccProbs();
3099 
3100   // Drop into normal successor.
3101   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
3102                           DAG.getBasicBlock(Return)));
3103 }
3104 
3105 void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
3106   MachineBasicBlock *CallBrMBB = FuncInfo.MBB;
3107 
3108   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
3109   // have to do anything here to lower funclet bundles.
3110   assert(!I.hasOperandBundlesOtherThan(
3111              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
3112          "Cannot lower callbrs with arbitrary operand bundles yet!");
3113 
3114   assert(I.isInlineAsm() && "Only know how to handle inlineasm callbr");
3115   visitInlineAsm(I);
3116   CopyToExportRegsIfNeeded(&I);
3117 
3118   // Retrieve successors.
3119   SmallPtrSet<BasicBlock *, 8> Dests;
3120   Dests.insert(I.getDefaultDest());
3121   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getDefaultDest()];
3122 
3123   // Update successor info.
3124   addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne());
3125   for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
3126     BasicBlock *Dest = I.getIndirectDest(i);
3127     MachineBasicBlock *Target = FuncInfo.MBBMap[Dest];
3128     Target->setIsInlineAsmBrIndirectTarget();
3129     Target->setMachineBlockAddressTaken();
3130     Target->setLabelMustBeEmitted();
3131     // Don't add duplicate machine successors.
3132     if (Dests.insert(Dest).second)
3133       addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero());
3134   }
3135   CallBrMBB->normalizeSuccProbs();
3136 
3137   // Drop into default successor.
3138   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
3139                           MVT::Other, getControlRoot(),
3140                           DAG.getBasicBlock(Return)));
3141 }
3142 
3143 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
3144   llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
3145 }
3146 
3147 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
3148   assert(FuncInfo.MBB->isEHPad() &&
3149          "Call to landingpad not in landing pad!");
3150 
3151   // If there aren't registers to copy the values into (e.g., during SjLj
3152   // exceptions), then don't bother to create these DAG nodes.
3153   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3154   const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
3155   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
3156       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
3157     return;
3158 
3159   // If landingpad's return type is token type, we don't create DAG nodes
3160   // for its exception pointer and selector value. The extraction of exception
3161   // pointer or selector value from token type landingpads is not currently
3162   // supported.
3163   if (LP.getType()->isTokenTy())
3164     return;
3165 
3166   SmallVector<EVT, 2> ValueVTs;
3167   SDLoc dl = getCurSDLoc();
3168   ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
3169   assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
3170 
3171   // Get the two live-in registers as SDValues. The physregs have already been
3172   // copied into virtual registers.
3173   SDValue Ops[2];
3174   if (FuncInfo.ExceptionPointerVirtReg) {
3175     Ops[0] = DAG.getZExtOrTrunc(
3176         DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3177                            FuncInfo.ExceptionPointerVirtReg,
3178                            TLI.getPointerTy(DAG.getDataLayout())),
3179         dl, ValueVTs[0]);
3180   } else {
3181     Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
3182   }
3183   Ops[1] = DAG.getZExtOrTrunc(
3184       DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3185                          FuncInfo.ExceptionSelectorVirtReg,
3186                          TLI.getPointerTy(DAG.getDataLayout())),
3187       dl, ValueVTs[1]);
3188 
3189   // Merge into one.
3190   SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
3191                             DAG.getVTList(ValueVTs), Ops);
3192   setValue(&LP, Res);
3193 }
3194 
3195 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
3196                                            MachineBasicBlock *Last) {
3197   // Update JTCases.
3198   for (JumpTableBlock &JTB : SL->JTCases)
3199     if (JTB.first.HeaderBB == First)
3200       JTB.first.HeaderBB = Last;
3201 
3202   // Update BitTestCases.
3203   for (BitTestBlock &BTB : SL->BitTestCases)
3204     if (BTB.Parent == First)
3205       BTB.Parent = Last;
3206 }
3207 
3208 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
3209   MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
3210 
3211   // Update machine-CFG edges with unique successors.
3212   SmallSet<BasicBlock*, 32> Done;
3213   for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
3214     BasicBlock *BB = I.getSuccessor(i);
3215     bool Inserted = Done.insert(BB).second;
3216     if (!Inserted)
3217         continue;
3218 
3219     MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
3220     addSuccessorWithProb(IndirectBrMBB, Succ);
3221   }
3222   IndirectBrMBB->normalizeSuccProbs();
3223 
3224   DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
3225                           MVT::Other, getControlRoot(),
3226                           getValue(I.getAddress())));
3227 }
3228 
3229 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
3230   if (!DAG.getTarget().Options.TrapUnreachable)
3231     return;
3232 
3233   // We may be able to ignore an unreachable behind a noreturn call.
3234   if (DAG.getTarget().Options.NoTrapAfterNoreturn) {
3235     if (const CallInst *Call = dyn_cast_or_null<CallInst>(I.getPrevNode())) {
3236       if (Call->doesNotReturn())
3237         return;
3238     }
3239   }
3240 
3241   DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
3242 }
3243 
3244 void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
3245   SDNodeFlags Flags;
3246   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3247     Flags.copyFMF(*FPOp);
3248 
3249   SDValue Op = getValue(I.getOperand(0));
3250   SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(),
3251                                     Op, Flags);
3252   setValue(&I, UnNodeValue);
3253 }
3254 
3255 void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
3256   SDNodeFlags Flags;
3257   if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
3258     Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3259     Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3260   }
3261   if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
3262     Flags.setExact(ExactOp->isExact());
3263   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3264     Flags.copyFMF(*FPOp);
3265 
3266   SDValue Op1 = getValue(I.getOperand(0));
3267   SDValue Op2 = getValue(I.getOperand(1));
3268   SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
3269                                      Op1, Op2, Flags);
3270   setValue(&I, BinNodeValue);
3271 }
3272 
3273 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
3274   SDValue Op1 = getValue(I.getOperand(0));
3275   SDValue Op2 = getValue(I.getOperand(1));
3276 
3277   EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
3278       Op1.getValueType(), DAG.getDataLayout());
3279 
3280   // Coerce the shift amount to the right type if we can. This exposes the
3281   // truncate or zext to optimization early.
3282   if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
3283     assert(ShiftTy.getSizeInBits() >= Log2_32_Ceil(Op1.getValueSizeInBits()) &&
3284            "Unexpected shift type");
3285     Op2 = DAG.getZExtOrTrunc(Op2, getCurSDLoc(), ShiftTy);
3286   }
3287 
3288   bool nuw = false;
3289   bool nsw = false;
3290   bool exact = false;
3291 
3292   if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
3293 
3294     if (const OverflowingBinaryOperator *OFBinOp =
3295             dyn_cast<const OverflowingBinaryOperator>(&I)) {
3296       nuw = OFBinOp->hasNoUnsignedWrap();
3297       nsw = OFBinOp->hasNoSignedWrap();
3298     }
3299     if (const PossiblyExactOperator *ExactOp =
3300             dyn_cast<const PossiblyExactOperator>(&I))
3301       exact = ExactOp->isExact();
3302   }
3303   SDNodeFlags Flags;
3304   Flags.setExact(exact);
3305   Flags.setNoSignedWrap(nsw);
3306   Flags.setNoUnsignedWrap(nuw);
3307   SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
3308                             Flags);
3309   setValue(&I, Res);
3310 }
3311 
3312 void SelectionDAGBuilder::visitSDiv(const User &I) {
3313   SDValue Op1 = getValue(I.getOperand(0));
3314   SDValue Op2 = getValue(I.getOperand(1));
3315 
3316   SDNodeFlags Flags;
3317   Flags.setExact(isa<PossiblyExactOperator>(&I) &&
3318                  cast<PossiblyExactOperator>(&I)->isExact());
3319   setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
3320                            Op2, Flags));
3321 }
3322 
3323 void SelectionDAGBuilder::visitICmp(const User &I) {
3324   ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
3325   if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
3326     predicate = IC->getPredicate();
3327   else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
3328     predicate = ICmpInst::Predicate(IC->getPredicate());
3329   SDValue Op1 = getValue(I.getOperand(0));
3330   SDValue Op2 = getValue(I.getOperand(1));
3331   ISD::CondCode Opcode = getICmpCondCode(predicate);
3332 
3333   auto &TLI = DAG.getTargetLoweringInfo();
3334   EVT MemVT =
3335       TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3336 
3337   // If a pointer's DAG type is larger than its memory type then the DAG values
3338   // are zero-extended. This breaks signed comparisons so truncate back to the
3339   // underlying type before doing the compare.
3340   if (Op1.getValueType() != MemVT) {
3341     Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT);
3342     Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT);
3343   }
3344 
3345   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3346                                                         I.getType());
3347   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
3348 }
3349 
3350 void SelectionDAGBuilder::visitFCmp(const User &I) {
3351   FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
3352   if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
3353     predicate = FC->getPredicate();
3354   else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
3355     predicate = FCmpInst::Predicate(FC->getPredicate());
3356   SDValue Op1 = getValue(I.getOperand(0));
3357   SDValue Op2 = getValue(I.getOperand(1));
3358 
3359   ISD::CondCode Condition = getFCmpCondCode(predicate);
3360   auto *FPMO = cast<FPMathOperator>(&I);
3361   if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
3362     Condition = getFCmpCodeWithoutNaN(Condition);
3363 
3364   SDNodeFlags Flags;
3365   Flags.copyFMF(*FPMO);
3366   SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
3367 
3368   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3369                                                         I.getType());
3370   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
3371 }
3372 
3373 // Check whether every user of the select's condition is itself a select (in
3374 // which case converting to min/max can make the compare dead).
3375 static bool hasOnlySelectUsers(const Value *Cond) {
3376   return llvm::all_of(Cond->users(), [](const Value *V) {
3377     return isa<SelectInst>(V);
3378   });
3379 }
3380 
3381 void SelectionDAGBuilder::visitSelect(const User &I) {
3382   SmallVector<EVT, 4> ValueVTs;
3383   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
3384                   ValueVTs);
3385   unsigned NumValues = ValueVTs.size();
3386   if (NumValues == 0) return;
3387 
3388   SmallVector<SDValue, 4> Values(NumValues);
3389   SDValue Cond     = getValue(I.getOperand(0));
3390   SDValue LHSVal   = getValue(I.getOperand(1));
3391   SDValue RHSVal   = getValue(I.getOperand(2));
3392   SmallVector<SDValue, 1> BaseOps(1, Cond);
3393   ISD::NodeType OpCode =
3394       Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
3395 
3396   bool IsUnaryAbs = false;
3397   bool Negate = false;
3398 
3399   SDNodeFlags Flags;
3400   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3401     Flags.copyFMF(*FPOp);
3402 
3403   Flags.setUnpredictable(
3404       cast<SelectInst>(I).getMetadata(LLVMContext::MD_unpredictable));
3405 
3406   // Min/max matching is only viable if all output VTs are the same.
3407   if (all_equal(ValueVTs)) {
3408     EVT VT = ValueVTs[0];
3409     LLVMContext &Ctx = *DAG.getContext();
3410     auto &TLI = DAG.getTargetLoweringInfo();
3411 
3412     // We care about the legality of the operation after it has been type
3413     // legalized.
3414     while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal)
3415       VT = TLI.getTypeToTransformTo(Ctx, VT);
3416 
3417     // If the vselect is legal, assume we want to leave this as a vector setcc +
3418     // vselect. Otherwise, if this is going to be scalarized, we want to see if
3419     // min/max is legal on the scalar type.
3420     bool UseScalarMinMax = VT.isVector() &&
3421       !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
3422 
3423     // ValueTracking's select pattern matching does not account for -0.0,
3424     // so we can't lower to FMINIMUM/FMAXIMUM because those nodes specify that
3425     // -0.0 is less than +0.0.
3426     Value *LHS, *RHS;
3427     auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
3428     ISD::NodeType Opc = ISD::DELETED_NODE;
3429     switch (SPR.Flavor) {
3430     case SPF_UMAX:    Opc = ISD::UMAX; break;
3431     case SPF_UMIN:    Opc = ISD::UMIN; break;
3432     case SPF_SMAX:    Opc = ISD::SMAX; break;
3433     case SPF_SMIN:    Opc = ISD::SMIN; break;
3434     case SPF_FMINNUM:
3435       switch (SPR.NaNBehavior) {
3436       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3437       case SPNB_RETURNS_NAN: break;
3438       case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
3439       case SPNB_RETURNS_ANY:
3440         if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT) ||
3441             (UseScalarMinMax &&
3442              TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType())))
3443           Opc = ISD::FMINNUM;
3444         break;
3445       }
3446       break;
3447     case SPF_FMAXNUM:
3448       switch (SPR.NaNBehavior) {
3449       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3450       case SPNB_RETURNS_NAN: break;
3451       case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
3452       case SPNB_RETURNS_ANY:
3453         if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT) ||
3454             (UseScalarMinMax &&
3455              TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType())))
3456           Opc = ISD::FMAXNUM;
3457         break;
3458       }
3459       break;
3460     case SPF_NABS:
3461       Negate = true;
3462       [[fallthrough]];
3463     case SPF_ABS:
3464       IsUnaryAbs = true;
3465       Opc = ISD::ABS;
3466       break;
3467     default: break;
3468     }
3469 
3470     if (!IsUnaryAbs && Opc != ISD::DELETED_NODE &&
3471         (TLI.isOperationLegalOrCustom(Opc, VT) ||
3472          (UseScalarMinMax &&
3473           TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
3474         // If the underlying comparison instruction is used by any other
3475         // instruction, the consumed instructions won't be destroyed, so it is
3476         // not profitable to convert to a min/max.
3477         hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
3478       OpCode = Opc;
3479       LHSVal = getValue(LHS);
3480       RHSVal = getValue(RHS);
3481       BaseOps.clear();
3482     }
3483 
3484     if (IsUnaryAbs) {
3485       OpCode = Opc;
3486       LHSVal = getValue(LHS);
3487       BaseOps.clear();
3488     }
3489   }
3490 
3491   if (IsUnaryAbs) {
3492     for (unsigned i = 0; i != NumValues; ++i) {
3493       SDLoc dl = getCurSDLoc();
3494       EVT VT = LHSVal.getNode()->getValueType(LHSVal.getResNo() + i);
3495       Values[i] =
3496           DAG.getNode(OpCode, dl, VT, LHSVal.getValue(LHSVal.getResNo() + i));
3497       if (Negate)
3498         Values[i] = DAG.getNegative(Values[i], dl, VT);
3499     }
3500   } else {
3501     for (unsigned i = 0; i != NumValues; ++i) {
3502       SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
3503       Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3504       Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3505       Values[i] = DAG.getNode(
3506           OpCode, getCurSDLoc(),
3507           LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops, Flags);
3508     }
3509   }
3510 
3511   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3512                            DAG.getVTList(ValueVTs), Values));
3513 }
3514 
3515 void SelectionDAGBuilder::visitTrunc(const User &I) {
3516   // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3517   SDValue N = getValue(I.getOperand(0));
3518   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3519                                                         I.getType());
3520   setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
3521 }
3522 
3523 void SelectionDAGBuilder::visitZExt(const User &I) {
3524   // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3525   // ZExt also can't be a cast to bool for the same reason; nothing much to do.
3526   SDValue N = getValue(I.getOperand(0));
3527   auto &TLI = DAG.getTargetLoweringInfo();
3528   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3529 
3530   // Since we don't yet have a representation of zext nneg in SDAG or MI,
3531   // eagerly use the information to canonicalize towards sign_extend if
3532   // that is the target's preference.  TODO: Add nneg support to the
3533   // SDAG and MI representations.
3534   if (auto *PNI = dyn_cast<PossiblyNonNegInst>(&I);
3535       PNI && PNI->hasNonNeg() &&
3536       TLI.isSExtCheaperThanZExt(N.getValueType(), DestVT)) {
3537     setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3538     return;
3539   }
3540 
3541   setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
3542 }
3543 
3544 void SelectionDAGBuilder::visitSExt(const User &I) {
3545   // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3546   // SExt also can't be a cast to bool for the same reason; nothing much to do.
3547   SDValue N = getValue(I.getOperand(0));
3548   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3549                                                         I.getType());
3550   setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3551 }
3552 
3553 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
3554   // FPTrunc is never a no-op cast, no need to check
3555   SDValue N = getValue(I.getOperand(0));
3556   SDLoc dl = getCurSDLoc();
3557   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3558   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3559   setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
3560                            DAG.getTargetConstant(
3561                                0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
3562 }
3563 
3564 void SelectionDAGBuilder::visitFPExt(const User &I) {
3565   // FPExt is never a no-op cast, no need to check
3566   SDValue N = getValue(I.getOperand(0));
3567   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3568                                                         I.getType());
3569   setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3570 }
3571 
3572 void SelectionDAGBuilder::visitFPToUI(const User &I) {
3573   // FPToUI is never a no-op cast, no need to check
3574   SDValue N = getValue(I.getOperand(0));
3575   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3576                                                         I.getType());
3577   setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3578 }
3579 
3580 void SelectionDAGBuilder::visitFPToSI(const User &I) {
3581   // FPToSI is never a no-op cast, no need to check
3582   SDValue N = getValue(I.getOperand(0));
3583   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3584                                                         I.getType());
3585   setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3586 }
3587 
3588 void SelectionDAGBuilder::visitUIToFP(const User &I) {
3589   // UIToFP is never a no-op cast, no need to check
3590   SDValue N = getValue(I.getOperand(0));
3591   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3592                                                         I.getType());
3593   setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
3594 }
3595 
3596 void SelectionDAGBuilder::visitSIToFP(const User &I) {
3597   // SIToFP is never a no-op cast, no need to check
3598   SDValue N = getValue(I.getOperand(0));
3599   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3600                                                         I.getType());
3601   setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3602 }
3603 
3604 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3605   // What to do depends on the size of the integer and the size of the pointer.
3606   // We can either truncate, zero extend, or no-op, accordingly.
3607   SDValue N = getValue(I.getOperand(0));
3608   auto &TLI = DAG.getTargetLoweringInfo();
3609   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3610                                                         I.getType());
3611   EVT PtrMemVT =
3612       TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3613   N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3614   N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT);
3615   setValue(&I, N);
3616 }
3617 
3618 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3619   // What to do depends on the size of the integer and the size of the pointer.
3620   // We can either truncate, zero extend, or no-op, accordingly.
3621   SDValue N = getValue(I.getOperand(0));
3622   auto &TLI = DAG.getTargetLoweringInfo();
3623   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3624   EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
3625   N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3626   N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT);
3627   setValue(&I, N);
3628 }
3629 
3630 void SelectionDAGBuilder::visitBitCast(const User &I) {
3631   SDValue N = getValue(I.getOperand(0));
3632   SDLoc dl = getCurSDLoc();
3633   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3634                                                         I.getType());
3635 
3636   // BitCast assures us that source and destination are the same size so this is
3637   // either a BITCAST or a no-op.
3638   if (DestVT != N.getValueType())
3639     setValue(&I, DAG.getNode(ISD::BITCAST, dl,
3640                              DestVT, N)); // convert types.
3641   // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
3642   // might fold any kind of constant expression to an integer constant and that
3643   // is not what we are looking for. Only recognize a bitcast of a genuine
3644   // constant integer as an opaque constant.
3645   else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3646     setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
3647                                  /*isOpaque*/true));
3648   else
3649     setValue(&I, N);            // noop cast.
3650 }
3651 
3652 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3653   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3654   const Value *SV = I.getOperand(0);
3655   SDValue N = getValue(SV);
3656   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3657 
3658   unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3659   unsigned DestAS = I.getType()->getPointerAddressSpace();
3660 
3661   if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
3662     N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3663 
3664   setValue(&I, N);
3665 }
3666 
3667 void SelectionDAGBuilder::visitInsertElement(const User &I) {
3668   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3669   SDValue InVec = getValue(I.getOperand(0));
3670   SDValue InVal = getValue(I.getOperand(1));
3671   SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
3672                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
3673   setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
3674                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
3675                            InVec, InVal, InIdx));
3676 }
3677 
3678 void SelectionDAGBuilder::visitExtractElement(const User &I) {
3679   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3680   SDValue InVec = getValue(I.getOperand(0));
3681   SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
3682                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
3683   setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3684                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
3685                            InVec, InIdx));
3686 }
3687 
3688 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
3689   SDValue Src1 = getValue(I.getOperand(0));
3690   SDValue Src2 = getValue(I.getOperand(1));
3691   ArrayRef<int> Mask;
3692   if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
3693     Mask = SVI->getShuffleMask();
3694   else
3695     Mask = cast<ConstantExpr>(I).getShuffleMask();
3696   SDLoc DL = getCurSDLoc();
3697   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3698   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3699   EVT SrcVT = Src1.getValueType();
3700 
3701   if (all_of(Mask, [](int Elem) { return Elem == 0; }) &&
3702       VT.isScalableVector()) {
3703     // Canonical splat form of first element of first input vector.
3704     SDValue FirstElt =
3705         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT.getScalarType(), Src1,
3706                     DAG.getVectorIdxConstant(0, DL));
3707     setValue(&I, DAG.getNode(ISD::SPLAT_VECTOR, DL, VT, FirstElt));
3708     return;
3709   }
3710 
3711   // For now, we only handle splats for scalable vectors.
3712   // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation
3713   // for targets that support a SPLAT_VECTOR for non-scalable vector types.
3714   assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle");
3715 
3716   unsigned SrcNumElts = SrcVT.getVectorNumElements();
3717   unsigned MaskNumElts = Mask.size();
3718 
3719   if (SrcNumElts == MaskNumElts) {
3720     setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
3721     return;
3722   }
3723 
3724   // Normalize the shuffle vector since mask and vector length don't match.
3725   if (SrcNumElts < MaskNumElts) {
3726     // The mask is longer than the source vectors. We can concatenate the
3727     // vectors to make their length match the mask length.
3728 
3729     if (MaskNumElts % SrcNumElts == 0) {
3730       // Mask length is a multiple of the source vector length.
3731       // Check if the shuffle is some kind of concatenation of the input
3732       // vectors.
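           // For example, with 4-element sources, the mask <0,1,2,3,4,5,6,7>
           // concatenates Src1 and Src2, and <4,5,6,7,0,1,2,3> concatenates Src2
           // and Src1.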
3733       unsigned NumConcat = MaskNumElts / SrcNumElts;
3734       bool IsConcat = true;
3735       SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
3736       for (unsigned i = 0; i != MaskNumElts; ++i) {
3737         int Idx = Mask[i];
3738         if (Idx < 0)
3739           continue;
3740         // Ensure the indices in each SrcVT sized piece are sequential and that
3741         // the same source is used for the whole piece.
3742         if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
3743             (ConcatSrcs[i / SrcNumElts] >= 0 &&
3744              ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
3745           IsConcat = false;
3746           break;
3747         }
3748         // Remember which source this index came from.
3749         ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
3750       }
3751 
3752       // The shuffle is concatenating multiple vectors together. Just emit
3753       // a CONCAT_VECTORS operation.
3754       if (IsConcat) {
3755         SmallVector<SDValue, 8> ConcatOps;
3756         for (auto Src : ConcatSrcs) {
3757           if (Src < 0)
3758             ConcatOps.push_back(DAG.getUNDEF(SrcVT));
3759           else if (Src == 0)
3760             ConcatOps.push_back(Src1);
3761           else
3762             ConcatOps.push_back(Src2);
3763         }
3764         setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
3765         return;
3766       }
3767     }
3768 
3769     unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
3770     unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
3771     EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
3772                                     PaddedMaskNumElts);
3773 
3774     // Pad both vectors with undefs to make them the same length as the mask.
3775     SDValue UndefVal = DAG.getUNDEF(SrcVT);
3776 
3777     SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3778     SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3779     MOps1[0] = Src1;
3780     MOps2[0] = Src2;
3781 
3782     Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
3783     Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
3784 
3785     // Readjust mask for new input vector length.
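         // For example, with SrcNumElts = 2 and MaskNumElts = 3 (so
         // PaddedMaskNumElts = 4), the original index 2 (element 0 of Src2)
         // becomes 2 - (2 - 4) = 4, which addresses element 0 of the padded
         // Src2 in the widened two-operand shuffle.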
3786     SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
3787     for (unsigned i = 0; i != MaskNumElts; ++i) {
3788       int Idx = Mask[i];
3789       if (Idx >= (int)SrcNumElts)
3790         Idx -= SrcNumElts - PaddedMaskNumElts;
3791       MappedOps[i] = Idx;
3792     }
3793 
3794     SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
3795 
3796     // If the concatenated vector was padded, extract a subvector with the
3797     // correct number of elements.
3798     if (MaskNumElts != PaddedMaskNumElts)
3799       Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
3800                            DAG.getVectorIdxConstant(0, DL));
3801 
3802     setValue(&I, Result);
3803     return;
3804   }
3805 
3806   if (SrcNumElts > MaskNumElts) {
3807     // Analyze the access pattern of the vector to see if we can extract
3808     // two subvectors and do the shuffle.
3809     int StartIdx[2] = { -1, -1 };  // StartIdx to extract from
3810     bool CanExtract = true;
3811     for (int Idx : Mask) {
3812       unsigned Input = 0;
3813       if (Idx < 0)
3814         continue;
3815 
3816       if (Idx >= (int)SrcNumElts) {
3817         Input = 1;
3818         Idx -= SrcNumElts;
3819       }
3820 
3821       // If all the indices come from the same MaskNumElts sized portion of
3822       // the sources we can use extract. Also make sure the extract wouldn't
3823       // extract past the end of the source.
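           // For example, with SrcNumElts = 8 and MaskNumElts = 4, the mask
           // <4,5,6,7> stays within the Src1 piece starting at index 4, so a
           // single EXTRACT_SUBVECTOR with StartIdx[0] = 4 suffices.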
3824       int NewStartIdx = alignDown(Idx, MaskNumElts);
3825       if (NewStartIdx + MaskNumElts > SrcNumElts ||
3826           (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
3827         CanExtract = false;
3828       // Make sure we always update StartIdx as we use it to track if all
3829       // elements are undef.
3830       StartIdx[Input] = NewStartIdx;
3831     }
3832 
3833     if (StartIdx[0] < 0 && StartIdx[1] < 0) {
3834       setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3835       return;
3836     }
3837     if (CanExtract) {
3838       // Extract appropriate subvector and generate a vector shuffle
3839       for (unsigned Input = 0; Input < 2; ++Input) {
3840         SDValue &Src = Input == 0 ? Src1 : Src2;
3841         if (StartIdx[Input] < 0)
3842           Src = DAG.getUNDEF(VT);
3843         else {
3844           Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
3845                             DAG.getVectorIdxConstant(StartIdx[Input], DL));
3846         }
3847       }
3848 
3849       // Calculate new mask.
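           // For example, with SrcNumElts = 8, MaskNumElts = 4 and
           // StartIdx = {0, 4}, an original index 12 (element 4 of Src2) maps
           // to 12 - (8 + 4 - 4) = 4, i.e. element 0 of the extracted Src2
           // subvector, the second operand of the new shuffle.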
3850       SmallVector<int, 8> MappedOps(Mask);
3851       for (int &Idx : MappedOps) {
3852         if (Idx >= (int)SrcNumElts)
3853           Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3854         else if (Idx >= 0)
3855           Idx -= StartIdx[0];
3856       }
3857 
3858       setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
3859       return;
3860     }
3861   }
3862 
3863   // We can't use either concat vectors or extract subvectors, so fall back
3864   // to replacing the shuffle with per-element EXTRACT_VECTOR_ELT nodes and
3865   // a BUILD_VECTOR of the results.
3866   EVT EltVT = VT.getVectorElementType();
3867   SmallVector<SDValue,8> Ops;
3868   for (int Idx : Mask) {
3869     SDValue Res;
3870 
3871     if (Idx < 0) {
3872       Res = DAG.getUNDEF(EltVT);
3873     } else {
3874       SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3875       if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
3876 
3877       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src,
3878                         DAG.getVectorIdxConstant(Idx, DL));
3879     }
3880 
3881     Ops.push_back(Res);
3882   }
3883 
3884   setValue(&I, DAG.getBuildVector(VT, DL, Ops));
3885 }
3886 
3887 void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
3888   ArrayRef<unsigned> Indices = I.getIndices();
3889   const Value *Op0 = I.getOperand(0);
3890   const Value *Op1 = I.getOperand(1);
3891   Type *AggTy = I.getType();
3892   Type *ValTy = Op1->getType();
3893   bool IntoUndef = isa<UndefValue>(Op0);
3894   bool FromUndef = isa<UndefValue>(Op1);
3895 
3896   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3897 
3898   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3899   SmallVector<EVT, 4> AggValueVTs;
3900   ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
3901   SmallVector<EVT, 4> ValValueVTs;
3902   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3903 
3904   unsigned NumAggValues = AggValueVTs.size();
3905   unsigned NumValValues = ValValueVTs.size();
3906   SmallVector<SDValue, 4> Values(NumAggValues);
3907 
3908   // Ignore an insertvalue that produces an empty object
3909   if (!NumAggValues) {
3910     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3911     return;
3912   }
3913 
3914   SDValue Agg = getValue(Op0);
3915   unsigned i = 0;
3916   // Copy the beginning value(s) from the original aggregate.
3917   for (; i != LinearIndex; ++i)
3918     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3919                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3920   // Copy values from the inserted value(s).
3921   if (NumValValues) {
3922     SDValue Val = getValue(Op1);
3923     for (; i != LinearIndex + NumValValues; ++i)
3924       Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3925                   SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
3926   }
3927   // Copy remaining value(s) from the original aggregate.
3928   for (; i != NumAggValues; ++i)
3929     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3930                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3931 
3932   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3933                            DAG.getVTList(AggValueVTs), Values));
3934 }
3935 
3936 void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
3937   ArrayRef<unsigned> Indices = I.getIndices();
3938   const Value *Op0 = I.getOperand(0);
3939   Type *AggTy = Op0->getType();
3940   Type *ValTy = I.getType();
3941   bool OutOfUndef = isa<UndefValue>(Op0);
3942 
3943   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3944 
3945   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3946   SmallVector<EVT, 4> ValValueVTs;
3947   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3948 
3949   unsigned NumValValues = ValValueVTs.size();
3950 
3951   // Ignore an extractvalue that produces an empty object
3952   if (!NumValValues) {
3953     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3954     return;
3955   }
3956 
3957   SmallVector<SDValue, 4> Values(NumValValues);
3958 
3959   SDValue Agg = getValue(Op0);
3960   // Copy out the selected value(s).
3961   for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
3962     Values[i - LinearIndex] =
3963       OutOfUndef ?
3964         DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
3965         SDValue(Agg.getNode(), Agg.getResNo() + i);
3966 
3967   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3968                            DAG.getVTList(ValValueVTs), Values));
3969 }
3970 
3971 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
3972   Value *Op0 = I.getOperand(0);
3973   // Note that the pointer operand may be a vector of pointers. Take the scalar
3974   // element which holds a pointer.
3975   unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
3976   SDValue N = getValue(Op0);
3977   SDLoc dl = getCurSDLoc();
3978   auto &TLI = DAG.getTargetLoweringInfo();
3979 
3980   // Normalize Vector GEP - all scalar operands should be converted to the
3981   // splat vector.
3982   bool IsVectorGEP = I.getType()->isVectorTy();
3983   ElementCount VectorElementCount =
3984       IsVectorGEP ? cast<VectorType>(I.getType())->getElementCount()
3985                   : ElementCount::getFixed(0);
3986 
3987   if (IsVectorGEP && !N.getValueType().isVector()) {
3988     LLVMContext &Context = *DAG.getContext();
3989     EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorElementCount);
3990     N = DAG.getSplat(VT, dl, N);
3991   }
3992 
3993   for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
3994        GTI != E; ++GTI) {
3995     const Value *Idx = GTI.getOperand();
3996     if (StructType *StTy = GTI.getStructTypeOrNull()) {
3997       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
3998       if (Field) {
3999         // N = N + Offset
4000         uint64_t Offset =
4001             DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(Field);
4002 
4003         // In an inbounds GEP with an offset that is nonnegative even when
4004         // interpreted as signed, assume there is no unsigned overflow.
4005         SDNodeFlags Flags;
4006         if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
4007           Flags.setNoUnsignedWrap(true);
4008 
4009         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
4010                         DAG.getConstant(Offset, dl, N.getValueType()), Flags);
4011       }
4012     } else {
4013       // IdxSize is the width of the arithmetic according to IR semantics.
4014       // In SelectionDAG, we may prefer to do arithmetic in a wider bitwidth
4015       // (and fix up the result later).
4016       unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
4017       MVT IdxTy = MVT::getIntegerVT(IdxSize);
4018       TypeSize ElementSize =
4019           DAG.getDataLayout().getTypeAllocSize(GTI.getIndexedType());
4020       // We intentionally mask away the high bits here; ElementSize may not
4021       // fit in IdxTy.
4022       APInt ElementMul(IdxSize, ElementSize.getKnownMinValue());
4023       bool ElementScalable = ElementSize.isScalable();
4024 
4025       // If this is a scalar constant or a splat vector of constants,
4026       // handle it quickly.
4027       const auto *C = dyn_cast<Constant>(Idx);
4028       if (C && isa<VectorType>(C->getType()))
4029         C = C->getSplatValue();
4030 
4031       const auto *CI = dyn_cast_or_null<ConstantInt>(C);
4032       if (CI && CI->isZero())
4033         continue;
4034       if (CI && !ElementScalable) {
4035         APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4036         LLVMContext &Context = *DAG.getContext();
4037         SDValue OffsVal;
4038         if (IsVectorGEP)
4039           OffsVal = DAG.getConstant(
4040               Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorElementCount));
4041         else
4042           OffsVal = DAG.getConstant(Offs, dl, IdxTy);
4043 
4044         // In an inbounds GEP with an offset that is nonnegative even when
4045         // interpreted as signed, assume there is no unsigned overflow.
4046         SDNodeFlags Flags;
4047         if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
4048           Flags.setNoUnsignedWrap(true);
4049 
4050         OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType());
4051 
4052         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
4053         continue;
4054       }
4055 
4056       // N = N + Idx * ElementMul;
4057       SDValue IdxN = getValue(Idx);
4058 
4059       if (!IdxN.getValueType().isVector() && IsVectorGEP) {
4060         EVT VT = EVT::getVectorVT(*DAG.getContext(), IdxN.getValueType(),
4061                                   VectorElementCount);
4062         IdxN = DAG.getSplat(VT, dl, IdxN);
4063       }
4064 
4065       // If the index is smaller or larger than intptr_t, truncate or extend
4066       // it.
4067       IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
4068 
4069       if (ElementScalable) {
4070         EVT VScaleTy = N.getValueType().getScalarType();
4071         SDValue VScale = DAG.getNode(
4072             ISD::VSCALE, dl, VScaleTy,
4073             DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4074         if (IsVectorGEP)
4075           VScale = DAG.getSplatVector(N.getValueType(), dl, VScale);
4076         IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, VScale);
4077       } else {
4078         // If this is a multiply by a power of two, turn it into a shl
4079         // immediately.  This is a very common case.
4080         if (ElementMul != 1) {
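             // For example, an element size of 8 bytes turns Idx * 8 into
             // Idx << 3.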
4081           if (ElementMul.isPowerOf2()) {
4082             unsigned Amt = ElementMul.logBase2();
4083             IdxN = DAG.getNode(ISD::SHL, dl,
4084                                N.getValueType(), IdxN,
4085                                DAG.getConstant(Amt, dl, IdxN.getValueType()));
4086           } else {
4087             SDValue Scale = DAG.getConstant(ElementMul.getZExtValue(), dl,
4088                                             IdxN.getValueType());
4089             IdxN = DAG.getNode(ISD::MUL, dl,
4090                                N.getValueType(), IdxN, Scale);
4091           }
4092         }
4093       }
4094 
4095       N = DAG.getNode(ISD::ADD, dl,
4096                       N.getValueType(), N, IdxN);
4097     }
4098   }
4099 
4100   MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS);
4101   MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS);
4102   if (IsVectorGEP) {
4103     PtrTy = MVT::getVectorVT(PtrTy, VectorElementCount);
4104     PtrMemTy = MVT::getVectorVT(PtrMemTy, VectorElementCount);
4105   }
4106 
4107   if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds())
4108     N = DAG.getPtrExtendInReg(N, dl, PtrMemTy);
4109 
4110   setValue(&I, N);
4111 }
4112 
4113 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
4114   // If this is a fixed sized alloca in the entry block of the function,
4115   // allocate it statically on the stack.
4116   if (FuncInfo.StaticAllocaMap.count(&I))
4117     return;   // getValue will auto-populate this.
4118 
4119   SDLoc dl = getCurSDLoc();
4120   Type *Ty = I.getAllocatedType();
4121   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4122   auto &DL = DAG.getDataLayout();
4123   TypeSize TySize = DL.getTypeAllocSize(Ty);
4124   MaybeAlign Alignment = std::max(DL.getPrefTypeAlign(Ty), I.getAlign());
4125 
4126   SDValue AllocSize = getValue(I.getArraySize());
4127 
4128   EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout(), I.getAddressSpace());
4129   if (AllocSize.getValueType() != IntPtr)
4130     AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4131 
4132   if (TySize.isScalable())
4133     AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
4134                             DAG.getVScale(dl, IntPtr,
4135                                           APInt(IntPtr.getScalarSizeInBits(),
4136                                                 TySize.getKnownMinValue())));
4137   else
4138     AllocSize =
4139         DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
4140                     DAG.getConstant(TySize.getFixedValue(), dl, IntPtr));
4141 
4142   // Handle alignment.  If the requested alignment is less than or equal to
4143   // the stack alignment, ignore it.  If the requested alignment is greater
4144   // than the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
4145   Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign();
4146   if (*Alignment <= StackAlign)
4147     Alignment = std::nullopt;
4148 
4149   const uint64_t StackAlignMask = StackAlign.value() - 1U;
4150   // Round the size of the allocation up to the stack alignment size
4151   // by adding StackAlign-1 to the size. This doesn't overflow because we're
4152   // computing an address inside an alloca.
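       // For example, with a 16-byte stack alignment an AllocSize of 20 becomes
       // (20 + 15) & ~15 = 32.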
4153   SDNodeFlags Flags;
4154   Flags.setNoUnsignedWrap(true);
4155   AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
4156                           DAG.getConstant(StackAlignMask, dl, IntPtr), Flags);
4157 
4158   // Mask out the low bits for alignment purposes.
4159   AllocSize = DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
4160                           DAG.getConstant(~StackAlignMask, dl, IntPtr));
4161 
4162   SDValue Ops[] = {
4163       getRoot(), AllocSize,
4164       DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4165   SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
4166   SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
4167   setValue(&I, DSA);
4168   DAG.setRoot(DSA.getValue(1));
4169 
4170   assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
4171 }
4172 
4173 static const MDNode *getRangeMetadata(const Instruction &I) {
4174   // If !noundef is not present, then !range violation results in a poison
4175   // value rather than immediate undefined behavior. In theory, transferring
4176   // these annotations to SDAG is fine, but in practice there are key SDAG
4177   // transforms that are known not to be poison-safe, such as folding logical
4178   // and/or to bitwise and/or. For now, only transfer !range if !noundef is
4179   // also present.
4180   if (!I.hasMetadata(LLVMContext::MD_noundef))
4181     return nullptr;
4182   return I.getMetadata(LLVMContext::MD_range);
4183 }
4184 
4185 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
4186   if (I.isAtomic())
4187     return visitAtomicLoad(I);
4188 
4189   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4190   const Value *SV = I.getOperand(0);
4191   if (TLI.supportSwiftError()) {
4192     // Swifterror values can come from either a function parameter with
4193     // swifterror attribute or an alloca with swifterror attribute.
4194     if (const Argument *Arg = dyn_cast<Argument>(SV)) {
4195       if (Arg->hasSwiftErrorAttr())
4196         return visitLoadFromSwiftError(I);
4197     }
4198 
4199     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
4200       if (Alloca->isSwiftError())
4201         return visitLoadFromSwiftError(I);
4202     }
4203   }
4204 
4205   SDValue Ptr = getValue(SV);
4206 
4207   Type *Ty = I.getType();
4208   SmallVector<EVT, 4> ValueVTs, MemVTs;
4209   SmallVector<TypeSize, 4> Offsets;
4210   ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets, 0);
4211   unsigned NumValues = ValueVTs.size();
4212   if (NumValues == 0)
4213     return;
4214 
4215   Align Alignment = I.getAlign();
4216   AAMDNodes AAInfo = I.getAAMetadata();
4217   const MDNode *Ranges = getRangeMetadata(I);
4218   bool isVolatile = I.isVolatile();
4219   MachineMemOperand::Flags MMOFlags =
4220       TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
4221 
4222   SDValue Root;
4223   bool ConstantMemory = false;
4224   if (isVolatile)
4225     // Serialize volatile loads with other side effects.
4226     Root = getRoot();
4227   else if (NumValues > MaxParallelChains)
4228     Root = getMemoryRoot();
4229   else if (AA &&
4230            AA->pointsToConstantMemory(MemoryLocation(
4231                SV,
4232                LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4233                AAInfo))) {
4234     // Do not serialize (non-volatile) loads of constant memory with anything.
4235     Root = DAG.getEntryNode();
4236     ConstantMemory = true;
4237     MMOFlags |= MachineMemOperand::MOInvariant;
4238   } else {
4239     // Do not serialize non-volatile loads against each other.
4240     Root = DAG.getRoot();
4241   }
4242 
4243   SDLoc dl = getCurSDLoc();
4244 
4245   if (isVolatile)
4246     Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
4247 
4248   SmallVector<SDValue, 4> Values(NumValues);
4249   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4250 
4251   unsigned ChainI = 0;
4252   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4253     // Serializing loads here may result in excessive register pressure, and
4254     // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
4255     // could recover a bit by hoisting nodes upward in the chain by recognizing
4256     // they are side-effect free or do not alias. The optimizer should really
4257     // avoid this case by converting large object/array copies to llvm.memcpy
4258     // (MaxParallelChains should always remain as a failsafe).
4259     if (ChainI == MaxParallelChains) {
4260       assert(PendingLoads.empty() && "PendingLoads must be serialized first");
4261       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4262                                   ArrayRef(Chains.data(), ChainI));
4263       Root = Chain;
4264       ChainI = 0;
4265     }
4266 
4267     // TODO: MachinePointerInfo only supports a fixed length offset.
4268     MachinePointerInfo PtrInfo =
4269         !Offsets[i].isScalable() || Offsets[i].isZero()
4270             ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue())
4271             : MachinePointerInfo();
4272 
4273     SDValue A = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4274     SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A, PtrInfo, Alignment,
4275                             MMOFlags, AAInfo, Ranges);
4276     Chains[ChainI] = L.getValue(1);
4277 
4278     if (MemVTs[i] != ValueVTs[i])
4279       L = DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]);
4280 
4281     Values[i] = L;
4282   }
4283 
4284   if (!ConstantMemory) {
4285     SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4286                                 ArrayRef(Chains.data(), ChainI));
4287     if (isVolatile)
4288       DAG.setRoot(Chain);
4289     else
4290       PendingLoads.push_back(Chain);
4291   }
4292 
4293   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
4294                            DAG.getVTList(ValueVTs), Values));
4295 }
4296 
4297 void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
4298   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4299          "call visitStoreToSwiftError when backend supports swifterror");
4300 
4301   SmallVector<EVT, 4> ValueVTs;
4302   SmallVector<uint64_t, 4> Offsets;
4303   const Value *SrcV = I.getOperand(0);
4304   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4305                   SrcV->getType(), ValueVTs, &Offsets, 0);
4306   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4307          "expect a single EVT for swifterror");
4308 
4309   SDValue Src = getValue(SrcV);
4310   // Create a virtual register, then update the virtual register.
4311   Register VReg =
4312       SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand());
4313   // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
4314   // Chain can be getRoot or getControlRoot.
4315   SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
4316                                       SDValue(Src.getNode(), Src.getResNo()));
4317   DAG.setRoot(CopyNode);
4318 }
4319 
4320 void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
4321   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4322          "call visitLoadFromSwiftError when backend supports swifterror");
4323 
4324   assert(!I.isVolatile() &&
4325          !I.hasMetadata(LLVMContext::MD_nontemporal) &&
4326          !I.hasMetadata(LLVMContext::MD_invariant_load) &&
4327          "No support for volatile, non-temporal, invariant load_from_swift_error");
4328 
4329   const Value *SV = I.getOperand(0);
4330   Type *Ty = I.getType();
4331   assert(
4332       (!AA ||
4333        !AA->pointsToConstantMemory(MemoryLocation(
4334            SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4335            I.getAAMetadata()))) &&
4336       "load_from_swift_error should not be constant memory");
4337 
4338   SmallVector<EVT, 4> ValueVTs;
4339   SmallVector<uint64_t, 4> Offsets;
4340   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
4341                   ValueVTs, &Offsets, 0);
4342   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4343          "expect a single EVT for swifterror");
4344 
4345   // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
4346   SDValue L = DAG.getCopyFromReg(
4347       getRoot(), getCurSDLoc(),
4348       SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]);
4349 
4350   setValue(&I, L);
4351 }
4352 
4353 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
4354   if (I.isAtomic())
4355     return visitAtomicStore(I);
4356 
4357   const Value *SrcV = I.getOperand(0);
4358   const Value *PtrV = I.getOperand(1);
4359 
4360   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4361   if (TLI.supportSwiftError()) {
4362     // Swifterror values can come from either a function parameter with
4363     // swifterror attribute or an alloca with swifterror attribute.
4364     if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4365       if (Arg->hasSwiftErrorAttr())
4366         return visitStoreToSwiftError(I);
4367     }
4368 
4369     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4370       if (Alloca->isSwiftError())
4371         return visitStoreToSwiftError(I);
4372     }
4373   }
4374 
4375   SmallVector<EVT, 4> ValueVTs, MemVTs;
4376   SmallVector<TypeSize, 4> Offsets;
4377   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4378                   SrcV->getType(), ValueVTs, &MemVTs, &Offsets, 0);
4379   unsigned NumValues = ValueVTs.size();
4380   if (NumValues == 0)
4381     return;
4382 
4383   // Get the lowered operands. Note that we do this after
4384   // checking if NumValues is zero, because with no result values
4385   // the operands won't have values in the map.
4386   SDValue Src = getValue(SrcV);
4387   SDValue Ptr = getValue(PtrV);
4388 
4389   SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot();
4390   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4391   SDLoc dl = getCurSDLoc();
4392   Align Alignment = I.getAlign();
4393   AAMDNodes AAInfo = I.getAAMetadata();
4394 
4395   auto MMOFlags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4396 
4397   unsigned ChainI = 0;
4398   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4399     // See visitLoad comments.
4400     if (ChainI == MaxParallelChains) {
4401       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4402                                   ArrayRef(Chains.data(), ChainI));
4403       Root = Chain;
4404       ChainI = 0;
4405     }
4406 
4407     // TODO: MachinePointerInfo only supports a fixed length offset.
4408     MachinePointerInfo PtrInfo =
4409         !Offsets[i].isScalable() || Offsets[i].isZero()
4410             ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue())
4411             : MachinePointerInfo();
4412 
4413     SDValue Add = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4414     SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i);
4415     if (MemVTs[i] != ValueVTs[i])
4416       Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4417     SDValue St =
4418         DAG.getStore(Root, dl, Val, Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4419     Chains[ChainI] = St;
4420   }
4421 
4422   SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4423                                   ArrayRef(Chains.data(), ChainI));
4424   setValue(&I, StoreNode);
4425   DAG.setRoot(StoreNode);
4426 }
4427 
4428 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
4429                                            bool IsCompressing) {
4430   SDLoc sdl = getCurSDLoc();
4431 
4432   auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4433                                MaybeAlign &Alignment) {
4434     // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
4435     Src0 = I.getArgOperand(0);
4436     Ptr = I.getArgOperand(1);
4437     Alignment = cast<ConstantInt>(I.getArgOperand(2))->getMaybeAlignValue();
4438     Mask = I.getArgOperand(3);
4439   };
4440   auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4441                                     MaybeAlign &Alignment) {
4442     // llvm.masked.compressstore.*(Src0, Ptr, Mask)
4443     Src0 = I.getArgOperand(0);
4444     Ptr = I.getArgOperand(1);
4445     Mask = I.getArgOperand(2);
4446     Alignment = std::nullopt;
4447   };
4448 
4449   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4450   MaybeAlign Alignment;
4451   if (IsCompressing)
4452     getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4453   else
4454     getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4455 
4456   SDValue Ptr = getValue(PtrOperand);
4457   SDValue Src0 = getValue(Src0Operand);
4458   SDValue Mask = getValue(MaskOperand);
4459   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4460 
4461   EVT VT = Src0.getValueType();
4462   if (!Alignment)
4463     Alignment = DAG.getEVTAlign(VT);
4464 
4465   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4466       MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
4467       MemoryLocation::UnknownSize, *Alignment, I.getAAMetadata());
4468   SDValue StoreNode =
4469       DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
4470                          ISD::UNINDEXED, false /* Truncating */, IsCompressing);
4471   DAG.setRoot(StoreNode);
4472   setValue(&I, StoreNode);
4473 }
4474 
4475 // Get a uniform base for the Gather/Scatter intrinsic.
4476 // The first argument of the Gather/Scatter intrinsic is a vector of pointers.
4477 // We try to represent it as a base pointer + vector of indices.
4478 // Usually, the vector of pointers comes from a 'getelementptr' instruction.
4479 // The first operand of the GEP may be a single pointer or a vector of pointers
4480 // Example:
4481 //   %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
4482 //  or
4483 //   %gep.ptr = getelementptr i32, i32* %ptr,        <8 x i32> %ind
4484 // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
4485 //
4486 // When the first GEP operand is a single pointer, it is the uniform base we
4487 // are looking for. If the first operand of the GEP is a splat vector, we
4488 // extract the splat value and use it as a uniform base.
4489 // In all other cases the function returns 'false'.
4490 static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index,
4491                            ISD::MemIndexType &IndexType, SDValue &Scale,
4492                            SelectionDAGBuilder *SDB, const BasicBlock *CurBB,
4493                            uint64_t ElemSize) {
4494   SelectionDAG& DAG = SDB->DAG;
4495   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4496   const DataLayout &DL = DAG.getDataLayout();
4497 
4498   assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
4499 
4500   // Handle splat constant pointer.
4501   if (auto *C = dyn_cast<Constant>(Ptr)) {
4502     C = C->getSplatValue();
4503     if (!C)
4504       return false;
4505 
4506     Base = SDB->getValue(C);
4507 
4508     ElementCount NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
4509     EVT VT = EVT::getVectorVT(*DAG.getContext(), TLI.getPointerTy(DL), NumElts);
4510     Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT);
4511     IndexType = ISD::SIGNED_SCALED;
4512     Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4513     return true;
4514   }
4515 
4516   const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4517   if (!GEP || GEP->getParent() != CurBB)
4518     return false;
4519 
4520   if (GEP->getNumOperands() != 2)
4521     return false;
4522 
4523   const Value *BasePtr = GEP->getPointerOperand();
4524   const Value *IndexVal = GEP->getOperand(GEP->getNumOperands() - 1);
4525 
4526   // Make sure the base is scalar and the index is a vector.
4527   if (BasePtr->getType()->isVectorTy() || !IndexVal->getType()->isVectorTy())
4528     return false;
4529 
4530   TypeSize ScaleVal = DL.getTypeAllocSize(GEP->getResultElementType());
4531   if (ScaleVal.isScalable())
4532     return false;
4533 
4534   // Target may not support the required addressing mode.
4535   if (ScaleVal != 1 &&
4536       !TLI.isLegalScaleForGatherScatter(ScaleVal.getFixedValue(), ElemSize))
4537     return false;
4538 
4539   Base = SDB->getValue(BasePtr);
4540   Index = SDB->getValue(IndexVal);
4541   IndexType = ISD::SIGNED_SCALED;
4542 
4543   Scale =
4544       DAG.getTargetConstant(ScaleVal, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4545   return true;
4546 }
4547 
4548 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
4549   SDLoc sdl = getCurSDLoc();
4550 
4551   // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
4552   const Value *Ptr = I.getArgOperand(1);
4553   SDValue Src0 = getValue(I.getArgOperand(0));
4554   SDValue Mask = getValue(I.getArgOperand(3));
4555   EVT VT = Src0.getValueType();
4556   Align Alignment = cast<ConstantInt>(I.getArgOperand(2))
4557                         ->getMaybeAlignValue()
4558                         .value_or(DAG.getEVTAlign(VT.getScalarType()));
4559   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4560 
4561   SDValue Base;
4562   SDValue Index;
4563   ISD::MemIndexType IndexType;
4564   SDValue Scale;
4565   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4566                                     I.getParent(), VT.getScalarStoreSize());
4567 
4568   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4569   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4570       MachinePointerInfo(AS), MachineMemOperand::MOStore,
4571       // TODO: Make MachineMemOperands aware of scalable
4572       // vectors.
4573       MemoryLocation::UnknownSize, Alignment, I.getAAMetadata());
4574   if (!UniformBase) {
4575     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4576     Index = getValue(Ptr);
4577     IndexType = ISD::SIGNED_SCALED;
4578     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4579   }
4580 
4581   EVT IdxVT = Index.getValueType();
4582   EVT EltTy = IdxVT.getVectorElementType();
4583   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
4584     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
4585     Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
4586   }
4587 
4588   SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
4589   SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
4590                                          Ops, MMO, IndexType, false);
4591   DAG.setRoot(Scatter);
4592   setValue(&I, Scatter);
4593 }
4594 
4595 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
4596   SDLoc sdl = getCurSDLoc();
4597 
4598   auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4599                               MaybeAlign &Alignment) {
4600     // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
4601     Ptr = I.getArgOperand(0);
4602     Alignment = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
4603     Mask = I.getArgOperand(2);
4604     Src0 = I.getArgOperand(3);
4605   };
4606   auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4607                                  MaybeAlign &Alignment) {
4608     // @llvm.masked.expandload.*(Ptr, Mask, Src0)
4609     Ptr = I.getArgOperand(0);
4610     Alignment = std::nullopt;
4611     Mask = I.getArgOperand(1);
4612     Src0 = I.getArgOperand(2);
4613   };
4614 
4615   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4616   MaybeAlign Alignment;
4617   if (IsExpanding)
4618     getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4619   else
4620     getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4621 
4622   SDValue Ptr = getValue(PtrOperand);
4623   SDValue Src0 = getValue(Src0Operand);
4624   SDValue Mask = getValue(MaskOperand);
4625   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4626 
4627   EVT VT = Src0.getValueType();
4628   if (!Alignment)
4629     Alignment = DAG.getEVTAlign(VT);
4630 
4631   AAMDNodes AAInfo = I.getAAMetadata();
4632   const MDNode *Ranges = getRangeMetadata(I);
4633 
4634   // Do not serialize masked loads of constant memory with anything.
4635   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
4636   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
4637 
4638   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
4639 
4640   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4641       MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
4642       MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
4643 
4644   SDValue Load =
4645       DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
4646                         ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding);
4647   if (AddToChain)
4648     PendingLoads.push_back(Load.getValue(1));
4649   setValue(&I, Load);
4650 }
4651 
4652 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
4653   SDLoc sdl = getCurSDLoc();
4654 
4655   // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
4656   const Value *Ptr = I.getArgOperand(0);
4657   SDValue Src0 = getValue(I.getArgOperand(3));
4658   SDValue Mask = getValue(I.getArgOperand(2));
4659 
4660   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4661   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4662   Align Alignment = cast<ConstantInt>(I.getArgOperand(1))
4663                         ->getMaybeAlignValue()
4664                         .value_or(DAG.getEVTAlign(VT.getScalarType()));
4665 
4666   const MDNode *Ranges = getRangeMetadata(I);
4667 
4668   SDValue Root = DAG.getRoot();
4669   SDValue Base;
4670   SDValue Index;
4671   ISD::MemIndexType IndexType;
4672   SDValue Scale;
4673   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4674                                     I.getParent(), VT.getScalarStoreSize());
4675   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4676   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4677       MachinePointerInfo(AS), MachineMemOperand::MOLoad,
4678       // TODO: Make MachineMemOperands aware of scalable
4679       // vectors.
4680       MemoryLocation::UnknownSize, Alignment, I.getAAMetadata(), Ranges);
4681 
4682   if (!UniformBase) {
4683     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4684     Index = getValue(Ptr);
4685     IndexType = ISD::SIGNED_SCALED;
4686     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4687   }
4688 
4689   EVT IdxVT = Index.getValueType();
4690   EVT EltTy = IdxVT.getVectorElementType();
4691   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
4692     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
4693     Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
4694   }
4695 
4696   SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
4697   SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
4698                                        Ops, MMO, IndexType, ISD::NON_EXTLOAD);
4699 
4700   PendingLoads.push_back(Gather.getValue(1));
4701   setValue(&I, Gather);
4702 }
4703 
4704 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
4705   SDLoc dl = getCurSDLoc();
4706   AtomicOrdering SuccessOrdering = I.getSuccessOrdering();
4707   AtomicOrdering FailureOrdering = I.getFailureOrdering();
4708   SyncScope::ID SSID = I.getSyncScopeID();
4709 
4710   SDValue InChain = getRoot();
4711 
4712   MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
4713   SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
4714 
4715   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4716   auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4717 
4718   MachineFunction &MF = DAG.getMachineFunction();
4719   MachineMemOperand *MMO = MF.getMachineMemOperand(
4720       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4721       DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, SuccessOrdering,
4722       FailureOrdering);
4723 
4724   SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
4725                                    dl, MemVT, VTs, InChain,
4726                                    getValue(I.getPointerOperand()),
4727                                    getValue(I.getCompareOperand()),
4728                                    getValue(I.getNewValOperand()), MMO);
4729 
4730   SDValue OutChain = L.getValue(2);
4731 
4732   setValue(&I, L);
4733   DAG.setRoot(OutChain);
4734 }
4735 
4736 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
4737   SDLoc dl = getCurSDLoc();
4738   ISD::NodeType NT;
4739   switch (I.getOperation()) {
4740   default: llvm_unreachable("Unknown atomicrmw operation");
4741   case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
4742   case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
4743   case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
4744   case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
4745   case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
4746   case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
4747   case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
4748   case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
4749   case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
4750   case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
4751   case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
4752   case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
4753   case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
4754   case AtomicRMWInst::FMax: NT = ISD::ATOMIC_LOAD_FMAX; break;
4755   case AtomicRMWInst::FMin: NT = ISD::ATOMIC_LOAD_FMIN; break;
4756   case AtomicRMWInst::UIncWrap:
4757     NT = ISD::ATOMIC_LOAD_UINC_WRAP;
4758     break;
4759   case AtomicRMWInst::UDecWrap:
4760     NT = ISD::ATOMIC_LOAD_UDEC_WRAP;
4761     break;
4762   }
4763   AtomicOrdering Ordering = I.getOrdering();
4764   SyncScope::ID SSID = I.getSyncScopeID();
4765 
4766   SDValue InChain = getRoot();
4767 
4768   auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
4769   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4770   auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4771 
4772   MachineFunction &MF = DAG.getMachineFunction();
4773   MachineMemOperand *MMO = MF.getMachineMemOperand(
4774       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4775       DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, Ordering);
4776 
4777   SDValue L =
4778     DAG.getAtomic(NT, dl, MemVT, InChain,
4779                   getValue(I.getPointerOperand()), getValue(I.getValOperand()),
4780                   MMO);
4781 
4782   SDValue OutChain = L.getValue(1);
4783 
4784   setValue(&I, L);
4785   DAG.setRoot(OutChain);
4786 }
4787 
4788 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
4789   SDLoc dl = getCurSDLoc();
4790   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4791   SDValue Ops[3];
4792   Ops[0] = getRoot();
4793   Ops[1] = DAG.getTargetConstant((unsigned)I.getOrdering(), dl,
4794                                  TLI.getFenceOperandTy(DAG.getDataLayout()));
4795   Ops[2] = DAG.getTargetConstant(I.getSyncScopeID(), dl,
4796                                  TLI.getFenceOperandTy(DAG.getDataLayout()));
4797   SDValue N = DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops);
4798   setValue(&I, N);
4799   DAG.setRoot(N);
4800 }
4801 
4802 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
4803   SDLoc dl = getCurSDLoc();
4804   AtomicOrdering Order = I.getOrdering();
4805   SyncScope::ID SSID = I.getSyncScopeID();
4806 
4807   SDValue InChain = getRoot();
4808 
4809   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4810   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4811   EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
4812 
4813   if (!TLI.supportsUnalignedAtomics() &&
4814       I.getAlign().value() < MemVT.getSizeInBits() / 8)
4815     report_fatal_error("Cannot generate unaligned atomic load");
4816 
4817   auto Flags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
4818 
4819   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4820       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4821       I.getAlign(), AAMDNodes(), nullptr, SSID, Order);
4822 
4823   InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
4824 
4825   SDValue Ptr = getValue(I.getPointerOperand());
4826 
4827   if (TLI.lowerAtomicLoadAsLoadSDNode(I)) {
4828     // TODO: Once this is better exercised by tests, it should be merged with
4829     // the normal path for loads to prevent future divergence.
4830     SDValue L = DAG.getLoad(MemVT, dl, InChain, Ptr, MMO);
4831     if (MemVT != VT)
4832       L = DAG.getPtrExtOrTrunc(L, dl, VT);
4833 
4834     setValue(&I, L);
4835     SDValue OutChain = L.getValue(1);
4836     if (!I.isUnordered())
4837       DAG.setRoot(OutChain);
4838     else
4839       PendingLoads.push_back(OutChain);
4840     return;
4841   }
4842 
4843   SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, MemVT, InChain,
4844                             Ptr, MMO);
4845 
4846   SDValue OutChain = L.getValue(1);
4847   if (MemVT != VT)
4848     L = DAG.getPtrExtOrTrunc(L, dl, VT);
4849 
4850   setValue(&I, L);
4851   DAG.setRoot(OutChain);
4852 }
4853 
4854 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
4855   SDLoc dl = getCurSDLoc();
4856 
4857   AtomicOrdering Ordering = I.getOrdering();
4858   SyncScope::ID SSID = I.getSyncScopeID();
4859 
4860   SDValue InChain = getRoot();
4861 
4862   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4863   EVT MemVT =
4864       TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
4865 
4866   if (!TLI.supportsUnalignedAtomics() &&
4867       I.getAlign().value() < MemVT.getSizeInBits() / 8)
4868     report_fatal_error("Cannot generate unaligned atomic store");
4869 
4870   auto Flags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4871 
4872   MachineFunction &MF = DAG.getMachineFunction();
4873   MachineMemOperand *MMO = MF.getMachineMemOperand(
4874       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4875       I.getAlign(), AAMDNodes(), nullptr, SSID, Ordering);
4876 
4877   SDValue Val = getValue(I.getValueOperand());
4878   if (Val.getValueType() != MemVT)
4879     Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT);
4880   SDValue Ptr = getValue(I.getPointerOperand());
4881 
4882   if (TLI.lowerAtomicStoreAsStoreSDNode(I)) {
4883     // TODO: Once this is better exercised by tests, it should be merged with
4884     // the normal path for stores to prevent future divergence.
4885     SDValue S = DAG.getStore(InChain, dl, Val, Ptr, MMO);
4886     setValue(&I, S);
4887     DAG.setRoot(S);
4888     return;
4889   }
4890   SDValue OutChain =
4891       DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain, Val, Ptr, MMO);
4892 
4893   setValue(&I, OutChain);
4894   DAG.setRoot(OutChain);
4895 }
4896 
4897 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
4898 /// node.
4899 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
4900                                                unsigned Intrinsic) {
4901   // Ignore the callsite's attributes. A specific call site may be marked with
4902   // readnone, but the lowering code will expect the chain based on the
4903   // definition.
4904   const Function *F = I.getCalledFunction();
4905   bool HasChain = !F->doesNotAccessMemory();
4906   bool OnlyLoad = HasChain && F->onlyReadsMemory();
4907 
4908   // Build the operand list.
4909   SmallVector<SDValue, 8> Ops;
4910   if (HasChain) {  // If this intrinsic has side-effects, chainify it.
4911     if (OnlyLoad) {
4912       // We don't need to serialize loads against other loads.
4913       Ops.push_back(DAG.getRoot());
4914     } else {
4915       Ops.push_back(getRoot());
4916     }
4917   }
4918 
4919   // Info is set by getTgtMemIntrinsic
4920   TargetLowering::IntrinsicInfo Info;
4921   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4922   bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
4923                                                DAG.getMachineFunction(),
4924                                                Intrinsic);
4925 
4926   // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
4927   if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
4928       Info.opc == ISD::INTRINSIC_W_CHAIN)
4929     Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
4930                                         TLI.getPointerTy(DAG.getDataLayout())));
4931 
4932   // Add all operands of the call to the operand list.
4933   for (unsigned i = 0, e = I.arg_size(); i != e; ++i) {
4934     const Value *Arg = I.getArgOperand(i);
4935     if (!I.paramHasAttr(i, Attribute::ImmArg)) {
4936       Ops.push_back(getValue(Arg));
4937       continue;
4938     }
4939 
4940     // Use TargetConstant instead of a regular constant for immarg.
4941     EVT VT = TLI.getValueType(DAG.getDataLayout(), Arg->getType(), true);
4942     if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
4943       assert(CI->getBitWidth() <= 64 &&
4944              "large intrinsic immediates not handled");
4945       Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT));
4946     } else {
4947       Ops.push_back(
4948           DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT));
4949     }
4950   }
4951 
4952   SmallVector<EVT, 4> ValueVTs;
4953   ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
4954 
4955   if (HasChain)
4956     ValueVTs.push_back(MVT::Other);
4957 
4958   SDVTList VTs = DAG.getVTList(ValueVTs);
4959 
4960   // Propagate fast-math-flags from IR to node(s).
4961   SDNodeFlags Flags;
4962   if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
4963     Flags.copyFMF(*FPMO);
4964   SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
4965 
4966   // Create the node.
4967   SDValue Result;
4968   // In some cases, custom collection of operands from CallInst I may be needed.
4969   TLI.CollectTargetIntrinsicOperands(I, Ops, DAG);
4970   if (IsTgtIntrinsic) {
4971     // This is a target intrinsic that touches memory
4972     //
4973     // TODO: We currently just fallback to address space 0 if getTgtMemIntrinsic
4974     // TODO: We currently just fall back to address space 0 if getTgtMemIntrinsic
4975     MachinePointerInfo MPI;
4976     if (Info.ptrVal)
4977       MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
4978     else if (Info.fallbackAddressSpace)
4979       MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
4980     Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs, Ops,
4981                                      Info.memVT, MPI, Info.align, Info.flags,
4982                                      Info.size, I.getAAMetadata());
4983   } else if (!HasChain) {
4984     Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
4985   } else if (!I.getType()->isVoidTy()) {
4986     Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
4987   } else {
4988     Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
4989   }
4990 
4991   if (HasChain) {
4992     SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
4993     if (OnlyLoad)
4994       PendingLoads.push_back(Chain);
4995     else
4996       DAG.setRoot(Chain);
4997   }
4998 
4999   if (!I.getType()->isVoidTy()) {
5000     if (!isa<VectorType>(I.getType()))
5001       Result = lowerRangeToAssertZExt(DAG, I, Result);
5002 
5003     MaybeAlign Alignment = I.getRetAlign();
5004 
5005     // Insert `assertalign` node if there's an alignment.
5006     if (InsertAssertAlign && Alignment) {
5007       Result =
5008           DAG.getAssertAlign(getCurSDLoc(), Result, Alignment.valueOrOne());
5009     }
5010 
5011     setValue(&I, Result);
5012   }
5013 }
5014 
5015 /// GetSignificand - Get the significand and build it into a floating-point
5016 /// number with exponent of 1:
5017 ///
5018 ///   Op = (Op & 0x007fffff) | 0x3f800000;
5019 ///
5020 /// where Op is the integer bit representation of the floating-point value.
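     ///
     /// For example, for Op = 0x40490FDB (~3.14159f) the result bit pattern is
     /// 0x3FC90FDB, i.e. ~1.5708f: the significand scaled into [1, 2).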
5021 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
5022   SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
5023                            DAG.getConstant(0x007fffff, dl, MVT::i32));
5024   SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
5025                            DAG.getConstant(0x3f800000, dl, MVT::i32));
5026   return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
5027 }
5028 
5029 /// GetExponent - Get the exponent:
5030 ///
5031 ///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
5032 ///
5033 /// where Op is the hexadecimal representation of floating point value.
5034 /// where Op is the integer bit representation of the floating-point value.
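     ///
     /// For example, for Op = 0x40490FDB (~3.14159f) this computes
     /// ((0x40490FDB & 0x7f800000) >> 23) - 127 = 128 - 127 = 1, giving 1.0f.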
5035                            const TargetLowering &TLI, const SDLoc &dl) {
5036   SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
5037                            DAG.getConstant(0x7f800000, dl, MVT::i32));
5038   SDValue t1 = DAG.getNode(
5039       ISD::SRL, dl, MVT::i32, t0,
5040       DAG.getConstant(23, dl,
5041                       TLI.getShiftAmountTy(MVT::i32, DAG.getDataLayout())));
5042   SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
5043                            DAG.getConstant(127, dl, MVT::i32));
5044   return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
5045 }
5046 
5047 /// getF32Constant - Get 32-bit floating point constant.
5048 static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
5049                               const SDLoc &dl) {
5050   return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
5051                            MVT::f32);
5052 }
5053 
5054 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
5055                                        SelectionDAG &DAG) {
5056   // TODO: What fast-math-flags should be set on the floating-point nodes?
5057 
5058   //   IntegerPartOfX = (int32_t)t0;
5059   SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
5060 
5061   //   FractionalPartOfX = t0 - (float)IntegerPartOfX;
5062   SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
5063   SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
5064 
5065   //   IntegerPartOfX <<= 23;
5066   IntegerPartOfX =
5067       DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
5068                   DAG.getConstant(23, dl,
5069                                   DAG.getTargetLoweringInfo().getShiftAmountTy(
5070                                       MVT::i32, DAG.getDataLayout())));
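       // Placing IntegerPartOfX in the exponent field means the final integer
       // add below multiplies TwoToFractionalPartOfX (a float in [1, 2)) by
       // 2^IntegerPartOfX. For example, adding 3 << 23 to 0x3FB504F3 (~1.41421,
       // i.e. 2^0.5) yields 0x413504F3 (~11.3137, i.e. 2^3.5).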
5071 
5072   SDValue TwoToFractionalPartOfX;
5073   if (LimitFloatPrecision <= 6) {
5074     // For floating-point precision of 6:
5075     //
5076     //   TwoToFractionalPartOfX =
5077     //     0.997535578f +
5078     //       (0.735607626f + 0.252464424f * x) * x;
5079     //
5080     // error 0.0144103317, which is 6 bits
5081     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5082                              getF32Constant(DAG, 0x3e814304, dl));
5083     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5084                              getF32Constant(DAG, 0x3f3c50c8, dl));
5085     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5086     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5087                                          getF32Constant(DAG, 0x3f7f5e7e, dl));
5088   } else if (LimitFloatPrecision <= 12) {
5089     // For floating-point precision of 12:
5090     //
5091     //   TwoToFractionalPartOfX =
5092     //     0.999892986f +
5093     //       (0.696457318f +
5094     //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
5095     //
5096     // error 0.000107046256, which is 13 to 14 bits
5097     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5098                              getF32Constant(DAG, 0x3da235e3, dl));
5099     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5100                              getF32Constant(DAG, 0x3e65b8f3, dl));
5101     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5102     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5103                              getF32Constant(DAG, 0x3f324b07, dl));
5104     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5105     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5106                                          getF32Constant(DAG, 0x3f7ff8fd, dl));
5107   } else { // LimitFloatPrecision <= 18
5108     // For floating-point precision of 18:
5109     //
5110     //   TwoToFractionalPartOfX =
5111     //     0.999999982f +
5112     //       (0.693148872f +
5113     //         (0.240227044f +
5114     //           (0.554906021e-1f +
5115     //             (0.961591928e-2f +
5116     //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
5117     // error 2.47208000*10^(-7), which is better than 18 bits
5118     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5119                              getF32Constant(DAG, 0x3924b03e, dl));
5120     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5121                              getF32Constant(DAG, 0x3ab24b87, dl));
5122     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5123     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5124                              getF32Constant(DAG, 0x3c1d8c17, dl));
5125     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5126     SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5127                              getF32Constant(DAG, 0x3d634a1d, dl));
5128     SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5129     SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5130                              getF32Constant(DAG, 0x3e75fe14, dl));
5131     SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5132     SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
5133                               getF32Constant(DAG, 0x3f317234, dl));
5134     SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
5135     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
5136                                          getF32Constant(DAG, 0x3f800000, dl));
5137   }
5138 
5139   // Add the exponent into the result in integer domain.
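  // Since IntegerPartOfX has already been shifted into the exponent field
  // (bit 23 and up), this integer add increments the IEEE-754 exponent of
  // TwoToFractionalPartOfX by IntegerPartOfX, i.e. multiplies it by
  // 2^IntegerPartOfX (assuming the exponent field does not overflow).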
5140   SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
5141   return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
5142                      DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
5143 }
5144 
5145 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
5146 /// limited-precision mode.
5147 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5148                          const TargetLowering &TLI, SDNodeFlags Flags) {
5149   if (Op.getValueType() == MVT::f32 &&
5150       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5151 
5152     // Put the exponent in the right bit position for later addition to the
5153     // final result:
5154     //
5155     // t0 = Op * log2(e)
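    //
    // (This uses the identity e^Op == 2^(Op * log2(e)); the 2^t0 part is then
    // produced by getLimitedPrecisionExp2.)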
5156 
5157     // TODO: What fast-math-flags should be set here?
5158     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
5159                              DAG.getConstantFP(numbers::log2ef, dl, MVT::f32));
5160     return getLimitedPrecisionExp2(t0, dl, DAG);
5161   }
5162 
5163   // No special expansion.
5164   return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op, Flags);
5165 }
5166 
5167 /// expandLog - Lower a log intrinsic. Handles the special sequences for
5168 /// limited-precision mode.
5169 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5170                          const TargetLowering &TLI, SDNodeFlags Flags) {
5171   // TODO: What fast-math-flags should be set on the floating-point nodes?
5172 
5173   if (Op.getValueType() == MVT::f32 &&
5174       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
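    // Writing Op as M * 2^E with M in [1,2), ln(Op) = E*ln(2) + ln(M); the two
    // terms are computed below as LogOfExponent and LogOfMantissa.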
5175     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5176 
5177     // Scale the exponent by log(2).
5178     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5179     SDValue LogOfExponent =
5180         DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5181                     DAG.getConstantFP(numbers::ln2f, dl, MVT::f32));
5182 
5183     // Get the significand and build it into a floating-point number with
5184     // exponent of 1.
5185     SDValue X = GetSignificand(DAG, Op1, dl);
5186 
5187     SDValue LogOfMantissa;
5188     if (LimitFloatPrecision <= 6) {
5189       // For floating-point precision of 6:
5190       //
5191       //   LogOfMantissa =
5192       //     -1.1609546f +
5193       //       (1.4034025f - 0.23903021f * x) * x;
5194       //
5195       // error 0.0034276066, which is better than 8 bits
5196       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5197                                getF32Constant(DAG, 0xbe74c456, dl));
5198       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5199                                getF32Constant(DAG, 0x3fb3a2b1, dl));
5200       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5201       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5202                                   getF32Constant(DAG, 0x3f949a29, dl));
5203     } else if (LimitFloatPrecision <= 12) {
5204       // For floating-point precision of 12:
5205       //
5206       //   LogOfMantissa =
5207       //     -1.7417939f +
5208       //       (2.8212026f +
5209       //         (-1.4699568f +
5210       //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
5211       //
5212       // error 0.000061011436, which is 14 bits
5213       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5214                                getF32Constant(DAG, 0xbd67b6d6, dl));
5215       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5216                                getF32Constant(DAG, 0x3ee4f4b8, dl));
5217       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5218       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5219                                getF32Constant(DAG, 0x3fbc278b, dl));
5220       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5221       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5222                                getF32Constant(DAG, 0x40348e95, dl));
5223       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5224       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5225                                   getF32Constant(DAG, 0x3fdef31a, dl));
5226     } else { // LimitFloatPrecision <= 18
5227       // For floating-point precision of 18:
5228       //
5229       //   LogOfMantissa =
5230       //     -2.1072184f +
5231       //       (4.2372794f +
5232       //         (-3.7029485f +
5233       //           (2.2781945f +
5234       //             (-0.87823314f +
5235       //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
5236       //
5237       // error 0.0000023660568, which is better than 18 bits
5238       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5239                                getF32Constant(DAG, 0xbc91e5ac, dl));
5240       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5241                                getF32Constant(DAG, 0x3e4350aa, dl));
5242       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5243       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5244                                getF32Constant(DAG, 0x3f60d3e3, dl));
5245       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5246       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5247                                getF32Constant(DAG, 0x4011cdf0, dl));
5248       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5249       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5250                                getF32Constant(DAG, 0x406cfd1c, dl));
5251       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5252       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5253                                getF32Constant(DAG, 0x408797cb, dl));
5254       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5255       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5256                                   getF32Constant(DAG, 0x4006dcab, dl));
5257     }
5258 
5259     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
5260   }
5261 
5262   // No special expansion.
5263   return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op, Flags);
5264 }
5265 
5266 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
5267 /// limited-precision mode.
5268 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5269                           const TargetLowering &TLI, SDNodeFlags Flags) {
5270   // TODO: What fast-math-flags should be set on the floating-point nodes?
5271 
5272   if (Op.getValueType() == MVT::f32 &&
5273       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
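    // Writing Op as M * 2^E with M in [1,2), log2(Op) = E + log2(M); E is
    // LogOfExponent below and log2(M) is approximated by Log2ofMantissa.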
5274     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5275 
5276     // Get the exponent.
5277     SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
5278 
5279     // Get the significand and build it into a floating-point number with
5280     // exponent of 1.
5281     SDValue X = GetSignificand(DAG, Op1, dl);
5282 
5283     // Different possible minimax approximations of significand in
5284     // floating-point for various degrees of accuracy over [1,2].
5285     SDValue Log2ofMantissa;
5286     if (LimitFloatPrecision <= 6) {
5287       // For floating-point precision of 6:
5288       //
5289       //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
5290       //
5291       // error 0.0049451742, which is more than 7 bits
5292       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5293                                getF32Constant(DAG, 0xbeb08fe0, dl));
5294       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5295                                getF32Constant(DAG, 0x40019463, dl));
5296       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5297       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5298                                    getF32Constant(DAG, 0x3fd6633d, dl));
5299     } else if (LimitFloatPrecision <= 12) {
5300       // For floating-point precision of 12:
5301       //
5302       //   Log2ofMantissa =
5303       //     -2.51285454f +
5304       //       (4.07009056f +
5305       //         (-2.12067489f +
5306       //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
5307       //
5308       // error 0.0000876136000, which is better than 13 bits
5309       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5310                                getF32Constant(DAG, 0xbda7262e, dl));
5311       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5312                                getF32Constant(DAG, 0x3f25280b, dl));
5313       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5314       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5315                                getF32Constant(DAG, 0x4007b923, dl));
5316       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5317       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5318                                getF32Constant(DAG, 0x40823e2f, dl));
5319       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5320       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5321                                    getF32Constant(DAG, 0x4020d29c, dl));
5322     } else { // LimitFloatPrecision <= 18
5323       // For floating-point precision of 18:
5324       //
5325       //   Log2ofMantissa =
5326       //     -3.0400495f +
5327       //       (6.1129976f +
5328       //         (-5.3420409f +
5329       //           (3.2865683f +
5330       //             (-1.2669343f +
5331       //               (0.27515199f -
5332       //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
5333       //
5334       // error 0.0000018516, which is better than 18 bits
5335       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5336                                getF32Constant(DAG, 0xbcd2769e, dl));
5337       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5338                                getF32Constant(DAG, 0x3e8ce0b9, dl));
5339       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5340       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5341                                getF32Constant(DAG, 0x3fa22ae7, dl));
5342       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5343       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5344                                getF32Constant(DAG, 0x40525723, dl));
5345       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5346       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5347                                getF32Constant(DAG, 0x40aaf200, dl));
5348       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5349       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5350                                getF32Constant(DAG, 0x40c39dad, dl));
5351       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5352       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5353                                    getF32Constant(DAG, 0x4042902c, dl));
5354     }
5355 
5356     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5357   }
5358 
5359   // No special expansion.
5360   return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op, Flags);
5361 }
5362 
5363 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
5364 /// limited-precision mode.
5365 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5366                            const TargetLowering &TLI, SDNodeFlags Flags) {
5367   // TODO: What fast-math-flags should be set on the floating-point nodes?
5368 
5369   if (Op.getValueType() == MVT::f32 &&
5370       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
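    // Writing Op as M * 2^E with M in [1,2), log10(Op) = E*log10(2) + log10(M),
    // computed below as LogOfExponent + Log10ofMantissa.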
5371     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5372 
5373     // Scale the exponent by log10(2) [0.30102999f].
5374     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5375     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5376                                         getF32Constant(DAG, 0x3e9a209a, dl));
5377 
5378     // Get the significand and build it into a floating-point number with
5379     // exponent of 1.
5380     SDValue X = GetSignificand(DAG, Op1, dl);
5381 
5382     SDValue Log10ofMantissa;
5383     if (LimitFloatPrecision <= 6) {
5384       // For floating-point precision of 6:
5385       //
5386       //   Log10ofMantissa =
5387       //     -0.50419619f +
5388       //       (0.60948995f - 0.10380950f * x) * x;
5389       //
5390       // error 0.0014886165, which is 6 bits
5391       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5392                                getF32Constant(DAG, 0xbdd49a13, dl));
5393       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5394                                getF32Constant(DAG, 0x3f1c0789, dl));
5395       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5396       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5397                                     getF32Constant(DAG, 0x3f011300, dl));
5398     } else if (LimitFloatPrecision <= 12) {
5399       // For floating-point precision of 12:
5400       //
5401       //   Log10ofMantissa =
5402       //     -0.64831180f +
5403       //       (0.91751397f +
5404       //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
5405       //
5406       // error 0.00019228036, which is better than 12 bits
5407       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5408                                getF32Constant(DAG, 0x3d431f31, dl));
5409       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5410                                getF32Constant(DAG, 0x3ea21fb2, dl));
5411       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5412       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5413                                getF32Constant(DAG, 0x3f6ae232, dl));
5414       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5415       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5416                                     getF32Constant(DAG, 0x3f25f7c3, dl));
5417     } else { // LimitFloatPrecision <= 18
5418       // For floating-point precision of 18:
5419       //
5420       //   Log10ofMantissa =
5421       //     -0.84299375f +
5422       //       (1.5327582f +
5423       //         (-1.0688956f +
5424       //           (0.49102474f +
5425       //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
5426       //
5427       // error 0.0000037995730, which is better than 18 bits
5428       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5429                                getF32Constant(DAG, 0x3c5d51ce, dl));
5430       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5431                                getF32Constant(DAG, 0x3e00685a, dl));
5432       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5433       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5434                                getF32Constant(DAG, 0x3efb6798, dl));
5435       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5436       SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5437                                getF32Constant(DAG, 0x3f88d192, dl));
5438       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5439       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5440                                getF32Constant(DAG, 0x3fc4316c, dl));
5441       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5442       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
5443                                     getF32Constant(DAG, 0x3f57ce70, dl));
5444     }
5445 
5446     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5447   }
5448 
5449   // No special expansion.
5450   return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op, Flags);
5451 }
5452 
5453 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
5454 /// limited-precision mode.
5455 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5456                           const TargetLowering &TLI, SDNodeFlags Flags) {
5457   if (Op.getValueType() == MVT::f32 &&
5458       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
5459     return getLimitedPrecisionExp2(Op, dl, DAG);
5460 
5461   // No special expansion.
5462   return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op, Flags);
5463 }
5464 
5465 /// expandPow - Lower a pow intrinsic. Handles the special sequences for
5466 /// limited-precision mode when the base is exactly 10.0f.
5467 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
5468                          SelectionDAG &DAG, const TargetLowering &TLI,
5469                          SDNodeFlags Flags) {
5470   bool IsExp10 = false;
5471   if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
5472       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5473     if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
5474       APFloat Ten(10.0f);
5475       IsExp10 = LHSC->isExactlyValue(Ten);
5476     }
5477   }
5478 
5479   // TODO: What fast-math-flags should be set on the FMUL node?
5480   if (IsExp10) {
5481     // Put the exponent in the right bit position for later addition to the
5482     // final result:
5483     //
5484     //   #define LOG2OF10 3.3219281f
5485     //   t0 = Op * LOG2OF10;
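    //
    // (This uses the identity 10.0^RHS == 2^(RHS * log2(10));
    // getLimitedPrecisionExp2 then evaluates the 2^t0 part.)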
5486     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
5487                              getF32Constant(DAG, 0x40549a78, dl));
5488     return getLimitedPrecisionExp2(t0, dl, DAG);
5489   }
5490 
5491   // No special expansion.
5492   return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS, Flags);
5493 }
5494 
5495 /// ExpandPowI - Expand a llvm.powi intrinsic.
5496 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
5497                           SelectionDAG &DAG) {
5498   // If RHS is a constant, we can expand this out to a multiplication tree if
5499   // it's beneficial on the target, otherwise we end up lowering to a call to
5500   // __powidf2 (for example).
5501   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5502     unsigned Val = RHSC->getSExtValue();
5503 
5504     // powi(x, 0) -> 1.0
5505     if (Val == 0)
5506       return DAG.getConstantFP(1.0, DL, LHS.getValueType());
5507 
5508     if (DAG.getTargetLoweringInfo().isBeneficialToExpandPowI(
5509             Val, DAG.shouldOptForSize())) {
5510       // Get the exponent as a positive value.
5511       if ((int)Val < 0)
5512         Val = -Val;
5513       // We use the simple binary decomposition method to generate the multiply
5514       // sequence.  There are more optimal ways to do this (for example,
5515       // powi(x,15) generates one more multiply than it should), but this has
5516       // the benefit of being both really simple and much better than a libcall.
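      // For example, powi(x, 13): 13 is 0b1101, so the loop forms x^2, x^4 and
      // x^8 by repeated squaring and multiplies together x, x^4 and x^8 to
      // give x^13.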
5517       SDValue Res; // Logically starts equal to 1.0
5518       SDValue CurSquare = LHS;
5519       // TODO: Intrinsics should have fast-math-flags that propagate to these
5520       // nodes.
5521       while (Val) {
5522         if (Val & 1) {
5523           if (Res.getNode())
5524             Res =
5525                 DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
5526           else
5527             Res = CurSquare; // 1.0*CurSquare.
5528         }
5529 
5530         CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
5531                                 CurSquare, CurSquare);
5532         Val >>= 1;
5533       }
5534 
5535       // If the original was negative, invert the result, producing 1/(x*x*x).
5536       if (RHSC->getSExtValue() < 0)
5537         Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
5538                           DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
5539       return Res;
5540     }
5541   }
5542 
5543   // Otherwise, expand to a libcall.
5544   return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
5545 }
5546 
5547 static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
5548                             SDValue LHS, SDValue RHS, SDValue Scale,
5549                             SelectionDAG &DAG, const TargetLowering &TLI) {
5550   EVT VT = LHS.getValueType();
5551   bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
5552   bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
5553   LLVMContext &Ctx = *DAG.getContext();
5554 
5555   // If the type is legal but the operation isn't, this node might survive all
5556   // the way to operation legalization. If we end up there and we do not have
5557   // the ability to widen the type (if VT*2 is not legal), we cannot expand the
5558   // node.
5559 
5560   // Coax the legalizer into expanding the node during type legalization instead
5561   // by bumping the size by one bit. This will force it to Promote, enabling the
5562   // early expansion and avoiding the need to expand later.
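  // For example, an i32 node in this situation is rebuilt as i33, which the
  // type legalizer then promotes and expands before operation legalization is
  // ever reached.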
5563 
5564   // We don't have to do this if Scale is 0; that can always be expanded, unless
5565   // it's a saturating signed operation. Those can experience true integer
5566   // division overflow, a case which we must avoid.
5567 
5568   // FIXME: We wouldn't have to do this (or any of the early
5569   // expansion/promotion) if it was possible to expand a libcall of an
5570   // illegal type during operation legalization. But it's not, so things
5571   // get a bit hacky.
5572   unsigned ScaleInt = cast<ConstantSDNode>(Scale)->getZExtValue();
5573   if ((ScaleInt > 0 || (Saturating && Signed)) &&
5574       (TLI.isTypeLegal(VT) ||
5575        (VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
5576     TargetLowering::LegalizeAction Action = TLI.getFixedPointOperationAction(
5577         Opcode, VT, ScaleInt);
5578     if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) {
5579       EVT PromVT;
5580       if (VT.isScalarInteger())
5581         PromVT = EVT::getIntegerVT(Ctx, VT.getSizeInBits() + 1);
5582       else if (VT.isVector()) {
5583         PromVT = VT.getVectorElementType();
5584         PromVT = EVT::getIntegerVT(Ctx, PromVT.getSizeInBits() + 1);
5585         PromVT = EVT::getVectorVT(Ctx, PromVT, VT.getVectorElementCount());
5586       } else
5587         llvm_unreachable("Wrong VT for DIVFIX?");
5588       LHS = DAG.getExtOrTrunc(Signed, LHS, DL, PromVT);
5589       RHS = DAG.getExtOrTrunc(Signed, RHS, DL, PromVT);
5590       EVT ShiftTy = TLI.getShiftAmountTy(PromVT, DAG.getDataLayout());
5591       // For saturating operations, we need to shift up the LHS to get the
5592       // proper saturation width, and then shift down again afterwards.
5593       if (Saturating)
5594         LHS = DAG.getNode(ISD::SHL, DL, PromVT, LHS,
5595                           DAG.getConstant(1, DL, ShiftTy));
5596       SDValue Res = DAG.getNode(Opcode, DL, PromVT, LHS, RHS, Scale);
5597       if (Saturating)
5598         Res = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, PromVT, Res,
5599                           DAG.getConstant(1, DL, ShiftTy));
5600       return DAG.getZExtOrTrunc(Res, DL, VT);
5601     }
5602   }
5603 
5604   return DAG.getNode(Opcode, DL, VT, LHS, RHS, Scale);
5605 }
5606 
5607 // getUnderlyingArgRegs - Find underlying registers used for a truncated,
5608 // bitcasted, or split argument. Returns a list of <Register, size in bits>
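// For example, an i64 argument passed in two i32 registers and reassembled
// with a BUILD_PAIR of CopyFromReg nodes yields both registers, each with a
// 32-bit size.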
5609 static void
5610 getUnderlyingArgRegs(SmallVectorImpl<std::pair<unsigned, TypeSize>> &Regs,
5611                      const SDValue &N) {
5612   switch (N.getOpcode()) {
5613   case ISD::CopyFromReg: {
5614     SDValue Op = N.getOperand(1);
5615     Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
5616                       Op.getValueType().getSizeInBits());
5617     return;
5618   }
5619   case ISD::BITCAST:
5620   case ISD::AssertZext:
5621   case ISD::AssertSext:
5622   case ISD::TRUNCATE:
5623     getUnderlyingArgRegs(Regs, N.getOperand(0));
5624     return;
5625   case ISD::BUILD_PAIR:
5626   case ISD::BUILD_VECTOR:
5627   case ISD::CONCAT_VECTORS:
5628     for (SDValue Op : N->op_values())
5629       getUnderlyingArgRegs(Regs, Op);
5630     return;
5631   default:
5632     return;
5633   }
5634 }
5635 
5636 /// If the DbgValueInst is a dbg_value of a function argument, create the
5637 /// corresponding DBG_VALUE machine instruction for it now.  At the end of
5638 /// instruction selection, they will be inserted to the entry BB.
5639 /// We don't currently support this for variadic dbg_values, as they shouldn't
5640 /// appear for function arguments or in the prologue.
5641 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
5642     const Value *V, DILocalVariable *Variable, DIExpression *Expr,
5643     DILocation *DL, FuncArgumentDbgValueKind Kind, const SDValue &N) {
5644   const Argument *Arg = dyn_cast<Argument>(V);
5645   if (!Arg)
5646     return false;
5647 
5648   MachineFunction &MF = DAG.getMachineFunction();
5649   const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
5650 
5651   // Helper to create DBG_INSTR_REFs or DBG_VALUEs, depending on what kind
5652   // we've been asked to pursue.
5653   auto MakeVRegDbgValue = [&](Register Reg, DIExpression *FragExpr,
5654                               bool Indirect) {
5655     if (Reg.isVirtual() && MF.useDebugInstrRef()) {
5656       // For VRegs, in instruction referencing mode, create a DBG_INSTR_REF
5657       // pointing at the VReg, which will be patched up later.
5658       auto &Inst = TII->get(TargetOpcode::DBG_INSTR_REF);
5659       SmallVector<MachineOperand, 1> MOs({MachineOperand::CreateReg(
5660           /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
5661           /* isKill */ false, /* isDead */ false,
5662           /* isUndef */ false, /* isEarlyClobber */ false,
5663           /* SubReg */ 0, /* isDebug */ true)});
5664 
5665       auto *NewDIExpr = FragExpr;
5666       // We don't have an "Indirect" field in DBG_INSTR_REF, fold that into
5667       // the DIExpression.
5668       if (Indirect)
5669         NewDIExpr = DIExpression::prepend(FragExpr, DIExpression::DerefBefore);
5670       SmallVector<uint64_t, 2> Ops({dwarf::DW_OP_LLVM_arg, 0});
5671       NewDIExpr = DIExpression::prependOpcodes(NewDIExpr, Ops);
5672       return BuildMI(MF, DL, Inst, false, MOs, Variable, NewDIExpr);
5673     } else {
5674       // Create a completely standard DBG_VALUE.
5675       auto &Inst = TII->get(TargetOpcode::DBG_VALUE);
5676       return BuildMI(MF, DL, Inst, Indirect, Reg, Variable, FragExpr);
5677     }
5678   };
5679 
5680   if (Kind == FuncArgumentDbgValueKind::Value) {
5681     // ArgDbgValues are hoisted to the beginning of the entry block. So we
5682     // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
5683     // the entry block.
5684     bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
5685     if (!IsInEntryBlock)
5686       return false;
5687 
5688     // ArgDbgValues are hoisted to the beginning of the entry block.  So we
5689     // should only emit as ArgDbgValue if the dbg.value intrinsic describes a
5690     // variable that also is a param.
5691     //
5692     // Although, if we are at the top of the entry block already, we can still
5693     // emit using ArgDbgValue. This might catch some situations when the
5694     // dbg.value refers to an argument that isn't used in the entry block, so
5695     // any CopyToReg node would be optimized out and the only way to express
5696     // this DBG_VALUE is by using the physical reg (or FI) as done in this
5697     // method.
5698     // In short, we should only emit as ArgDbgValue if the Variable is an
5699     // argument to the current function, and the dbg.value intrinsic is
5700     // found in the entry block.
5701     bool VariableIsFunctionInputArg = Variable->isParameter() &&
5702         !DL->getInlinedAt();
5703     bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
5704     if (!IsInPrologue && !VariableIsFunctionInputArg)
5705       return false;
5706 
5707     // Here we assume that a function argument at the IR level can only be used
5708     // to describe one input parameter at the source level. If we for example have
5709     // source code like this
5710     //
5711     //    struct A { long x, y; };
5712     //    void foo(struct A a, long b) {
5713     //      ...
5714     //      b = a.x;
5715     //      ...
5716     //    }
5717     //
5718     // and IR like this
5719     //
5720     //  define void @foo(i32 %a1, i32 %a2, i32 %b)  {
5721     //  entry:
5722     //    call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
5723     //    call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
5724     //    call void @llvm.dbg.value(metadata i32 %b, "b",
5725     //    ...
5726     //    call void @llvm.dbg.value(metadata i32 %a1, "b"
5727     //    ...
5728     //
5729     // then the last dbg.value is describing a parameter "b" using a value that
5730     // is an argument. But since we have already used %a1 to describe a parameter
5731     // we should not handle that last dbg.value here (that would result in an
5732     // incorrect hoisting of the DBG_VALUE to the function entry).
5733     // Notice that we allow one dbg.value per IR level argument, to accommodate
5734     // the situation with fragments above.
5735     if (VariableIsFunctionInputArg) {
5736       unsigned ArgNo = Arg->getArgNo();
5737       if (ArgNo >= FuncInfo.DescribedArgs.size())
5738         FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
5739       else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
5740         return false;
5741       FuncInfo.DescribedArgs.set(ArgNo);
5742     }
5743   }
5744 
5745   bool IsIndirect = false;
5746   std::optional<MachineOperand> Op;
5747   // Some arguments' frame index is recorded during argument lowering.
5748   int FI = FuncInfo.getArgumentFrameIndex(Arg);
5749   if (FI != std::numeric_limits<int>::max())
5750     Op = MachineOperand::CreateFI(FI);
5751 
5752   SmallVector<std::pair<unsigned, TypeSize>, 8> ArgRegsAndSizes;
5753   if (!Op && N.getNode()) {
5754     getUnderlyingArgRegs(ArgRegsAndSizes, N);
5755     Register Reg;
5756     if (ArgRegsAndSizes.size() == 1)
5757       Reg = ArgRegsAndSizes.front().first;
5758 
5759     if (Reg && Reg.isVirtual()) {
5760       MachineRegisterInfo &RegInfo = MF.getRegInfo();
5761       Register PR = RegInfo.getLiveInPhysReg(Reg);
5762       if (PR)
5763         Reg = PR;
5764     }
5765     if (Reg) {
5766       Op = MachineOperand::CreateReg(Reg, false);
5767       IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
5768     }
5769   }
5770 
5771   if (!Op && N.getNode()) {
5772     // Check if frame index is available.
5773     SDValue LCandidate = peekThroughBitcasts(N);
5774     if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode()))
5775       if (FrameIndexSDNode *FINode =
5776           dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
5777         Op = MachineOperand::CreateFI(FINode->getIndex());
5778   }
5779 
5780   if (!Op) {
5781     // Create a DBG_VALUE for each decomposed value in SplitRegs; together they cover the variable.
5782     auto splitMultiRegDbgValue = [&](ArrayRef<std::pair<unsigned, TypeSize>>
5783                                          SplitRegs) {
5784       unsigned Offset = 0;
5785       for (const auto &RegAndSize : SplitRegs) {
5786         // If the expression is already a fragment, the current register
5787         // offset+size might extend beyond the fragment. In this case, only
5788         // the register bits that are inside the fragment are relevant.
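        // For example, with a 96-bit expression fragment covered by two 64-bit
        // registers, the second register contributes only its low 32 bits.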
5789         int RegFragmentSizeInBits = RegAndSize.second;
5790         if (auto ExprFragmentInfo = Expr->getFragmentInfo()) {
5791           uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
5792           // The register is entirely outside the expression fragment,
5793           // so is irrelevant for debug info.
5794           if (Offset >= ExprFragmentSizeInBits)
5795             break;
5796           // The register is partially outside the expression fragment, only
5797           // the low bits within the fragment are relevant for debug info.
5798           if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
5799             RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset;
5800           }
5801         }
5802 
5803         auto FragmentExpr = DIExpression::createFragmentExpression(
5804             Expr, Offset, RegFragmentSizeInBits);
5805         Offset += RegAndSize.second;
5806         // If a valid fragment expression cannot be created, the variable's
5807         // correct value cannot be determined and so it is set as Undef.
5808         if (!FragmentExpr) {
5809           SDDbgValue *SDV = DAG.getConstantDbgValue(
5810               Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder);
5811           DAG.AddDbgValue(SDV, false);
5812           continue;
5813         }
5814         MachineInstr *NewMI =
5815             MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
5816                              Kind != FuncArgumentDbgValueKind::Value);
5817         FuncInfo.ArgDbgValues.push_back(NewMI);
5818       }
5819     };
5820 
5821     // Check if ValueMap has reg number.
5822     DenseMap<const Value *, Register>::const_iterator
5823       VMI = FuncInfo.ValueMap.find(V);
5824     if (VMI != FuncInfo.ValueMap.end()) {
5825       const auto &TLI = DAG.getTargetLoweringInfo();
5826       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
5827                        V->getType(), std::nullopt);
5828       if (RFV.occupiesMultipleRegs()) {
5829         splitMultiRegDbgValue(RFV.getRegsAndSizes());
5830         return true;
5831       }
5832 
5833       Op = MachineOperand::CreateReg(VMI->second, false);
5834       IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
5835     } else if (ArgRegsAndSizes.size() > 1) {
5836       // This was split due to the calling convention, and no virtual register
5837       // mapping exists for the value.
5838       splitMultiRegDbgValue(ArgRegsAndSizes);
5839       return true;
5840     }
5841   }
5842 
5843   if (!Op)
5844     return false;
5845 
5846   assert(Variable->isValidLocationForIntrinsic(DL) &&
5847          "Expected inlined-at fields to agree");
5848   MachineInstr *NewMI = nullptr;
5849 
5850   if (Op->isReg())
5851     NewMI = MakeVRegDbgValue(Op->getReg(), Expr, IsIndirect);
5852   else
5853     NewMI = BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), true, *Op,
5854                     Variable, Expr);
5855 
5856   // Record the new instruction in ArgDbgValues; it will be inserted into the entry block at the end of instruction selection.
5857   FuncInfo.ArgDbgValues.push_back(NewMI);
5858   return true;
5859 }
5860 
5861 /// Return the appropriate SDDbgValue based on N.
5862 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
5863                                              DILocalVariable *Variable,
5864                                              DIExpression *Expr,
5865                                              const DebugLoc &dl,
5866                                              unsigned DbgSDNodeOrder) {
5867   if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
5868     // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
5869     // stack slot locations.
5870     //
5871     // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
5872     // debug values here after optimization:
5873     //
5874     //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
5875     //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
5876     //
5877     // Both describe the direct values of their associated variables.
5878     return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
5879                                      /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5880   }
5881   return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
5882                          /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5883 }
5884 
5885 static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
5886   switch (Intrinsic) {
5887   case Intrinsic::smul_fix:
5888     return ISD::SMULFIX;
5889   case Intrinsic::umul_fix:
5890     return ISD::UMULFIX;
5891   case Intrinsic::smul_fix_sat:
5892     return ISD::SMULFIXSAT;
5893   case Intrinsic::umul_fix_sat:
5894     return ISD::UMULFIXSAT;
5895   case Intrinsic::sdiv_fix:
5896     return ISD::SDIVFIX;
5897   case Intrinsic::udiv_fix:
5898     return ISD::UDIVFIX;
5899   case Intrinsic::sdiv_fix_sat:
5900     return ISD::SDIVFIXSAT;
5901   case Intrinsic::udiv_fix_sat:
5902     return ISD::UDIVFIXSAT;
5903   default:
5904     llvm_unreachable("Unhandled fixed point intrinsic");
5905   }
5906 }
5907 
5908 void SelectionDAGBuilder::lowerCallToExternalSymbol(const CallInst &I,
5909                                            const char *FunctionName) {
5910   assert(FunctionName && "FunctionName must not be nullptr");
5911   SDValue Callee = DAG.getExternalSymbol(
5912       FunctionName,
5913       DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
5914   LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
5915 }
5916 
5917 /// Given a @llvm.call.preallocated.setup, return the corresponding
5918 /// preallocated call.
5919 static const CallBase *FindPreallocatedCall(const Value *PreallocatedSetup) {
5920   assert(cast<CallBase>(PreallocatedSetup)
5921                  ->getCalledFunction()
5922                  ->getIntrinsicID() == Intrinsic::call_preallocated_setup &&
5923          "expected call_preallocated_setup Value");
5924   for (const auto *U : PreallocatedSetup->users()) {
5925     auto *UseCall = cast<CallBase>(U);
5926     const Function *Fn = UseCall->getCalledFunction();
5927     if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) {
5928       return UseCall;
5929     }
5930   }
5931   llvm_unreachable("expected corresponding call to preallocated setup/arg");
5932 }
5933 
5934 /// If DI is a debug value with an EntryValue expression, lower it using the
5935 /// corresponding physical register of the associated Argument value
5936 /// (guaranteed to exist by the verifier).
5937 bool SelectionDAGBuilder::visitEntryValueDbgValue(const DbgValueInst &DI) {
5938   DILocalVariable *Variable = DI.getVariable();
5939   DIExpression *Expr = DI.getExpression();
5940   if (!Expr->isEntryValue() || !hasSingleElement(DI.getValues()))
5941     return false;
5942 
5943   // These properties are guaranteed by the verifier.
5944   Argument *Arg = cast<Argument>(DI.getValue(0));
5945   assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync));
5946 
5947   auto ArgIt = FuncInfo.ValueMap.find(Arg);
5948   if (ArgIt == FuncInfo.ValueMap.end()) {
5949     LLVM_DEBUG(
5950         dbgs() << "Dropping dbg.value: expression is entry_value but "
5951                   "couldn't find an associated register for the Argument\n");
5952     return true;
5953   }
5954   Register ArgVReg = ArgIt->getSecond();
5955 
5956   for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
5957     if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
5958       SDDbgValue *SDV =
5959           DAG.getVRegDbgValue(Variable, Expr, PhysReg, false /*IsIndirect*/,
5960                               DI.getDebugLoc(), SDNodeOrder);
5961       DAG.AddDbgValue(SDV, false /*treat as dbg.declare byval parameter*/);
5962       return true;
5963     }
5964   LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
5965                        "couldn't find a physical register\n");
5966   return true;
5967 }
5968 
5969 /// Lower the call to the specified intrinsic function.
5970 void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
5971                                              unsigned Intrinsic) {
5972   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5973   SDLoc sdl = getCurSDLoc();
5974   DebugLoc dl = getCurDebugLoc();
5975   SDValue Res;
5976 
5977   SDNodeFlags Flags;
5978   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
5979     Flags.copyFMF(*FPOp);
5980 
5981   switch (Intrinsic) {
5982   default:
5983     // By default, turn this into a target intrinsic node.
5984     visitTargetIntrinsic(I, Intrinsic);
5985     return;
5986   case Intrinsic::vscale: {
5987     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5988     setValue(&I, DAG.getVScale(sdl, VT, APInt(VT.getSizeInBits(), 1)));
5989     return;
5990   }
5991   case Intrinsic::vastart:  visitVAStart(I); return;
5992   case Intrinsic::vaend:    visitVAEnd(I); return;
5993   case Intrinsic::vacopy:   visitVACopy(I); return;
5994   case Intrinsic::returnaddress:
5995     setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
5996                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
5997                              getValue(I.getArgOperand(0))));
5998     return;
5999   case Intrinsic::addressofreturnaddress:
6000     setValue(&I,
6001              DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
6002                          TLI.getValueType(DAG.getDataLayout(), I.getType())));
6003     return;
6004   case Intrinsic::sponentry:
6005     setValue(&I,
6006              DAG.getNode(ISD::SPONENTRY, sdl,
6007                          TLI.getValueType(DAG.getDataLayout(), I.getType())));
6008     return;
6009   case Intrinsic::frameaddress:
6010     setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
6011                              TLI.getFrameIndexTy(DAG.getDataLayout()),
6012                              getValue(I.getArgOperand(0))));
6013     return;
6014   case Intrinsic::read_volatile_register:
6015   case Intrinsic::read_register: {
6016     Value *Reg = I.getArgOperand(0);
6017     SDValue Chain = getRoot();
6018     SDValue RegName =
6019         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6020     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6021     Res = DAG.getNode(ISD::READ_REGISTER, sdl,
6022       DAG.getVTList(VT, MVT::Other), Chain, RegName);
6023     setValue(&I, Res);
6024     DAG.setRoot(Res.getValue(1));
6025     return;
6026   }
6027   case Intrinsic::write_register: {
6028     Value *Reg = I.getArgOperand(0);
6029     Value *RegValue = I.getArgOperand(1);
6030     SDValue Chain = getRoot();
6031     SDValue RegName =
6032         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6033     DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
6034                             RegName, getValue(RegValue)));
6035     return;
6036   }
6037   case Intrinsic::memcpy: {
6038     const auto &MCI = cast<MemCpyInst>(I);
6039     SDValue Op1 = getValue(I.getArgOperand(0));
6040     SDValue Op2 = getValue(I.getArgOperand(1));
6041     SDValue Op3 = getValue(I.getArgOperand(2));
6042     // @llvm.memcpy defines 0 and 1 to both mean no alignment.
6043     Align DstAlign = MCI.getDestAlign().valueOrOne();
6044     Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6045     Align Alignment = std::min(DstAlign, SrcAlign);
6046     bool isVol = MCI.isVolatile();
6047     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6048     // FIXME: Support passing different dest/src alignments to the memcpy DAG
6049     // node.
6050     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6051     SDValue MC = DAG.getMemcpy(
6052         Root, sdl, Op1, Op2, Op3, Alignment, isVol,
6053         /* AlwaysInline */ false, isTC, MachinePointerInfo(I.getArgOperand(0)),
6054         MachinePointerInfo(I.getArgOperand(1)), I.getAAMetadata(), AA);
6055     updateDAGForMaybeTailCall(MC);
6056     return;
6057   }
6058   case Intrinsic::memcpy_inline: {
6059     const auto &MCI = cast<MemCpyInlineInst>(I);
6060     SDValue Dst = getValue(I.getArgOperand(0));
6061     SDValue Src = getValue(I.getArgOperand(1));
6062     SDValue Size = getValue(I.getArgOperand(2));
6063     assert(isa<ConstantSDNode>(Size) && "memcpy_inline needs constant size");
6064     // @llvm.memcpy.inline defines 0 and 1 to both mean no alignment.
6065     Align DstAlign = MCI.getDestAlign().valueOrOne();
6066     Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6067     Align Alignment = std::min(DstAlign, SrcAlign);
6068     bool isVol = MCI.isVolatile();
6069     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6070     // FIXME: Support passing different dest/src alignments to the memcpy DAG
6071     // node.
6072     SDValue MC = DAG.getMemcpy(
6073         getRoot(), sdl, Dst, Src, Size, Alignment, isVol,
6074         /* AlwaysInline */ true, isTC, MachinePointerInfo(I.getArgOperand(0)),
6075         MachinePointerInfo(I.getArgOperand(1)), I.getAAMetadata(), AA);
6076     updateDAGForMaybeTailCall(MC);
6077     return;
6078   }
6079   case Intrinsic::memset: {
6080     const auto &MSI = cast<MemSetInst>(I);
6081     SDValue Op1 = getValue(I.getArgOperand(0));
6082     SDValue Op2 = getValue(I.getArgOperand(1));
6083     SDValue Op3 = getValue(I.getArgOperand(2));
6084     // @llvm.memset defines 0 and 1 to both mean no alignment.
6085     Align Alignment = MSI.getDestAlign().valueOrOne();
6086     bool isVol = MSI.isVolatile();
6087     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6088     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6089     SDValue MS = DAG.getMemset(
6090         Root, sdl, Op1, Op2, Op3, Alignment, isVol, /* AlwaysInline */ false,
6091         isTC, MachinePointerInfo(I.getArgOperand(0)), I.getAAMetadata());
6092     updateDAGForMaybeTailCall(MS);
6093     return;
6094   }
6095   case Intrinsic::memset_inline: {
6096     const auto &MSII = cast<MemSetInlineInst>(I);
6097     SDValue Dst = getValue(I.getArgOperand(0));
6098     SDValue Value = getValue(I.getArgOperand(1));
6099     SDValue Size = getValue(I.getArgOperand(2));
6100     assert(isa<ConstantSDNode>(Size) && "memset_inline needs constant size");
6101     // @llvm.memset defines 0 and 1 to both mean no alignment.
6102     Align DstAlign = MSII.getDestAlign().valueOrOne();
6103     bool isVol = MSII.isVolatile();
6104     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6105     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6106     SDValue MC = DAG.getMemset(Root, sdl, Dst, Value, Size, DstAlign, isVol,
6107                                /* AlwaysInline */ true, isTC,
6108                                MachinePointerInfo(I.getArgOperand(0)),
6109                                I.getAAMetadata());
6110     updateDAGForMaybeTailCall(MC);
6111     return;
6112   }
6113   case Intrinsic::memmove: {
6114     const auto &MMI = cast<MemMoveInst>(I);
6115     SDValue Op1 = getValue(I.getArgOperand(0));
6116     SDValue Op2 = getValue(I.getArgOperand(1));
6117     SDValue Op3 = getValue(I.getArgOperand(2));
6118     // @llvm.memmove defines 0 and 1 to both mean no alignment.
6119     Align DstAlign = MMI.getDestAlign().valueOrOne();
6120     Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6121     Align Alignment = std::min(DstAlign, SrcAlign);
6122     bool isVol = MMI.isVolatile();
6123     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6124     // FIXME: Support passing different dest/src alignments to the memmove DAG
6125     // node.
6126     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6127     SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
6128                                 isTC, MachinePointerInfo(I.getArgOperand(0)),
6129                                 MachinePointerInfo(I.getArgOperand(1)),
6130                                 I.getAAMetadata(), AA);
6131     updateDAGForMaybeTailCall(MM);
6132     return;
6133   }
6134   case Intrinsic::memcpy_element_unordered_atomic: {
6135     const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
6136     SDValue Dst = getValue(MI.getRawDest());
6137     SDValue Src = getValue(MI.getRawSource());
6138     SDValue Length = getValue(MI.getLength());
6139 
6140     Type *LengthTy = MI.getLength()->getType();
6141     unsigned ElemSz = MI.getElementSizeInBytes();
6142     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6143     SDValue MC =
6144         DAG.getAtomicMemcpy(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6145                             isTC, MachinePointerInfo(MI.getRawDest()),
6146                             MachinePointerInfo(MI.getRawSource()));
6147     updateDAGForMaybeTailCall(MC);
6148     return;
6149   }
6150   case Intrinsic::memmove_element_unordered_atomic: {
6151     auto &MI = cast<AtomicMemMoveInst>(I);
6152     SDValue Dst = getValue(MI.getRawDest());
6153     SDValue Src = getValue(MI.getRawSource());
6154     SDValue Length = getValue(MI.getLength());
6155 
6156     Type *LengthTy = MI.getLength()->getType();
6157     unsigned ElemSz = MI.getElementSizeInBytes();
6158     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6159     SDValue MC =
6160         DAG.getAtomicMemmove(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6161                              isTC, MachinePointerInfo(MI.getRawDest()),
6162                              MachinePointerInfo(MI.getRawSource()));
6163     updateDAGForMaybeTailCall(MC);
6164     return;
6165   }
6166   case Intrinsic::memset_element_unordered_atomic: {
6167     auto &MI = cast<AtomicMemSetInst>(I);
6168     SDValue Dst = getValue(MI.getRawDest());
6169     SDValue Val = getValue(MI.getValue());
6170     SDValue Length = getValue(MI.getLength());
6171 
6172     Type *LengthTy = MI.getLength()->getType();
6173     unsigned ElemSz = MI.getElementSizeInBytes();
6174     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6175     SDValue MC =
6176         DAG.getAtomicMemset(getRoot(), sdl, Dst, Val, Length, LengthTy, ElemSz,
6177                             isTC, MachinePointerInfo(MI.getRawDest()));
6178     updateDAGForMaybeTailCall(MC);
6179     return;
6180   }
6181   case Intrinsic::call_preallocated_setup: {
6182     const CallBase *PreallocatedCall = FindPreallocatedCall(&I);
6183     SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6184     SDValue Res = DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
6185                               getRoot(), SrcValue);
6186     setValue(&I, Res);
6187     DAG.setRoot(Res);
6188     return;
6189   }
6190   case Intrinsic::call_preallocated_arg: {
6191     const CallBase *PreallocatedCall = FindPreallocatedCall(I.getOperand(0));
6192     SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6193     SDValue Ops[3];
6194     Ops[0] = getRoot();
6195     Ops[1] = SrcValue;
6196     Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
6197                                    MVT::i32); // arg index
6198     SDValue Res = DAG.getNode(
6199         ISD::PREALLOCATED_ARG, sdl,
6200         DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Ops);
6201     setValue(&I, Res);
6202     DAG.setRoot(Res.getValue(1));
6203     return;
6204   }
6205   case Intrinsic::dbg_declare: {
6206     const auto &DI = cast<DbgDeclareInst>(I);
6207     // Debug intrinsics are handled separately in assignment tracking mode.
6208     // Some intrinsics are handled right after Argument lowering.
6209     if (AssignmentTrackingEnabled ||
6210         FuncInfo.PreprocessedDbgDeclares.count(&DI))
6211       return;
6212     // Assume dbg.declare cannot currently use DIArgList, i.e.
6213     // it is non-variadic.
6214     assert(!DI.hasArgList() && "Only dbg.value should currently use DIArgList");
6215     DILocalVariable *Variable = DI.getVariable();
6216     DIExpression *Expression = DI.getExpression();
6217     dropDanglingDebugInfo(Variable, Expression);
6218     assert(Variable && "Missing variable");
6219     LLVM_DEBUG(dbgs() << "SelectionDAG visiting debug intrinsic: " << DI
6220                       << "\n");
6221     // Check if address has undef value.
6222     const Value *Address = DI.getVariableLocationOp(0);
6223     if (!Address || isa<UndefValue>(Address) ||
6224         (Address->use_empty() && !isa<Argument>(Address))) {
6225       LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI
6226                         << " (bad/undef/unused-arg address)\n");
6227       return;
6228     }
6229 
6230     bool isParameter = Variable->isParameter() || isa<Argument>(Address);
6231 
6232     SDValue &N = NodeMap[Address];
6233     if (!N.getNode() && isa<Argument>(Address))
6234       // Check unused arguments map.
6235       N = UnusedArgNodeMap[Address];
6236     SDDbgValue *SDV;
6237     if (N.getNode()) {
6238       if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
6239         Address = BCI->getOperand(0);
6240       // Parameters are handled specially.
6241       auto FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
6242       if (isParameter && FINode) {
6243         // Byval parameter. We have a frame index at this point.
6244         SDV =
6245             DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
6246                                       /*IsIndirect*/ true, dl, SDNodeOrder);
6247       } else if (isa<Argument>(Address)) {
6248         // Address is an argument, so try to emit its dbg value using
6249         // virtual register info from the FuncInfo.ValueMap.
6250         EmitFuncArgumentDbgValue(Address, Variable, Expression, dl,
6251                                  FuncArgumentDbgValueKind::Declare, N);
6252         return;
6253       } else {
6254         SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
6255                               true, dl, SDNodeOrder);
6256       }
6257       DAG.AddDbgValue(SDV, isParameter);
6258     } else {
6259       // If Address is an argument then try to emit its dbg value using
6260       // virtual register info from the FuncInfo.ValueMap.
6261       if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl,
6262                                     FuncArgumentDbgValueKind::Declare, N)) {
6263         LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI
6264                           << " (could not emit func-arg dbg_value)\n");
6265       }
6266     }
6267     return;
6268   }
6269   case Intrinsic::dbg_label: {
6270     const DbgLabelInst &DI = cast<DbgLabelInst>(I);
6271     DILabel *Label = DI.getLabel();
6272     assert(Label && "Missing label");
6273 
6274     SDDbgLabel *SDV;
6275     SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
6276     DAG.AddDbgLabel(SDV);
6277     return;
6278   }
6279   case Intrinsic::dbg_assign: {
6280     // Debug intrinsics are handled separately in assignment tracking mode.
6281     if (AssignmentTrackingEnabled)
6282       return;
6283     // If assignment tracking hasn't been enabled then fall through and treat
6284     // the dbg.assign as a dbg.value.
6285     [[fallthrough]];
6286   }
6287   case Intrinsic::dbg_value: {
6288     // Debug intrinsics are handled separately in assignment tracking mode.
6289     if (AssignmentTrackingEnabled)
6290       return;
6291     const DbgValueInst &DI = cast<DbgValueInst>(I);
6292     assert(DI.getVariable() && "Missing variable");
6293 
6294     DILocalVariable *Variable = DI.getVariable();
6295     DIExpression *Expression = DI.getExpression();
6296     dropDanglingDebugInfo(Variable, Expression);
6297 
6298     if (visitEntryValueDbgValue(DI))
6299       return;
6300 
6301     if (DI.isKillLocation()) {
6302       handleKillDebugValue(Variable, Expression, DI.getDebugLoc(), SDNodeOrder);
6303       return;
6304     }
6305 
6306     SmallVector<Value *, 4> Values(DI.getValues());
6307     if (Values.empty())
6308       return;
6309 
6310     bool IsVariadic = DI.hasArgList();
6311     if (!handleDebugValue(Values, Variable, Expression, DI.getDebugLoc(),
6312                           SDNodeOrder, IsVariadic))
6313       addDanglingDebugInfo(&DI, SDNodeOrder);
6314     return;
6315   }
6316 
6317   case Intrinsic::eh_typeid_for: {
6318     // Find the type id for the given typeinfo.
6319     GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
6320     unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
6321     Res = DAG.getConstant(TypeID, sdl, MVT::i32);
6322     setValue(&I, Res);
6323     return;
6324   }
6325 
6326   case Intrinsic::eh_return_i32:
6327   case Intrinsic::eh_return_i64:
6328     DAG.getMachineFunction().setCallsEHReturn(true);
6329     DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
6330                             MVT::Other,
6331                             getControlRoot(),
6332                             getValue(I.getArgOperand(0)),
6333                             getValue(I.getArgOperand(1))));
6334     return;
6335   case Intrinsic::eh_unwind_init:
6336     DAG.getMachineFunction().setCallsUnwindInit(true);
6337     return;
6338   case Intrinsic::eh_dwarf_cfa:
6339     setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
6340                              TLI.getPointerTy(DAG.getDataLayout()),
6341                              getValue(I.getArgOperand(0))));
6342     return;
6343   case Intrinsic::eh_sjlj_callsite: {
6344     MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
6345     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(0));
6346     assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
6347 
6348     MMI.setCurrentCallSite(CI->getZExtValue());
6349     return;
6350   }
6351   case Intrinsic::eh_sjlj_functioncontext: {
6352     // Get and store the index of the function context.
6353     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
6354     AllocaInst *FnCtx =
6355       cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
6356     int FI = FuncInfo.StaticAllocaMap[FnCtx];
6357     MFI.setFunctionContextIndex(FI);
6358     return;
6359   }
6360   case Intrinsic::eh_sjlj_setjmp: {
6361     SDValue Ops[2];
6362     Ops[0] = getRoot();
6363     Ops[1] = getValue(I.getArgOperand(0));
6364     SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
6365                              DAG.getVTList(MVT::i32, MVT::Other), Ops);
6366     setValue(&I, Op.getValue(0));
6367     DAG.setRoot(Op.getValue(1));
6368     return;
6369   }
6370   case Intrinsic::eh_sjlj_longjmp:
6371     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
6372                             getRoot(), getValue(I.getArgOperand(0))));
6373     return;
6374   case Intrinsic::eh_sjlj_setup_dispatch:
6375     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
6376                             getRoot()));
6377     return;
6378   case Intrinsic::masked_gather:
6379     visitMaskedGather(I);
6380     return;
6381   case Intrinsic::masked_load:
6382     visitMaskedLoad(I);
6383     return;
6384   case Intrinsic::masked_scatter:
6385     visitMaskedScatter(I);
6386     return;
6387   case Intrinsic::masked_store:
6388     visitMaskedStore(I);
6389     return;
6390   case Intrinsic::masked_expandload:
6391     visitMaskedLoad(I, true /* IsExpanding */);
6392     return;
6393   case Intrinsic::masked_compressstore:
6394     visitMaskedStore(I, true /* IsCompressing */);
6395     return;
6396   case Intrinsic::powi:
6397     setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
6398                             getValue(I.getArgOperand(1)), DAG));
6399     return;
6400   case Intrinsic::log:
6401     setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6402     return;
6403   case Intrinsic::log2:
6404     setValue(&I,
6405              expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6406     return;
6407   case Intrinsic::log10:
6408     setValue(&I,
6409              expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6410     return;
6411   case Intrinsic::exp:
6412     setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6413     return;
6414   case Intrinsic::exp2:
6415     setValue(&I,
6416              expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6417     return;
6418   case Intrinsic::pow:
6419     setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
6420                            getValue(I.getArgOperand(1)), DAG, TLI, Flags));
6421     return;
6422   case Intrinsic::sqrt:
6423   case Intrinsic::fabs:
6424   case Intrinsic::sin:
6425   case Intrinsic::cos:
6426   case Intrinsic::exp10:
6427   case Intrinsic::floor:
6428   case Intrinsic::ceil:
6429   case Intrinsic::trunc:
6430   case Intrinsic::rint:
6431   case Intrinsic::nearbyint:
6432   case Intrinsic::round:
6433   case Intrinsic::roundeven:
6434   case Intrinsic::canonicalize: {
6435     unsigned Opcode;
6436     switch (Intrinsic) {
6437     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6438     case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
6439     case Intrinsic::fabs:      Opcode = ISD::FABS;       break;
6440     case Intrinsic::sin:       Opcode = ISD::FSIN;       break;
6441     case Intrinsic::cos:       Opcode = ISD::FCOS;       break;
6442     case Intrinsic::exp10:     Opcode = ISD::FEXP10;     break;
6443     case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
6444     case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
6445     case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
6446     case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
6447     case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
6448     case Intrinsic::round:     Opcode = ISD::FROUND;     break;
6449     case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN; break;
6450     case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
6451     }
6452 
6453     setValue(&I, DAG.getNode(Opcode, sdl,
6454                              getValue(I.getArgOperand(0)).getValueType(),
6455                              getValue(I.getArgOperand(0)), Flags));
6456     return;
6457   }
6458   case Intrinsic::lround:
6459   case Intrinsic::llround:
6460   case Intrinsic::lrint:
6461   case Intrinsic::llrint: {
6462     unsigned Opcode;
6463     switch (Intrinsic) {
6464     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6465     case Intrinsic::lround:  Opcode = ISD::LROUND;  break;
6466     case Intrinsic::llround: Opcode = ISD::LLROUND; break;
6467     case Intrinsic::lrint:   Opcode = ISD::LRINT;   break;
6468     case Intrinsic::llrint:  Opcode = ISD::LLRINT;  break;
6469     }
6470 
6471     EVT RetVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6472     setValue(&I, DAG.getNode(Opcode, sdl, RetVT,
6473                              getValue(I.getArgOperand(0))));
6474     return;
6475   }
6476   case Intrinsic::minnum:
6477     setValue(&I, DAG.getNode(ISD::FMINNUM, sdl,
6478                              getValue(I.getArgOperand(0)).getValueType(),
6479                              getValue(I.getArgOperand(0)),
6480                              getValue(I.getArgOperand(1)), Flags));
6481     return;
6482   case Intrinsic::maxnum:
6483     setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl,
6484                              getValue(I.getArgOperand(0)).getValueType(),
6485                              getValue(I.getArgOperand(0)),
6486                              getValue(I.getArgOperand(1)), Flags));
6487     return;
6488   case Intrinsic::minimum:
6489     setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
6490                              getValue(I.getArgOperand(0)).getValueType(),
6491                              getValue(I.getArgOperand(0)),
6492                              getValue(I.getArgOperand(1)), Flags));
6493     return;
6494   case Intrinsic::maximum:
6495     setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
6496                              getValue(I.getArgOperand(0)).getValueType(),
6497                              getValue(I.getArgOperand(0)),
6498                              getValue(I.getArgOperand(1)), Flags));
6499     return;
6500   case Intrinsic::copysign:
6501     setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
6502                              getValue(I.getArgOperand(0)).getValueType(),
6503                              getValue(I.getArgOperand(0)),
6504                              getValue(I.getArgOperand(1)), Flags));
6505     return;
6506   case Intrinsic::ldexp:
6507     setValue(&I, DAG.getNode(ISD::FLDEXP, sdl,
6508                              getValue(I.getArgOperand(0)).getValueType(),
6509                              getValue(I.getArgOperand(0)),
6510                              getValue(I.getArgOperand(1)), Flags));
6511     return;
6512   case Intrinsic::frexp: {
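         // llvm.frexp returns {fraction, exponent} as a struct, so compute both
         // member types and emit the two-result FFREXP node.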
6513     SmallVector<EVT, 2> ValueVTs;
6514     ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
6515     SDVTList VTs = DAG.getVTList(ValueVTs);
6516     setValue(&I,
6517              DAG.getNode(ISD::FFREXP, sdl, VTs, getValue(I.getArgOperand(0))));
6518     return;
6519   }
6520   case Intrinsic::arithmetic_fence: {
6521     setValue(&I, DAG.getNode(ISD::ARITH_FENCE, sdl,
6522                              getValue(I.getArgOperand(0)).getValueType(),
6523                              getValue(I.getArgOperand(0)), Flags));
6524     return;
6525   }
6526   case Intrinsic::fma:
6527     setValue(&I, DAG.getNode(
6528                      ISD::FMA, sdl, getValue(I.getArgOperand(0)).getValueType(),
6529                      getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)),
6530                      getValue(I.getArgOperand(2)), Flags));
6531     return;
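     // The include below expands to one 'case Intrinsic::NAME:' label for every
     // constrained FP intrinsic listed in ConstrainedOps.def; all of them are
     // handled by visitConstrainedFPIntrinsic.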
6532 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
6533   case Intrinsic::INTRINSIC:
6534 #include "llvm/IR/ConstrainedOps.def"
6535     visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
6536     return;
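     // Likewise, VPIntrinsics.def expands to a case label for each vector
     // predication intrinsic, all routed to visitVectorPredicationIntrinsic.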
6537 #define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6538 #include "llvm/IR/VPIntrinsics.def"
6539     visitVectorPredicationIntrinsic(cast<VPIntrinsic>(I));
6540     return;
6541   case Intrinsic::fptrunc_round: {
6542     // Get the last argument, the rounding-mode metadata, and convert it to an
6543     // integer constant in the call.
6544     Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(1))->getMetadata();
6545     std::optional<RoundingMode> RoundMode =
6546         convertStrToRoundingMode(cast<MDString>(MD)->getString());
6547 
6548     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6549 
6550     // Propagate fast-math-flags from IR to node(s).
6551     SDNodeFlags Flags;
6552     Flags.copyFMF(*cast<FPMathOperator>(&I));
6553     SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
6554 
6555     SDValue Result;
6556     Result = DAG.getNode(
6557         ISD::FPTRUNC_ROUND, sdl, VT, getValue(I.getArgOperand(0)),
6558         DAG.getTargetConstant((int)*RoundMode, sdl,
6559                               TLI.getPointerTy(DAG.getDataLayout())));
6560     setValue(&I, Result);
6561 
6562     return;
6563   }
6564   case Intrinsic::fmuladd: {
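         // llvm.fmuladd lowers to a single fused FMA node when FP-op fusion is
         // allowed (not FPOpFusion::Strict) and the target reports FMA as faster
         // than FMUL+FADD; otherwise it is split into an explicit multiply and
         // add. Illustrative IR:
         //   %r = call float @llvm.fmuladd.f32(float %a, float %b, float %c)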
6565     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6566     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
6567         TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
6568       setValue(&I, DAG.getNode(ISD::FMA, sdl,
6569                                getValue(I.getArgOperand(0)).getValueType(),
6570                                getValue(I.getArgOperand(0)),
6571                                getValue(I.getArgOperand(1)),
6572                                getValue(I.getArgOperand(2)), Flags));
6573     } else {
6574       // TODO: Intrinsic calls should have fast-math-flags.
6575       SDValue Mul = DAG.getNode(
6576           ISD::FMUL, sdl, getValue(I.getArgOperand(0)).getValueType(),
6577           getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), Flags);
6578       SDValue Add = DAG.getNode(ISD::FADD, sdl,
6579                                 getValue(I.getArgOperand(0)).getValueType(),
6580                                 Mul, getValue(I.getArgOperand(2)), Flags);
6581       setValue(&I, Add);
6582     }
6583     return;
6584   }
6585   case Intrinsic::convert_to_fp16:
6586     setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
6587                              DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
6588                                          getValue(I.getArgOperand(0)),
6589                                          DAG.getTargetConstant(0, sdl,
6590                                                                MVT::i32))));
6591     return;
6592   case Intrinsic::convert_from_fp16:
6593     setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
6594                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
6595                              DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
6596                                          getValue(I.getArgOperand(0)))));
6597     return;
6598   case Intrinsic::fptosi_sat: {
6599     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6600     setValue(&I, DAG.getNode(ISD::FP_TO_SINT_SAT, sdl, VT,
6601                              getValue(I.getArgOperand(0)),
6602                              DAG.getValueType(VT.getScalarType())));
6603     return;
6604   }
6605   case Intrinsic::fptoui_sat: {
6606     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6607     setValue(&I, DAG.getNode(ISD::FP_TO_UINT_SAT, sdl, VT,
6608                              getValue(I.getArgOperand(0)),
6609                              DAG.getValueType(VT.getScalarType())));
6610     return;
6611   }
6612   case Intrinsic::set_rounding:
6613     Res = DAG.getNode(ISD::SET_ROUNDING, sdl, MVT::Other,
6614                       {getRoot(), getValue(I.getArgOperand(0))});
6615     setValue(&I, Res);
6616     DAG.setRoot(Res.getValue(0));
6617     return;
6618   case Intrinsic::is_fpclass: {
6619     const DataLayout DLayout = DAG.getDataLayout();
6620     EVT DestVT = TLI.getValueType(DLayout, I.getType());
6621     EVT ArgVT = TLI.getValueType(DLayout, I.getArgOperand(0)->getType());
6622     FPClassTest Test = static_cast<FPClassTest>(
6623         cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
6624     MachineFunction &MF = DAG.getMachineFunction();
6625     const Function &F = MF.getFunction();
6626     SDValue Op = getValue(I.getArgOperand(0));
6627     SDNodeFlags Flags;
6628     Flags.setNoFPExcept(
6629         !F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
6630     // If ISD::IS_FPCLASS should be expanded, do it right now, because the
6631     // expansion can use illegal types. Doing the expansion early allows these
6632     // types to be legalized prior to selection.
6633     if (!TLI.isOperationLegalOrCustom(ISD::IS_FPCLASS, ArgVT)) {
6634       SDValue Result = TLI.expandIS_FPCLASS(DestVT, Op, Test, Flags, sdl, DAG);
6635       setValue(&I, Result);
6636       return;
6637     }
6638 
6639     SDValue Check = DAG.getTargetConstant(Test, sdl, MVT::i32);
6640     SDValue V = DAG.getNode(ISD::IS_FPCLASS, sdl, DestVT, {Op, Check}, Flags);
6641     setValue(&I, V);
6642     return;
6643   }
6644   case Intrinsic::get_fpenv: {
6645     const DataLayout DLayout = DAG.getDataLayout();
6646     EVT EnvVT = TLI.getValueType(DLayout, I.getType());
6647     Align TempAlign = DAG.getEVTAlign(EnvVT);
6648     SDValue Chain = getRoot();
6649     // Use GET_FPENV if it is legal or custom. Otherwise use a memory-based node
6650     // and temporary storage on the stack.
6651     if (TLI.isOperationLegalOrCustom(ISD::GET_FPENV, EnvVT)) {
6652       Res = DAG.getNode(
6653           ISD::GET_FPENV, sdl,
6654           DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
6655                         MVT::Other),
6656           Chain);
6657     } else {
6658       SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
6659       int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
6660       auto MPI =
6661           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
6662       MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
6663           MPI, MachineMemOperand::MOStore, MemoryLocation::UnknownSize,
6664           TempAlign);
6665       Chain = DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
6666       Res = DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
6667     }
6668     setValue(&I, Res);
6669     DAG.setRoot(Res.getValue(1));
6670     return;
6671   }
6672   case Intrinsic::set_fpenv: {
6673     const DataLayout DLayout = DAG.getDataLayout();
6674     SDValue Env = getValue(I.getArgOperand(0));
6675     EVT EnvVT = Env.getValueType();
6676     Align TempAlign = DAG.getEVTAlign(EnvVT);
6677     SDValue Chain = getRoot();
6678     // If SET_FPENV is legal or custom, use it. Otherwise load the
6679     // environment from memory.
6680     if (TLI.isOperationLegalOrCustom(ISD::SET_FPENV, EnvVT)) {
6681       Chain = DAG.getNode(ISD::SET_FPENV, sdl, MVT::Other, Chain, Env);
6682     } else {
6683       // Allocate stack space, copy the environment bits into it, and use this
6684       // memory in SET_FPENV_MEM.
6685       SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
6686       int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
6687       auto MPI =
6688           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
6689       Chain = DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
6690                            MachineMemOperand::MOStore);
6691       MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
6692           MPI, MachineMemOperand::MOLoad, MemoryLocation::UnknownSize,
6693           TempAlign);
6694       Chain = DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
6695     }
6696     DAG.setRoot(Chain);
6697     return;
6698   }
6699   case Intrinsic::reset_fpenv:
6700     DAG.setRoot(DAG.getNode(ISD::RESET_FPENV, sdl, MVT::Other, getRoot()));
6701     return;
6702   case Intrinsic::get_fpmode:
6703     Res = DAG.getNode(
6704         ISD::GET_FPMODE, sdl,
6705         DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
6706                       MVT::Other),
6707         DAG.getRoot());
6708     setValue(&I, Res);
6709     DAG.setRoot(Res.getValue(1));
6710     return;
6711   case Intrinsic::set_fpmode:
6712     Res = DAG.getNode(ISD::SET_FPMODE, sdl, MVT::Other, {DAG.getRoot()},
6713                       getValue(I.getArgOperand(0)));
6714     DAG.setRoot(Res);
6715     return;
6716   case Intrinsic::reset_fpmode: {
6717     Res = DAG.getNode(ISD::RESET_FPMODE, sdl, MVT::Other, getRoot());
6718     DAG.setRoot(Res);
6719     return;
6720   }
6721   case Intrinsic::pcmarker: {
6722     SDValue Tmp = getValue(I.getArgOperand(0));
6723     DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
6724     return;
6725   }
6726   case Intrinsic::readcyclecounter: {
6727     SDValue Op = getRoot();
6728     Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
6729                       DAG.getVTList(MVT::i64, MVT::Other), Op);
6730     setValue(&I, Res);
6731     DAG.setRoot(Res.getValue(1));
6732     return;
6733   }
6734   case Intrinsic::bitreverse:
6735     setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
6736                              getValue(I.getArgOperand(0)).getValueType(),
6737                              getValue(I.getArgOperand(0))));
6738     return;
6739   case Intrinsic::bswap:
6740     setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
6741                              getValue(I.getArgOperand(0)).getValueType(),
6742                              getValue(I.getArgOperand(0))));
6743     return;
6744   case Intrinsic::cttz: {
6745     SDValue Arg = getValue(I.getArgOperand(0));
6746     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6747     EVT Ty = Arg.getValueType();
6748     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
6749                              sdl, Ty, Arg));
6750     return;
6751   }
6752   case Intrinsic::ctlz: {
6753     SDValue Arg = getValue(I.getArgOperand(0));
6754     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6755     EVT Ty = Arg.getValueType();
6756     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
6757                              sdl, Ty, Arg));
6758     return;
6759   }
6760   case Intrinsic::ctpop: {
6761     SDValue Arg = getValue(I.getArgOperand(0));
6762     EVT Ty = Arg.getValueType();
6763     setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
6764     return;
6765   }
6766   case Intrinsic::fshl:
6767   case Intrinsic::fshr: {
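         // A funnel shift whose two inputs are the same value is a rotate, so
         // emit ROTL/ROTR for that case and FSHL/FSHR otherwise.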
6768     bool IsFSHL = Intrinsic == Intrinsic::fshl;
6769     SDValue X = getValue(I.getArgOperand(0));
6770     SDValue Y = getValue(I.getArgOperand(1));
6771     SDValue Z = getValue(I.getArgOperand(2));
6772     EVT VT = X.getValueType();
6773 
6774     if (X == Y) {
6775       auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
6776       setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
6777     } else {
6778       auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
6779       setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
6780     }
6781     return;
6782   }
6783   case Intrinsic::sadd_sat: {
6784     SDValue Op1 = getValue(I.getArgOperand(0));
6785     SDValue Op2 = getValue(I.getArgOperand(1));
6786     setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6787     return;
6788   }
6789   case Intrinsic::uadd_sat: {
6790     SDValue Op1 = getValue(I.getArgOperand(0));
6791     SDValue Op2 = getValue(I.getArgOperand(1));
6792     setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6793     return;
6794   }
6795   case Intrinsic::ssub_sat: {
6796     SDValue Op1 = getValue(I.getArgOperand(0));
6797     SDValue Op2 = getValue(I.getArgOperand(1));
6798     setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6799     return;
6800   }
6801   case Intrinsic::usub_sat: {
6802     SDValue Op1 = getValue(I.getArgOperand(0));
6803     SDValue Op2 = getValue(I.getArgOperand(1));
6804     setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6805     return;
6806   }
6807   case Intrinsic::sshl_sat: {
6808     SDValue Op1 = getValue(I.getArgOperand(0));
6809     SDValue Op2 = getValue(I.getArgOperand(1));
6810     setValue(&I, DAG.getNode(ISD::SSHLSAT, sdl, Op1.getValueType(), Op1, Op2));
6811     return;
6812   }
6813   case Intrinsic::ushl_sat: {
6814     SDValue Op1 = getValue(I.getArgOperand(0));
6815     SDValue Op2 = getValue(I.getArgOperand(1));
6816     setValue(&I, DAG.getNode(ISD::USHLSAT, sdl, Op1.getValueType(), Op1, Op2));
6817     return;
6818   }
6819   case Intrinsic::smul_fix:
6820   case Intrinsic::umul_fix:
6821   case Intrinsic::smul_fix_sat:
6822   case Intrinsic::umul_fix_sat: {
6823     SDValue Op1 = getValue(I.getArgOperand(0));
6824     SDValue Op2 = getValue(I.getArgOperand(1));
6825     SDValue Op3 = getValue(I.getArgOperand(2));
6826     setValue(&I, DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6827                              Op1.getValueType(), Op1, Op2, Op3));
6828     return;
6829   }
6830   case Intrinsic::sdiv_fix:
6831   case Intrinsic::udiv_fix:
6832   case Intrinsic::sdiv_fix_sat:
6833   case Intrinsic::udiv_fix_sat: {
6834     SDValue Op1 = getValue(I.getArgOperand(0));
6835     SDValue Op2 = getValue(I.getArgOperand(1));
6836     SDValue Op3 = getValue(I.getArgOperand(2));
6837     setValue(&I, expandDivFix(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6838                               Op1, Op2, Op3, DAG, TLI));
6839     return;
6840   }
6841   case Intrinsic::smax: {
6842     SDValue Op1 = getValue(I.getArgOperand(0));
6843     SDValue Op2 = getValue(I.getArgOperand(1));
6844     setValue(&I, DAG.getNode(ISD::SMAX, sdl, Op1.getValueType(), Op1, Op2));
6845     return;
6846   }
6847   case Intrinsic::smin: {
6848     SDValue Op1 = getValue(I.getArgOperand(0));
6849     SDValue Op2 = getValue(I.getArgOperand(1));
6850     setValue(&I, DAG.getNode(ISD::SMIN, sdl, Op1.getValueType(), Op1, Op2));
6851     return;
6852   }
6853   case Intrinsic::umax: {
6854     SDValue Op1 = getValue(I.getArgOperand(0));
6855     SDValue Op2 = getValue(I.getArgOperand(1));
6856     setValue(&I, DAG.getNode(ISD::UMAX, sdl, Op1.getValueType(), Op1, Op2));
6857     return;
6858   }
6859   case Intrinsic::umin: {
6860     SDValue Op1 = getValue(I.getArgOperand(0));
6861     SDValue Op2 = getValue(I.getArgOperand(1));
6862     setValue(&I, DAG.getNode(ISD::UMIN, sdl, Op1.getValueType(), Op1, Op2));
6863     return;
6864   }
6865   case Intrinsic::abs: {
6866     // TODO: Preserve "int min is poison" arg in SDAG?
6867     SDValue Op1 = getValue(I.getArgOperand(0));
6868     setValue(&I, DAG.getNode(ISD::ABS, sdl, Op1.getValueType(), Op1));
6869     return;
6870   }
6871   case Intrinsic::stacksave: {
6872     SDValue Op = getRoot();
6873     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6874     Res = DAG.getNode(ISD::STACKSAVE, sdl, DAG.getVTList(VT, MVT::Other), Op);
6875     setValue(&I, Res);
6876     DAG.setRoot(Res.getValue(1));
6877     return;
6878   }
6879   case Intrinsic::stackrestore:
6880     Res = getValue(I.getArgOperand(0));
6881     DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
6882     return;
6883   case Intrinsic::get_dynamic_area_offset: {
6884     SDValue Op = getRoot();
6885     EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
6886     EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6887     // The result type for @llvm.get.dynamic.area.offset must not be wider than
6888     // the target's frame-index pointer type.
6889     if (PtrTy.getFixedSizeInBits() < ResTy.getFixedSizeInBits())
6890       report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
6891                          " intrinsic!");
6892     Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
6893                       Op);
6894     DAG.setRoot(Op);
6895     setValue(&I, Res);
6896     return;
6897   }
6898   case Intrinsic::stackguard: {
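         // Load the stack guard value, either through the target's
         // LOAD_STACK_GUARD pseudo or with a volatile load from the module's
         // stack-guard global.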
6899     MachineFunction &MF = DAG.getMachineFunction();
6900     const Module &M = *MF.getFunction().getParent();
6901     SDValue Chain = getRoot();
6902     if (TLI.useLoadStackGuardNode()) {
6903       Res = getLoadStackGuard(DAG, sdl, Chain);
6904     } else {
6905       EVT PtrTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6906       const Value *Global = TLI.getSDagStackGuard(M);
6907       Align Align = DAG.getDataLayout().getPrefTypeAlign(Global->getType());
6908       Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
6909                         MachinePointerInfo(Global, 0), Align,
6910                         MachineMemOperand::MOVolatile);
6911     }
6912     if (TLI.useStackGuardXorFP())
6913       Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
6914     DAG.setRoot(Chain);
6915     setValue(&I, Res);
6916     return;
6917   }
6918   case Intrinsic::stackprotector: {
6919     // Emit code into the DAG to store the stack guard onto the stack.
6920     MachineFunction &MF = DAG.getMachineFunction();
6921     MachineFrameInfo &MFI = MF.getFrameInfo();
6922     SDValue Src, Chain = getRoot();
6923 
6924     if (TLI.useLoadStackGuardNode())
6925       Src = getLoadStackGuard(DAG, sdl, Chain);
6926     else
6927       Src = getValue(I.getArgOperand(0));   // The guard's value.
6928 
6929     AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
6930 
6931     int FI = FuncInfo.StaticAllocaMap[Slot];
6932     MFI.setStackProtectorIndex(FI);
6933     EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
6934 
6935     SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
6936 
6937     // Store the stack protector onto the stack.
6938     Res = DAG.getStore(
6939         Chain, sdl, Src, FIN,
6940         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
6941         MaybeAlign(), MachineMemOperand::MOVolatile);
6942     setValue(&I, Res);
6943     DAG.setRoot(Res);
6944     return;
6945   }
6946   case Intrinsic::objectsize:
6947     llvm_unreachable("llvm.objectsize.* should have been lowered already");
6948 
6949   case Intrinsic::is_constant:
6950     llvm_unreachable("llvm.is.constant.* should have been lowered already");
6951 
6952   case Intrinsic::annotation:
6953   case Intrinsic::ptr_annotation:
6954   case Intrinsic::launder_invariant_group:
6955   case Intrinsic::strip_invariant_group:
6956     // Drop the intrinsic, but forward the value
6957     setValue(&I, getValue(I.getOperand(0)));
6958     return;
6959 
6960   case Intrinsic::assume:
6961   case Intrinsic::experimental_noalias_scope_decl:
6962   case Intrinsic::var_annotation:
6963   case Intrinsic::sideeffect:
6964     // Discard annotate attributes, noalias scope declarations, assumptions, and
6965     // artificial side-effects.
6966     return;
6967 
6968   case Intrinsic::codeview_annotation: {
6969     // Emit a label associated with this metadata.
6970     MachineFunction &MF = DAG.getMachineFunction();
6971     MCSymbol *Label =
6972         MF.getMMI().getContext().createTempSymbol("annotation", true);
6973     Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
6974     MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
6975     Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
6976     DAG.setRoot(Res);
6977     return;
6978   }
6979 
6980   case Intrinsic::init_trampoline: {
6981     const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
6982 
6983     SDValue Ops[6];
6984     Ops[0] = getRoot();
6985     Ops[1] = getValue(I.getArgOperand(0));
6986     Ops[2] = getValue(I.getArgOperand(1));
6987     Ops[3] = getValue(I.getArgOperand(2));
6988     Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
6989     Ops[5] = DAG.getSrcValue(F);
6990 
6991     Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
6992 
6993     DAG.setRoot(Res);
6994     return;
6995   }
6996   case Intrinsic::adjust_trampoline:
6997     setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
6998                              TLI.getPointerTy(DAG.getDataLayout()),
6999                              getValue(I.getArgOperand(0))));
7000     return;
7001   case Intrinsic::gcroot: {
7002     assert(DAG.getMachineFunction().getFunction().hasGC() &&
7003            "only valid in functions with gc specified, enforced by Verifier");
7004     assert(GFI && "implied by previous");
7005     const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
7006     const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
7007 
7008     FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
7009     GFI->addStackRoot(FI->getIndex(), TypeMap);
7010     return;
7011   }
7012   case Intrinsic::gcread:
7013   case Intrinsic::gcwrite:
7014     llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
7015   case Intrinsic::get_rounding:
7016     Res = DAG.getNode(ISD::GET_ROUNDING, sdl, {MVT::i32, MVT::Other}, getRoot());
7017     setValue(&I, Res);
7018     DAG.setRoot(Res.getValue(1));
7019     return;
7020 
7021   case Intrinsic::expect:
7022     // Just replace __builtin_expect(exp, c) with EXP.
7023     setValue(&I, getValue(I.getArgOperand(0)));
7024     return;
7025 
7026   case Intrinsic::ubsantrap:
7027   case Intrinsic::debugtrap:
7028   case Intrinsic::trap: {
7029     StringRef TrapFuncName =
7030         I.getAttributes().getFnAttr("trap-func-name").getValueAsString();
7031     if (TrapFuncName.empty()) {
7032       switch (Intrinsic) {
7033       case Intrinsic::trap:
7034         DAG.setRoot(DAG.getNode(ISD::TRAP, sdl, MVT::Other, getRoot()));
7035         break;
7036       case Intrinsic::debugtrap:
7037         DAG.setRoot(DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other, getRoot()));
7038         break;
7039       case Intrinsic::ubsantrap:
7040         DAG.setRoot(DAG.getNode(
7041             ISD::UBSANTRAP, sdl, MVT::Other, getRoot(),
7042             DAG.getTargetConstant(
7043                 cast<ConstantInt>(I.getArgOperand(0))->getZExtValue(), sdl,
7044                 MVT::i32)));
7045         break;
7046       default: llvm_unreachable("unknown trap intrinsic");
7047       }
7048       return;
7049     }
7050     TargetLowering::ArgListTy Args;
7051     if (Intrinsic == Intrinsic::ubsantrap) {
7052       Args.push_back(TargetLoweringBase::ArgListEntry());
7053       Args[0].Val = I.getArgOperand(0);
7054       Args[0].Node = getValue(Args[0].Val);
7055       Args[0].Ty = Args[0].Val->getType();
7056     }
7057 
7058     TargetLowering::CallLoweringInfo CLI(DAG);
7059     CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
7060         CallingConv::C, I.getType(),
7061         DAG.getExternalSymbol(TrapFuncName.data(),
7062                               TLI.getPointerTy(DAG.getDataLayout())),
7063         std::move(Args));
7064 
7065     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
7066     DAG.setRoot(Result.second);
7067     return;
7068   }
7069 
7070   case Intrinsic::uadd_with_overflow:
7071   case Intrinsic::sadd_with_overflow:
7072   case Intrinsic::usub_with_overflow:
7073   case Intrinsic::ssub_with_overflow:
7074   case Intrinsic::umul_with_overflow:
7075   case Intrinsic::smul_with_overflow: {
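         // Each *.with.overflow intrinsic maps to a two-result ISD node: the
         // arithmetic result plus an overflow flag (i1, or a vector of i1 for
         // vector operands).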
7076     ISD::NodeType Op;
7077     switch (Intrinsic) {
7078     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
7079     case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
7080     case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
7081     case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
7082     case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
7083     case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
7084     case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
7085     }
7086     SDValue Op1 = getValue(I.getArgOperand(0));
7087     SDValue Op2 = getValue(I.getArgOperand(1));
7088 
7089     EVT ResultVT = Op1.getValueType();
7090     EVT OverflowVT = MVT::i1;
7091     if (ResultVT.isVector())
7092       OverflowVT = EVT::getVectorVT(
7093           *Context, OverflowVT, ResultVT.getVectorElementCount());
7094 
7095     SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT);
7096     setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
7097     return;
7098   }
7099   case Intrinsic::prefetch: {
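         // llvm.prefetch(ptr, rw, locality, cachetype) becomes an ISD::PREFETCH
         // memory intrinsic node whose operands are the chain, the address, and
         // the three immediate arguments.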
7100     SDValue Ops[5];
7101     unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7102     auto Flags = rw == 0 ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore;
7103     Ops[0] = DAG.getRoot();
7104     Ops[1] = getValue(I.getArgOperand(0));
7105     Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
7106                                    MVT::i32);
7107     Ops[3] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(2)), sdl,
7108                                    MVT::i32);
7109     Ops[4] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(3)), sdl,
7110                                    MVT::i32);
7111     SDValue Result = DAG.getMemIntrinsicNode(
7112         ISD::PREFETCH, sdl, DAG.getVTList(MVT::Other), Ops,
7113         EVT::getIntegerVT(*Context, 8), MachinePointerInfo(I.getArgOperand(0)),
7114         /* align */ std::nullopt, Flags);
7115 
7116     // Chain the prefetch in parallel with any pending loads, to stay out of
7117     // the way of later optimizations.
7118     PendingLoads.push_back(Result);
7119     Result = getRoot();
7120     DAG.setRoot(Result);
7121     return;
7122   }
7123   case Intrinsic::lifetime_start:
7124   case Intrinsic::lifetime_end: {
7125     bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
7126     // Stack coloring is not enabled in O0, discard region information.
7127     if (TM.getOptLevel() == CodeGenOptLevel::None)
7128       return;
7129 
7130     const int64_t ObjectSize =
7131         cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
7132     Value *const ObjectPtr = I.getArgOperand(1);
7133     SmallVector<const Value *, 4> Allocas;
7134     getUnderlyingObjects(ObjectPtr, Allocas);
7135 
7136     for (const Value *Alloca : Allocas) {
7137       const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(Alloca);
7138 
7139       // Could not find an Alloca.
7140       if (!LifetimeObject)
7141         continue;
7142 
7143       // First check that the Alloca is static, otherwise it won't have a
7144       // valid frame index.
7145       auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
7146       if (SI == FuncInfo.StaticAllocaMap.end())
7147         return;
7148 
7149       const int FrameIndex = SI->second;
7150       int64_t Offset;
7151       if (GetPointerBaseWithConstantOffset(
7152               ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject)
7153         Offset = -1; // Cannot determine offset from alloca to lifetime object.
7154       Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize,
7155                                 Offset);
7156       DAG.setRoot(Res);
7157     }
7158     return;
7159   }
7160   case Intrinsic::pseudoprobe: {
7161     auto Guid = cast<ConstantInt>(I.getArgOperand(0))->getZExtValue();
7162     auto Index = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7163     auto Attr = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
7164     Res = DAG.getPseudoProbeNode(sdl, getRoot(), Guid, Index, Attr);
7165     DAG.setRoot(Res);
7166     return;
7167   }
7168   case Intrinsic::invariant_start:
7169     // Discard region information.
7170     setValue(&I,
7171              DAG.getUNDEF(TLI.getValueType(DAG.getDataLayout(), I.getType())));
7172     return;
7173   case Intrinsic::invariant_end:
7174     // Discard region information.
7175     return;
7176   case Intrinsic::clear_cache:
7177     // FunctionName may be null.
7178     if (const char *FunctionName = TLI.getClearCacheBuiltinName())
7179       lowerCallToExternalSymbol(I, FunctionName);
7180     return;
7181   case Intrinsic::donothing:
7182   case Intrinsic::seh_try_begin:
7183   case Intrinsic::seh_scope_begin:
7184   case Intrinsic::seh_try_end:
7185   case Intrinsic::seh_scope_end:
7186     // ignore
7187     return;
7188   case Intrinsic::experimental_stackmap:
7189     visitStackmap(I);
7190     return;
7191   case Intrinsic::experimental_patchpoint_void:
7192   case Intrinsic::experimental_patchpoint_i64:
7193     visitPatchpoint(I);
7194     return;
7195   case Intrinsic::experimental_gc_statepoint:
7196     LowerStatepoint(cast<GCStatepointInst>(I));
7197     return;
7198   case Intrinsic::experimental_gc_result:
7199     visitGCResult(cast<GCResultInst>(I));
7200     return;
7201   case Intrinsic::experimental_gc_relocate:
7202     visitGCRelocate(cast<GCRelocateInst>(I));
7203     return;
7204   case Intrinsic::instrprof_cover:
7205     llvm_unreachable("instrprof failed to lower a cover");
7206   case Intrinsic::instrprof_increment:
7207     llvm_unreachable("instrprof failed to lower an increment");
7208   case Intrinsic::instrprof_timestamp:
7209     llvm_unreachable("instrprof failed to lower a timestamp");
7210   case Intrinsic::instrprof_value_profile:
7211     llvm_unreachable("instrprof failed to lower a value profiling call");
7212   case Intrinsic::instrprof_mcdc_parameters:
7213     llvm_unreachable("instrprof failed to lower mcdc parameters");
7214   case Intrinsic::instrprof_mcdc_tvbitmap_update:
7215     llvm_unreachable("instrprof failed to lower an mcdc tvbitmap update");
7216   case Intrinsic::instrprof_mcdc_condbitmap_update:
7217     llvm_unreachable("instrprof failed to lower an mcdc condbitmap update");
7218   case Intrinsic::localescape: {
7219     MachineFunction &MF = DAG.getMachineFunction();
7220     const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
7221 
7222     // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
7223     // is the same on all targets.
7224     for (unsigned Idx = 0, E = I.arg_size(); Idx < E; ++Idx) {
7225       Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
7226       if (isa<ConstantPointerNull>(Arg))
7227         continue; // Skip null pointers. They represent a hole in index space.
7228       AllocaInst *Slot = cast<AllocaInst>(Arg);
7229       assert(FuncInfo.StaticAllocaMap.count(Slot) &&
7230              "can only escape static allocas");
7231       int FI = FuncInfo.StaticAllocaMap[Slot];
7232       MCSymbol *FrameAllocSym =
7233           MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
7234               GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx);
7235       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
7236               TII->get(TargetOpcode::LOCAL_ESCAPE))
7237           .addSym(FrameAllocSym)
7238           .addFrameIndex(FI);
7239     }
7240 
7241     return;
7242   }
7243 
7244   case Intrinsic::localrecover: {
7245     // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
7246     MachineFunction &MF = DAG.getMachineFunction();
7247 
7248     // Get the symbol that defines the frame offset.
7249     auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
7250     auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
7251     unsigned IdxVal =
7252         unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7253     MCSymbol *FrameAllocSym =
7254         MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
7255             GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);
7256 
7257     Value *FP = I.getArgOperand(1);
7258     SDValue FPVal = getValue(FP);
7259     EVT PtrVT = FPVal.getValueType();
7260 
7261     // Create an MCSymbol for the label to avoid any target lowering
7262     // that would make this PC-relative.
7263     SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
7264     SDValue OffsetVal =
7265         DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
7266 
7267     // Add the offset to the FP.
7268     SDValue Add = DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
7269     setValue(&I, Add);
7270 
7271     return;
7272   }
7273 
7274   case Intrinsic::eh_exceptionpointer:
7275   case Intrinsic::eh_exceptioncode: {
7276     // Get the exception pointer vreg, copy from it, and resize it to fit.
7277     const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
7278     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
7279     const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
7280     unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
7281     SDValue N = DAG.getCopyFromReg(DAG.getEntryNode(), sdl, VReg, PtrVT);
7282     if (Intrinsic == Intrinsic::eh_exceptioncode)
7283       N = DAG.getZExtOrTrunc(N, sdl, MVT::i32);
7284     setValue(&I, N);
7285     return;
7286   }
7287   case Intrinsic::xray_customevent: {
7288     // Here we want to make sure that the intrinsic behaves as if it has a
7289     // specific calling convention.
7290     const auto &Triple = DAG.getTarget().getTargetTriple();
7291     if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
7292       return;
7293 
7294     SmallVector<SDValue, 8> Ops;
7295 
7296     // We want to say that we always want the arguments in registers.
7297     SDValue LogEntryVal = getValue(I.getArgOperand(0));
7298     SDValue StrSizeVal = getValue(I.getArgOperand(1));
7299     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7300     SDValue Chain = getRoot();
7301     Ops.push_back(LogEntryVal);
7302     Ops.push_back(StrSizeVal);
7303     Ops.push_back(Chain);
7304 
7305     // We need to enforce the calling convention for the call site so that
7306     // argument ordering is correct and register allocation can see that some
7307     // registers may be clobbered by the call and that live values have to be
7308     // preserved across it.
7309     MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
7310                                            sdl, NodeTys, Ops);
7311     SDValue patchableNode = SDValue(MN, 0);
7312     DAG.setRoot(patchableNode);
7313     setValue(&I, patchableNode);
7314     return;
7315   }
7316   case Intrinsic::xray_typedevent: {
7317     // Here we want to make sure that the intrinsic behaves as if it has a
7318     // specific calling convention.
7319     const auto &Triple = DAG.getTarget().getTargetTriple();
7320     if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
7321       return;
7322 
7323     SmallVector<SDValue, 8> Ops;
7324 
7325     // We want to say that we always want the arguments in registers.
7326     // It's unclear to me how manipulating the selection DAG here forces callers
7327     // to provide arguments in registers instead of on the stack.
7328     SDValue LogTypeId = getValue(I.getArgOperand(0));
7329     SDValue LogEntryVal = getValue(I.getArgOperand(1));
7330     SDValue StrSizeVal = getValue(I.getArgOperand(2));
7331     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7332     SDValue Chain = getRoot();
7333     Ops.push_back(LogTypeId);
7334     Ops.push_back(LogEntryVal);
7335     Ops.push_back(StrSizeVal);
7336     Ops.push_back(Chain);
7337 
7338     // We need to enforce the calling convention for the call site so that
7339     // argument ordering is correct and register allocation can see that some
7340     // registers may be clobbered by the call and that live values have to be
7341     // preserved across it.
7342     MachineSDNode *MN = DAG.getMachineNode(
7343         TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys, Ops);
7344     SDValue patchableNode = SDValue(MN, 0);
7345     DAG.setRoot(patchableNode);
7346     setValue(&I, patchableNode);
7347     return;
7348   }
7349   case Intrinsic::experimental_deoptimize:
7350     LowerDeoptimizeCall(&I);
7351     return;
7352   case Intrinsic::experimental_stepvector:
7353     visitStepVector(I);
7354     return;
7355   case Intrinsic::vector_reduce_fadd:
7356   case Intrinsic::vector_reduce_fmul:
7357   case Intrinsic::vector_reduce_add:
7358   case Intrinsic::vector_reduce_mul:
7359   case Intrinsic::vector_reduce_and:
7360   case Intrinsic::vector_reduce_or:
7361   case Intrinsic::vector_reduce_xor:
7362   case Intrinsic::vector_reduce_smax:
7363   case Intrinsic::vector_reduce_smin:
7364   case Intrinsic::vector_reduce_umax:
7365   case Intrinsic::vector_reduce_umin:
7366   case Intrinsic::vector_reduce_fmax:
7367   case Intrinsic::vector_reduce_fmin:
7368   case Intrinsic::vector_reduce_fmaximum:
7369   case Intrinsic::vector_reduce_fminimum:
7370     visitVectorReduce(I, Intrinsic);
7371     return;
7372 
7373   case Intrinsic::icall_branch_funnel: {
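         // After the first operand, arguments come in (pointer, function) pairs.
         // Every pointer must be a constant offset from the same GlobalValue;
         // the targets are sorted by that offset before being passed to the
         // ICALL_BRANCH_FUNNEL pseudo.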
7374     SmallVector<SDValue, 16> Ops;
7375     Ops.push_back(getValue(I.getArgOperand(0)));
7376 
7377     int64_t Offset;
7378     auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7379         I.getArgOperand(1), Offset, DAG.getDataLayout()));
7380     if (!Base)
7381       report_fatal_error(
7382           "llvm.icall.branch.funnel operand must be a GlobalValue");
7383     Ops.push_back(DAG.getTargetGlobalAddress(Base, sdl, MVT::i64, 0));
7384 
7385     struct BranchFunnelTarget {
7386       int64_t Offset;
7387       SDValue Target;
7388     };
7389     SmallVector<BranchFunnelTarget, 8> Targets;
7390 
7391     for (unsigned Op = 1, N = I.arg_size(); Op != N; Op += 2) {
7392       auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7393           I.getArgOperand(Op), Offset, DAG.getDataLayout()));
7394       if (ElemBase != Base)
7395         report_fatal_error("all llvm.icall.branch.funnel operands must refer "
7396                            "to the same GlobalValue");
7397 
7398       SDValue Val = getValue(I.getArgOperand(Op + 1));
7399       auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
7400       if (!GA)
7401         report_fatal_error(
7402             "llvm.icall.branch.funnel operand must be a GlobalValue");
7403       Targets.push_back({Offset, DAG.getTargetGlobalAddress(
7404                                      GA->getGlobal(), sdl, Val.getValueType(),
7405                                      GA->getOffset())});
7406     }
7407     llvm::sort(Targets,
7408                [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
7409                  return T1.Offset < T2.Offset;
7410                });
7411 
7412     for (auto &T : Targets) {
7413       Ops.push_back(DAG.getTargetConstant(T.Offset, sdl, MVT::i32));
7414       Ops.push_back(T.Target);
7415     }
7416 
7417     Ops.push_back(DAG.getRoot()); // Chain
7418     SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl,
7419                                  MVT::Other, Ops),
7420               0);
7421     DAG.setRoot(N);
7422     setValue(&I, N);
7423     HasTailCall = true;
7424     return;
7425   }
7426 
7427   case Intrinsic::wasm_landingpad_index:
7428     // The information this intrinsic contained has been transferred to the
7429     // MachineFunction in SelectionDAGISel::PrepareEHLandingPad. We can safely
7430     // delete it now.
7431     return;
7432 
7433   case Intrinsic::aarch64_settag:
7434   case Intrinsic::aarch64_settag_zero: {
7435     const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7436     bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero;
7437     SDValue Val = TSI.EmitTargetCodeForSetTag(
7438         DAG, sdl, getRoot(), getValue(I.getArgOperand(0)),
7439         getValue(I.getArgOperand(1)), MachinePointerInfo(I.getArgOperand(0)),
7440         ZeroMemory);
7441     DAG.setRoot(Val);
7442     setValue(&I, Val);
7443     return;
7444   }
7445   case Intrinsic::ptrmask: {
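         // llvm.ptrmask is lowered as a plain AND of the pointer with the mask;
         // the pointer and the mask must share the same index type.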
7446     SDValue Ptr = getValue(I.getOperand(0));
7447     SDValue Mask = getValue(I.getOperand(1));
7448 
7449     EVT PtrVT = Ptr.getValueType();
7450     assert(PtrVT == Mask.getValueType() &&
7451            "Pointers with different index type are not supported by SDAG");
7452     setValue(&I, DAG.getNode(ISD::AND, sdl, PtrVT, Ptr, Mask));
7453     return;
7454   }
7455   case Intrinsic::threadlocal_address: {
7456     setValue(&I, getValue(I.getOperand(0)));
7457     return;
7458   }
7459   case Intrinsic::get_active_lane_mask: {
7460     EVT CCVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7461     SDValue Index = getValue(I.getOperand(0));
7462     EVT ElementVT = Index.getValueType();
7463 
7464     if (!TLI.shouldExpandGetActiveLaneMask(CCVT, ElementVT)) {
7465       visitTargetIntrinsic(I, Intrinsic);
7466       return;
7467     }
7468 
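         // Generic expansion: lane i of the mask is (Index + i) u< TripCount,
         // computed with a saturating add so the induction cannot wrap.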
7469     SDValue TripCount = getValue(I.getOperand(1));
7470     EVT VecTy = EVT::getVectorVT(*DAG.getContext(), ElementVT,
7471                                  CCVT.getVectorElementCount());
7472 
7473     SDValue VectorIndex = DAG.getSplat(VecTy, sdl, Index);
7474     SDValue VectorTripCount = DAG.getSplat(VecTy, sdl, TripCount);
7475     SDValue VectorStep = DAG.getStepVector(sdl, VecTy);
7476     SDValue VectorInduction = DAG.getNode(
7477         ISD::UADDSAT, sdl, VecTy, VectorIndex, VectorStep);
7478     SDValue SetCC = DAG.getSetCC(sdl, CCVT, VectorInduction,
7479                                  VectorTripCount, ISD::CondCode::SETULT);
7480     setValue(&I, SetCC);
7481     return;
7482   }
7483   case Intrinsic::experimental_get_vector_length: {
7484     assert(cast<ConstantInt>(I.getOperand(1))->getSExtValue() > 0 &&
7485            "Expected positive VF");
7486     unsigned VF = cast<ConstantInt>(I.getOperand(1))->getZExtValue();
7487     bool IsScalable = cast<ConstantInt>(I.getOperand(2))->isOne();
7488 
7489     SDValue Count = getValue(I.getOperand(0));
7490     EVT CountVT = Count.getValueType();
7491 
7492     if (!TLI.shouldExpandGetVectorLength(CountVT, VF, IsScalable)) {
7493       visitTargetIntrinsic(I, Intrinsic);
7494       return;
7495     }
7496 
7497     // Expand to a umin between the trip count and the maximum number of
7498     // elements the type can hold.
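         // That is, result = umin(Count, VF) for a fixed VF, or
         // umin(Count, VF * vscale) for a scalable one, truncated to the result
         // type.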
7499     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7500 
7501     // Extend the trip count to at least the result VT.
7502     if (CountVT.bitsLT(VT)) {
7503       Count = DAG.getNode(ISD::ZERO_EXTEND, sdl, VT, Count);
7504       CountVT = VT;
7505     }
7506 
7507     SDValue MaxEVL = DAG.getElementCount(sdl, CountVT,
7508                                          ElementCount::get(VF, IsScalable));
7509 
7510     SDValue UMin = DAG.getNode(ISD::UMIN, sdl, CountVT, Count, MaxEVL);
7511     // Clip to the result type if needed.
7512     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, sdl, VT, UMin);
7513 
7514     setValue(&I, Trunc);
7515     return;
7516   }
7517   case Intrinsic::vector_insert: {
7518     SDValue Vec = getValue(I.getOperand(0));
7519     SDValue SubVec = getValue(I.getOperand(1));
7520     SDValue Index = getValue(I.getOperand(2));
7521 
7522     // The intrinsic's index type is i64, but the SDNode requires an index type
7523     // suitable for the target. Convert the index as required.
7524     MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
7525     if (Index.getValueType() != VectorIdxTy)
7526       Index = DAG.getVectorIdxConstant(
7527           cast<ConstantSDNode>(Index)->getZExtValue(), sdl);
7528 
7529     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7530     setValue(&I, DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, ResultVT, Vec, SubVec,
7531                              Index));
7532     return;
7533   }
7534   case Intrinsic::vector_extract: {
7535     SDValue Vec = getValue(I.getOperand(0));
7536     SDValue Index = getValue(I.getOperand(1));
7537     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7538 
7539     // The intrinsic's index type is i64, but the SDNode requires an index type
7540     // suitable for the target. Convert the index as required.
7541     MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
7542     if (Index.getValueType() != VectorIdxTy)
7543       Index = DAG.getVectorIdxConstant(
7544           cast<ConstantSDNode>(Index)->getZExtValue(), sdl);
7545 
7546     setValue(&I,
7547              DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, ResultVT, Vec, Index));
7548     return;
7549   }
7550   case Intrinsic::experimental_vector_reverse:
7551     visitVectorReverse(I);
7552     return;
7553   case Intrinsic::experimental_vector_splice:
7554     visitVectorSplice(I);
7555     return;
7556   case Intrinsic::callbr_landingpad:
7557     visitCallBrLandingPad(I);
7558     return;
7559   case Intrinsic::experimental_vector_interleave2:
7560     visitVectorInterleave(I);
7561     return;
7562   case Intrinsic::experimental_vector_deinterleave2:
7563     visitVectorDeinterleave(I);
7564     return;
7565   }
7566 }
7567 
7568 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
7569     const ConstrainedFPIntrinsic &FPI) {
7570   SDLoc sdl = getCurSDLoc();
7571 
7572   // We do not need to serialize constrained FP intrinsics against
7573   // each other or against (nonvolatile) loads, so they can be
7574   // chained like loads.
7575   SDValue Chain = DAG.getRoot();
7576   SmallVector<SDValue, 4> Opers;
7577   Opers.push_back(Chain);
7578   if (FPI.isUnaryOp()) {
7579     Opers.push_back(getValue(FPI.getArgOperand(0)));
7580   } else if (FPI.isTernaryOp()) {
7581     Opers.push_back(getValue(FPI.getArgOperand(0)));
7582     Opers.push_back(getValue(FPI.getArgOperand(1)));
7583     Opers.push_back(getValue(FPI.getArgOperand(2)));
7584   } else {
7585     Opers.push_back(getValue(FPI.getArgOperand(0)));
7586     Opers.push_back(getValue(FPI.getArgOperand(1)));
7587   }
7588 
7589   auto pushOutChain = [this](SDValue Result, fp::ExceptionBehavior EB) {
7590     assert(Result.getNode()->getNumValues() == 2);
7591 
7592     // Push node to the appropriate list so that future instructions can be
7593     // chained up correctly.
7594     SDValue OutChain = Result.getValue(1);
7595     switch (EB) {
7596     case fp::ExceptionBehavior::ebIgnore:
7597       // The only reason why ebIgnore nodes still need to be chained is that
7598       // they might depend on the current rounding mode, and therefore must
7599       // not be moved across instructions that may change that mode.
7600       [[fallthrough]];
7601     case fp::ExceptionBehavior::ebMayTrap:
7602       // These must not be moved across calls or instructions that may change
7603       // floating-point exception masks.
7604       PendingConstrainedFP.push_back(OutChain);
7605       break;
7606     case fp::ExceptionBehavior::ebStrict:
7607       // These must not be moved across calls or instructions that may change
7608       // floating-point exception masks or read floating-point exception flags.
7609       // In addition, they cannot be optimized out even if unused.
7610       PendingConstrainedFPStrict.push_back(OutChain);
7611       break;
7612     }
7613   };
7614 
7615   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7616   EVT VT = TLI.getValueType(DAG.getDataLayout(), FPI.getType());
7617   SDVTList VTs = DAG.getVTList(VT, MVT::Other);
7618   fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();
7619 
7620   SDNodeFlags Flags;
7621   if (EB == fp::ExceptionBehavior::ebIgnore)
7622     Flags.setNoFPExcept(true);
7623 
7624   if (auto *FPOp = dyn_cast<FPMathOperator>(&FPI))
7625     Flags.copyFMF(*FPOp);
7626 
7627   unsigned Opcode;
7628   switch (FPI.getIntrinsicID()) {
7629   default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
7630 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
7631   case Intrinsic::INTRINSIC:                                                   \
7632     Opcode = ISD::STRICT_##DAGN;                                               \
7633     break;
7634 #include "llvm/IR/ConstrainedOps.def"
7635   case Intrinsic::experimental_constrained_fmuladd: {
7636     Opcode = ISD::STRICT_FMA;
7637     // Break fmuladd into fmul and fadd.
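    // Illustrative sketch: constrained.fmuladd(a, b, c) is then emitted as
    // STRICT_FADD(STRICT_FMUL(a, b), c), with the FADD chained on the FMUL's
    // output chain so the exception ordering is preserved.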
7638     if (TM.Options.AllowFPOpFusion == FPOpFusion::Strict ||
7639         !TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
7640       Opers.pop_back();
7641       SDValue Mul = DAG.getNode(ISD::STRICT_FMUL, sdl, VTs, Opers, Flags);
7642       pushOutChain(Mul, EB);
7643       Opcode = ISD::STRICT_FADD;
7644       Opers.clear();
7645       Opers.push_back(Mul.getValue(1));
7646       Opers.push_back(Mul.getValue(0));
7647       Opers.push_back(getValue(FPI.getArgOperand(2)));
7648     }
7649     break;
7650   }
7651   }
7652 
7653   // A few strict DAG nodes carry additional operands that are not
7654   // set up by the default code above.
7655   switch (Opcode) {
7656   default: break;
7657   case ISD::STRICT_FP_ROUND:
7658     Opers.push_back(
7659         DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())));
7660     break;
7661   case ISD::STRICT_FSETCC:
7662   case ISD::STRICT_FSETCCS: {
7663     auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
7664     ISD::CondCode Condition = getFCmpCondCode(FPCmp->getPredicate());
7665     if (TM.Options.NoNaNsFPMath)
7666       Condition = getFCmpCodeWithoutNaN(Condition);
7667     Opers.push_back(DAG.getCondCode(Condition));
7668     break;
7669   }
7670   }
7671 
7672   SDValue Result = DAG.getNode(Opcode, sdl, VTs, Opers, Flags);
7673   pushOutChain(Result, EB);
7674 
7675   SDValue FPResult = Result.getValue(0);
7676   setValue(&FPI, FPResult);
7677 }
7678 
7679 static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin) {
7680   std::optional<unsigned> ResOPC;
7681   switch (VPIntrin.getIntrinsicID()) {
7682   case Intrinsic::vp_ctlz: {
7683     bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
7684     ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
7685     break;
7686   }
7687   case Intrinsic::vp_cttz: {
7688     bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
7689     ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
7690     break;
7691   }
7692 #define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD)                                    \
7693   case Intrinsic::VPID:                                                        \
7694     ResOPC = ISD::VPSD;                                                        \
7695     break;
7696 #include "llvm/IR/VPIntrinsics.def"
7697   }
7698 
7699   if (!ResOPC)
7700     llvm_unreachable(
7701         "Inconsistency: no SDNode available for this VPIntrinsic!");
7702 
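  // For example (illustrative), vp.reduce.fadd normally maps to the
  // sequential VP_REDUCE_SEQ_FADD; with the 'reassoc' fast-math flag it may
  // be relaxed to the unordered VP_REDUCE_FADD below.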
7703   if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
7704       *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
7705     if (VPIntrin.getFastMathFlags().allowReassoc())
7706       return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
7707                                                 : ISD::VP_REDUCE_FMUL;
7708   }
7709 
7710   return *ResOPC;
7711 }
7712 
7713 void SelectionDAGBuilder::visitVPLoad(
7714     const VPIntrinsic &VPIntrin, EVT VT,
7715     const SmallVectorImpl<SDValue> &OpValues) {
7716   SDLoc DL = getCurSDLoc();
7717   Value *PtrOperand = VPIntrin.getArgOperand(0);
7718   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7719   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7720   const MDNode *Ranges = getRangeMetadata(VPIntrin);
7721   SDValue LD;
7722   // Do not serialize variable-length loads of constant memory with
7723   // anything.
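  // If alias analysis proves the pointer refers to constant memory, the load
  // is chained to the entry node (and kept out of PendingLoads) so it can be
  // freely reordered; otherwise it joins the normal load chain below.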
7724   if (!Alignment)
7725     Alignment = DAG.getEVTAlign(VT);
7726   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
7727   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
7728   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
7729   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7730       MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
7731       MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
7732   LD = DAG.getLoadVP(VT, DL, InChain, OpValues[0], OpValues[1], OpValues[2],
7733                      MMO, false /*IsExpanding */);
7734   if (AddToChain)
7735     PendingLoads.push_back(LD.getValue(1));
7736   setValue(&VPIntrin, LD);
7737 }
7738 
7739 void SelectionDAGBuilder::visitVPGather(
7740     const VPIntrinsic &VPIntrin, EVT VT,
7741     const SmallVectorImpl<SDValue> &OpValues) {
7742   SDLoc DL = getCurSDLoc();
7743   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7744   Value *PtrOperand = VPIntrin.getArgOperand(0);
7745   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7746   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7747   const MDNode *Ranges = getRangeMetadata(VPIntrin);
7748   SDValue LD;
7749   if (!Alignment)
7750     Alignment = DAG.getEVTAlign(VT.getScalarType());
7751   unsigned AS =
7752     PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
7753   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7754      MachinePointerInfo(AS), MachineMemOperand::MOLoad,
7755      MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
7756   SDValue Base, Index, Scale;
7757   ISD::MemIndexType IndexType;
7758   bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
7759                                     this, VPIntrin.getParent(),
7760                                     VT.getScalarStoreSize());
7761   if (!UniformBase) {
7762     Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
7763     Index = getValue(PtrOperand);
7764     IndexType = ISD::SIGNED_SCALED;
7765     Scale = DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
7766   }
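  // Illustrative note: with no common base, each lane gathers directly from
  // its own pointer, i.e. address = 0 + ptr[i] * 1.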
7767   EVT IdxVT = Index.getValueType();
7768   EVT EltTy = IdxVT.getVectorElementType();
7769   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
7770     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
7771     Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
7772   }
7773   LD = DAG.getGatherVP(
7774       DAG.getVTList(VT, MVT::Other), VT, DL,
7775       {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
7776       IndexType);
7777   PendingLoads.push_back(LD.getValue(1));
7778   setValue(&VPIntrin, LD);
7779 }
7780 
7781 void SelectionDAGBuilder::visitVPStore(
7782     const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
7783   SDLoc DL = getCurSDLoc();
7784   Value *PtrOperand = VPIntrin.getArgOperand(1);
7785   EVT VT = OpValues[0].getValueType();
7786   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7787   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7788   SDValue ST;
7789   if (!Alignment)
7790     Alignment = DAG.getEVTAlign(VT);
7791   SDValue Ptr = OpValues[1];
7792   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
7793   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7794       MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
7795       MemoryLocation::UnknownSize, *Alignment, AAInfo);
7796   ST = DAG.getStoreVP(getMemoryRoot(), DL, OpValues[0], Ptr, Offset,
7797                       OpValues[2], OpValues[3], VT, MMO, ISD::UNINDEXED,
7798                       /* IsTruncating */ false, /*IsCompressing*/ false);
7799   DAG.setRoot(ST);
7800   setValue(&VPIntrin, ST);
7801 }
7802 
7803 void SelectionDAGBuilder::visitVPScatter(
7804     const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
7805   SDLoc DL = getCurSDLoc();
7806   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7807   Value *PtrOperand = VPIntrin.getArgOperand(1);
7808   EVT VT = OpValues[0].getValueType();
7809   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7810   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7811   SDValue ST;
7812   if (!Alignment)
7813     Alignment = DAG.getEVTAlign(VT.getScalarType());
7814   unsigned AS =
7815       PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
7816   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7817       MachinePointerInfo(AS), MachineMemOperand::MOStore,
7818       MemoryLocation::UnknownSize, *Alignment, AAInfo);
7819   SDValue Base, Index, Scale;
7820   ISD::MemIndexType IndexType;
7821   bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
7822                                     this, VPIntrin.getParent(),
7823                                     VT.getScalarStoreSize());
7824   if (!UniformBase) {
7825     Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
7826     Index = getValue(PtrOperand);
7827     IndexType = ISD::SIGNED_SCALED;
7828     Scale =
7829       DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
7830   }
7831   EVT IdxVT = Index.getValueType();
7832   EVT EltTy = IdxVT.getVectorElementType();
7833   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
7834     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
7835     Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
7836   }
7837   ST = DAG.getScatterVP(DAG.getVTList(MVT::Other), VT, DL,
7838                         {getMemoryRoot(), OpValues[0], Base, Index, Scale,
7839                          OpValues[2], OpValues[3]},
7840                         MMO, IndexType);
7841   DAG.setRoot(ST);
7842   setValue(&VPIntrin, ST);
7843 }
7844 
7845 void SelectionDAGBuilder::visitVPStridedLoad(
7846     const VPIntrinsic &VPIntrin, EVT VT,
7847     const SmallVectorImpl<SDValue> &OpValues) {
7848   SDLoc DL = getCurSDLoc();
7849   Value *PtrOperand = VPIntrin.getArgOperand(0);
7850   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7851   if (!Alignment)
7852     Alignment = DAG.getEVTAlign(VT.getScalarType());
7853   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7854   const MDNode *Ranges = getRangeMetadata(VPIntrin);
7855   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
7856   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
7857   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
7858   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7859       MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
7860       MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
7861 
7862   SDValue LD = DAG.getStridedLoadVP(VT, DL, InChain, OpValues[0], OpValues[1],
7863                                     OpValues[2], OpValues[3], MMO,
7864                                     false /*IsExpanding*/);
7865 
7866   if (AddToChain)
7867     PendingLoads.push_back(LD.getValue(1));
7868   setValue(&VPIntrin, LD);
7869 }
7870 
7871 void SelectionDAGBuilder::visitVPStridedStore(
7872     const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
7873   SDLoc DL = getCurSDLoc();
7874   Value *PtrOperand = VPIntrin.getArgOperand(1);
7875   EVT VT = OpValues[0].getValueType();
7876   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7877   if (!Alignment)
7878     Alignment = DAG.getEVTAlign(VT.getScalarType());
7879   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7880   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7881       MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
7882       MemoryLocation::UnknownSize, *Alignment, AAInfo);
7883 
7884   SDValue ST = DAG.getStridedStoreVP(
7885       getMemoryRoot(), DL, OpValues[0], OpValues[1],
7886       DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
7887       OpValues[4], VT, MMO, ISD::UNINDEXED, /*IsTruncating*/ false,
7888       /*IsCompressing*/ false);
7889 
7890   DAG.setRoot(ST);
7891   setValue(&VPIntrin, ST);
7892 }
7893 
7894 void SelectionDAGBuilder::visitVPCmp(const VPCmpIntrinsic &VPIntrin) {
7895   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7896   SDLoc DL = getCurSDLoc();
7897 
7898   ISD::CondCode Condition;
7899   CmpInst::Predicate CondCode = VPIntrin.getPredicate();
7900   bool IsFP = VPIntrin.getOperand(0)->getType()->isFPOrFPVectorTy();
7901   if (IsFP) {
7902     // FIXME: Regular fcmps are FPMathOperators, which may carry fast-math
7903     // (nnan) flags, but calls such as vp.fcmp that don't return floating-point
7904     // types can't be FPMathOperators. This affects constrained fcmp too.
7905     Condition = getFCmpCondCode(CondCode);
7906     if (TM.Options.NoNaNsFPMath)
7907       Condition = getFCmpCodeWithoutNaN(Condition);
7908   } else {
7909     Condition = getICmpCondCode(CondCode);
7910   }
7911 
7912   SDValue Op1 = getValue(VPIntrin.getOperand(0));
7913   SDValue Op2 = getValue(VPIntrin.getOperand(1));
7914   // #2 is the condition code
7915   SDValue MaskOp = getValue(VPIntrin.getOperand(3));
7916   SDValue EVL = getValue(VPIntrin.getOperand(4));
7917   MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
7918   assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
7919          "Unexpected target EVL type");
7920   EVL = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, EVL);
7921 
7922   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
7923                                                         VPIntrin.getType());
7924   setValue(&VPIntrin,
7925            DAG.getSetCCVP(DL, DestVT, Op1, Op2, Condition, MaskOp, EVL));
7926 }
7927 
7928 void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
7929     const VPIntrinsic &VPIntrin) {
7930   SDLoc DL = getCurSDLoc();
7931   unsigned Opcode = getISDForVPIntrinsic(VPIntrin);
7932 
7933   auto IID = VPIntrin.getIntrinsicID();
7934 
7935   if (const auto *CmpI = dyn_cast<VPCmpIntrinsic>(&VPIntrin))
7936     return visitVPCmp(*CmpI);
7937 
7938   SmallVector<EVT, 4> ValueVTs;
7939   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7940   ComputeValueVTs(TLI, DAG.getDataLayout(), VPIntrin.getType(), ValueVTs);
7941   SDVTList VTs = DAG.getVTList(ValueVTs);
7942 
7943   auto EVLParamPos = VPIntrinsic::getVectorLengthParamPos(IID);
7944 
7945   MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
7946   assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
7947          "Unexpected target EVL type");
7948 
7949   // Request operands.
7950   SmallVector<SDValue, 7> OpValues;
7951   for (unsigned I = 0; I < VPIntrin.arg_size(); ++I) {
7952     auto Op = getValue(VPIntrin.getArgOperand(I));
7953     if (I == EVLParamPos)
7954       Op = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, Op);
7955     OpValues.push_back(Op);
7956   }
7957 
7958   switch (Opcode) {
7959   default: {
7960     SDNodeFlags SDFlags;
7961     if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
7962       SDFlags.copyFMF(*FPMO);
7963     SDValue Result = DAG.getNode(Opcode, DL, VTs, OpValues, SDFlags);
7964     setValue(&VPIntrin, Result);
7965     break;
7966   }
7967   case ISD::VP_LOAD:
7968     visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
7969     break;
7970   case ISD::VP_GATHER:
7971     visitVPGather(VPIntrin, ValueVTs[0], OpValues);
7972     break;
7973   case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
7974     visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
7975     break;
7976   case ISD::VP_STORE:
7977     visitVPStore(VPIntrin, OpValues);
7978     break;
7979   case ISD::VP_SCATTER:
7980     visitVPScatter(VPIntrin, OpValues);
7981     break;
7982   case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
7983     visitVPStridedStore(VPIntrin, OpValues);
7984     break;
7985   case ISD::VP_FMULADD: {
7986     assert(OpValues.size() == 5 && "Unexpected number of operands");
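    // Illustrative sketch: vp.fmuladd(a, b, c, mask, evl) becomes a single
    // VP_FMA when fusion is allowed and profitable, and otherwise
    // vp.fadd(vp.fmul(a, b, mask, evl), c, mask, evl).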
7987     SDNodeFlags SDFlags;
7988     if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
7989       SDFlags.copyFMF(*FPMO);
7990     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
7991         TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), ValueVTs[0])) {
7992       setValue(&VPIntrin, DAG.getNode(ISD::VP_FMA, DL, VTs, OpValues, SDFlags));
7993     } else {
7994       SDValue Mul = DAG.getNode(
7995           ISD::VP_FMUL, DL, VTs,
7996           {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
7997       SDValue Add =
7998           DAG.getNode(ISD::VP_FADD, DL, VTs,
7999                       {Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8000       setValue(&VPIntrin, Add);
8001     }
8002     break;
8003   }
8004   case ISD::VP_IS_FPCLASS: {
8005     const DataLayout DLayout = DAG.getDataLayout();
8006     EVT DestVT = TLI.getValueType(DLayout, VPIntrin.getType());
8007     auto Constant = cast<ConstantSDNode>(OpValues[1])->getZExtValue();
8008     SDValue Check = DAG.getTargetConstant(Constant, DL, MVT::i32);
8009     SDValue V = DAG.getNode(ISD::VP_IS_FPCLASS, DL, DestVT,
8010                             {OpValues[0], Check, OpValues[2], OpValues[3]});
8011     setValue(&VPIntrin, V);
8012     return;
8013   }
8014   case ISD::VP_INTTOPTR: {
8015     SDValue N = OpValues[0];
8016     EVT DestVT = TLI.getValueType(DAG.getDataLayout(), VPIntrin.getType());
8017     EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), VPIntrin.getType());
8018     N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
8019                                OpValues[2]);
8020     N = DAG.getVPZExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
8021                              OpValues[2]);
8022     setValue(&VPIntrin, N);
8023     break;
8024   }
8025   case ISD::VP_PTRTOINT: {
8026     SDValue N = OpValues[0];
8027     EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8028                                                           VPIntrin.getType());
8029     EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(),
8030                                        VPIntrin.getOperand(0)->getType());
8031     N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
8032                                OpValues[2]);
8033     N = DAG.getVPZExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
8034                              OpValues[2]);
8035     setValue(&VPIntrin, N);
8036     break;
8037   }
8038   case ISD::VP_ABS:
8039   case ISD::VP_CTLZ:
8040   case ISD::VP_CTLZ_ZERO_UNDEF:
8041   case ISD::VP_CTTZ:
8042   case ISD::VP_CTTZ_ZERO_UNDEF: {
8043     SDValue Result =
8044         DAG.getNode(Opcode, DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8045     setValue(&VPIntrin, Result);
8046     break;
8047   }
8048   }
8049 }
8050 
8051 SDValue SelectionDAGBuilder::lowerStartEH(SDValue Chain,
8052                                           const BasicBlock *EHPadBB,
8053                                           MCSymbol *&BeginLabel) {
8054   MachineFunction &MF = DAG.getMachineFunction();
8055   MachineModuleInfo &MMI = MF.getMMI();
8056 
8057   // Insert a label before the invoke call to mark the try range.  This can be
8058   // used to detect deletion of the invoke via the MachineModuleInfo.
8059   BeginLabel = MMI.getContext().createTempSymbol();
8060 
8061   // For SjLj, keep track of which landing pads go with which invokes
8062   // so as to maintain the ordering of pads in the LSDA.
8063   unsigned CallSiteIndex = MMI.getCurrentCallSite();
8064   if (CallSiteIndex) {
8065     MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
8066     LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);
8067 
8068     // Now that the call site is handled, stop tracking it.
8069     MMI.setCurrentCallSite(0);
8070   }
8071 
8072   return DAG.getEHLabel(getCurSDLoc(), Chain, BeginLabel);
8073 }
8074 
8075 SDValue SelectionDAGBuilder::lowerEndEH(SDValue Chain, const InvokeInst *II,
8076                                         const BasicBlock *EHPadBB,
8077                                         MCSymbol *BeginLabel) {
8078   assert(BeginLabel && "BeginLabel should've been set");
8079 
8080   MachineFunction &MF = DAG.getMachineFunction();
8081   MachineModuleInfo &MMI = MF.getMMI();
8082 
8083   // Insert a label at the end of the invoke call to mark the try range.  This
8084   // can be used to detect deletion of the invoke via the MachineModuleInfo.
8085   MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
8086   Chain = DAG.getEHLabel(getCurSDLoc(), Chain, EndLabel);
8087 
8088   // Inform MachineModuleInfo of range.
8089   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
8090   // Some platforms (e.g. wasm) use funclet-style IR but do not actually use
8091   // outlined funclets or their LSDA info style.
8092   if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
8093     assert(II && "II should've been set");
8094     WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
8095     EHInfo->addIPToStateRange(II, BeginLabel, EndLabel);
8096   } else if (!isScopedEHPersonality(Pers)) {
8097     assert(EHPadBB);
8098     MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
8099   }
8100 
8101   return Chain;
8102 }
8103 
8104 std::pair<SDValue, SDValue>
8105 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
8106                                     const BasicBlock *EHPadBB) {
8107   MCSymbol *BeginLabel = nullptr;
8108 
8109   if (EHPadBB) {
8110     // Both PendingLoads and PendingExports must be flushed here;
8111     // this call might not return.
8112     (void)getRoot();
8113     DAG.setRoot(lowerStartEH(getControlRoot(), EHPadBB, BeginLabel));
8114     CLI.setChain(getRoot());
8115   }
8116 
8117   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8118   std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
8119 
8120   assert((CLI.IsTailCall || Result.second.getNode()) &&
8121          "Non-null chain expected with non-tail call!");
8122   assert((Result.second.getNode() || !Result.first.getNode()) &&
8123          "Null value expected with tail call!");
8124 
8125   if (!Result.second.getNode()) {
8126     // As a special case, a null chain means that a tail call has been emitted
8127     // and the DAG root is already updated.
8128     HasTailCall = true;
8129 
8130     // Since there's no actual continuation from this block, nothing can rely
8131     // on us setting vregs for them.
8132     PendingExports.clear();
8133   } else {
8134     DAG.setRoot(Result.second);
8135   }
8136 
8137   if (EHPadBB) {
8138     DAG.setRoot(lowerEndEH(getRoot(), cast_or_null<InvokeInst>(CLI.CB), EHPadBB,
8139                            BeginLabel));
8140   }
8141 
8142   return Result;
8143 }
8144 
8145 void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
8146                                       bool isTailCall,
8147                                       bool isMustTailCall,
8148                                       const BasicBlock *EHPadBB) {
8149   auto &DL = DAG.getDataLayout();
8150   FunctionType *FTy = CB.getFunctionType();
8151   Type *RetTy = CB.getType();
8152 
8153   TargetLowering::ArgListTy Args;
8154   Args.reserve(CB.arg_size());
8155 
8156   const Value *SwiftErrorVal = nullptr;
8157   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8158 
8159   if (isTailCall) {
8160     // Avoid emitting tail calls in functions with the disable-tail-calls
8161     // attribute.
8162     auto *Caller = CB.getParent()->getParent();
8163     if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() ==
8164         "true" && !isMustTailCall)
8165       isTailCall = false;
8166 
8167     // We can't tail call inside a function with a swifterror argument. Lowering
8168     // does not support this yet. It would have to move into the swifterror
8169     // register before the call.
8170     if (TLI.supportSwiftError() &&
8171         Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
8172       isTailCall = false;
8173   }
8174 
8175   for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
8176     TargetLowering::ArgListEntry Entry;
8177     const Value *V = *I;
8178 
8179     // Skip empty types
8180     if (V->getType()->isEmptyTy())
8181       continue;
8182 
8183     SDValue ArgNode = getValue(V);
8184     Entry.Node = ArgNode; Entry.Ty = V->getType();
8185 
8186     Entry.setAttributes(&CB, I - CB.arg_begin());
8187 
8188     // Use swifterror virtual register as input to the call.
8189     if (Entry.IsSwiftError && TLI.supportSwiftError()) {
8190       SwiftErrorVal = V;
8191       // Find the virtual register for the actual swifterror argument and use
8192       // it instead of the Value.
8193       Entry.Node =
8194           DAG.getRegister(SwiftError.getOrCreateVRegUseAt(&CB, FuncInfo.MBB, V),
8195                           EVT(TLI.getPointerTy(DL)));
8196     }
8197 
8198     Args.push_back(Entry);
8199 
8200     // If we have an explicit sret argument that is an Instruction (i.e., it
8201     // might point to function-local memory), we can't meaningfully tail-call.
8202     if (Entry.IsSRet && isa<Instruction>(V))
8203       isTailCall = false;
8204   }
8205 
8206   // If call site has a cfguardtarget operand bundle, create and add an
8207   // additional ArgListEntry.
8208   if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_cfguardtarget)) {
8209     TargetLowering::ArgListEntry Entry;
8210     Value *V = Bundle->Inputs[0];
8211     SDValue ArgNode = getValue(V);
8212     Entry.Node = ArgNode;
8213     Entry.Ty = V->getType();
8214     Entry.IsCFGuardTarget = true;
8215     Args.push_back(Entry);
8216   }
8217 
8218   // Check if target-independent constraints permit a tail call here.
8219   // Target-dependent constraints are checked within TLI->LowerCallTo.
8220   if (isTailCall && !isInTailCallPosition(CB, DAG.getTarget()))
8221     isTailCall = false;
8222 
8223   // Disable tail calls if there is a swifterror argument. Targets have not
8224   // been updated to support tail calls.
8225   if (TLI.supportSwiftError() && SwiftErrorVal)
8226     isTailCall = false;
8227 
8228   ConstantInt *CFIType = nullptr;
8229   if (CB.isIndirectCall()) {
8230     if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_kcfi)) {
8231       if (!TLI.supportKCFIBundles())
8232         report_fatal_error(
8233             "Target doesn't support calls with kcfi operand bundles.");
8234       CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
8235       assert(CFIType->getType()->isIntegerTy(32) && "Invalid CFI type");
8236     }
8237   }
8238 
8239   TargetLowering::CallLoweringInfo CLI(DAG);
8240   CLI.setDebugLoc(getCurSDLoc())
8241       .setChain(getRoot())
8242       .setCallee(RetTy, FTy, Callee, std::move(Args), CB)
8243       .setTailCall(isTailCall)
8244       .setConvergent(CB.isConvergent())
8245       .setIsPreallocated(
8246           CB.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0)
8247       .setCFIType(CFIType);
8248   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
8249 
8250   if (Result.first.getNode()) {
8251     Result.first = lowerRangeToAssertZExt(DAG, CB, Result.first);
8252     setValue(&CB, Result.first);
8253   }
8254 
8255   // The last element of CLI.InVals has the SDValue for swifterror return.
8256   // Here we copy it to a virtual register and update SwiftErrorMap for
8257   // book-keeping.
8258   if (SwiftErrorVal && TLI.supportSwiftError()) {
8259     // Get the last element of InVals.
8260     SDValue Src = CLI.InVals.back();
8261     Register VReg =
8262         SwiftError.getOrCreateVRegDefAt(&CB, FuncInfo.MBB, SwiftErrorVal);
8263     SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
8264     DAG.setRoot(CopyNode);
8265   }
8266 }
8267 
8268 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
8269                              SelectionDAGBuilder &Builder) {
8270   // Check to see if this load can be trivially constant folded, e.g. if the
8271   // input is from a string literal.
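  // For example (illustrative), if PtrVal is a global string literal such as
  // "abcd", a 4-byte load folds to the integer constant 0x64636261 on a
  // little-endian target and no load is emitted at all.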
8272   if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
8273     // Cast pointer to the type we really want to load.
8274     Type *LoadTy =
8275         Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
8276     if (LoadVT.isVector())
8277       LoadTy = FixedVectorType::get(LoadTy, LoadVT.getVectorNumElements());
8278 
8279     LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
8280                                          PointerType::getUnqual(LoadTy));
8281 
8282     if (const Constant *LoadCst =
8283             ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
8284                                          LoadTy, Builder.DAG.getDataLayout()))
8285       return Builder.getValue(LoadCst);
8286   }
8287 
8288   // Otherwise, we have to emit the load.  If the pointer is to memory that is
8289   // unfoldable but still constant, the input chain can be the entry node.
8290   SDValue Root;
8291   bool ConstantMemory = false;
8292 
8293   // Do not serialize (non-volatile) loads of constant memory with anything.
8294   if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) {
8295     Root = Builder.DAG.getEntryNode();
8296     ConstantMemory = true;
8297   } else {
8298     // Do not serialize non-volatile loads against each other.
8299     Root = Builder.DAG.getRoot();
8300   }
8301 
8302   SDValue Ptr = Builder.getValue(PtrVal);
8303   SDValue LoadVal =
8304       Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr,
8305                           MachinePointerInfo(PtrVal), Align(1));
8306 
8307   if (!ConstantMemory)
8308     Builder.PendingLoads.push_back(LoadVal.getValue(1));
8309   return LoadVal;
8310 }
8311 
8312 /// Record the value for an instruction that produces an integer result,
8313 /// converting the type where necessary.
8314 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
8315                                                   SDValue Value,
8316                                                   bool IsSigned) {
8317   EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8318                                                     I.getType(), true);
8319   Value = DAG.getExtOrTrunc(IsSigned, Value, getCurSDLoc(), VT);
8320   setValue(&I, Value);
8321 }
8322 
8323 /// See if we can lower a memcmp/bcmp call into an optimized form. If so, return
8324 /// true and lower it. Otherwise return false, and it will be lowered like a
8325 /// normal call.
8326 /// The caller already checked that \p I calls the appropriate LibFunc with a
8327 /// correct prototype.
8328 bool SelectionDAGBuilder::visitMemCmpBCmpCall(const CallInst &I) {
8329   const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
8330   const Value *Size = I.getArgOperand(2);
8331   const ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(getValue(Size));
8332   if (CSize && CSize->getZExtValue() == 0) {
8333     EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8334                                                           I.getType(), true);
8335     setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
8336     return true;
8337   }
8338 
8339   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8340   std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
8341       DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
8342       getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
8343   if (Res.first.getNode()) {
8344     processIntegerCallValue(I, Res.first, true);
8345     PendingLoads.push_back(Res.second);
8346     return true;
8347   }
8348 
8349   // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS)  != 0
8350   // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS)  != 0
8351   if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
8352     return false;
8353 
8354   // If the target has a fast compare for the given size, it will return a
8355   // preferred load type for that size. Require that the load VT is legal and
8356   // that the target supports unaligned loads of that type. Otherwise, return
8357   // INVALID.
8358   auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
8359     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8360     MVT LVT = TLI.hasFastEqualityCompare(NumBits);
8361     if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
8362       // TODO: Handle 5 byte compare as 4-byte + 1 byte.
8363       // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
8364       // TODO: Check alignment of src and dest ptrs.
8365       unsigned DstAS = LHS->getType()->getPointerAddressSpace();
8366       unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
8367       if (!TLI.isTypeLegal(LVT) ||
8368           !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
8369           !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
8370         LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
8371     }
8372 
8373     return LVT;
8374   };
8375 
8376   // This turns into unaligned loads. We only do this if the target natively
8377   // supports the MVT we'll be loading or if it is small enough (<= 4) that
8378   // we'll only produce a small number of byte loads.
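  // For example (illustrative), memcmp(a, b, 8) that is only compared against
  // zero can become two unaligned i64 loads and a single SETNE on a target
  // that reports a fast 64-bit equality compare, instead of a libcall.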
8379   MVT LoadVT;
8380   unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
8381   switch (NumBitsToCompare) {
8382   default:
8383     return false;
8384   case 16:
8385     LoadVT = MVT::i16;
8386     break;
8387   case 32:
8388     LoadVT = MVT::i32;
8389     break;
8390   case 64:
8391   case 128:
8392   case 256:
8393     LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
8394     break;
8395   }
8396 
8397   if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
8398     return false;
8399 
8400   SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
8401   SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
8402 
8403   // Bitcast to a wide integer type if the loads are vectors.
8404   if (LoadVT.isVector()) {
8405     EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
8406     LoadL = DAG.getBitcast(CmpVT, LoadL);
8407     LoadR = DAG.getBitcast(CmpVT, LoadR);
8408   }
8409 
8410   SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
8411   processIntegerCallValue(I, Cmp, false);
8412   return true;
8413 }
8414 
8415 /// See if we can lower a memchr call into an optimized form. If so, return
8416 /// true and lower it. Otherwise return false, and it will be lowered like a
8417 /// normal call.
8418 /// The caller already checked that \p I calls the appropriate LibFunc with a
8419 /// correct prototype.
8420 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
8421   const Value *Src = I.getArgOperand(0);
8422   const Value *Char = I.getArgOperand(1);
8423   const Value *Length = I.getArgOperand(2);
8424 
8425   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8426   std::pair<SDValue, SDValue> Res =
8427     TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
8428                                 getValue(Src), getValue(Char), getValue(Length),
8429                                 MachinePointerInfo(Src));
8430   if (Res.first.getNode()) {
8431     setValue(&I, Res.first);
8432     PendingLoads.push_back(Res.second);
8433     return true;
8434   }
8435 
8436   return false;
8437 }
8438 
8439 /// See if we can lower a mempcpy call into an optimized form. If so, return
8440 /// true and lower it. Otherwise return false, and it will be lowered like a
8441 /// normal call.
8442 /// The caller already checked that \p I calls the appropriate LibFunc with a
8443 /// correct prototype.
8444 bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
8445   SDValue Dst = getValue(I.getArgOperand(0));
8446   SDValue Src = getValue(I.getArgOperand(1));
8447   SDValue Size = getValue(I.getArgOperand(2));
8448 
8449   Align DstAlign = DAG.InferPtrAlign(Dst).valueOrOne();
8450   Align SrcAlign = DAG.InferPtrAlign(Src).valueOrOne();
8451   // DAG::getMemcpy needs Alignment to be defined.
8452   Align Alignment = std::min(DstAlign, SrcAlign);
8453 
8454   SDLoc sdl = getCurSDLoc();
8455 
8456   // In the mempcpy context we need to pass in a false value for isTailCall
8457   // because the return pointer needs to be adjusted by the size of
8458   // the copied memory.
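  // Illustrative result: mempcpy(dst, src, n) is emitted as a memcpy node
  // (which becomes the new root) plus an ADD computing dst + n, and that sum
  // is the value recorded for the call.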
8459   SDValue Root = getMemoryRoot();
8460   SDValue MC = DAG.getMemcpy(Root, sdl, Dst, Src, Size, Alignment, false, false,
8461                              /*isTailCall=*/false,
8462                              MachinePointerInfo(I.getArgOperand(0)),
8463                              MachinePointerInfo(I.getArgOperand(1)),
8464                              I.getAAMetadata());
8465   assert(MC.getNode() != nullptr &&
8466          "** memcpy should not be lowered as TailCall in mempcpy context **");
8467   DAG.setRoot(MC);
8468 
8469   // Check if Size needs to be truncated or extended.
8470   Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
8471 
8472   // Adjust return pointer to point just past the last dst byte.
8473   SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
8474                                     Dst, Size);
8475   setValue(&I, DstPlusSize);
8476   return true;
8477 }
8478 
8479 /// See if we can lower a strcpy call into an optimized form.  If so, return
8480 /// true and lower it, otherwise return false and it will be lowered like a
8481 /// normal call.
8482 /// The caller already checked that \p I calls the appropriate LibFunc with a
8483 /// correct prototype.
8484 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
8485   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
8486 
8487   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8488   std::pair<SDValue, SDValue> Res =
8489     TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
8490                                 getValue(Arg0), getValue(Arg1),
8491                                 MachinePointerInfo(Arg0),
8492                                 MachinePointerInfo(Arg1), isStpcpy);
8493   if (Res.first.getNode()) {
8494     setValue(&I, Res.first);
8495     DAG.setRoot(Res.second);
8496     return true;
8497   }
8498 
8499   return false;
8500 }
8501 
8502 /// See if we can lower a strcmp call into an optimized form.  If so, return
8503 /// true and lower it, otherwise return false and it will be lowered like a
8504 /// normal call.
8505 /// The caller already checked that \p I calls the appropriate LibFunc with a
8506 /// correct prototype.
8507 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
8508   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
8509 
8510   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8511   std::pair<SDValue, SDValue> Res =
8512     TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
8513                                 getValue(Arg0), getValue(Arg1),
8514                                 MachinePointerInfo(Arg0),
8515                                 MachinePointerInfo(Arg1));
8516   if (Res.first.getNode()) {
8517     processIntegerCallValue(I, Res.first, true);
8518     PendingLoads.push_back(Res.second);
8519     return true;
8520   }
8521 
8522   return false;
8523 }
8524 
8525 /// See if we can lower a strlen call into an optimized form.  If so, return
8526 /// true and lower it, otherwise return false and it will be lowered like a
8527 /// normal call.
8528 /// The caller already checked that \p I calls the appropriate LibFunc with a
8529 /// correct prototype.
8530 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
8531   const Value *Arg0 = I.getArgOperand(0);
8532 
8533   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8534   std::pair<SDValue, SDValue> Res =
8535     TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
8536                                 getValue(Arg0), MachinePointerInfo(Arg0));
8537   if (Res.first.getNode()) {
8538     processIntegerCallValue(I, Res.first, false);
8539     PendingLoads.push_back(Res.second);
8540     return true;
8541   }
8542 
8543   return false;
8544 }
8545 
8546 /// See if we can lower a strnlen call into an optimized form.  If so, return
8547 /// true and lower it, otherwise return false and it will be lowered like a
8548 /// normal call.
8549 /// The caller already checked that \p I calls the appropriate LibFunc with a
8550 /// correct prototype.
8551 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
8552   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
8553 
8554   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8555   std::pair<SDValue, SDValue> Res =
8556     TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
8557                                  getValue(Arg0), getValue(Arg1),
8558                                  MachinePointerInfo(Arg0));
8559   if (Res.first.getNode()) {
8560     processIntegerCallValue(I, Res.first, false);
8561     PendingLoads.push_back(Res.second);
8562     return true;
8563   }
8564 
8565   return false;
8566 }
8567 
8568 /// See if we can lower a unary floating-point operation into an SDNode with
8569 /// the specified Opcode.  If so, return true and lower it, otherwise return
8570 /// false and it will be lowered like a normal call.
8571 /// The caller already checked that \p I calls the appropriate LibFunc with a
8572 /// correct prototype.
8573 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
8574                                               unsigned Opcode) {
8575   // We already checked this call's prototype; verify it doesn't modify errno.
8576   if (!I.onlyReadsMemory())
8577     return false;
8578 
8579   SDNodeFlags Flags;
8580   Flags.copyFMF(cast<FPMathOperator>(I));
8581 
8582   SDValue Tmp = getValue(I.getArgOperand(0));
8583   setValue(&I,
8584            DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp, Flags));
8585   return true;
8586 }
8587 
8588 /// See if we can lower a binary floating-point operation into an SDNode with
8589 /// the specified Opcode. If so, return true and lower it. Otherwise return
8590 /// false, and it will be lowered like a normal call.
8591 /// The caller already checked that \p I calls the appropriate LibFunc with a
8592 /// correct prototype.
8593 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
8594                                                unsigned Opcode) {
8595   // We already checked this call's prototype; verify it doesn't modify errno.
8596   if (!I.onlyReadsMemory())
8597     return false;
8598 
8599   SDNodeFlags Flags;
8600   Flags.copyFMF(cast<FPMathOperator>(I));
8601 
8602   SDValue Tmp0 = getValue(I.getArgOperand(0));
8603   SDValue Tmp1 = getValue(I.getArgOperand(1));
8604   EVT VT = Tmp0.getValueType();
8605   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1, Flags));
8606   return true;
8607 }
8608 
8609 void SelectionDAGBuilder::visitCall(const CallInst &I) {
8610   // Handle inline assembly differently.
8611   if (I.isInlineAsm()) {
8612     visitInlineAsm(I);
8613     return;
8614   }
8615 
8616   diagnoseDontCall(I);
8617 
8618   if (Function *F = I.getCalledFunction()) {
8619     if (F->isDeclaration()) {
8620       // Is this an LLVM intrinsic or a target-specific intrinsic?
8621       unsigned IID = F->getIntrinsicID();
8622       if (!IID)
8623         if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
8624           IID = II->getIntrinsicID(F);
8625 
8626       if (IID) {
8627         visitIntrinsicCall(I, IID);
8628         return;
8629       }
8630     }
8631 
8632     // Check for well-known libc/libm calls.  If the function is internal, it
8633     // can't be a library call.  Don't do the check if marked as nobuiltin for
8634     // some reason or the call site requires strict floating point semantics.
8635     LibFunc Func;
8636     if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
8637         F->hasName() && LibInfo->getLibFunc(*F, Func) &&
8638         LibInfo->hasOptimizedCodeGen(Func)) {
8639       switch (Func) {
8640       default: break;
8641       case LibFunc_bcmp:
8642         if (visitMemCmpBCmpCall(I))
8643           return;
8644         break;
8645       case LibFunc_copysign:
8646       case LibFunc_copysignf:
8647       case LibFunc_copysignl:
8648         // We already checked this call's prototype; verify it doesn't modify
8649         // errno.
8650         if (I.onlyReadsMemory()) {
8651           SDValue LHS = getValue(I.getArgOperand(0));
8652           SDValue RHS = getValue(I.getArgOperand(1));
8653           setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
8654                                    LHS.getValueType(), LHS, RHS));
8655           return;
8656         }
8657         break;
8658       case LibFunc_fabs:
8659       case LibFunc_fabsf:
8660       case LibFunc_fabsl:
8661         if (visitUnaryFloatCall(I, ISD::FABS))
8662           return;
8663         break;
8664       case LibFunc_fmin:
8665       case LibFunc_fminf:
8666       case LibFunc_fminl:
8667         if (visitBinaryFloatCall(I, ISD::FMINNUM))
8668           return;
8669         break;
8670       case LibFunc_fmax:
8671       case LibFunc_fmaxf:
8672       case LibFunc_fmaxl:
8673         if (visitBinaryFloatCall(I, ISD::FMAXNUM))
8674           return;
8675         break;
8676       case LibFunc_sin:
8677       case LibFunc_sinf:
8678       case LibFunc_sinl:
8679         if (visitUnaryFloatCall(I, ISD::FSIN))
8680           return;
8681         break;
8682       case LibFunc_cos:
8683       case LibFunc_cosf:
8684       case LibFunc_cosl:
8685         if (visitUnaryFloatCall(I, ISD::FCOS))
8686           return;
8687         break;
8688       case LibFunc_sqrt:
8689       case LibFunc_sqrtf:
8690       case LibFunc_sqrtl:
8691       case LibFunc_sqrt_finite:
8692       case LibFunc_sqrtf_finite:
8693       case LibFunc_sqrtl_finite:
8694         if (visitUnaryFloatCall(I, ISD::FSQRT))
8695           return;
8696         break;
8697       case LibFunc_floor:
8698       case LibFunc_floorf:
8699       case LibFunc_floorl:
8700         if (visitUnaryFloatCall(I, ISD::FFLOOR))
8701           return;
8702         break;
8703       case LibFunc_nearbyint:
8704       case LibFunc_nearbyintf:
8705       case LibFunc_nearbyintl:
8706         if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
8707           return;
8708         break;
8709       case LibFunc_ceil:
8710       case LibFunc_ceilf:
8711       case LibFunc_ceill:
8712         if (visitUnaryFloatCall(I, ISD::FCEIL))
8713           return;
8714         break;
8715       case LibFunc_rint:
8716       case LibFunc_rintf:
8717       case LibFunc_rintl:
8718         if (visitUnaryFloatCall(I, ISD::FRINT))
8719           return;
8720         break;
8721       case LibFunc_round:
8722       case LibFunc_roundf:
8723       case LibFunc_roundl:
8724         if (visitUnaryFloatCall(I, ISD::FROUND))
8725           return;
8726         break;
8727       case LibFunc_trunc:
8728       case LibFunc_truncf:
8729       case LibFunc_truncl:
8730         if (visitUnaryFloatCall(I, ISD::FTRUNC))
8731           return;
8732         break;
8733       case LibFunc_log2:
8734       case LibFunc_log2f:
8735       case LibFunc_log2l:
8736         if (visitUnaryFloatCall(I, ISD::FLOG2))
8737           return;
8738         break;
8739       case LibFunc_exp2:
8740       case LibFunc_exp2f:
8741       case LibFunc_exp2l:
8742         if (visitUnaryFloatCall(I, ISD::FEXP2))
8743           return;
8744         break;
8745       case LibFunc_exp10:
8746       case LibFunc_exp10f:
8747       case LibFunc_exp10l:
8748         if (visitUnaryFloatCall(I, ISD::FEXP10))
8749           return;
8750         break;
8751       case LibFunc_ldexp:
8752       case LibFunc_ldexpf:
8753       case LibFunc_ldexpl:
8754         if (visitBinaryFloatCall(I, ISD::FLDEXP))
8755           return;
8756         break;
8757       case LibFunc_memcmp:
8758         if (visitMemCmpBCmpCall(I))
8759           return;
8760         break;
8761       case LibFunc_mempcpy:
8762         if (visitMemPCpyCall(I))
8763           return;
8764         break;
8765       case LibFunc_memchr:
8766         if (visitMemChrCall(I))
8767           return;
8768         break;
8769       case LibFunc_strcpy:
8770         if (visitStrCpyCall(I, false))
8771           return;
8772         break;
8773       case LibFunc_stpcpy:
8774         if (visitStrCpyCall(I, true))
8775           return;
8776         break;
8777       case LibFunc_strcmp:
8778         if (visitStrCmpCall(I))
8779           return;
8780         break;
8781       case LibFunc_strlen:
8782         if (visitStrLenCall(I))
8783           return;
8784         break;
8785       case LibFunc_strnlen:
8786         if (visitStrNLenCall(I))
8787           return;
8788         break;
8789       }
8790     }
8791   }
8792 
8793   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
8794   // have to do anything here to lower funclet bundles.
8795   // CFGuardTarget bundles are lowered in LowerCallTo.
8796   assert(!I.hasOperandBundlesOtherThan(
8797              {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
8798               LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated,
8799               LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_kcfi}) &&
8800          "Cannot lower calls with arbitrary operand bundles!");
8801 
8802   SDValue Callee = getValue(I.getCalledOperand());
8803 
8804   if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
8805     LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
8806   else
8807     // Check if we can potentially perform a tail call. More detailed checking
8808     // is done within LowerCallTo, after more information about the call is
8809     // known.
8810     LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
8811 }
8812 
8813 namespace {
8814 
8815 /// AsmOperandInfo - This contains information for each constraint that we are
8816 /// lowering.
8817 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
8818 public:
8819   /// CallOperand - If this is the result output operand or a clobber
8820   /// this is null, otherwise it is the incoming operand to the CallInst.
8821   /// This gets modified as the asm is processed.
8822   SDValue CallOperand;
8823 
8824   /// AssignedRegs - If this is a register or register class operand, this
8825   /// contains the set of registers corresponding to the operand.
8826   RegsForValue AssignedRegs;
8827 
8828   explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
8829     : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
8830   }
8831 
8832   /// Whether or not this operand accesses memory
8833   bool hasMemory(const TargetLowering &TLI) const {
8834     // Indirect operands access memory.
8835     if (isIndirect)
8836       return true;
8837 
8838     for (const auto &Code : Codes)
8839       if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
8840         return true;
8841 
8842     return false;
8843   }
8844 };
8845 
8846 
8847 } // end anonymous namespace
8848 
8849 /// Make sure that the output operand \p OpInfo and its corresponding input
8850 /// operand \p MatchingOpInfo have compatible constraint types (otherwise error
8851 /// out).
8852 static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
8853                                SDISelAsmOperandInfo &MatchingOpInfo,
8854                                SelectionDAG &DAG) {
8855   if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
8856     return;
8857 
8858   const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
8859   const auto &TLI = DAG.getTargetLoweringInfo();
8860 
8861   std::pair<unsigned, const TargetRegisterClass *> MatchRC =
8862       TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
8863                                        OpInfo.ConstraintVT);
8864   std::pair<unsigned, const TargetRegisterClass *> InputRC =
8865       TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
8866                                        MatchingOpInfo.ConstraintVT);
8867   if ((OpInfo.ConstraintVT.isInteger() !=
8868        MatchingOpInfo.ConstraintVT.isInteger()) ||
8869       (MatchRC.second != InputRC.second)) {
8870     // FIXME: error out in a more elegant fashion
8871     report_fatal_error("Unsupported asm: input constraint"
8872                        " with a matching output constraint of"
8873                        " incompatible type!");
8874   }
8875   MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
8876 }
8877 
8878 /// Get a direct memory input to behave well as an indirect operand.
8879 /// This may introduce stores, hence the need for a \p Chain.
8880 /// \return The (possibly updated) chain.
8881 static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
8882                                         SDISelAsmOperandInfo &OpInfo,
8883                                         SelectionDAG &DAG) {
8884   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8885 
8886   // If we don't have an indirect input, put it in the constpool if we can,
8887   // otherwise spill it to a stack slot.
8888   // TODO: This isn't quite right. We need to handle these according to
8889   // the addressing mode that the constraint wants. Also, this may take
8890   // an additional register for the computation and we don't want that
8891   // either.
8892 
8893   // If the operand is a float, integer, or vector constant, spill to a
8894   // constant pool entry to get its address.
8895   const Value *OpVal = OpInfo.CallOperandVal;
8896   if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
8897       isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
8898     OpInfo.CallOperand = DAG.getConstantPool(
8899         cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
8900     return Chain;
8901   }
8902 
8903   // Otherwise, create a stack slot and emit a store to it before the asm.
8904   Type *Ty = OpVal->getType();
8905   auto &DL = DAG.getDataLayout();
8906   uint64_t TySize = DL.getTypeAllocSize(Ty);
8907   MachineFunction &MF = DAG.getMachineFunction();
8908   int SSFI = MF.getFrameInfo().CreateStackObject(
8909       TySize, DL.getPrefTypeAlign(Ty), false);
8910   SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
8911   Chain = DAG.getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
8912                             MachinePointerInfo::getFixedStack(MF, SSFI),
8913                             TLI.getMemValueType(DL, Ty));
8914   OpInfo.CallOperand = StackSlot;
8915 
8916   return Chain;
8917 }
8918 
8919 /// GetRegistersForValue - Assign registers (virtual or physical) for the
8920 /// specified operand.  We prefer to assign virtual registers, to allow the
8921 /// register allocator to handle the assignment process.  However, if the asm
8922 /// uses features that we can't model on machineinstrs, we have SDISel do the
8923 /// allocation.  This produces generally horrible, but correct, code.
8924 ///
8925 ///   OpInfo describes the operand
8926 ///   RefOpInfo describes the matching operand if any, the operand otherwise
8927 static std::optional<unsigned>
8928 getRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
8929                      SDISelAsmOperandInfo &OpInfo,
8930                      SDISelAsmOperandInfo &RefOpInfo) {
8931   LLVMContext &Context = *DAG.getContext();
8932   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8933 
8934   MachineFunction &MF = DAG.getMachineFunction();
8935   SmallVector<unsigned, 4> Regs;
8936   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
8937 
8938   // No work to do for memory/address operands.
8939   if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
8940       OpInfo.ConstraintType == TargetLowering::C_Address)
8941     return std::nullopt;
8942 
8943   // If this is a constraint for a single physreg, or a constraint for a
8944   // register class, find it.
8945   unsigned AssignedReg;
8946   const TargetRegisterClass *RC;
8947   std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
8948       &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
8949   // RC is unset only on failure. Return immediately.
8950   if (!RC)
8951     return std::nullopt;
8952 
8953   // Get the actual register value type.  This is important, because the user
8954   // may have asked for (e.g.) the AX register in i32 type.  We need to
8955   // remember that AX is actually i16 to get the right extension.
8956   const MVT RegVT = *TRI.legalclasstypes_begin(*RC);
8957 
8958   if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
    // If this is an FP operand in an integer register (or vice versa), or more
8960     // generally if the operand value disagrees with the register class we plan
8961     // to stick it in, fix the operand type.
8962     //
8963     // If this is an input value, the bitcast to the new type is done now.
8964     // Bitcast for output value is done at the end of visitInlineAsm().
8965     if ((OpInfo.Type == InlineAsm::isOutput ||
8966          OpInfo.Type == InlineAsm::isInput) &&
8967         !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
8968       // Try to convert to the first EVT that the reg class contains.  If the
8969       // types are identical size, use a bitcast to convert (e.g. two differing
8970       // vector types).  Note: output bitcast is done at the end of
8971       // visitInlineAsm().
8972       if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
8973         // Exclude indirect inputs while they are unsupported because the code
8974         // to perform the load is missing and thus OpInfo.CallOperand still
8975         // refers to the input address rather than the pointed-to value.
8976         if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
8977           OpInfo.CallOperand =
8978               DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
8979         OpInfo.ConstraintVT = RegVT;
8980         // If the operand is an FP value and we want it in integer registers,
8981         // use the corresponding integer type. This turns an f64 value into
8982         // i64, which can be passed with two i32 values on a 32-bit machine.
8983       } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
8984         MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
8985         if (OpInfo.Type == InlineAsm::isInput)
8986           OpInfo.CallOperand =
8987               DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
8988         OpInfo.ConstraintVT = VT;
8989       }
8990     }
8991   }
8992 
8993   // No need to allocate a matching input constraint since the constraint it's
8994   // matching to has already been allocated.
8995   if (OpInfo.isMatchingInputConstraint())
8996     return std::nullopt;
8997 
8998   EVT ValueVT = OpInfo.ConstraintVT;
8999   if (OpInfo.ConstraintVT == MVT::Other)
9000     ValueVT = RegVT;
9001 
9002   // Initialize NumRegs.
9003   unsigned NumRegs = 1;
9004   if (OpInfo.ConstraintVT != MVT::Other)
9005     NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT, RegVT);
9006 
9007   // If this is a constraint for a specific physical register, like {r17},
9008   // assign it now.
9009 
  // If this is associated with a specific register, initialize the iterator to
  // the correct place. If virtual, make sure we have enough registers.
9012 
9013   // Initialize iterator if necessary
9014   TargetRegisterClass::iterator I = RC->begin();
9015   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9016 
9017   // Do not check for single registers.
9018   if (AssignedReg) {
9019     I = std::find(I, RC->end(), AssignedReg);
9020     if (I == RC->end()) {
9021       // RC does not contain the selected register, which indicates a
9022       // mismatch between the register and the required type/bitwidth.
9023       return {AssignedReg};
9024     }
9025   }
9026 
9027   for (; NumRegs; --NumRegs, ++I) {
9028     assert(I != RC->end() && "Ran out of registers to allocate!");
9029     Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
9030     Regs.push_back(R);
9031   }
9032 
9033   OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
9034   return std::nullopt;
9035 }
9036 
9037 static unsigned
9038 findMatchingInlineAsmOperand(unsigned OperandNo,
9039                              const std::vector<SDValue> &AsmNodeOperands) {
9040   // Scan until we find the definition we already emitted of this operand.
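  // The INLINEASM operand list is laid out roughly as
  //   [Chain, AsmString, !srcloc MDNode, ExtraInfo,
  //    <flag for group 0>, <operands for group 0>,
  //    <flag for group 1>, <operands for group 1>, ...]
  // so each step below skips one flag word plus its trailing operands.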
9041   unsigned CurOp = InlineAsm::Op_FirstOperand;
9042   for (; OperandNo; --OperandNo) {
9043     // Advance to the next operand.
9044     unsigned OpFlag =
9045         cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
9046     const InlineAsm::Flag F(OpFlag);
9047     assert(
9048         (F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isMemKind()) &&
9049         "Skipped past definitions?");
9050     CurOp += F.getNumOperandRegisters() + 1;
9051   }
9052   return CurOp;
9053 }
9054 
9055 namespace {
9056 
9057 class ExtraFlags {
9058   unsigned Flags = 0;
9059 
9060 public:
9061   explicit ExtraFlags(const CallBase &Call) {
9062     const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
9063     if (IA->hasSideEffects())
9064       Flags |= InlineAsm::Extra_HasSideEffects;
9065     if (IA->isAlignStack())
9066       Flags |= InlineAsm::Extra_IsAlignStack;
9067     if (Call.isConvergent())
9068       Flags |= InlineAsm::Extra_IsConvergent;
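    // getDialect() is 0 for AT&T and 1 for Intel, so this sets the
    // Extra_AsmDialect bit exactly when the asm uses the Intel dialect.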
9069     Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
9070   }
9071 
9072   void update(const TargetLowering::AsmOperandInfo &OpInfo) {
9073     // Ideally, we would only check against memory constraints.  However, the
9074     // meaning of an Other constraint can be target-specific and we can't easily
9075     // reason about it.  Therefore, be conservative and set MayLoad/MayStore
9076     // for Other constraints as well.
9077     if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
9078         OpInfo.ConstraintType == TargetLowering::C_Other) {
9079       if (OpInfo.Type == InlineAsm::isInput)
9080         Flags |= InlineAsm::Extra_MayLoad;
9081       else if (OpInfo.Type == InlineAsm::isOutput)
9082         Flags |= InlineAsm::Extra_MayStore;
9083       else if (OpInfo.Type == InlineAsm::isClobber)
9084         Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
9085     }
9086   }
9087 
9088   unsigned get() const { return Flags; }
9089 };
9090 
9091 } // end anonymous namespace
9092 
9093 static bool isFunction(SDValue Op) {
9094   if (Op && Op.getOpcode() == ISD::GlobalAddress) {
9095     if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
9096       auto Fn = dyn_cast_or_null<Function>(GA->getGlobal());
9097 
      // A normal "call dllimport_func" instruction (non-inline-asm) forces
      // indirect access through its call opcode, and the asm printer then
      // emits the indirection marker (e.g. "*") based on that opcode. Inline
      // asm text cannot be rewritten that way today (it behaves more like a
      // "Data Access"), so dllimport functions are ignored here.
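      // Illustrative sketch: for __declspec(dllimport) void ext(void), a plain
      // call lowers to an indirect call through the import pointer (roughly
      // "call *__imp_ext"), but asm text containing "call ext" cannot be
      // rewritten like that, so ext is deliberately not reported here.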
9103       if (Fn && !Fn->hasDLLImportStorageClass())
9104         return true;
9105     }
9106   }
9107   return false;
9108 }
9109 
9110 /// visitInlineAsm - Handle a call to an InlineAsm object.
9111 void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
9112                                          const BasicBlock *EHPadBB) {
9113   const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
9114 
9115   /// ConstraintOperands - Information about all of the constraints.
9116   SmallVector<SDISelAsmOperandInfo, 16> ConstraintOperands;
9117 
9118   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9119   TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
9120       DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), Call);
9121 
9122   // First Pass: Calculate HasSideEffects and ExtraFlags (AlignStack,
9123   // AsmDialect, MayLoad, MayStore).
9124   bool HasSideEffect = IA->hasSideEffects();
9125   ExtraFlags ExtraInfo(Call);
9126 
9127   for (auto &T : TargetConstraints) {
9128     ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
9129     SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
9130 
9131     if (OpInfo.CallOperandVal)
9132       OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
9133 
9134     if (!HasSideEffect)
9135       HasSideEffect = OpInfo.hasMemory(TLI);
9136 
9137     // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
9138     // FIXME: Could we compute this on OpInfo rather than T?
9139 
9140     // Compute the constraint code and ConstraintType to use.
9141     TLI.ComputeConstraintToUse(T, SDValue());
9142 
9143     if (T.ConstraintType == TargetLowering::C_Immediate &&
9144         OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
      // We've delayed emitting a diagnostic for constraints like "n" because
      // inlining could cause an integer constant to show up here.
9147       return emitInlineAsmError(Call, "constraint '" + Twine(T.ConstraintCode) +
9148                                           "' expects an integer constant "
9149                                           "expression");
9150 
9151     ExtraInfo.update(T);
9152   }
9153 
9154   // We won't need to flush pending loads if this asm doesn't touch
9155   // memory and is nonvolatile.
9156   SDValue Glue, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();
9157 
9158   bool EmitEHLabels = isa<InvokeInst>(Call);
9159   if (EmitEHLabels) {
9160     assert(EHPadBB && "InvokeInst must have an EHPadBB");
9161   }
9162   bool IsCallBr = isa<CallBrInst>(Call);
9163 
9164   if (IsCallBr || EmitEHLabels) {
9165     // If this is a callbr or invoke we need to flush pending exports since
9166     // inlineasm_br and invoke are terminators.
9167     // We need to do this before nodes are glued to the inlineasm_br node.
9168     Chain = getControlRoot();
9169   }
9170 
9171   MCSymbol *BeginLabel = nullptr;
9172   if (EmitEHLabels) {
9173     Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
9174   }
9175 
9176   int OpNo = -1;
9177   SmallVector<StringRef> AsmStrs;
9178   IA->collectAsmStrs(AsmStrs);
9179 
9180   // Second pass over the constraints: compute which constraint option to use.
9181   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9182     if (OpInfo.hasArg() || OpInfo.Type == InlineAsm::isOutput)
9183       OpNo++;
9184 
9185     // If this is an output operand with a matching input operand, look up the
9186     // matching input. If their types mismatch, e.g. one is an integer, the
9187     // other is floating point, or their sizes are different, flag it as an
9188     // error.
9189     if (OpInfo.hasMatchingInput()) {
9190       SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
9191       patchMatchingInput(OpInfo, Input, DAG);
9192     }
9193 
9194     // Compute the constraint code and ConstraintType to use.
9195     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
9196 
9197     if ((OpInfo.ConstraintType == TargetLowering::C_Memory &&
9198          OpInfo.Type == InlineAsm::isClobber) ||
9199         OpInfo.ConstraintType == TargetLowering::C_Address)
9200       continue;
9201 
    // In the Linux PIC model, there are four cases of value/label addressing:
    //
    // 1: Function call or label jmp inside the module.
    // 2: Data access (such as a global or static variable) inside the module.
    // 3: Function call or label jmp outside the module.
    // 4: Data access (such as a global variable) outside the module.
    //
    // Because the current LLVM inline asm architecture is designed not to
    // "recognize" the asm code, it is hard to treat memory addressing
    // differently for the same value/address used in different instructions.
    // For example, in the PIC model a function call may go through the PLT or
    // be directly PC-relative, while a lea/mov of a function address may use
    // the GOT.
    //
    // Here we try to "recognize" a function call for cases 1 and 3 in inline
    // asm, and adjust the constraint accordingly.
    //
    // TODO: Current inline asm discourages jumping to labels outside the
    // module, so we don't handle jumps to function labels yet; enhance this
    // (especially in the PIC model) if meaningful requirements arise.
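    // Illustrative sketch (exact asm syntax is target-dependent): for an asm
    // string such as "call ${0:P}" whose operand is a local function @foo,
    // dropping the indirection and switching to C_Address lets the call be
    // emitted PC-relative (cases 1 and 3) instead of loading @foo's address
    // through a GOT-based memory operand.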
9221     if (OpInfo.isIndirect && isFunction(OpInfo.CallOperand) &&
9222         TLI.isInlineAsmTargetBranch(AsmStrs, OpNo) &&
9223         TM.getCodeModel() != CodeModel::Large) {
9224       OpInfo.isIndirect = false;
9225       OpInfo.ConstraintType = TargetLowering::C_Address;
9226     }
9227 
9228     // If this is a memory input, and if the operand is not indirect, do what we
9229     // need to provide an address for the memory input.
9230     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
9231         !OpInfo.isIndirect) {
9232       assert((OpInfo.isMultipleAlternative ||
9233               (OpInfo.Type == InlineAsm::isInput)) &&
9234              "Can only indirectify direct input operands!");
9235 
9236       // Memory operands really want the address of the value.
9237       Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
9238 
9239       // There is no longer a Value* corresponding to this operand.
9240       OpInfo.CallOperandVal = nullptr;
9241 
9242       // It is now an indirect operand.
9243       OpInfo.isIndirect = true;
9244     }
9245 
9246   }
9247 
9248   // AsmNodeOperands - The operands for the ISD::INLINEASM node.
9249   std::vector<SDValue> AsmNodeOperands;
9250   AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
9251   AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
9252       IA->getAsmString().c_str(), TLI.getProgramPointerTy(DAG.getDataLayout())));
9253 
9254   // If we have a !srcloc metadata node associated with it, we want to attach
9255   // this to the ultimately generated inline asm machineinstr.  To do this, we
9256   // pass in the third operand as this (potentially null) inline asm MDNode.
9257   const MDNode *SrcLoc = Call.getMetadata("srcloc");
9258   AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
9259 
9260   // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
9261   // bits as operand 3.
9262   AsmNodeOperands.push_back(DAG.getTargetConstant(
9263       ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
9264 
  // Third pass: Loop over operands to prepare DAG-level operands. As part of
  // this, assign virtual and physical registers for inputs and outputs.
9267   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9268     // Assign Registers.
9269     SDISelAsmOperandInfo &RefOpInfo =
9270         OpInfo.isMatchingInputConstraint()
9271             ? ConstraintOperands[OpInfo.getMatchedOperand()]
9272             : OpInfo;
9273     const auto RegError =
9274         getRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
9275     if (RegError) {
9276       const MachineFunction &MF = DAG.getMachineFunction();
9277       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9278       const char *RegName = TRI.getName(*RegError);
9279       emitInlineAsmError(Call, "register '" + Twine(RegName) +
9280                                    "' allocated for constraint '" +
9281                                    Twine(OpInfo.ConstraintCode) +
9282                                    "' does not match required type");
9283       return;
9284     }
9285 
9286     auto DetectWriteToReservedRegister = [&]() {
9287       const MachineFunction &MF = DAG.getMachineFunction();
9288       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9289       for (unsigned Reg : OpInfo.AssignedRegs.Regs) {
9290         if (Register::isPhysicalRegister(Reg) &&
9291             TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
9292           const char *RegName = TRI.getName(Reg);
9293           emitInlineAsmError(Call, "write to reserved register '" +
9294                                        Twine(RegName) + "'");
9295           return true;
9296         }
9297       }
9298       return false;
9299     };
9300     assert((OpInfo.ConstraintType != TargetLowering::C_Address ||
9301             (OpInfo.Type == InlineAsm::isInput &&
9302              !OpInfo.isMatchingInputConstraint())) &&
9303            "Only address as input operand is allowed.");
9304 
9305     switch (OpInfo.Type) {
9306     case InlineAsm::isOutput:
9307       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
9308         const InlineAsm::ConstraintCode ConstraintID =
9309             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
9310         assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
9311                "Failed to convert memory constraint code to constraint id.");
9312 
9313         // Add information to the INLINEASM node to know about this output.
9314         InlineAsm::Flag OpFlags(InlineAsm::Kind::Mem, 1);
9315         OpFlags.setMemConstraint(ConstraintID);
9316         AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
9317                                                         MVT::i32));
9318         AsmNodeOperands.push_back(OpInfo.CallOperand);
9319       } else {
9320         // Otherwise, this outputs to a register (directly for C_Register /
9321         // C_RegisterClass, and a target-defined fashion for
9322         // C_Immediate/C_Other). Find a register that we can use.
9323         if (OpInfo.AssignedRegs.Regs.empty()) {
9324           emitInlineAsmError(
9325               Call, "couldn't allocate output register for constraint '" +
9326                         Twine(OpInfo.ConstraintCode) + "'");
9327           return;
9328         }
9329 
9330         if (DetectWriteToReservedRegister())
9331           return;
9332 
9333         // Add information to the INLINEASM node to know that this register is
9334         // set.
9335         OpInfo.AssignedRegs.AddInlineAsmOperands(
9336             OpInfo.isEarlyClobber ? InlineAsm::Kind::RegDefEarlyClobber
9337                                   : InlineAsm::Kind::RegDef,
9338             false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
9339       }
9340       break;
9341 
9342     case InlineAsm::isInput:
9343     case InlineAsm::isLabel: {
9344       SDValue InOperandVal = OpInfo.CallOperand;
9345 
9346       if (OpInfo.isMatchingInputConstraint()) {
9347         // If this is required to match an output register we have already set,
9348         // just use its register.
9349         auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
9350                                                   AsmNodeOperands);
9351         InlineAsm::Flag Flag(
9352             cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue());
9353         if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) {
9354           if (OpInfo.isIndirect) {
9355             // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
9356             emitInlineAsmError(Call, "inline asm not supported yet: "
9357                                      "don't know how to handle tied "
9358                                      "indirect register inputs");
9359             return;
9360           }
9361 
9362           SmallVector<unsigned, 4> Regs;
9363           MachineFunction &MF = DAG.getMachineFunction();
9364           MachineRegisterInfo &MRI = MF.getRegInfo();
9365           const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9366           auto *R = cast<RegisterSDNode>(AsmNodeOperands[CurOp+1]);
9367           Register TiedReg = R->getReg();
9368           MVT RegVT = R->getSimpleValueType(0);
9369           const TargetRegisterClass *RC =
9370               TiedReg.isVirtual()     ? MRI.getRegClass(TiedReg)
9371               : RegVT != MVT::Untyped ? TLI.getRegClassFor(RegVT)
9372                                       : TRI.getMinimalPhysRegClass(TiedReg);
9373           for (unsigned i = 0, e = Flag.getNumOperandRegisters(); i != e; ++i)
9374             Regs.push_back(MRI.createVirtualRegister(RC));
9375 
9376           RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
9377 
9378           SDLoc dl = getCurSDLoc();
          // Use the produced MatchedRegs object to copy the input value into
          // the newly created virtual registers.
9380           MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue, &Call);
9381           MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, true,
9382                                            OpInfo.getMatchedOperand(), dl, DAG,
9383                                            AsmNodeOperands);
9384           break;
9385         }
9386 
9387         assert(Flag.isMemKind() && "Unknown matching constraint!");
9388         assert(Flag.getNumOperandRegisters() == 1 &&
9389                "Unexpected number of operands");
9390         // Add information to the INLINEASM node to know about this input.
9391         // See InlineAsm.h isUseOperandTiedToDef.
9392         Flag.clearMemConstraint();
9393         Flag.setMatchingOp(OpInfo.getMatchedOperand());
9394         AsmNodeOperands.push_back(DAG.getTargetConstant(
9395             Flag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
9396         AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
9397         break;
9398       }
9399 
9400       // Treat indirect 'X' constraint as memory.
9401       if (OpInfo.ConstraintType == TargetLowering::C_Other &&
9402           OpInfo.isIndirect)
9403         OpInfo.ConstraintType = TargetLowering::C_Memory;
9404 
9405       if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
9406           OpInfo.ConstraintType == TargetLowering::C_Other) {
9407         std::vector<SDValue> Ops;
9408         TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
9409                                           Ops, DAG);
9410         if (Ops.empty()) {
9411           if (OpInfo.ConstraintType == TargetLowering::C_Immediate)
9412             if (isa<ConstantSDNode>(InOperandVal)) {
9413               emitInlineAsmError(Call, "value out of range for constraint '" +
9414                                            Twine(OpInfo.ConstraintCode) + "'");
9415               return;
9416             }
9417 
9418           emitInlineAsmError(Call,
9419                              "invalid operand for inline asm constraint '" +
9420                                  Twine(OpInfo.ConstraintCode) + "'");
9421           return;
9422         }
9423 
9424         // Add information to the INLINEASM node to know about this input.
9425         InlineAsm::Flag ResOpType(InlineAsm::Kind::Imm, Ops.size());
9426         AsmNodeOperands.push_back(DAG.getTargetConstant(
9427             ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
9428         llvm::append_range(AsmNodeOperands, Ops);
9429         break;
9430       }
9431 
9432       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
9433         assert((OpInfo.isIndirect ||
9434                 OpInfo.ConstraintType != TargetLowering::C_Memory) &&
9435                "Operand must be indirect to be a mem!");
9436         assert(InOperandVal.getValueType() ==
9437                    TLI.getPointerTy(DAG.getDataLayout()) &&
9438                "Memory operands expect pointer values");
9439 
9440         const InlineAsm::ConstraintCode ConstraintID =
9441             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
9442         assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
9443                "Failed to convert memory constraint code to constraint id.");
9444 
9445         // Add information to the INLINEASM node to know about this input.
9446         InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1);
9447         ResOpType.setMemConstraint(ConstraintID);
9448         AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
9449                                                         getCurSDLoc(),
9450                                                         MVT::i32));
9451         AsmNodeOperands.push_back(InOperandVal);
9452         break;
9453       }
9454 
9455       if (OpInfo.ConstraintType == TargetLowering::C_Address) {
9456         const InlineAsm::ConstraintCode ConstraintID =
9457             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
9458         assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
9459                "Failed to convert memory constraint code to constraint id.");
9460 
9461         InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1);
9462 
9463         SDValue AsmOp = InOperandVal;
9464         if (isFunction(InOperandVal)) {
9465           auto *GA = cast<GlobalAddressSDNode>(InOperandVal);
9466           ResOpType = InlineAsm::Flag(InlineAsm::Kind::Func, 1);
9467           AsmOp = DAG.getTargetGlobalAddress(GA->getGlobal(), getCurSDLoc(),
9468                                              InOperandVal.getValueType(),
9469                                              GA->getOffset());
9470         }
9471 
9472         // Add information to the INLINEASM node to know about this input.
9473         ResOpType.setMemConstraint(ConstraintID);
9474 
9475         AsmNodeOperands.push_back(
9476             DAG.getTargetConstant(ResOpType, getCurSDLoc(), MVT::i32));
9477 
9478         AsmNodeOperands.push_back(AsmOp);
9479         break;
9480       }
9481 
9482       assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
9483               OpInfo.ConstraintType == TargetLowering::C_Register) &&
9484              "Unknown constraint type!");
9485 
9486       // TODO: Support this.
9487       if (OpInfo.isIndirect) {
9488         emitInlineAsmError(
9489             Call, "Don't know how to handle indirect register inputs yet "
9490                   "for constraint '" +
9491                       Twine(OpInfo.ConstraintCode) + "'");
9492         return;
9493       }
9494 
9495       // Copy the input into the appropriate registers.
9496       if (OpInfo.AssignedRegs.Regs.empty()) {
9497         emitInlineAsmError(Call,
9498                            "couldn't allocate input reg for constraint '" +
9499                                Twine(OpInfo.ConstraintCode) + "'");
9500         return;
9501       }
9502 
9503       if (DetectWriteToReservedRegister())
9504         return;
9505 
9506       SDLoc dl = getCurSDLoc();
9507 
9508       OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue,
9509                                         &Call);
9510 
9511       OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, false,
9512                                                0, dl, DAG, AsmNodeOperands);
9513       break;
9514     }
9515     case InlineAsm::isClobber:
9516       // Add the clobbered value to the operand list, so that the register
9517       // allocator is aware that the physreg got clobbered.
9518       if (!OpInfo.AssignedRegs.Regs.empty())
9519         OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::Clobber,
9520                                                  false, 0, getCurSDLoc(), DAG,
9521                                                  AsmNodeOperands);
9522       break;
9523     }
9524   }
9525 
9526   // Finish up input operands.  Set the input chain and add the flag last.
9527   AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
9528   if (Glue.getNode()) AsmNodeOperands.push_back(Glue);
9529 
9530   unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
9531   Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
9532                       DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
9533   Glue = Chain.getValue(1);
9534 
9535   // Do additional work to generate outputs.
9536 
9537   SmallVector<EVT, 1> ResultVTs;
9538   SmallVector<SDValue, 1> ResultValues;
9539   SmallVector<SDValue, 8> OutChains;
9540 
9541   llvm::Type *CallResultType = Call.getType();
9542   ArrayRef<Type *> ResultTypes;
9543   if (StructType *StructResult = dyn_cast<StructType>(CallResultType))
9544     ResultTypes = StructResult->elements();
9545   else if (!CallResultType->isVoidTy())
9546     ResultTypes = ArrayRef(CallResultType);
9547 
9548   auto CurResultType = ResultTypes.begin();
9549   auto handleRegAssign = [&](SDValue V) {
9550     assert(CurResultType != ResultTypes.end() && "Unexpected value");
9551     assert((*CurResultType)->isSized() && "Unexpected unsized type");
9552     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
9553     ++CurResultType;
    // If the type of the inline asm call site return value differs from, but
    // has the same size as, the type of the asm output, bitcast it.  One
    // example of this is vectors with a different width / number of
    // elements.  This can
9557     // happen for register classes that can contain multiple different value
9558     // types.  The preg or vreg allocated may not have the same VT as was
9559     // expected.
9560     //
9561     // This can also happen for a return value that disagrees with the register
9562     // class it is put in, eg. a double in a general-purpose register on a
9563     // 32-bit machine.
9564     if (ResultVT != V.getValueType() &&
9565         ResultVT.getSizeInBits() == V.getValueSizeInBits())
9566       V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
9567     else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
9568              V.getValueType().isInteger()) {
9569       // If a result value was tied to an input value, the computed result
9570       // may have a wider width than the expected result.  Extract the
9571       // relevant portion.
9572       V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
9573     }
9574     assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
9575     ResultVTs.push_back(ResultVT);
9576     ResultValues.push_back(V);
9577   };
9578 
9579   // Deal with output operands.
9580   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9581     if (OpInfo.Type == InlineAsm::isOutput) {
9582       SDValue Val;
9583       // Skip trivial output operands.
9584       if (OpInfo.AssignedRegs.Regs.empty())
9585         continue;
9586 
9587       switch (OpInfo.ConstraintType) {
9588       case TargetLowering::C_Register:
9589       case TargetLowering::C_RegisterClass:
9590         Val = OpInfo.AssignedRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
9591                                                   Chain, &Glue, &Call);
9592         break;
9593       case TargetLowering::C_Immediate:
9594       case TargetLowering::C_Other:
9595         Val = TLI.LowerAsmOutputForConstraint(Chain, Glue, getCurSDLoc(),
9596                                               OpInfo, DAG);
9597         break;
9598       case TargetLowering::C_Memory:
9599         break; // Already handled.
9600       case TargetLowering::C_Address:
9601         break; // Silence warning.
9602       case TargetLowering::C_Unknown:
9603         assert(false && "Unexpected unknown constraint");
9604       }
9605 
      // Indirect outputs manifest as stores. Record output chains.
9607       if (OpInfo.isIndirect) {
9608         const Value *Ptr = OpInfo.CallOperandVal;
9609         assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
9610         SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
9611                                      MachinePointerInfo(Ptr));
9612         OutChains.push_back(Store);
9613       } else {
9614         // generate CopyFromRegs to associated registers.
9615         assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
9616         if (Val.getOpcode() == ISD::MERGE_VALUES) {
9617           for (const SDValue &V : Val->op_values())
9618             handleRegAssign(V);
9619         } else
9620           handleRegAssign(Val);
9621       }
9622     }
9623   }
9624 
9625   // Set results.
9626   if (!ResultValues.empty()) {
9627     assert(CurResultType == ResultTypes.end() &&
9628            "Mismatch in number of ResultTypes");
9629     assert(ResultValues.size() == ResultTypes.size() &&
9630            "Mismatch in number of output operands in asm result");
9631 
9632     SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
9633                             DAG.getVTList(ResultVTs), ResultValues);
9634     setValue(&Call, V);
9635   }
9636 
9637   // Collect store chains.
9638   if (!OutChains.empty())
9639     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
9640 
9641   if (EmitEHLabels) {
9642     Chain = lowerEndEH(Chain, cast<InvokeInst>(&Call), EHPadBB, BeginLabel);
9643   }
9644 
  // Only update the root if the inline assembly has a memory effect.
9646   if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr ||
9647       EmitEHLabels)
9648     DAG.setRoot(Chain);
9649 }
9650 
9651 void SelectionDAGBuilder::emitInlineAsmError(const CallBase &Call,
9652                                              const Twine &Message) {
9653   LLVMContext &Ctx = *DAG.getContext();
9654   Ctx.emitError(&Call, Message);
9655 
9656   // Make sure we leave the DAG in a valid state
9657   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9658   SmallVector<EVT, 1> ValueVTs;
9659   ComputeValueVTs(TLI, DAG.getDataLayout(), Call.getType(), ValueVTs);
9660 
9661   if (ValueVTs.empty())
9662     return;
9663 
9664   SmallVector<SDValue, 1> Ops;
9665   for (unsigned i = 0, e = ValueVTs.size(); i != e; ++i)
9666     Ops.push_back(DAG.getUNDEF(ValueVTs[i]));
9667 
9668   setValue(&Call, DAG.getMergeValues(Ops, getCurSDLoc()));
9669 }
9670 
9671 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
9672   DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
9673                           MVT::Other, getRoot(),
9674                           getValue(I.getArgOperand(0)),
9675                           DAG.getSrcValue(I.getArgOperand(0))));
9676 }
9677 
9678 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
9679   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9680   const DataLayout &DL = DAG.getDataLayout();
9681   SDValue V = DAG.getVAArg(
9682       TLI.getMemValueType(DAG.getDataLayout(), I.getType()), getCurSDLoc(),
9683       getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
9684       DL.getABITypeAlign(I.getType()).value());
9685   DAG.setRoot(V.getValue(1));
9686 
9687   if (I.getType()->isPointerTy())
9688     V = DAG.getPtrExtOrTrunc(
9689         V, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()));
9690   setValue(&I, V);
9691 }
9692 
9693 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
9694   DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
9695                           MVT::Other, getRoot(),
9696                           getValue(I.getArgOperand(0)),
9697                           DAG.getSrcValue(I.getArgOperand(0))));
9698 }
9699 
9700 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
9701   DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
9702                           MVT::Other, getRoot(),
9703                           getValue(I.getArgOperand(0)),
9704                           getValue(I.getArgOperand(1)),
9705                           DAG.getSrcValue(I.getArgOperand(0)),
9706                           DAG.getSrcValue(I.getArgOperand(1))));
9707 }
9708 
9709 SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
9710                                                     const Instruction &I,
9711                                                     SDValue Op) {
9712   const MDNode *Range = getRangeMetadata(I);
9713   if (!Range)
9714     return Op;
9715 
9716   ConstantRange CR = getConstantRangeFromMetadata(*Range);
9717   if (CR.isFullSet() || CR.isEmptySet() || CR.isUpperWrapped())
9718     return Op;
9719 
9720   APInt Lo = CR.getUnsignedMin();
9721   if (!Lo.isMinValue())
9722     return Op;
9723 
9724   APInt Hi = CR.getUnsignedMax();
9725   unsigned Bits = std::max(Hi.getActiveBits(),
9726                            static_cast<unsigned>(IntegerType::MIN_INT_BITS));
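  // Worked example (illustrative): !range !{i64 0, i64 256} gives Lo = 0 and
  // Hi = 255, so Bits = 8 and we assert that the value zero-extends from i8.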
9727 
9728   EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
9729 
9730   SDLoc SL = getCurSDLoc();
9731 
9732   SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
9733                              DAG.getValueType(SmallVT));
9734   unsigned NumVals = Op.getNode()->getNumValues();
9735   if (NumVals == 1)
9736     return ZExt;
9737 
9738   SmallVector<SDValue, 4> Ops;
9739 
9740   Ops.push_back(ZExt);
9741   for (unsigned I = 1; I != NumVals; ++I)
9742     Ops.push_back(Op.getValue(I));
9743 
9744   return DAG.getMergeValues(Ops, SL);
9745 }
9746 
/// Populate a CallLoweringInfo (into \p CLI) based on the properties of
9748 /// the call being lowered.
9749 ///
9750 /// This is a helper for lowering intrinsics that follow a target calling
9751 /// convention or require stack pointer adjustment. Only a subset of the
9752 /// intrinsic's operands need to participate in the calling convention.
9753 void SelectionDAGBuilder::populateCallLoweringInfo(
9754     TargetLowering::CallLoweringInfo &CLI, const CallBase *Call,
9755     unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
9756     AttributeSet RetAttrs, bool IsPatchPoint) {
9757   TargetLowering::ArgListTy Args;
9758   Args.reserve(NumArgs);
9759 
9760   // Populate the argument list.
9761   // Attributes for args start at offset 1, after the return attribute.
9762   for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
9763        ArgI != ArgE; ++ArgI) {
9764     const Value *V = Call->getOperand(ArgI);
9765 
9766     assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
9767 
9768     TargetLowering::ArgListEntry Entry;
9769     Entry.Node = getValue(V);
9770     Entry.Ty = V->getType();
9771     Entry.setAttributes(Call, ArgI);
9772     Args.push_back(Entry);
9773   }
9774 
9775   CLI.setDebugLoc(getCurSDLoc())
9776       .setChain(getRoot())
9777       .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
9778                  RetAttrs)
9779       .setDiscardResult(Call->use_empty())
9780       .setIsPatchPoint(IsPatchPoint)
9781       .setIsPreallocated(
9782           Call->countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
9783 }
9784 
9785 /// Add a stack map intrinsic call's live variable operands to a stackmap
9786 /// or patchpoint target node's operand list.
9787 ///
9788 /// Constants are converted to TargetConstants purely as an optimization to
9789 /// avoid constant materialization and register allocation.
9790 ///
9791 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
/// generate address computation nodes, and so FinalizeISel can convert the
9793 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
9794 /// address materialization and register allocation, but may also be required
9795 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
9796 /// alloca in the entry block, then the runtime may assume that the alloca's
9797 /// StackMap location can be read immediately after compilation and that the
9798 /// location is valid at any point during execution (this is similar to the
9799 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
9800 /// only available in a register, then the runtime would need to trap when
9801 /// execution reaches the StackMap in order to read the alloca's location.
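/// Illustrative sketch: given
///   %buf = alloca [16 x i8]
///   call void (i64, i32, ...) @llvm.experimental.stackmap(i64 7, i32 0, ptr %buf)
/// the ptr operand is emitted as a TargetFrameIndex, which FinalizeISel later
/// turns into a DirectMemRefOp StackMap location that the runtime can read
/// without trapping.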
9802 static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx,
9803                                 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
9804                                 SelectionDAGBuilder &Builder) {
9805   SelectionDAG &DAG = Builder.DAG;
9806   for (unsigned I = StartIdx; I < Call.arg_size(); I++) {
9807     SDValue Op = Builder.getValue(Call.getArgOperand(I));
9808 
9809     // Things on the stack are pointer-typed, meaning that they are already
9810     // legal and can be emitted directly to target nodes.
9811     if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
9812       Ops.push_back(DAG.getTargetFrameIndex(FI->getIndex(), Op.getValueType()));
9813     } else {
9814       // Otherwise emit a target independent node to be legalised.
9815       Ops.push_back(Builder.getValue(Call.getArgOperand(I)));
9816     }
9817   }
9818 }
9819 
9820 /// Lower llvm.experimental.stackmap.
9821 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
9822   // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
9823   //                                  [live variables...])
9824 
9825   assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
9826 
9827   SDValue Chain, InGlue, Callee;
9828   SmallVector<SDValue, 32> Ops;
9829 
9830   SDLoc DL = getCurSDLoc();
9831   Callee = getValue(CI.getCalledOperand());
9832 
9833   // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPs (if requested). Unlike the patchpoint
9835   // intrinsic, this won't be lowered to a function call. This means we don't
9836   // have to worry about calling conventions and target specific lowering code.
9837   // Instead we perform the call lowering right here.
9838   //
9839   // chain, flag = CALLSEQ_START(chain, 0, 0)
9840   // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
9841   // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
9842   //
9843   Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
9844   InGlue = Chain.getValue(1);
9845 
9846   // Add the STACKMAP operands, starting with DAG house-keeping.
9847   Ops.push_back(Chain);
9848   Ops.push_back(InGlue);
9849 
9850   // Add the <id>, <numShadowBytes> operands.
9851   //
9852   // These do not require legalisation, and can be emitted directly to target
9853   // constant nodes.
9854   SDValue ID = getValue(CI.getArgOperand(0));
9855   assert(ID.getValueType() == MVT::i64);
9856   SDValue IDConst = DAG.getTargetConstant(
9857       cast<ConstantSDNode>(ID)->getZExtValue(), DL, ID.getValueType());
9858   Ops.push_back(IDConst);
9859 
9860   SDValue Shad = getValue(CI.getArgOperand(1));
9861   assert(Shad.getValueType() == MVT::i32);
9862   SDValue ShadConst = DAG.getTargetConstant(
9863       cast<ConstantSDNode>(Shad)->getZExtValue(), DL, Shad.getValueType());
9864   Ops.push_back(ShadConst);
9865 
9866   // Add the live variables.
9867   addStackMapLiveVars(CI, 2, DL, Ops, *this);
9868 
9869   // Create the STACKMAP node.
9870   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
9871   Chain = DAG.getNode(ISD::STACKMAP, DL, NodeTys, Ops);
9872   InGlue = Chain.getValue(1);
9873 
9874   Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL);
9875 
9876   // Stackmaps don't generate values, so nothing goes into the NodeMap.
9877 
9878   // Set the root to the target-lowered call chain.
9879   DAG.setRoot(Chain);
9880 
9881   // Inform the Frame Information that we have a stackmap in this function.
9882   FuncInfo.MF->getFrameInfo().setHasStackMap();
9883 }
9884 
9885 /// Lower llvm.experimental.patchpoint directly to its target opcode.
9886 void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
9887                                           const BasicBlock *EHPadBB) {
9888   // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
9889   //                                                 i32 <numBytes>,
9890   //                                                 i8* <target>,
9891   //                                                 i32 <numArgs>,
9892   //                                                 [Args...],
9893   //                                                 [live variables...])
9894 
9895   CallingConv::ID CC = CB.getCallingConv();
9896   bool IsAnyRegCC = CC == CallingConv::AnyReg;
9897   bool HasDef = !CB.getType()->isVoidTy();
9898   SDLoc dl = getCurSDLoc();
9899   SDValue Callee = getValue(CB.getArgOperand(PatchPointOpers::TargetPos));
9900 
9901   // Handle immediate and symbolic callees.
9902   if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
9903     Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
9904                                    /*isTarget=*/true);
9905   else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
9906     Callee =  DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
9907                                          SDLoc(SymbolicCallee),
9908                                          SymbolicCallee->getValueType(0));
9909 
9910   // Get the real number of arguments participating in the call <numArgs>
9911   SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos));
9912   unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
9913 
9914   // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
9915   // Intrinsics include all meta-operands up to but not including CC.
9916   unsigned NumMetaOpers = PatchPointOpers::CCPos;
9917   assert(CB.arg_size() >= NumMetaOpers + NumArgs &&
9918          "Not enough arguments provided to the patchpoint intrinsic");
9919 
9920   // For AnyRegCC the arguments are lowered later on manually.
9921   unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
9922   Type *ReturnTy =
9923       IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CB.getType();
9924 
9925   TargetLowering::CallLoweringInfo CLI(DAG);
9926   populateCallLoweringInfo(CLI, &CB, NumMetaOpers, NumCallArgs, Callee,
9927                            ReturnTy, CB.getAttributes().getRetAttrs(), true);
9928   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
9929 
9930   SDNode *CallEnd = Result.second.getNode();
9931   if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
9932     CallEnd = CallEnd->getOperand(0).getNode();
9933 
9934   /// Get a call instruction from the call sequence chain.
9935   /// Tail calls are not allowed.
9936   assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
9937          "Expected a callseq node.");
9938   SDNode *Call = CallEnd->getOperand(0).getNode();
9939   bool HasGlue = Call->getGluedNode();
9940 
9941   // Replace the target specific call node with the patchable intrinsic.
9942   SmallVector<SDValue, 8> Ops;
9943 
9944   // Push the chain.
9945   Ops.push_back(*(Call->op_begin()));
9946 
9947   // Optionally, push the glue (if any).
9948   if (HasGlue)
9949     Ops.push_back(*(Call->op_end() - 1));
9950 
9951   // Push the register mask info.
9952   if (HasGlue)
9953     Ops.push_back(*(Call->op_end() - 2));
9954   else
9955     Ops.push_back(*(Call->op_end() - 1));
9956 
9957   // Add the <id> and <numBytes> constants.
9958   SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos));
9959   Ops.push_back(DAG.getTargetConstant(
9960                   cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
9961   SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos));
9962   Ops.push_back(DAG.getTargetConstant(
9963                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
9964                   MVT::i32));
9965 
9966   // Add the callee.
9967   Ops.push_back(Callee);
9968 
9969   // Adjust <numArgs> to account for any arguments that have been passed on the
9970   // stack instead.
9971   // Call Node: Chain, Target, {Args}, RegMask, [Glue]
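  // For example (illustrative), a glued call node [Chain, Target, %a, %b,
  // RegMask, Glue] has 6 operands, so 6 - 4 = 2 register arguments.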
9972   unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
9973   NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
9974   Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
9975 
9976   // Add the calling convention
9977   Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
9978 
9979   // Add the arguments we omitted previously. The register allocator should
9980   // place these in any free register.
9981   if (IsAnyRegCC)
9982     for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
9983       Ops.push_back(getValue(CB.getArgOperand(i)));
9984 
9985   // Push the arguments from the call instruction.
9986   SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
9987   Ops.append(Call->op_begin() + 2, e);
9988 
9989   // Push live variables for the stack map.
9990   addStackMapLiveVars(CB, NumMetaOpers + NumArgs, dl, Ops, *this);
9991 
9992   SDVTList NodeTys;
9993   if (IsAnyRegCC && HasDef) {
9994     // Create the return types based on the intrinsic definition
9995     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9996     SmallVector<EVT, 3> ValueVTs;
9997     ComputeValueVTs(TLI, DAG.getDataLayout(), CB.getType(), ValueVTs);
9998     assert(ValueVTs.size() == 1 && "Expected only one return value type.");
9999 
10000     // There is always a chain and a glue type at the end
10001     ValueVTs.push_back(MVT::Other);
10002     ValueVTs.push_back(MVT::Glue);
10003     NodeTys = DAG.getVTList(ValueVTs);
10004   } else
10005     NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10006 
10007   // Replace the target specific call node with a PATCHPOINT node.
10008   SDValue PPV = DAG.getNode(ISD::PATCHPOINT, dl, NodeTys, Ops);
10009 
10010   // Update the NodeMap.
10011   if (HasDef) {
10012     if (IsAnyRegCC)
10013       setValue(&CB, SDValue(PPV.getNode(), 0));
10014     else
10015       setValue(&CB, Result.first);
10016   }
10017 
10018   // Fixup the consumers of the intrinsic. The chain and glue may be used in the
10019   // call sequence. Furthermore the location of the chain and glue can change
10020   // when the AnyReg calling convention is used and the intrinsic returns a
10021   // value.
10022   if (IsAnyRegCC && HasDef) {
10023     SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
10024     SDValue To[] = {PPV.getValue(1), PPV.getValue(2)};
10025     DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
10026   } else
10027     DAG.ReplaceAllUsesWith(Call, PPV.getNode());
10028   DAG.DeleteNode(Call);
10029 
10030   // Inform the Frame Information that we have a patchpoint in this function.
10031   FuncInfo.MF->getFrameInfo().setHasPatchPoint();
10032 }
10033 
10034 void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
10035                                             unsigned Intrinsic) {
10036   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10037   SDValue Op1 = getValue(I.getArgOperand(0));
10038   SDValue Op2;
10039   if (I.arg_size() > 1)
10040     Op2 = getValue(I.getArgOperand(1));
10041   SDLoc dl = getCurSDLoc();
10042   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
10043   SDValue Res;
10044   SDNodeFlags SDFlags;
10045   if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
10046     SDFlags.copyFMF(*FPMO);
10047 
10048   switch (Intrinsic) {
10049   case Intrinsic::vector_reduce_fadd:
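    // With the reassoc fast-math flag, the scalar start value can be folded in
    // after reducing the vector; e.g. (illustrative)
    //   call reassoc float @llvm.vector.reduce.fadd.v4f32(float %acc, <4 x float> %v)
    // becomes fadd(%acc, vecreduce_fadd(%v)); otherwise the strictly ordered
    // VECREDUCE_SEQ_FADD form is used.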
10050     if (SDFlags.hasAllowReassociation())
10051       Res = DAG.getNode(ISD::FADD, dl, VT, Op1,
10052                         DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
10053                         SDFlags);
10054     else
10055       Res = DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
10056     break;
10057   case Intrinsic::vector_reduce_fmul:
10058     if (SDFlags.hasAllowReassociation())
10059       Res = DAG.getNode(ISD::FMUL, dl, VT, Op1,
10060                         DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
10061                         SDFlags);
10062     else
10063       Res = DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
10064     break;
10065   case Intrinsic::vector_reduce_add:
10066     Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
10067     break;
10068   case Intrinsic::vector_reduce_mul:
10069     Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
10070     break;
10071   case Intrinsic::vector_reduce_and:
10072     Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
10073     break;
10074   case Intrinsic::vector_reduce_or:
10075     Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
10076     break;
10077   case Intrinsic::vector_reduce_xor:
10078     Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
10079     break;
10080   case Intrinsic::vector_reduce_smax:
10081     Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
10082     break;
10083   case Intrinsic::vector_reduce_smin:
10084     Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
10085     break;
10086   case Intrinsic::vector_reduce_umax:
10087     Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
10088     break;
10089   case Intrinsic::vector_reduce_umin:
10090     Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
10091     break;
10092   case Intrinsic::vector_reduce_fmax:
10093     Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
10094     break;
10095   case Intrinsic::vector_reduce_fmin:
10096     Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
10097     break;
10098   case Intrinsic::vector_reduce_fmaximum:
10099     Res = DAG.getNode(ISD::VECREDUCE_FMAXIMUM, dl, VT, Op1, SDFlags);
10100     break;
10101   case Intrinsic::vector_reduce_fminimum:
10102     Res = DAG.getNode(ISD::VECREDUCE_FMINIMUM, dl, VT, Op1, SDFlags);
10103     break;
10104   default:
10105     llvm_unreachable("Unhandled vector reduce intrinsic");
10106   }
10107   setValue(&I, Res);
10108 }
10109 
10110 /// Returns an AttributeList representing the attributes applied to the return
10111 /// value of the given call.
10112 static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
10113   SmallVector<Attribute::AttrKind, 2> Attrs;
10114   if (CLI.RetSExt)
10115     Attrs.push_back(Attribute::SExt);
10116   if (CLI.RetZExt)
10117     Attrs.push_back(Attribute::ZExt);
10118   if (CLI.IsInReg)
10119     Attrs.push_back(Attribute::InReg);
10120 
10121   return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
10122                             Attrs);
10123 }
10124 
10125 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
10126 /// implementation, which just calls LowerCall.
10127 /// FIXME: When all targets are
10128 /// migrated to using LowerCall, this hook should be integrated into SDISel.
10129 std::pair<SDValue, SDValue>
10130 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
10131   // Handle the incoming return values from the call.
10132   CLI.Ins.clear();
10133   Type *OrigRetTy = CLI.RetTy;
10134   SmallVector<EVT, 4> RetTys;
10135   SmallVector<uint64_t, 4> Offsets;
10136   auto &DL = CLI.DAG.getDataLayout();
10137   ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets, 0);
10138 
10139   if (CLI.IsPostTypeLegalization) {
10140     // If we are lowering a libcall after legalization, split the return type.
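    // For example, an i64 return value on a target with 32-bit registers is
    // split here into two i32 register values at byte offsets 0 and 4.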
10141     SmallVector<EVT, 4> OldRetTys;
10142     SmallVector<uint64_t, 4> OldOffsets;
10143     RetTys.swap(OldRetTys);
10144     Offsets.swap(OldOffsets);
10145 
10146     for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
10147       EVT RetVT = OldRetTys[i];
10148       uint64_t Offset = OldOffsets[i];
10149       MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
10150       unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
10151       unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
10152       RetTys.append(NumRegs, RegisterVT);
10153       for (unsigned j = 0; j != NumRegs; ++j)
10154         Offsets.push_back(Offset + j * RegisterVTByteSZ);
10155     }
10156   }
10157 
10158   SmallVector<ISD::OutputArg, 4> Outs;
10159   GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
10160 
10161   bool CanLowerReturn =
10162       this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
10163                            CLI.IsVarArg, Outs, CLI.RetTy->getContext());
10164 
10165   SDValue DemoteStackSlot;
10166   int DemoteStackIdx = -100;
10167   if (!CanLowerReturn) {
10168     // FIXME: equivalent assert?
10169     // assert(!CS.hasInAllocaArgument() &&
10170     //        "sret demotion is incompatible with inalloca");
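    // In outline: a return value the target cannot pass back in registers is
    // demoted to a hidden pointer argument referring to a fresh stack slot
    // (DemoteStackSlot); the real result is reloaded from that slot after
    // LowerCall, in the !CanLowerReturn path below.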
10171     uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
10172     Align Alignment = DL.getPrefTypeAlign(CLI.RetTy);
10173     MachineFunction &MF = CLI.DAG.getMachineFunction();
10174     DemoteStackIdx =
10175         MF.getFrameInfo().CreateStackObject(TySize, Alignment, false);
10176     Type *StackSlotPtrType = PointerType::get(CLI.RetTy,
10177                                               DL.getAllocaAddrSpace());
10178 
10179     DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
10180     ArgListEntry Entry;
10181     Entry.Node = DemoteStackSlot;
10182     Entry.Ty = StackSlotPtrType;
10183     Entry.IsSExt = false;
10184     Entry.IsZExt = false;
10185     Entry.IsInReg = false;
10186     Entry.IsSRet = true;
10187     Entry.IsNest = false;
10188     Entry.IsByVal = false;
10189     Entry.IsByRef = false;
10190     Entry.IsReturned = false;
10191     Entry.IsSwiftSelf = false;
10192     Entry.IsSwiftAsync = false;
10193     Entry.IsSwiftError = false;
10194     Entry.IsCFGuardTarget = false;
10195     Entry.Alignment = Alignment;
10196     CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
10197     CLI.NumFixedArgs += 1;
10198     CLI.getArgs()[0].IndirectType = CLI.RetTy;
10199     CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
10200 
10201     // sret demotion isn't compatible with tail-calls, since the sret argument
10202     // points into the caller's stack frame.
10203     CLI.IsTailCall = false;
10204   } else {
10205     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
10206         CLI.RetTy, CLI.CallConv, CLI.IsVarArg, DL);
10207     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
10208       ISD::ArgFlagsTy Flags;
10209       if (NeedsRegBlock) {
10210         Flags.setInConsecutiveRegs();
10211         if (I == RetTys.size() - 1)
10212           Flags.setInConsecutiveRegsLast();
10213       }
10214       EVT VT = RetTys[I];
10215       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
10216                                                      CLI.CallConv, VT);
10217       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
10218                                                        CLI.CallConv, VT);
10219       for (unsigned i = 0; i != NumRegs; ++i) {
10220         ISD::InputArg MyFlags;
10221         MyFlags.Flags = Flags;
10222         MyFlags.VT = RegisterVT;
10223         MyFlags.ArgVT = VT;
10224         MyFlags.Used = CLI.IsReturnValueUsed;
10225         if (CLI.RetTy->isPointerTy()) {
10226           MyFlags.Flags.setPointer();
10227           MyFlags.Flags.setPointerAddrSpace(
10228               cast<PointerType>(CLI.RetTy)->getAddressSpace());
10229         }
10230         if (CLI.RetSExt)
10231           MyFlags.Flags.setSExt();
10232         if (CLI.RetZExt)
10233           MyFlags.Flags.setZExt();
10234         if (CLI.IsInReg)
10235           MyFlags.Flags.setInReg();
10236         CLI.Ins.push_back(MyFlags);
10237       }
10238     }
10239   }
10240 
10241   // We push in the swifterror return as the last element of CLI.Ins.
10242   ArgListTy &Args = CLI.getArgs();
10243   if (supportSwiftError()) {
10244     for (const ArgListEntry &Arg : Args) {
10245       if (Arg.IsSwiftError) {
10246         ISD::InputArg MyFlags;
10247         MyFlags.VT = getPointerTy(DL);
10248         MyFlags.ArgVT = EVT(getPointerTy(DL));
10249         MyFlags.Flags.setSwiftError();
10250         CLI.Ins.push_back(MyFlags);
10251       }
10252     }
10253   }
10254 
10255   // Handle all of the outgoing arguments.
10256   CLI.Outs.clear();
10257   CLI.OutVals.clear();
10258   for (unsigned i = 0, e = Args.size(); i != e; ++i) {
10259     SmallVector<EVT, 4> ValueVTs;
10260     ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
10261     // FIXME: Split arguments if CLI.IsPostTypeLegalization
10262     Type *FinalType = Args[i].Ty;
10263     if (Args[i].IsByVal)
10264       FinalType = Args[i].IndirectType;
10265     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
10266         FinalType, CLI.CallConv, CLI.IsVarArg, DL);
10267     for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
10268          ++Value) {
10269       EVT VT = ValueVTs[Value];
10270       Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
10271       SDValue Op = SDValue(Args[i].Node.getNode(),
10272                            Args[i].Node.getResNo() + Value);
10273       ISD::ArgFlagsTy Flags;
10274 
10275       // Certain targets (such as MIPS) may have a different ABI alignment
10276       // for a type depending on the context. Give the target a chance to
10277       // specify the alignment it wants.
10278       const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL));
10279       Flags.setOrigAlign(OriginalAlignment);
10280 
10281       if (Args[i].Ty->isPointerTy()) {
10282         Flags.setPointer();
10283         Flags.setPointerAddrSpace(
10284             cast<PointerType>(Args[i].Ty)->getAddressSpace());
10285       }
10286       if (Args[i].IsZExt)
10287         Flags.setZExt();
10288       if (Args[i].IsSExt)
10289         Flags.setSExt();
10290       if (Args[i].IsInReg) {
10291         // If we are using the vectorcall calling convention, a structure
10292         // that is passed InReg is surely an HVA (Homogeneous Vector Aggregate).
10293         if (CLI.CallConv == CallingConv::X86_VectorCall &&
10294             isa<StructType>(FinalType)) {
10295           // The first value of a structure is marked as HvaStart.
10296           if (0 == Value)
10297             Flags.setHvaStart();
10298           Flags.setHva();
10299         }
10300         // Set InReg Flag
10301         Flags.setInReg();
10302       }
10303       if (Args[i].IsSRet)
10304         Flags.setSRet();
10305       if (Args[i].IsSwiftSelf)
10306         Flags.setSwiftSelf();
10307       if (Args[i].IsSwiftAsync)
10308         Flags.setSwiftAsync();
10309       if (Args[i].IsSwiftError)
10310         Flags.setSwiftError();
10311       if (Args[i].IsCFGuardTarget)
10312         Flags.setCFGuardTarget();
10313       if (Args[i].IsByVal)
10314         Flags.setByVal();
10315       if (Args[i].IsByRef)
10316         Flags.setByRef();
10317       if (Args[i].IsPreallocated) {
10318         Flags.setPreallocated();
10319         // Set the byval flag for CCAssignFn callbacks that don't know about
10320         // preallocated.  This way we can know how many bytes we should've
10321         // allocated and how many bytes a callee cleanup function will pop.  If
10322         // we port preallocated to more targets, we'll have to add custom
10323         // preallocated handling in the various CC lowering callbacks.
10324         Flags.setByVal();
10325       }
10326       if (Args[i].IsInAlloca) {
10327         Flags.setInAlloca();
10328         // Set the byval flag for CCAssignFn callbacks that don't know about
10329         // inalloca.  This way we can know how many bytes we should've allocated
10330         // and how many bytes a callee cleanup function will pop.  If we port
10331         // inalloca to more targets, we'll have to add custom inalloca handling
10332         // in the various CC lowering callbacks.
10333         Flags.setByVal();
10334       }
10335       Align MemAlign;
10336       if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
10337         unsigned FrameSize = DL.getTypeAllocSize(Args[i].IndirectType);
10338         Flags.setByValSize(FrameSize);
10339 
10340         // Size/alignment info should come from the FE; the BE guesses if it is not there, but there are cases it cannot get right.
10341         if (auto MA = Args[i].Alignment)
10342           MemAlign = *MA;
10343         else
10344           MemAlign = Align(getByValTypeAlignment(Args[i].IndirectType, DL));
10345       } else if (auto MA = Args[i].Alignment) {
10346         MemAlign = *MA;
10347       } else {
10348         MemAlign = OriginalAlignment;
10349       }
10350       Flags.setMemAlign(MemAlign);
10351       if (Args[i].IsNest)
10352         Flags.setNest();
10353       if (NeedsRegBlock)
10354         Flags.setInConsecutiveRegs();
10355 
10356       MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
10357                                                  CLI.CallConv, VT);
10358       unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
10359                                                         CLI.CallConv, VT);
10360       SmallVector<SDValue, 4> Parts(NumParts);
10361       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
10362 
10363       if (Args[i].IsSExt)
10364         ExtendKind = ISD::SIGN_EXTEND;
10365       else if (Args[i].IsZExt)
10366         ExtendKind = ISD::ZERO_EXTEND;
10367 
10368       // Conservatively only handle 'returned' on non-vectors that can be lowered,
10369       // for now.
10370       if (Args[i].IsReturned && !Op.getValueType().isVector() &&
10371           CanLowerReturn) {
10372         assert((CLI.RetTy == Args[i].Ty ||
10373                 (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() &&
10374                  CLI.RetTy->getPointerAddressSpace() ==
10375                      Args[i].Ty->getPointerAddressSpace())) &&
10376                RetTys.size() == NumValues && "unexpected use of 'returned'");
10377         // Before passing 'returned' to the target lowering code, ensure that
10378         // either the register MVT and the actual EVT are the same size or that
10379         // the return value and argument are extended in the same way; in these
10380         // cases it's safe to pass the argument register value unchanged as the
10381         // return register value (although it's at the target's option whether
10382         // to do so)
10383         // TODO: allow code generation to take advantage of partially preserved
10384         // registers rather than clobbering the entire register when the
10385         // parameter extension method is not compatible with the return
10386         // extension method
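        // For example, an i8 argument passed and returned 'zeroext' may forward
        // the extended register value unchanged, whereas a 'signext' argument
        // with a 'zeroext' return cannot, so 'returned' is not propagated there.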
10387         if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
10388             (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
10389              CLI.RetZExt == Args[i].IsZExt))
10390           Flags.setReturned();
10391       }
10392 
10393       getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, CLI.CB,
10394                      CLI.CallConv, ExtendKind);
10395 
10396       for (unsigned j = 0; j != NumParts; ++j) {
10397         // If it isn't the first piece, the alignment must be 1.
10398         // For scalable vectors the scalable part is currently handled
10399         // by individual targets, so we just use the known minimum size here.
10400         ISD::OutputArg MyFlags(
10401             Flags, Parts[j].getValueType().getSimpleVT(), VT,
10402             i < CLI.NumFixedArgs, i,
10403             j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
10404         if (NumParts > 1 && j == 0)
10405           MyFlags.Flags.setSplit();
10406         else if (j != 0) {
10407           MyFlags.Flags.setOrigAlign(Align(1));
10408           if (j == NumParts - 1)
10409             MyFlags.Flags.setSplitEnd();
10410         }
10411 
10412         CLI.Outs.push_back(MyFlags);
10413         CLI.OutVals.push_back(Parts[j]);
10414       }
10415 
10416       if (NeedsRegBlock && Value == NumValues - 1)
10417         CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
10418     }
10419   }
10420 
10421   SmallVector<SDValue, 4> InVals;
10422   CLI.Chain = LowerCall(CLI, InVals);
10423 
10424   // Update CLI.InVals for use outside of this function.
10425   CLI.InVals = InVals;
10426 
10427   // Verify that the target's LowerCall behaved as expected.
10428   assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
10429          "LowerCall didn't return a valid chain!");
10430   assert((!CLI.IsTailCall || InVals.empty()) &&
10431          "LowerCall emitted a return value for a tail call!");
10432   assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
10433          "LowerCall didn't emit the correct number of values!");
10434 
10435   // For a tail call, the return value is merely live-out and there aren't
10436   // any nodes in the DAG representing it. Return a special value to
10437   // indicate that a tail call has been emitted and no more Instructions
10438   // should be processed in the current block.
10439   if (CLI.IsTailCall) {
10440     CLI.DAG.setRoot(CLI.Chain);
10441     return std::make_pair(SDValue(), SDValue());
10442   }
10443 
10444 #ifndef NDEBUG
10445   for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
10446     assert(InVals[i].getNode() && "LowerCall emitted a null value!");
10447     assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
10448            "LowerCall emitted a value with the wrong type!");
10449   }
10450 #endif
10451 
10452   SmallVector<SDValue, 4> ReturnValues;
10453   if (!CanLowerReturn) {
10454     // The instruction result is the result of loading from the
10455     // hidden sret parameter.
10456     SmallVector<EVT, 1> PVTs;
10457     Type *PtrRetTy =
10458         PointerType::get(OrigRetTy->getContext(), DL.getAllocaAddrSpace());
10459 
10460     ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
10461     assert(PVTs.size() == 1 && "Pointers should fit in one register");
10462     EVT PtrVT = PVTs[0];
10463 
10464     unsigned NumValues = RetTys.size();
10465     ReturnValues.resize(NumValues);
10466     SmallVector<SDValue, 4> Chains(NumValues);
10467 
10468     // An aggregate return value cannot wrap around the address space, so
10469     // offsets to its parts don't wrap either.
10470     SDNodeFlags Flags;
10471     Flags.setNoUnsignedWrap(true);
10472 
10473     MachineFunction &MF = CLI.DAG.getMachineFunction();
10474     Align HiddenSRetAlign = MF.getFrameInfo().getObjectAlign(DemoteStackIdx);
10475     for (unsigned i = 0; i < NumValues; ++i) {
10476       SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
10477                                     CLI.DAG.getConstant(Offsets[i], CLI.DL,
10478                                                         PtrVT), Flags);
10479       SDValue L = CLI.DAG.getLoad(
10480           RetTys[i], CLI.DL, CLI.Chain, Add,
10481           MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
10482                                             DemoteStackIdx, Offsets[i]),
10483           HiddenSRetAlign);
10484       ReturnValues[i] = L;
10485       Chains[i] = L.getValue(1);
10486     }
10487 
10488     CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
10489   } else {
10490     // Collect the legal value parts into potentially illegal values
10491     // that correspond to the original function's return values.
10492     std::optional<ISD::NodeType> AssertOp;
10493     if (CLI.RetSExt)
10494       AssertOp = ISD::AssertSext;
10495     else if (CLI.RetZExt)
10496       AssertOp = ISD::AssertZext;
10497     unsigned CurReg = 0;
10498     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
10499       EVT VT = RetTys[I];
10500       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
10501                                                      CLI.CallConv, VT);
10502       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
10503                                                        CLI.CallConv, VT);
10504 
10505       ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
10506                                               NumRegs, RegisterVT, VT, nullptr,
10507                                               CLI.CallConv, AssertOp));
10508       CurReg += NumRegs;
10509     }
10510 
10511     // For a function returning void, there is no return value. We can't create
10512     // such a node, so we just return a null return value in that case; nothing
10513     // will actually look at the value.
10514     if (ReturnValues.empty())
10515       return std::make_pair(SDValue(), CLI.Chain);
10516   }
10517 
10518   SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
10519                                 CLI.DAG.getVTList(RetTys), ReturnValues);
10520   return std::make_pair(Res, CLI.Chain);
10521 }
10522 
10523 /// Places new result values for the node in Results (their number
10524 /// and types must exactly match those of the original return values of
10525 /// the node), or leaves Results empty, which indicates that the node is not
10526 /// to be custom lowered after all.
10527 void TargetLowering::LowerOperationWrapper(SDNode *N,
10528                                            SmallVectorImpl<SDValue> &Results,
10529                                            SelectionDAG &DAG) const {
10530   SDValue Res = LowerOperation(SDValue(N, 0), DAG);
10531 
10532   if (!Res.getNode())
10533     return;
10534 
10535   // If the original node has one result, take the return value from
10536   // LowerOperation as is. It might not be result number 0.
10537   if (N->getNumValues() == 1) {
10538     Results.push_back(Res);
10539     return;
10540   }
10541 
10542   // If the original node has multiple results, then the return node should
10543   // have the same number of results.
10544   assert((N->getNumValues() == Res->getNumValues()) &&
10545       "Lowering returned the wrong number of results!");
10546 
10547   // Place the new result values based on N's result numbers.
10548   for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
10549     Results.push_back(Res.getValue(I));
10550 }
10551 
10552 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
10553   llvm_unreachable("LowerOperation not implemented for this target!");
10554 }
10555 
10556 void SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V,
10557                                                      unsigned Reg,
10558                                                      ISD::NodeType ExtendType) {
10559   SDValue Op = getNonRegisterValue(V);
10560   assert((Op.getOpcode() != ISD::CopyFromReg ||
10561           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
10562          "Copy from a reg to the same reg!");
10563   assert(!Register::isPhysicalRegister(Reg) && "Is a physreg");
10564 
10565   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10566   // If this is an InlineAsm we have to match the registers required, not the
10567   // notional registers required by the type.
10568 
10569   RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
10570                    std::nullopt); // This is not an ABI copy.
10571   SDValue Chain = DAG.getEntryNode();
10572 
10573   if (ExtendType == ISD::ANY_EXTEND) {
10574     auto PreferredExtendIt = FuncInfo.PreferredExtendType.find(V);
10575     if (PreferredExtendIt != FuncInfo.PreferredExtendType.end())
10576       ExtendType = PreferredExtendIt->second;
10577   }
10578   RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
10579   PendingExports.push_back(Chain);
10580 }
10581 
10582 #include "llvm/CodeGen/SelectionDAGISel.h"
10583 
10584 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
10585 /// entry block, return true.  Arguments used by a switch are not considered
10586 /// entry-block-only, since the switch may expand into multiple basic blocks.
10587 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
10588   // With FastISel active, we may be splitting blocks, so force creation
10589   // of virtual registers for all non-dead arguments.
10590   if (FastISel)
10591     return A->use_empty();
10592 
10593   const BasicBlock &Entry = A->getParent()->front();
10594   for (const User *U : A->users())
10595     if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
10596       return false;  // Use not in entry block.
10597 
10598   return true;
10599 }
10600 
10601 using ArgCopyElisionMapTy =
10602     DenseMap<const Argument *,
10603              std::pair<const AllocaInst *, const StoreInst *>>;
10604 
10605 /// Scan the entry block of the function in FuncInfo for arguments that look
10606 /// like copies into a local alloca. Record any copied arguments in
10607 /// ArgCopyElisionCandidates.
10608 static void
10609 findArgumentCopyElisionCandidates(const DataLayout &DL,
10610                                   FunctionLoweringInfo *FuncInfo,
10611                                   ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
10612   // Record the state of every static alloca used in the entry block. Argument
10613   // allocas are all used in the entry block, so we need approximately as many
10614   // entries as we have arguments.
10615   enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
10616   SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
10617   unsigned NumArgs = FuncInfo->Fn->arg_size();
10618   StaticAllocas.reserve(NumArgs * 2);
10619 
10620   auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
10621     if (!V)
10622       return nullptr;
10623     V = V->stripPointerCasts();
10624     const auto *AI = dyn_cast<AllocaInst>(V);
10625     if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
10626       return nullptr;
10627     auto Iter = StaticAllocas.insert({AI, Unknown});
10628     return &Iter.first->second;
10629   };
10630 
10631   // Look for stores of arguments to static allocas. Look through bitcasts and
10632   // GEPs to handle type coercions, as long as the alloca is fully initialized
10633   // by the store. Any non-store use of an alloca escapes it and any subsequent
10634   // unanalyzed store might write to it.
10635   // FIXME: Handle structs initialized with multiple stores.
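  // A sketch of the kind of IR this scan targets (names are illustrative only):
  //   define void @f(i64 %x) {
  //   entry:
  //     %x.addr = alloca i64
  //     store i64 %x, ptr %x.addr   ; copy that may be elided
  // so that %x.addr can reuse the argument's own stack location.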
10636   for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
10637     // Look for stores, and handle non-store uses conservatively.
10638     const auto *SI = dyn_cast<StoreInst>(&I);
10639     if (!SI) {
10640       // We will look through cast uses, so ignore them completely.
10641       if (I.isCast())
10642         continue;
10643       // Ignore debug info and pseudo op intrinsics, they don't escape or store
10644       // to allocas.
10645       if (I.isDebugOrPseudoInst())
10646         continue;
10647       // This is an unknown instruction. Assume it escapes or writes to all
10648       // static alloca operands.
10649       for (const Use &U : I.operands()) {
10650         if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
10651           *Info = StaticAllocaInfo::Clobbered;
10652       }
10653       continue;
10654     }
10655 
10656     // If the stored value is a static alloca, mark it as escaped.
10657     if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
10658       *Info = StaticAllocaInfo::Clobbered;
10659 
10660     // Check if the destination is a static alloca.
10661     const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
10662     StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
10663     if (!Info)
10664       continue;
10665     const AllocaInst *AI = cast<AllocaInst>(Dst);
10666 
10667     // Skip allocas that have been initialized or clobbered.
10668     if (*Info != StaticAllocaInfo::Unknown)
10669       continue;
10670 
10671     // Check if the stored value is an argument, and that this store fully
10672     // initializes the alloca.
10673     // If the argument type has padding bits we can't directly forward a pointer
10674     // as the upper bits may contain garbage.
10675     // Don't elide copies from the same argument twice.
10676     const Value *Val = SI->getValueOperand()->stripPointerCasts();
10677     const auto *Arg = dyn_cast<Argument>(Val);
10678     if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
10679         Arg->getType()->isEmptyTy() ||
10680         DL.getTypeStoreSize(Arg->getType()) !=
10681             DL.getTypeAllocSize(AI->getAllocatedType()) ||
10682         !DL.typeSizeEqualsStoreSize(Arg->getType()) ||
10683         ArgCopyElisionCandidates.count(Arg)) {
10684       *Info = StaticAllocaInfo::Clobbered;
10685       continue;
10686     }
10687 
10688     LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
10689                       << '\n');
10690 
10691     // Mark this alloca and store for argument copy elision.
10692     *Info = StaticAllocaInfo::Elidable;
10693     ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
10694 
10695     // Stop scanning if we've seen all arguments. This will happen early in -O0
10696     // builds, which is useful, because -O0 builds have large entry blocks and
10697     // many allocas.
10698     if (ArgCopyElisionCandidates.size() == NumArgs)
10699       break;
10700   }
10701 }
10702 
10703 /// Try to elide argument copies from memory into a local alloca. Succeeds if
10704 /// ArgVal is a load from a suitable fixed stack object.
10705 static void tryToElideArgumentCopy(
10706     FunctionLoweringInfo &FuncInfo, SmallVectorImpl<SDValue> &Chains,
10707     DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
10708     SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
10709     ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
10710     ArrayRef<SDValue> ArgVals, bool &ArgHasUses) {
10711   // Check if this is a load from a fixed stack object.
10712   auto *LNode = dyn_cast<LoadSDNode>(ArgVals[0]);
10713   if (!LNode)
10714     return;
10715   auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
10716   if (!FINode)
10717     return;
10718 
10719   // Check that the fixed stack object is the right size and alignment.
10720   // Look at the alignment that the user wrote on the alloca instead of looking
10721   // at the stack object.
10722   auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
10723   assert(ArgCopyIter != ArgCopyElisionCandidates.end());
10724   const AllocaInst *AI = ArgCopyIter->second.first;
10725   int FixedIndex = FINode->getIndex();
10726   int &AllocaIndex = FuncInfo.StaticAllocaMap[AI];
10727   int OldIndex = AllocaIndex;
10728   MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
10729   if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
10730     LLVM_DEBUG(
10731         dbgs() << "  argument copy elision failed due to bad fixed stack "
10732                   "object size\n");
10733     return;
10734   }
10735   Align RequiredAlignment = AI->getAlign();
10736   if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) {
10737     LLVM_DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
10738                          "greater than stack argument alignment ("
10739                       << DebugStr(RequiredAlignment) << " vs "
10740                       << DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n");
10741     return;
10742   }
10743 
10744   // Perform the elision. Delete the old stack object and replace its only use
10745   // in the variable info map. Mark the stack object as mutable.
10746   LLVM_DEBUG({
10747     dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
10748            << "  Replacing frame index " << OldIndex << " with " << FixedIndex
10749            << '\n';
10750   });
10751   MFI.RemoveStackObject(OldIndex);
10752   MFI.setIsImmutableObjectIndex(FixedIndex, false);
10753   AllocaIndex = FixedIndex;
10754   ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
10755   for (SDValue ArgVal : ArgVals)
10756     Chains.push_back(ArgVal.getValue(1));
10757 
10758   // Avoid emitting code for the store implementing the copy.
10759   const StoreInst *SI = ArgCopyIter->second.second;
10760   ElidedArgCopyInstrs.insert(SI);
10761 
10762   // Check for uses of the argument again so that we can avoid exporting ArgVal
10763   // if it isn't used by anything other than the store.
10764   for (const Value *U : Arg.users()) {
10765     if (U != SI) {
10766       ArgHasUses = true;
10767       break;
10768     }
10769   }
10770 }
10771 
10772 void SelectionDAGISel::LowerArguments(const Function &F) {
10773   SelectionDAG &DAG = SDB->DAG;
10774   SDLoc dl = SDB->getCurSDLoc();
10775   const DataLayout &DL = DAG.getDataLayout();
10776   SmallVector<ISD::InputArg, 16> Ins;
10777 
10778   // In Naked functions we aren't going to save any registers.
10779   if (F.hasFnAttribute(Attribute::Naked))
10780     return;
10781 
10782   if (!FuncInfo->CanLowerReturn) {
10783     // Put in an sret pointer parameter before all the other parameters.
10784     SmallVector<EVT, 1> ValueVTs;
10785     ComputeValueVTs(*TLI, DAG.getDataLayout(),
10786                     PointerType::get(F.getContext(),
10787                                      DAG.getDataLayout().getAllocaAddrSpace()),
10788                     ValueVTs);
10789 
10790     // NOTE: Assuming that a pointer will never break down to more than one VT
10791     // or one register.
10792     ISD::ArgFlagsTy Flags;
10793     Flags.setSRet();
10794     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
10795     ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
10796                          ISD::InputArg::NoArgIndex, 0);
10797     Ins.push_back(RetArg);
10798   }
10799 
10800   // Look for stores of arguments to static allocas. Mark such arguments with a
10801   // flag to ask the target to give us the memory location of that argument if
10802   // available.
10803   ArgCopyElisionMapTy ArgCopyElisionCandidates;
10804   findArgumentCopyElisionCandidates(DL, FuncInfo.get(),
10805                                     ArgCopyElisionCandidates);
10806 
10807   // Set up the incoming argument description vector.
10808   for (const Argument &Arg : F.args()) {
10809     unsigned ArgNo = Arg.getArgNo();
10810     SmallVector<EVT, 4> ValueVTs;
10811     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
10812     bool isArgValueUsed = !Arg.use_empty();
10813     unsigned PartBase = 0;
10814     Type *FinalType = Arg.getType();
10815     if (Arg.hasAttribute(Attribute::ByVal))
10816       FinalType = Arg.getParamByValType();
10817     bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
10818         FinalType, F.getCallingConv(), F.isVarArg(), DL);
10819     for (unsigned Value = 0, NumValues = ValueVTs.size();
10820          Value != NumValues; ++Value) {
10821       EVT VT = ValueVTs[Value];
10822       Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
10823       ISD::ArgFlagsTy Flags;
10824 
10825 
10826       if (Arg.getType()->isPointerTy()) {
10827         Flags.setPointer();
10828         Flags.setPointerAddrSpace(
10829             cast<PointerType>(Arg.getType())->getAddressSpace());
10830       }
10831       if (Arg.hasAttribute(Attribute::ZExt))
10832         Flags.setZExt();
10833       if (Arg.hasAttribute(Attribute::SExt))
10834         Flags.setSExt();
10835       if (Arg.hasAttribute(Attribute::InReg)) {
10836         // If we are using the vectorcall calling convention, a structure
10837         // that is passed InReg is surely an HVA (Homogeneous Vector Aggregate).
10838         if (F.getCallingConv() == CallingConv::X86_VectorCall &&
10839             isa<StructType>(Arg.getType())) {
10840           // The first value of a structure is marked as HvaStart.
10841           if (0 == Value)
10842             Flags.setHvaStart();
10843           Flags.setHva();
10844         }
10845         // Set InReg Flag
10846         Flags.setInReg();
10847       }
10848       if (Arg.hasAttribute(Attribute::StructRet))
10849         Flags.setSRet();
10850       if (Arg.hasAttribute(Attribute::SwiftSelf))
10851         Flags.setSwiftSelf();
10852       if (Arg.hasAttribute(Attribute::SwiftAsync))
10853         Flags.setSwiftAsync();
10854       if (Arg.hasAttribute(Attribute::SwiftError))
10855         Flags.setSwiftError();
10856       if (Arg.hasAttribute(Attribute::ByVal))
10857         Flags.setByVal();
10858       if (Arg.hasAttribute(Attribute::ByRef))
10859         Flags.setByRef();
10860       if (Arg.hasAttribute(Attribute::InAlloca)) {
10861         Flags.setInAlloca();
10862         // Set the byval flag for CCAssignFn callbacks that don't know about
10863         // inalloca.  This way we can know how many bytes we should've allocated
10864         // and how many bytes a callee cleanup function will pop.  If we port
10865         // inalloca to more targets, we'll have to add custom inalloca handling
10866         // in the various CC lowering callbacks.
10867         Flags.setByVal();
10868       }
10869       if (Arg.hasAttribute(Attribute::Preallocated)) {
10870         Flags.setPreallocated();
10871         // Set the byval flag for CCAssignFn callbacks that don't know about
10872         // preallocated.  This way we can know how many bytes we should've
10873         // allocated and how many bytes a callee cleanup function will pop.  If
10874         // we port preallocated to more targets, we'll have to add custom
10875         // preallocated handling in the various CC lowering callbacks.
10876         Flags.setByVal();
10877       }
10878 
10879       // Certain targets (such as MIPS) may have a different ABI alignment
10880       // for a type depending on the context. Give the target a chance to
10881       // specify the alignment it wants.
10882       const Align OriginalAlignment(
10883           TLI->getABIAlignmentForCallingConv(ArgTy, DL));
10884       Flags.setOrigAlign(OriginalAlignment);
10885 
10886       Align MemAlign;
10887       Type *ArgMemTy = nullptr;
10888       if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated() ||
10889           Flags.isByRef()) {
10890         if (!ArgMemTy)
10891           ArgMemTy = Arg.getPointeeInMemoryValueType();
10892 
10893         uint64_t MemSize = DL.getTypeAllocSize(ArgMemTy);
10894 
10895         // For in-memory arguments, size and alignment should be passed from FE.
10896         // BE will guess if this info is not there but there are cases it cannot
10897         // get right.
10898         if (auto ParamAlign = Arg.getParamStackAlign())
10899           MemAlign = *ParamAlign;
10900         else if ((ParamAlign = Arg.getParamAlign()))
10901           MemAlign = *ParamAlign;
10902         else
10903           MemAlign = Align(TLI->getByValTypeAlignment(ArgMemTy, DL));
10904         if (Flags.isByRef())
10905           Flags.setByRefSize(MemSize);
10906         else
10907           Flags.setByValSize(MemSize);
10908       } else if (auto ParamAlign = Arg.getParamStackAlign()) {
10909         MemAlign = *ParamAlign;
10910       } else {
10911         MemAlign = OriginalAlignment;
10912       }
10913       Flags.setMemAlign(MemAlign);
10914 
10915       if (Arg.hasAttribute(Attribute::Nest))
10916         Flags.setNest();
10917       if (NeedsRegBlock)
10918         Flags.setInConsecutiveRegs();
10919       if (ArgCopyElisionCandidates.count(&Arg))
10920         Flags.setCopyElisionCandidate();
10921       if (Arg.hasAttribute(Attribute::Returned))
10922         Flags.setReturned();
10923 
10924       MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
10925           *CurDAG->getContext(), F.getCallingConv(), VT);
10926       unsigned NumRegs = TLI->getNumRegistersForCallingConv(
10927           *CurDAG->getContext(), F.getCallingConv(), VT);
10928       for (unsigned i = 0; i != NumRegs; ++i) {
10929         // For scalable vectors, use the minimum size; individual targets
10930         // are responsible for handling scalable vector arguments and
10931         // return values.
10932         ISD::InputArg MyFlags(
10933             Flags, RegisterVT, VT, isArgValueUsed, ArgNo,
10934             PartBase + i * RegisterVT.getStoreSize().getKnownMinValue());
10935         if (NumRegs > 1 && i == 0)
10936           MyFlags.Flags.setSplit();
10937         // if it isn't first piece, alignment must be 1
10938         // If it isn't the first piece, the alignment must be 1.
10939           MyFlags.Flags.setOrigAlign(Align(1));
10940           if (i == NumRegs - 1)
10941             MyFlags.Flags.setSplitEnd();
10942         }
10943         Ins.push_back(MyFlags);
10944       }
10945       if (NeedsRegBlock && Value == NumValues - 1)
10946         Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
10947       PartBase += VT.getStoreSize().getKnownMinValue();
10948     }
10949   }
10950 
10951   // Call the target to set up the argument values.
10952   SmallVector<SDValue, 8> InVals;
10953   SDValue NewRoot = TLI->LowerFormalArguments(
10954       DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
10955 
10956   // Verify that the target's LowerFormalArguments behaved as expected.
10957   assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
10958          "LowerFormalArguments didn't return a valid chain!");
10959   assert(InVals.size() == Ins.size() &&
10960          "LowerFormalArguments didn't emit the correct number of values!");
10961   LLVM_DEBUG({
10962     for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
10963       assert(InVals[i].getNode() &&
10964              "LowerFormalArguments emitted a null value!");
10965       assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
10966              "LowerFormalArguments emitted a value with the wrong type!");
10967     }
10968   });
10969 
10970   // Update the DAG with the new chain value resulting from argument lowering.
10971   DAG.setRoot(NewRoot);
10972 
10973   // Set up the argument values.
10974   unsigned i = 0;
10975   if (!FuncInfo->CanLowerReturn) {
10976     // Create a virtual register for the sret pointer, and put in a copy
10977     // from the sret argument into it.
10978     SmallVector<EVT, 1> ValueVTs;
10979     ComputeValueVTs(*TLI, DAG.getDataLayout(),
10980                     PointerType::get(F.getContext(),
10981                                      DAG.getDataLayout().getAllocaAddrSpace()),
10982                     ValueVTs);
10983     MVT VT = ValueVTs[0].getSimpleVT();
10984     MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
10985     std::optional<ISD::NodeType> AssertOp;
10986     SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT,
10987                                         nullptr, F.getCallingConv(), AssertOp);
10988 
10989     MachineFunction& MF = SDB->DAG.getMachineFunction();
10990     MachineRegisterInfo& RegInfo = MF.getRegInfo();
10991     Register SRetReg =
10992         RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
10993     FuncInfo->DemoteRegister = SRetReg;
10994     NewRoot =
10995         SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
10996     DAG.setRoot(NewRoot);
10997 
10998     // i indexes lowered arguments.  Bump it past the hidden sret argument.
10999     ++i;
11000   }
11001 
11002   SmallVector<SDValue, 4> Chains;
11003   DenseMap<int, int> ArgCopyElisionFrameIndexMap;
11004   for (const Argument &Arg : F.args()) {
11005     SmallVector<SDValue, 4> ArgValues;
11006     SmallVector<EVT, 4> ValueVTs;
11007     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
11008     unsigned NumValues = ValueVTs.size();
11009     if (NumValues == 0)
11010       continue;
11011 
11012     bool ArgHasUses = !Arg.use_empty();
11013 
11014     // Elide the copying store if the target loaded this argument from a
11015     // suitable fixed stack object.
11016     if (Ins[i].Flags.isCopyElisionCandidate()) {
11017       unsigned NumParts = 0;
11018       for (EVT VT : ValueVTs)
11019         NumParts += TLI->getNumRegistersForCallingConv(*CurDAG->getContext(),
11020                                                        F.getCallingConv(), VT);
11021 
11022       tryToElideArgumentCopy(*FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
11023                              ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
11024                              ArrayRef(&InVals[i], NumParts), ArgHasUses);
11025     }
11026 
11027     // If this argument is unused, remember its value. It is used to generate
11028     // debugging information.
11029     bool isSwiftErrorArg =
11030         TLI->supportSwiftError() &&
11031         Arg.hasAttribute(Attribute::SwiftError);
11032     if (!ArgHasUses && !isSwiftErrorArg) {
11033       SDB->setUnusedArgValue(&Arg, InVals[i]);
11034 
11035       // Also remember any frame index for use in FastISel.
11036       if (FrameIndexSDNode *FI =
11037           dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
11038         FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11039     }
11040 
11041     for (unsigned Val = 0; Val != NumValues; ++Val) {
11042       EVT VT = ValueVTs[Val];
11043       MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
11044                                                       F.getCallingConv(), VT);
11045       unsigned NumParts = TLI->getNumRegistersForCallingConv(
11046           *CurDAG->getContext(), F.getCallingConv(), VT);
11047 
11048       // Even an apparent 'unused' swifterror argument needs to be returned. So
11049       // we do generate a copy for it that can be used on return from the
11050       // function.
11051       if (ArgHasUses || isSwiftErrorArg) {
11052         std::optional<ISD::NodeType> AssertOp;
11053         if (Arg.hasAttribute(Attribute::SExt))
11054           AssertOp = ISD::AssertSext;
11055         else if (Arg.hasAttribute(Attribute::ZExt))
11056           AssertOp = ISD::AssertZext;
11057 
11058         ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
11059                                              PartVT, VT, nullptr,
11060                                              F.getCallingConv(), AssertOp));
11061       }
11062 
11063       i += NumParts;
11064     }
11065 
11066     // We don't need to do anything else for unused arguments.
11067     if (ArgValues.empty())
11068       continue;
11069 
11070     // Note down frame index.
11071     if (FrameIndexSDNode *FI =
11072         dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
11073       FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11074 
11075     SDValue Res = DAG.getMergeValues(ArrayRef(ArgValues.data(), NumValues),
11076                                      SDB->getCurSDLoc());
11077 
11078     SDB->setValue(&Arg, Res);
11079     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
11080       // We want to associate the argument with the frame index, among
11081       // involved operands, that corresponds to the lowest address. The
11082       // getCopyFromParts function, called earlier, is swapping the order of
11083       // the operands to BUILD_PAIR depending on endianness. The result of
11084       // that swapping is that the least significant bits of the argument will
11085       // be in the first operand of the BUILD_PAIR node, and the most
11086       // significant bits will be in the second operand.
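      // For example, an i64 argument lowered as two i32 stack loads: on a
      // little-endian target the low-address load is operand 0 of the
      // BUILD_PAIR, while on a big-endian target it is operand 1, and that is
      // the load whose frame index we want to record.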
11087       unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
11088       if (LoadSDNode *LNode =
11089           dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
11090         if (FrameIndexSDNode *FI =
11091             dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
11092           FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11093     }
11094 
11095     // Analyses past this point are naive and don't expect an assertion.
11096     if (Res.getOpcode() == ISD::AssertZext)
11097       Res = Res.getOperand(0);
11098 
11099     // Update the SwiftErrorVRegDefMap.
11100     if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
11101       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11102       if (Register::isVirtualRegister(Reg))
11103         SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
11104                                    Reg);
11105     }
11106 
11107     // If this argument is live outside of the entry block, insert a copy from
11108     // wherever we got it to the vreg that other BB's will reference it as.
11109     if (Res.getOpcode() == ISD::CopyFromReg) {
11110       // If we can, though, try to skip creating an unnecessary vreg.
11111       // FIXME: This isn't very clean... it would be nice to make this more
11112       // general.
11113       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11114       if (Register::isVirtualRegister(Reg)) {
11115         FuncInfo->ValueMap[&Arg] = Reg;
11116         continue;
11117       }
11118     }
11119     if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
11120       FuncInfo->InitializeRegForValue(&Arg);
11121       SDB->CopyToExportRegsIfNeeded(&Arg);
11122     }
11123   }
11124 
11125   if (!Chains.empty()) {
11126     Chains.push_back(NewRoot);
11127     NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
11128   }
11129 
11130   DAG.setRoot(NewRoot);
11131 
11132   assert(i == InVals.size() && "Argument register count mismatch!");
11133 
11134   // If any argument copy elisions occurred and we have debug info, update the
11135   // stale frame indices used in the dbg.declare variable info table.
11136   if (!ArgCopyElisionFrameIndexMap.empty()) {
11137     for (MachineFunction::VariableDbgInfo &VI :
11138          MF->getInStackSlotVariableDbgInfo()) {
11139       auto I = ArgCopyElisionFrameIndexMap.find(VI.getStackSlot());
11140       if (I != ArgCopyElisionFrameIndexMap.end())
11141         VI.updateStackSlot(I->second);
11142     }
11143   }
11144 
11145   // Finally, if the target has anything special to do, allow it to do so.
11146   emitFunctionEntryCode();
11147 }
11148 
11149 /// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
11150 /// ensure constants are generated when needed.  Remember the virtual registers
11151 /// that need to be added to the Machine PHI nodes as input.  We cannot just
11152 /// directly add them, because expansion might result in multiple MBB's for one
11153 /// BB.  As such, the start of the BB might correspond to a different MBB than
11154 /// the end.
11155 void
11156 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
11157   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11158 
11159   SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
11160 
11161   // Check PHI nodes in successors that expect a value to be available from this
11162   // block.
11163   for (const BasicBlock *SuccBB : successors(LLVMBB->getTerminator())) {
11164     if (!isa<PHINode>(SuccBB->begin())) continue;
11165     MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
11166 
11167     // If this terminator has multiple identical successors (common for
11168     // switches), only handle each succ once.
11169     if (!SuccsHandled.insert(SuccMBB).second)
11170       continue;
11171 
11172     MachineBasicBlock::iterator MBBI = SuccMBB->begin();
11173 
11174     // At this point we know that there is a 1-1 correspondence between LLVM PHI
11175     // nodes and Machine PHI nodes, but the incoming operands have not been
11176     // emitted yet.
11177     for (const PHINode &PN : SuccBB->phis()) {
11178       // Ignore dead PHIs.
11179       if (PN.use_empty())
11180         continue;
11181 
11182       // Skip empty types
11183       if (PN.getType()->isEmptyTy())
11184         continue;
11185 
11186       unsigned Reg;
11187       const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
11188 
11189       if (const auto *C = dyn_cast<Constant>(PHIOp)) {
11190         unsigned &RegOut = ConstantsOut[C];
11191         if (RegOut == 0) {
11192           RegOut = FuncInfo.CreateRegs(C);
11193           // We need to zero/sign extend ConstantInt phi operands to match
11194           // assumptions in FunctionLoweringInfo::ComputePHILiveOutRegInfo.
11195           ISD::NodeType ExtendType = ISD::ANY_EXTEND;
11196           if (auto *CI = dyn_cast<ConstantInt>(C))
11197             ExtendType = TLI.signExtendConstant(CI) ? ISD::SIGN_EXTEND
11198                                                     : ISD::ZERO_EXTEND;
11199           CopyValueToVirtualRegister(C, RegOut, ExtendType);
11200         }
11201         Reg = RegOut;
11202       } else {
11203         DenseMap<const Value *, Register>::iterator I =
11204           FuncInfo.ValueMap.find(PHIOp);
11205         if (I != FuncInfo.ValueMap.end())
11206           Reg = I->second;
11207         else {
11208           assert(isa<AllocaInst>(PHIOp) &&
11209                  FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
11210                  "Didn't codegen value into a register!??");
11211           Reg = FuncInfo.CreateRegs(PHIOp);
11212           CopyValueToVirtualRegister(PHIOp, Reg);
11213         }
11214       }
11215 
11216       // Remember that this register needs to be added to the machine PHI node as
11217       // the input for this MBB.
11218       SmallVector<EVT, 4> ValueVTs;
11219       ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
11220       for (EVT VT : ValueVTs) {
11221         const unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
11222         for (unsigned i = 0; i != NumRegisters; ++i)
11223           FuncInfo.PHINodesToUpdate.push_back(
11224               std::make_pair(&*MBBI++, Reg + i));
11225         Reg += NumRegisters;
11226       }
11227     }
11228   }
11229 
11230   ConstantsOut.clear();
11231 }
11232 
11233 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
11234   MachineFunction::iterator I(MBB);
11235   if (++I == FuncInfo.MF->end())
11236     return nullptr;
11237   return &*I;
11238 }
11239 
11240 /// During lowering new call nodes can be created (such as memset, etc.).
11241 /// Those will become new roots of the current DAG, but complications arise
11242 /// when they are tail calls. In such cases, the call lowering will update
11243 /// the root, but the builder still needs to know that a tail call has been
11244 /// lowered in order to avoid generating an additional return.
11245 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
11246   // If the node is null, we do have a tail call.
11247   if (MaybeTC.getNode() != nullptr)
11248     DAG.setRoot(MaybeTC);
11249   else
11250     HasTailCall = true;
11251 }
11252 
11253 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
11254                                         MachineBasicBlock *SwitchMBB,
11255                                         MachineBasicBlock *DefaultMBB) {
11256   MachineFunction *CurMF = FuncInfo.MF;
11257   MachineBasicBlock *NextMBB = nullptr;
11258   MachineFunction::iterator BBI(W.MBB);
11259   if (++BBI != FuncInfo.MF->end())
11260     NextMBB = &*BBI;
11261 
11262   unsigned Size = W.LastCluster - W.FirstCluster + 1;
11263 
11264   BranchProbabilityInfo *BPI = FuncInfo.BPI;
11265 
11266   if (Size == 2 && W.MBB == SwitchMBB) {
11267     // If any two of the cases have the same destination, and if one value
11268     // is the same as the other, but has one bit unset that the other has set,
11269     // use bit manipulation to do two compares at once.  For example:
11270     // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
11271     // TODO: This could be extended to merge any 2 cases in switches with 3
11272     // cases.
11273     // TODO: Handle cases where W.CaseBB != SwitchBB.
11274     CaseCluster &Small = *W.FirstCluster;
11275     CaseCluster &Big = *W.LastCluster;
11276 
11277     if (Small.Low == Small.High && Big.Low == Big.High &&
11278         Small.MBB == Big.MBB) {
11279       const APInt &SmallValue = Small.Low->getValue();
11280       const APInt &BigValue = Big.Low->getValue();
11281 
11282       // Check that there is only one bit different.
11283       APInt CommonBit = BigValue ^ SmallValue;
11284       if (CommonBit.isPowerOf2()) {
11285         SDValue CondLHS = getValue(Cond);
11286         EVT VT = CondLHS.getValueType();
11287         SDLoc DL = getCurSDLoc();
11288 
11289         SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
11290                                  DAG.getConstant(CommonBit, DL, VT));
11291         SDValue Cond = DAG.getSetCC(
11292             DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
11293             ISD::SETEQ);
11294 
11295         // Update successor info.
11296         // Both Small and Big will jump to Small.BB, so we sum up the
11297         // probabilities.
11298         addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
11299         if (BPI)
11300           addSuccessorWithProb(
11301               SwitchMBB, DefaultMBB,
11302               // The default destination is the first successor in IR.
11303               BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
11304         else
11305           addSuccessorWithProb(SwitchMBB, DefaultMBB);
11306 
11307         // Insert the true branch.
11308         SDValue BrCond =
11309             DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
11310                         DAG.getBasicBlock(Small.MBB));
11311         // Insert the false branch.
11312         BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
11313                              DAG.getBasicBlock(DefaultMBB));
11314 
11315         DAG.setRoot(BrCond);
11316         return;
11317       }
11318     }
11319   }
11320 
11321   if (TM.getOptLevel() != CodeGenOptLevel::None) {
11322     // Here, we order cases by probability so the most likely case will be
11323     // checked first. However, two clusters can have the same probability in
11324     // which case their relative ordering is non-deterministic. So we use Low
11325     // as a tie-breaker as clusters are guaranteed to never overlap.
11326     llvm::sort(W.FirstCluster, W.LastCluster + 1,
11327                [](const CaseCluster &a, const CaseCluster &b) {
11328       return a.Prob != b.Prob ?
11329              a.Prob > b.Prob :
11330              a.Low->getValue().slt(b.Low->getValue());
11331     });
11332 
11333     // Rearrange the case blocks so that the last one falls through if possible
11334     // without changing the order of probabilities.
11335     for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
11336       --I;
11337       if (I->Prob > W.LastCluster->Prob)
11338         break;
11339       if (I->Kind == CC_Range && I->MBB == NextMBB) {
11340         std::swap(*I, *W.LastCluster);
11341         break;
11342       }
11343     }
11344   }
11345 
11346   // Compute total probability.
11347   BranchProbability DefaultProb = W.DefaultProb;
11348   BranchProbability UnhandledProbs = DefaultProb;
11349   for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
11350     UnhandledProbs += I->Prob;
11351 
11352   MachineBasicBlock *CurMBB = W.MBB;
11353   for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
11354     bool FallthroughUnreachable = false;
11355     MachineBasicBlock *Fallthrough;
11356     if (I == W.LastCluster) {
11357       // For the last cluster, fall through to the default destination.
11358       Fallthrough = DefaultMBB;
11359       FallthroughUnreachable = isa<UnreachableInst>(
11360           DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
11361     } else {
11362       Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
11363       CurMF->insert(BBI, Fallthrough);
11364       // Put Cond in a virtual register to make it available from the new blocks.
11365       ExportFromCurrentBlock(Cond);
11366     }
11367     UnhandledProbs -= I->Prob;
11368 
11369     switch (I->Kind) {
11370       case CC_JumpTable: {
11371         // FIXME: Optimize away range check based on pivot comparisons.
11372         JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
11373         SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
11374 
11375         // The jump block hasn't been inserted yet; insert it here.
11376         MachineBasicBlock *JumpMBB = JT->MBB;
11377         CurMF->insert(BBI, JumpMBB);
11378 
11379         auto JumpProb = I->Prob;
11380         auto FallthroughProb = UnhandledProbs;
11381 
11382         // If the default statement is a target of the jump table, we evenly
11383         // distribute the default probability to successors of CurMBB. Also
11384         // update the probability on the edge from JumpMBB to Fallthrough.
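              // For example (illustrative), if DefaultProb is 1/4 and DefaultMBB is
              // reachable from the jump table, JumpProb gains 1/8, FallthroughProb
              // loses 1/8, and the JumpMBB->DefaultMBB edge probability is set to
              // 1/8 before the successors are renormalized.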
11385         for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
11386                                               SE = JumpMBB->succ_end();
11387              SI != SE; ++SI) {
11388           if (*SI == DefaultMBB) {
11389             JumpProb += DefaultProb / 2;
11390             FallthroughProb -= DefaultProb / 2;
11391             JumpMBB->setSuccProbability(SI, DefaultProb / 2);
11392             JumpMBB->normalizeSuccProbs();
11393             break;
11394           }
11395         }
11396 
11397         // If the default clause is unreachable, propagate that knowledge into
11398         // JTH->FallthroughUnreachable, which is later used to suppress the range
11399         // check.
11400         //
11401         // However, don't do this if we're doing branch target enforcement,
11402         // because a table branch _without_ a range check can be a tempting JOP
11403         // gadget - out-of-bounds inputs that are impossible in correct
11404         // execution become possible again if an attacker can influence the
11405         // control flow. So if an attacker doesn't already have a BTI bypass
11406         // available, we don't want them to be able to get one out of this
11407         // table branch.
11408         if (FallthroughUnreachable) {
11409           Function &CurFunc = CurMF->getFunction();
11410           bool HasBranchTargetEnforcement = false;
11411           if (CurFunc.hasFnAttribute("branch-target-enforcement")) {
11412             HasBranchTargetEnforcement =
11413                 CurFunc.getFnAttribute("branch-target-enforcement")
11414                     .getValueAsBool();
11415           } else {
11416             HasBranchTargetEnforcement =
11417                 CurMF->getMMI().getModule()->getModuleFlag(
11418                     "branch-target-enforcement");
11419           }
11420           if (!HasBranchTargetEnforcement)
11421             JTH->FallthroughUnreachable = true;
11422         }
11423 
11424         if (!JTH->FallthroughUnreachable)
11425           addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
11426         addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
11427         CurMBB->normalizeSuccProbs();
11428 
11429         // The jump table header will be inserted into our current block; it will
11430         // do the range check and fall through to our fallthrough block.
11431         JTH->HeaderBB = CurMBB;
11432         JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
11433 
11434         // If we're in the right place, emit the jump table header right now.
11435         if (CurMBB == SwitchMBB) {
11436           visitJumpTableHeader(*JT, *JTH, SwitchMBB);
11437           JTH->Emitted = true;
11438         }
11439         break;
11440       }
11441       case CC_BitTests: {
11442         // FIXME: Optimize away range check based on pivot comparisons.
11443         BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
11444 
11445         // The bit test blocks haven't been inserted yet; insert them here.
11446         for (BitTestCase &BTC : BTB->Cases)
11447           CurMF->insert(BBI, BTC.ThisBB);
11448 
11449         // Fill in fields of the BitTestBlock.
11450         BTB->Parent = CurMBB;
11451         BTB->Default = Fallthrough;
11452 
11453         BTB->DefaultProb = UnhandledProbs;
11454         // If the cases in bit test don't form a contiguous range, we evenly
11455         // distribute the probability on the edge to Fallthrough to two
11456         // successors of CurMBB.
11457         if (!BTB->ContiguousRange) {
11458           BTB->Prob += DefaultProb / 2;
11459           BTB->DefaultProb -= DefaultProb / 2;
11460         }
11461 
11462         if (FallthroughUnreachable)
11463           BTB->FallthroughUnreachable = true;
11464 
11465         // If we're in the right place, emit the bit test header right now.
11466         if (CurMBB == SwitchMBB) {
11467           visitBitTestHeader(*BTB, SwitchMBB);
11468           BTB->Emitted = true;
11469         }
11470         break;
11471       }
11472       case CC_Range: {
11473         const Value *RHS, *LHS, *MHS;
11474         ISD::CondCode CC;
11475         if (I->Low == I->High) {
11476           // Check Cond == I->Low.
11477           CC = ISD::SETEQ;
11478           LHS = Cond;
11479           RHS=I->Low;
11480           MHS = nullptr;
11481         } else {
11482           // Check I->Low <= Cond <= I->High.
11483           CC = ISD::SETLE;
11484           LHS = I->Low;
11485           MHS = Cond;
11486           RHS = I->High;
11487         }
11488 
11489         // If Fallthrough is unreachable, fold away the comparison.
11490         if (FallthroughUnreachable)
11491           CC = ISD::SETTRUE;
11492 
11493         // The false probability is the sum of all unhandled cases.
11494         CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
11495                      getCurSDLoc(), I->Prob, UnhandledProbs);
11496 
11497         if (CurMBB == SwitchMBB)
11498           visitSwitchCase(CB, SwitchMBB);
11499         else
11500           SL->SwitchCases.push_back(CB);
11501 
11502         break;
11503       }
11504     }
11505     CurMBB = Fallthrough;
11506   }
11507 }
11508 
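      // Return the number of clusters in [First, Last] that are ordered before CC
      // under the probability-first, then case-value ordering used when sorting the
      // clusters, i.e. CC's rank within that range.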
11509 unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
11510                                               CaseClusterIt First,
11511                                               CaseClusterIt Last) {
11512   return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
11513     if (X.Prob != CC.Prob)
11514       return X.Prob > CC.Prob;
11515 
11516     // Ties are broken by comparing the case value.
11517     return X.Low->getValue().slt(CC.Low->getValue());
11518   });
11519 }
11520 
11521 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
11522                                         const SwitchWorkListItem &W,
11523                                         Value *Cond,
11524                                         MachineBasicBlock *SwitchMBB) {
11525   assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
11526          "Clusters not sorted?");
11527 
11528   assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
11529 
11530   // Balance the tree based on branch probabilities to create a near-optimal (in
11531   // terms of search time given key frequency) binary search tree. See e.g. Kurt
11532   // Mehlhorn "Nearly Optimal Binary Search Trees" (1975).
11533   CaseClusterIt LastLeft = W.FirstCluster;
11534   CaseClusterIt FirstRight = W.LastCluster;
11535   auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
11536   auto RightProb = FirstRight->Prob + W.DefaultProb / 2;
11537 
11538   // Move LastLeft and FirstRight towards each other from opposite directions to
11539   // find a partitioning of the clusters which balances the probability on both
11540   // sides. If LeftProb and RightProb are equal, alternate which side is
11541   // taken to ensure 0-probability nodes are distributed evenly.
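        // For example (illustrative), with cluster probabilities {0.4, 0.1, 0.1, 0.4}
        // and a zero DefaultProb, the loop stops with LastLeft on the second cluster
        // and FirstRight on the third, leaving probability 0.5 on each side.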
11542   unsigned I = 0;
11543   while (LastLeft + 1 < FirstRight) {
11544     if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
11545       LeftProb += (++LastLeft)->Prob;
11546     else
11547       RightProb += (--FirstRight)->Prob;
11548     I++;
11549   }
11550 
11551   while (true) {
11552     // Our binary search tree differs from a typical BST in that ours can have up
11553     // to three values in each leaf. The pivot selection above doesn't take that
11554     // into account, which means the tree might require more nodes and be less
11555     // efficient. We compensate for this here.
11556 
11557     unsigned NumLeft = LastLeft - W.FirstCluster + 1;
11558     unsigned NumRight = W.LastCluster - FirstRight + 1;
11559 
11560     if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
11561       // If one side has less than 3 clusters, and the other has more than 3,
11562       // consider taking a cluster from the other side.
11563 
11564       if (NumLeft < NumRight) {
11565         // Consider moving the first cluster on the right to the left side.
11566         CaseCluster &CC = *FirstRight;
11567         unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
11568         unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
11569         if (LeftSideRank <= RightSideRank) {
11570           // Moving the cluster to the left does not demote it.
11571           ++LastLeft;
11572           ++FirstRight;
11573           continue;
11574         }
11575       } else {
11576         assert(NumRight < NumLeft);
11577         // Consider moving the last element on the left to the right side.
11578         CaseCluster &CC = *LastLeft;
11579         unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
11580         unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
11581         if (RightSideRank <= LeftSideRank) {
11582           // Moving the cluster to the right does not demote it.
11583           --LastLeft;
11584           --FirstRight;
11585           continue;
11586         }
11587       }
11588     }
11589     break;
11590   }
11591 
11592   assert(LastLeft + 1 == FirstRight);
11593   assert(LastLeft >= W.FirstCluster);
11594   assert(FirstRight <= W.LastCluster);
11595 
11596   // Use the first element on the right as pivot since we will make less-than
11597   // comparisons against it.
11598   CaseClusterIt PivotCluster = FirstRight;
11599   assert(PivotCluster > W.FirstCluster);
11600   assert(PivotCluster <= W.LastCluster);
11601 
11602   CaseClusterIt FirstLeft = W.FirstCluster;
11603   CaseClusterIt LastRight = W.LastCluster;
11604 
11605   const ConstantInt *Pivot = PivotCluster->Low;
11606 
11607   // New blocks will be inserted immediately after the current one.
11608   MachineFunction::iterator BBI(W.MBB);
11609   ++BBI;
11610 
11611   // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
11612   // we can branch to its destination directly if it's squeezed exactly in
11613   // between the known lower bound and Pivot - 1.
11614   MachineBasicBlock *LeftMBB;
11615   if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
11616       FirstLeft->Low == W.GE &&
11617       (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
11618     LeftMBB = FirstLeft->MBB;
11619   } else {
11620     LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
11621     FuncInfo.MF->insert(BBI, LeftMBB);
11622     WorkList.push_back(
11623         {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
11624     // Put Cond in a virtual register to make it available from the new blocks.
11625     ExportFromCurrentBlock(Cond);
11626   }
11627 
11628   // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
11629   // single cluster, RHS.Low == Pivot, and we can branch to its destination
11630   // directly if RHS.High equals the current upper bound.
11631   MachineBasicBlock *RightMBB;
11632   if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
11633       W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
11634     RightMBB = FirstRight->MBB;
11635   } else {
11636     RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
11637     FuncInfo.MF->insert(BBI, RightMBB);
11638     WorkList.push_back(
11639         {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
11640     // Put Cond in a virtual register to make it available from the new blocks.
11641     ExportFromCurrentBlock(Cond);
11642   }
11643 
11644   // Create the CaseBlock record that will be used to lower the branch.
11645   CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
11646                getCurSDLoc(), LeftProb, RightProb);
11647 
11648   if (W.MBB == SwitchMBB)
11649     visitSwitchCase(CB, SwitchMBB);
11650   else
11651     SL->SwitchCases.push_back(CB);
11652 }
11653 
11654 // Scale CaseProb after peeling a case with the probability of PeeledCaseProb
11655 // from the switch statement.
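      // For example (illustrative), if a case with probability 3/4 is peeled, the
      // remaining switch has probability 1/4, so a case that originally had
      // probability 1/8 is rescaled to (1/8) / (1/4) = 1/2 within the peeled switch.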
11656 static BranchProbability scaleCaseProbality(BranchProbability CaseProb,
11657                                             BranchProbability PeeledCaseProb) {
11658   if (PeeledCaseProb == BranchProbability::getOne())
11659     return BranchProbability::getZero();
11660   BranchProbability SwitchProb = PeeledCaseProb.getCompl();
11661 
11662   uint32_t Numerator = CaseProb.getNumerator();
11663   uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
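        // Clamp the denominator so rounding or inconsistent profile data cannot make
        // the resulting fraction exceed one.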
11664   return BranchProbability(Numerator, std::max(Numerator, Denominator));
11665 }
11666 
11667 // Try to peel the case with the highest probability if it meets or exceeds
11668 // the peeling threshold. Return the current MachineBasicBlock for the switch
11669 // statement if no peeling occurs.
11670 // If peeling is performed, return the newly created MachineBasicBlock for the
11671 // peeled switch statement. Also update Clusters to remove the peeled case.
11672 // PeeledCaseProb is set to the BranchProbability of the peeled case.
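      // For example (illustrative), with a peel threshold of 66% and clusters with
      // probabilities {70%, 20%, 10%}, the 70% cluster is lowered as its own compare
      // first and the remaining clusters are rescaled via scaleCaseProbality.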
11673 MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
11674     const SwitchInst &SI, CaseClusterVector &Clusters,
11675     BranchProbability &PeeledCaseProb) {
11676   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
11677   // Don't peel if there is only one cluster or when optimizing for size.
11678   if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
11679       TM.getOptLevel() == CodeGenOptLevel::None ||
11680       SwitchMBB->getParent()->getFunction().hasMinSize())
11681     return SwitchMBB;
11682 
11683   BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
11684   unsigned PeeledCaseIndex = 0;
11685   bool SwitchPeeled = false;
11686   for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
11687     CaseCluster &CC = Clusters[Index];
11688     if (CC.Prob < TopCaseProb)
11689       continue;
11690     TopCaseProb = CC.Prob;
11691     PeeledCaseIndex = Index;
11692     SwitchPeeled = true;
11693   }
11694   if (!SwitchPeeled)
11695     return SwitchMBB;
11696 
11697   LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
11698                     << TopCaseProb << "\n");
11699 
11700   // Record the MBB for the peeled switch statement.
11701   MachineFunction::iterator BBI(SwitchMBB);
11702   ++BBI;
11703   MachineBasicBlock *PeeledSwitchMBB =
11704       FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
11705   FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
11706 
11707   ExportFromCurrentBlock(SI.getCondition());
11708   auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
11709   SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
11710                           nullptr,   nullptr,      TopCaseProb.getCompl()};
11711   lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
11712 
11713   Clusters.erase(PeeledCaseIt);
11714   for (CaseCluster &CC : Clusters) {
11715     LLVM_DEBUG(
11716         dbgs() << "Scale the probability for one cluster, before scaling: "
11717                << CC.Prob << "\n");
11718     CC.Prob = scaleCaseProbality(CC.Prob, TopCaseProb);
11719     LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
11720   }
11721   PeeledCaseProb = TopCaseProb;
11722   return PeeledSwitchMBB;
11723 }
11724 
11725 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
11726   // Extract cases from the switch.
11727   BranchProbabilityInfo *BPI = FuncInfo.BPI;
11728   CaseClusterVector Clusters;
11729   Clusters.reserve(SI.getNumCases());
11730   for (auto I : SI.cases()) {
11731     MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
11732     const ConstantInt *CaseVal = I.getCaseValue();
11733     BranchProbability Prob =
11734         BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
11735             : BranchProbability(1, SI.getNumCases() + 1);
11736     Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
11737   }
11738 
11739   MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];
11740 
11741   // Cluster adjacent cases with the same destination. We do this at all
11742   // optimization levels because it's cheap to do and will make codegen faster
11743   // if there are many clusters.
11744   sortAndRangeify(Clusters);
11745 
11746   // The branch probability of the peeled case.
11747   BranchProbability PeeledCaseProb = BranchProbability::getZero();
11748   MachineBasicBlock *PeeledSwitchMBB =
11749       peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
11750 
11751   // If there is only the default destination, jump there directly.
11752   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
11753   if (Clusters.empty()) {
11754     assert(PeeledSwitchMBB == SwitchMBB);
11755     SwitchMBB->addSuccessor(DefaultMBB);
11756     if (DefaultMBB != NextBlock(SwitchMBB)) {
11757       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
11758                               getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
11759     }
11760     return;
11761   }
11762 
11763   SL->findJumpTables(Clusters, &SI, DefaultMBB, DAG.getPSI(), DAG.getBFI());
11764   SL->findBitTestClusters(Clusters, &SI);
11765 
11766   LLVM_DEBUG({
11767     dbgs() << "Case clusters: ";
11768     for (const CaseCluster &C : Clusters) {
11769       if (C.Kind == CC_JumpTable)
11770         dbgs() << "JT:";
11771       if (C.Kind == CC_BitTests)
11772         dbgs() << "BT:";
11773 
11774       C.Low->getValue().print(dbgs(), true);
11775       if (C.Low != C.High) {
11776         dbgs() << '-';
11777         C.High->getValue().print(dbgs(), true);
11778       }
11779       dbgs() << ' ';
11780     }
11781     dbgs() << '\n';
11782   });
11783 
11784   assert(!Clusters.empty());
11785   SwitchWorkList WorkList;
11786   CaseClusterIt First = Clusters.begin();
11787   CaseClusterIt Last = Clusters.end() - 1;
11788   auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
11789   // Scale the branch probability for DefaultMBB if peeling occurred and
11790   // DefaultMBB is not replaced.
11791   if (PeeledCaseProb != BranchProbability::getZero() &&
11792       DefaultMBB == FuncInfo.MBBMap[SI.getDefaultDest()])
11793     DefaultProb = scaleCaseProbality(DefaultProb, PeeledCaseProb);
11794   WorkList.push_back(
11795       {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
11796 
11797   while (!WorkList.empty()) {
11798     SwitchWorkListItem W = WorkList.pop_back_val();
11799     unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
11800 
11801     if (NumClusters > 3 && TM.getOptLevel() != CodeGenOptLevel::None &&
11802         !DefaultMBB->getParent()->getFunction().hasMinSize()) {
11803       // For optimized builds, lower large range as a balanced binary tree.
11804       splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
11805       continue;
11806     }
11807 
11808     lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
11809   }
11810 }
11811 
11812 void SelectionDAGBuilder::visitStepVector(const CallInst &I) {
11813   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11814   auto DL = getCurSDLoc();
11815   EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11816   setValue(&I, DAG.getStepVector(DL, ResultVT));
11817 }
11818 
11819 void SelectionDAGBuilder::visitVectorReverse(const CallInst &I) {
11820   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11821   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11822 
11823   SDLoc DL = getCurSDLoc();
11824   SDValue V = getValue(I.getOperand(0));
11825   assert(VT == V.getValueType() && "Malformed vector.reverse!");
11826 
11827   if (VT.isScalableVector()) {
11828     setValue(&I, DAG.getNode(ISD::VECTOR_REVERSE, DL, VT, V));
11829     return;
11830   }
11831 
11832   // Use VECTOR_SHUFFLE for the fixed-length vector
11833   // to maintain existing behavior.
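        // For example, for a fixed-length <4 x i32> the shuffle mask is <3, 2, 1, 0>.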
11834   SmallVector<int, 8> Mask;
11835   unsigned NumElts = VT.getVectorMinNumElements();
11836   for (unsigned i = 0; i != NumElts; ++i)
11837     Mask.push_back(NumElts - 1 - i);
11838 
11839   setValue(&I, DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), Mask));
11840 }
11841 
11842 void SelectionDAGBuilder::visitVectorDeinterleave(const CallInst &I) {
11843   auto DL = getCurSDLoc();
11844   SDValue InVec = getValue(I.getOperand(0));
11845   EVT OutVT =
11846       InVec.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
11847 
11848   unsigned OutNumElts = OutVT.getVectorMinNumElements();
11849 
11850   // The ISD node needs the input vector split into two equal halves.
11851   SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec,
11852                            DAG.getVectorIdxConstant(0, DL));
11853   SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec,
11854                            DAG.getVectorIdxConstant(OutNumElts, DL));
11855 
11856   // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing
11857   // legalisation and combines.
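        // For example (illustrative), a fixed-length input <a, b, c, d> is split into
        // Lo = <a, b> and Hi = <c, d>, giving Even = <a, c> and Odd = <b, d>.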
11858   if (OutVT.isFixedLengthVector()) {
11859     SDValue Even = DAG.getVectorShuffle(OutVT, DL, Lo, Hi,
11860                                         createStrideMask(0, 2, OutNumElts));
11861     SDValue Odd = DAG.getVectorShuffle(OutVT, DL, Lo, Hi,
11862                                        createStrideMask(1, 2, OutNumElts));
11863     SDValue Res = DAG.getMergeValues({Even, Odd}, getCurSDLoc());
11864     setValue(&I, Res);
11865     return;
11866   }
11867 
11868   SDValue Res = DAG.getNode(ISD::VECTOR_DEINTERLEAVE, DL,
11869                             DAG.getVTList(OutVT, OutVT), Lo, Hi);
11870   setValue(&I, Res);
11871 }
11872 
11873 void SelectionDAGBuilder::visitVectorInterleave(const CallInst &I) {
11874   auto DL = getCurSDLoc();
11875   EVT InVT = getValue(I.getOperand(0)).getValueType();
11876   SDValue InVec0 = getValue(I.getOperand(0));
11877   SDValue InVec1 = getValue(I.getOperand(1));
11878   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11879   EVT OutVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11880 
11881   // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing
11882   // legalisation and combines.
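        // For example (illustrative), fixed-length inputs <a, b> and <c, d> are
        // concatenated to <a, b, c, d> and shuffled to the interleaved <a, c, b, d>.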
11883   if (OutVT.isFixedLengthVector()) {
11884     unsigned NumElts = InVT.getVectorMinNumElements();
11885     SDValue V = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, InVec0, InVec1);
11886     setValue(&I, DAG.getVectorShuffle(OutVT, DL, V, DAG.getUNDEF(OutVT),
11887                                       createInterleaveMask(NumElts, 2)));
11888     return;
11889   }
11890 
11891   SDValue Res = DAG.getNode(ISD::VECTOR_INTERLEAVE, DL,
11892                             DAG.getVTList(InVT, InVT), InVec0, InVec1);
11893   Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Res.getValue(0),
11894                     Res.getValue(1));
11895   setValue(&I, Res);
11896 }
11897 
11898 void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
11899   SmallVector<EVT, 4> ValueVTs;
11900   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
11901                   ValueVTs);
11902   unsigned NumValues = ValueVTs.size();
11903   if (NumValues == 0) return;
11904 
11905   SmallVector<SDValue, 4> Values(NumValues);
11906   SDValue Op = getValue(I.getOperand(0));
11907 
11908   for (unsigned i = 0; i != NumValues; ++i)
11909     Values[i] = DAG.getNode(ISD::FREEZE, getCurSDLoc(), ValueVTs[i],
11910                             SDValue(Op.getNode(), Op.getResNo() + i));
11911 
11912   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
11913                            DAG.getVTList(ValueVTs), Values));
11914 }
11915 
11916 void SelectionDAGBuilder::visitVectorSplice(const CallInst &I) {
11917   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11918   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11919 
11920   SDLoc DL = getCurSDLoc();
11921   SDValue V1 = getValue(I.getOperand(0));
11922   SDValue V2 = getValue(I.getOperand(1));
11923   int64_t Imm = cast<ConstantInt>(I.getOperand(2))->getSExtValue();
11924 
11925   // VECTOR_SHUFFLE doesn't support a scalable mask so use a dedicated node.
11926   if (VT.isScalableVector()) {
11927     MVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
11928     setValue(&I, DAG.getNode(ISD::VECTOR_SPLICE, DL, VT, V1, V2,
11929                              DAG.getConstant(Imm, DL, IdxVT)));
11930     return;
11931   }
11932 
11933   unsigned NumElts = VT.getVectorNumElements();
11934 
11935   uint64_t Idx = (NumElts + Imm) % NumElts;
11936 
11937   // Use VECTOR_SHUFFLE to maintain original behaviour for fixed-length vectors.
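        // For example (illustrative), for <4 x i32> with Imm = 1 the mask is
        // <1, 2, 3, 4>; with Imm = -1, Idx is 3 and the mask is <3, 4, 5, 6>, where
        // indices >= NumElts select elements from V2.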
11938   SmallVector<int, 8> Mask;
11939   for (unsigned i = 0; i < NumElts; ++i)
11940     Mask.push_back(Idx + i);
11941   setValue(&I, DAG.getVectorShuffle(VT, DL, V1, V2, Mask));
11942 }
11943 
11944 // Consider the following MIR after SelectionDAG, which produces output in
11945 // phyregs in the first case or virtregs in the second case.
11946 //
11947 // INLINEASM_BR ..., implicit-def $ebx, ..., implicit-def $edx
11948 // %5:gr32 = COPY $ebx
11949 // %6:gr32 = COPY $edx
11950 // %1:gr32 = COPY %6:gr32
11951 // %0:gr32 = COPY %5:gr32
11952 //
11953 // INLINEASM_BR ..., def %5:gr32, ..., def %6:gr32
11954 // %1:gr32 = COPY %6:gr32
11955 // %0:gr32 = COPY %5:gr32
11956 //
11957 // Given %0, we'd like to return $ebx in the first case and %5 in the second.
11958 // Given %1, we'd like to return $edx in the first case and %6 in the second.
11959 //
11960 // If a callbr has outputs, it will have a single mapping in FuncInfo.ValueMap
11961 // to a single virtreg (such as %0). The remaining outputs monotonically
11962 // increase in virtreg number from there. If a callbr has no outputs, then it
11963 // should not have a corresponding callbr landingpad; in fact, the callbr
11964 // landingpad would not even be able to refer to such a callbr.
11965 static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg) {
11966   MachineInstr *MI = MRI.def_begin(Reg)->getParent();
11967   // There is definitely at least one copy.
11968   assert(MI->getOpcode() == TargetOpcode::COPY &&
11969          "start of copy chain MUST be COPY");
11970   Reg = MI->getOperand(1).getReg();
11971   MI = MRI.def_begin(Reg)->getParent();
11972   // There may be an optional second copy.
11973   if (MI->getOpcode() == TargetOpcode::COPY) {
11974     assert(Reg.isVirtual() && "expected COPY of virtual register");
11975     Reg = MI->getOperand(1).getReg();
11976     assert(Reg.isPhysical() && "expected COPY of physical register");
11977     MI = MRI.def_begin(Reg)->getParent();
11978   }
11979   // The start of the chain must be an INLINEASM_BR.
11980   assert(MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
11981          "end of copy chain MUST be INLINEASM_BR");
11982   return Reg;
11983 }
11984 
11985 // We must do this walk rather than the simpler
11986 //   setValue(&I, getCopyFromRegs(CBR, CBR->getType()));
11987 // because otherwise we would end up with copies of virtregs that are only
11988 // valid along direct edges.
11989 void SelectionDAGBuilder::visitCallBrLandingPad(const CallInst &I) {
11990   SmallVector<EVT, 8> ResultVTs;
11991   SmallVector<SDValue, 8> ResultValues;
11992   const auto *CBR =
11993       cast<CallBrInst>(I.getParent()->getUniquePredecessor()->getTerminator());
11994 
11995   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11996   const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
11997   MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
11998 
11999   unsigned InitialDef = FuncInfo.ValueMap[CBR];
12000   SDValue Chain = DAG.getRoot();
12001 
12002   // Re-parse the asm constraints string.
12003   TargetLowering::AsmOperandInfoVector TargetConstraints =
12004       TLI.ParseConstraints(DAG.getDataLayout(), TRI, *CBR);
12005   for (auto &T : TargetConstraints) {
12006     SDISelAsmOperandInfo OpInfo(T);
12007     if (OpInfo.Type != InlineAsm::isOutput)
12008       continue;
12009 
12010     // Pencil in OpInfo.ConstraintType and OpInfo.ConstraintVT based on the
12011     // individual constraint.
12012     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
12013 
12014     switch (OpInfo.ConstraintType) {
12015     case TargetLowering::C_Register:
12016     case TargetLowering::C_RegisterClass: {
12017       // Fill in OpInfo.AssignedRegs.Regs.
12018       getRegistersForValue(DAG, getCurSDLoc(), OpInfo, OpInfo);
12019 
12020       // getRegistersForValue may produce 1 to many registers based on whether
12021       // the OpInfo.ConstraintVT is legal on the target or not.
12022       for (size_t i = 0, e = OpInfo.AssignedRegs.Regs.size(); i != e; ++i) {
12023         Register OriginalDef = FollowCopyChain(MRI, InitialDef++);
12024         if (Register::isPhysicalRegister(OriginalDef))
12025           FuncInfo.MBB->addLiveIn(OriginalDef);
12026         // Update the assigned registers to use the original defs.
12027         OpInfo.AssignedRegs.Regs[i] = OriginalDef;
12028       }
12029 
12030       SDValue V = OpInfo.AssignedRegs.getCopyFromRegs(
12031           DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, CBR);
12032       ResultValues.push_back(V);
12033       ResultVTs.push_back(OpInfo.ConstraintVT);
12034       break;
12035     }
12036     case TargetLowering::C_Other: {
12037       SDValue Flag;
12038       SDValue V = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
12039                                                   OpInfo, DAG);
12040       ++InitialDef;
12041       ResultValues.push_back(V);
12042       ResultVTs.push_back(OpInfo.ConstraintVT);
12043       break;
12044     }
12045     default:
12046       break;
12047     }
12048   }
12049   SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
12050                           DAG.getVTList(ResultVTs), ResultValues);
12051   setValue(&I, V);
12052 }
12053