//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/AssignmentTrackingAnalysis.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundleIterator.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/SwiftErrorValueTracking.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cstddef>
#include <iterator>
#include <limits>
#include <optional>
#include <tuple>

using namespace llvm;
using namespace PatternMatch;
using namespace SwitchCG;

#define DEBUG_TYPE "isel"

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<bool>
    InsertAssertAlign("insert-assert-align", cl::init(true),
                      cl::desc("Insert the experimental `assertalign` node."),
                      cl::ReallyHidden);

static cl::opt<unsigned, true>
    LimitFPPrecision("limit-float-precision",
                     cl::desc("Generate low-precision inline sequences "
                              "for some float libcalls"),
                     cl::location(LimitFloatPrecision), cl::Hidden,
                     cl::init(0));

static cl::opt<unsigned> SwitchPeelThreshold(
    "switch-peel-threshold", cl::Hidden, cl::init(66),
    cl::desc("Set the case probability threshold for peeling the case from a "
             "switch statement. A value greater than 100 will void this "
             "optimization"));
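
// For example (an illustrative sketch of the heuristic's intent, not an
// exhaustive description): with the default threshold of 66, a switch in
// which profile data shows one case taken roughly 70% of the time will have
// that case peeled off and tested first, before the remaining cases are
// lowered through the usual clustering strategies.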

// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
// %buffer = alloca [4096 x i8]
// %data = load [4096 x i8], ptr %argPtr
// store [4096 x i8] %data, ptr %buffer
static const unsigned MaxParallelChains = 64;

static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      std::optional<CallingConv::ID> CC);

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent.  If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
static SDValue
getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
                 unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V,
                 std::optional<CallingConv::ID> CC = std::nullopt,
                 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
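  // A worked example (an illustrative sketch, assuming a little-endian
  // target): an i64 value passed in two i32 parts takes the NumParts == 2
  // path below, where RoundParts == 2 and HalfVT == i32, and is reassembled
  // with a single node:
  //   Val = BUILD_PAIR i64, Parts[0], Parts[1]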
  // Let the target assemble the parts if it wants to
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (SDValue Val = TLI.joinRegisterPartsIntoValue(DAG, DL, Parts, NumParts,
                                                   PartVT, ValueVT, CC))
    return Val;

  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
                                  CC);

  assert(NumParts > 0 && "No parts to assemble!");
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
      unsigned RoundParts = llvm::bit_floor(NumParts);
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
                              PartVT, HalfVT, V);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
                              RoundParts / 2, PartVT, HalfVT, V);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

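      // A worked example of the trailing odd part (an illustrative sketch):
      // for an i96 value arriving in three i32 parts, RoundParts == 2, so
      // Val is currently the i64 built from Parts[0..1]; below, Parts[2]
      // becomes Hi, is any-extended to i96, shifted left by 64, and OR'd
      // with the zero-extended low half.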
      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
                              OddVT, V, CC);

        // Combine the round and odd parts.
        Lo = Val;
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                         DAG.getConstant(Lo.getValueSizeInBits(), DL,
                                         TLI.getShiftAmountTy(
                                             TotalVT, DAG.getDataLayout())));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC);
    }
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  // PartEVT is the type of the register class that holds the value.
  // ValueVT is the type of the inline asm operation.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
      ValueVT.bitsLT(PartEVT)) {
    // For an FP value in an integer part, we need to truncate to the right
    // width first.
    PartEVT = EVT::getIntegerVT(*DAG.getContext(),  ValueVT.getSizeInBits());
    Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
  }

  // Handle types that have the same size.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle types with different sizes.
  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits are known to be
      // zero or sign-extended.
      if (AssertOp)
        Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND's are always exact here.
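    // (An explanatory note: the constant-1 operand below is the flag that
    // records the rounding as known-exact, i.e. losing no information; see
    // the ISD::FP_ROUND documentation for the authoritative semantics.)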
    if (ValueVT.bitsLT(Val.getValueType()))
      return DAG.getNode(
          ISD::FP_ROUND, DL, ValueVT, Val,
          DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  // Handle MMX to a narrower integer type by bitcasting MMX to integer and
  // then truncating.
  if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
      ValueVT.bitsLT(PartEVT)) {
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
    return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  report_fatal_error("Unknown mismatch in getCopyFromParts!");
}

static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  // Report without instruction context if V is null or not an instruction;
  // the dyn_cast result must be checked before it is used below.
  if (!I)
    return Ctx.emitError(ErrMsg);

  const char *AsmError = ", possible invalid constraint for vector type";
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (CI->isInlineAsm())
      return Ctx.emitError(I, ErrMsg + AsmError);

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent.  If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      std::optional<CallingConv::ID> CallConv) {
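  // For example (an illustrative sketch): a <4 x float> value passed in two
  // <2 x float> registers has NumIntermediates == NumParts == 2 in the
  // multi-part path below, and the two intermediate operands are recombined
  // with a single CONCAT_VECTORS node into the final <4 x float>.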
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const bool IsABIRegCopy = CallConv.has_value();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs;

    if (IsABIRegCopy) {
      NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
          *DAG.getContext(), *CallConv, ValueVT, IntermediateVT,
          NumIntermediates, RegisterVT);
    } else {
      NumRegs =
          TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                     NumIntermediates, RegisterVT);
    }

    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
           Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
                                  PartVT, IntermediateVT, V, CallConv);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
                                  PartVT, IntermediateVT, V, CallConv);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    EVT BuiltVectorTy =
        IntermediateVT.isVector()
            ? EVT::getVectorVT(
                  *DAG.getContext(), IntermediateVT.getScalarType(),
                  IntermediateVT.getVectorElementCount() * NumParts)
            : EVT::getVectorVT(*DAG.getContext(),
                               IntermediateVT.getScalarType(),
                               NumIntermediates);
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, BuiltVectorTy, Ops);
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    // If the parts vector has more elements than the value vector, then we
    // have a vector widening case (e.g. <2 x float> -> <4 x float>).
    // Extract the elements we want.
    if (PartEVT.getVectorElementCount() != ValueVT.getVectorElementCount()) {
      assert((PartEVT.getVectorElementCount().getKnownMinValue() >
              ValueVT.getVectorElementCount().getKnownMinValue()) &&
             (PartEVT.getVectorElementCount().isScalable() ==
              ValueVT.getVectorElementCount().isScalable()) &&
             "Cannot narrow, it would be a lossy transformation");
      PartEVT =
          EVT::getVectorVT(*DAG.getContext(), PartEVT.getVectorElementType(),
                           ValueVT.getVectorElementCount());
      Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, PartEVT, Val,
                        DAG.getVectorIdxConstant(0, DL));
      if (PartEVT == ValueVT)
        return Val;
      if (PartEVT.isInteger() && ValueVT.isFloatingPoint())
        return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

      // Vector/Vector bitcast (e.g. <2 x bfloat> -> <2 x half>).
      if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
        return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    }

    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  if (ValueVT.getVectorNumElements() != 1) {
    // Certain ABIs require that vectors are passed as integers. When the
    // vector and the integer part are the same size, this is an obvious
    // bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    } else if (ValueVT.bitsLT(PartEVT)) {
      const uint64_t ValueSize = ValueVT.getFixedSizeInBits();
      EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
      // Drop the extra bits.
      Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
      return DAG.getBitcast(ValueVT, Val);
    }

    diagnosePossiblyInvalidConstraint(
        *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
    return DAG.getUNDEF(ValueVT);
  }

  // Handle cases such as i8 -> <1 x i1>
  EVT ValueSVT = ValueVT.getVectorElementType();
  if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT) {
    unsigned ValueSize = ValueSVT.getSizeInBits();
    if (ValueSize == PartEVT.getSizeInBits()) {
      Val = DAG.getNode(ISD::BITCAST, DL, ValueSVT, Val);
    } else if (ValueSVT.isFloatingPoint() && PartEVT.isInteger()) {
      // It's possible a scalar floating point type gets softened to integer and
      // then promoted to a larger integer. If PartEVT is the larger integer
      // we need to truncate it and then bitcast to the FP type.
      assert(ValueSVT.bitsLT(PartEVT) && "Unexpected types");
      EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
      Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
      Val = DAG.getBitcast(ValueSVT, Val);
    } else {
      Val = ValueVT.isFloatingPoint()
                ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
                : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
    }
  }

  return DAG.getBuildVector(ValueVT, DL, Val);
}

static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 std::optional<CallingConv::ID> CallConv);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts.  If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void
getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
               unsigned NumParts, MVT PartVT, const Value *V,
               std::optional<CallingConv::ID> CallConv = std::nullopt,
               ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
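  // For example (an illustrative sketch): copying an i1 value into a single
  // i8 part with ExtendKind == ISD::ZERO_EXTEND emits a ZERO_EXTEND so the
  // seven spare high bits are defined zeros; with the default
  // ISD::ANY_EXTEND their contents are unspecified.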
  // Let the target split the parts if it wants to
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.splitValueIntoRegisterParts(DAG, DL, Val, Parts, NumParts, PartVT,
                                      CallConv))
    return;
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
                                CallConv);

  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  unsigned PartBits = PartVT.getSizeInBits();
  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      if (ValueVT.isFloatingPoint()) {
        // FP values need to be bitcast, then extended if they are being put
        // into a larger container.
        ValueVT = EVT::getIntegerVT(*DAG.getContext(),  ValueVT.getSizeInBits());
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      }
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT) {
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2.  Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = llvm::bit_floor(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
      DAG.getShiftAmountConstant(RoundBits, ValueVT, DL));

    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
                   CallConv);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2.  Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
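  // For example (an illustrative sketch): an i128 value destined for four
  // i32 parts is split into two i64 halves, then each half into two i32
  // quarters, filling Parts[0..3] lowest-bits-first; the reversal at the
  // bottom then fixes the order for big-endian targets.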
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}

static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val,
                                     const SDLoc &DL, EVT PartVT) {
  if (!PartVT.isVector())
    return SDValue();

  EVT ValueVT = Val.getValueType();
  EVT PartEVT = PartVT.getVectorElementType();
  EVT ValueEVT = ValueVT.getVectorElementType();
  ElementCount PartNumElts = PartVT.getVectorElementCount();
  ElementCount ValueNumElts = ValueVT.getVectorElementCount();

  // We only support widening vectors with equivalent element types and
  // fixed/scalable properties. If a target needs to widen a fixed-length type
  // to a scalable one, it should be possible to use INSERT_SUBVECTOR below.
  if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
      PartNumElts.isScalable() != ValueNumElts.isScalable())
    return SDValue();

  // Handle bf16 specially, since some targets share the fp16 ABI with it.
  if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
    assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
           "Cannot widen to illegal type");
    Val = DAG.getNode(ISD::BITCAST, DL,
                      ValueVT.changeVectorElementType(MVT::f16), Val);
  } else if (PartEVT != ValueEVT) {
    return SDValue();
  }

  // Widening a scalable vector to another scalable vector is done by inserting
  // the vector into a larger undef one.
  if (PartNumElts.isScalable())
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
                       Val, DAG.getVectorIdxConstant(0, DL));

  // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
  // undef elements.
  SmallVector<SDValue, 16> Ops;
  DAG.ExtractVectorElements(Val, Ops);
  SDValue EltUndef = DAG.getUNDEF(PartEVT);
  Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);

  // FIXME: Use CONCAT for 2x -> 4x.
  return DAG.getBuildVector(PartVT, DL, Ops);
}
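
// A usage sketch of the helper above: widening a <2 x float> value to a
// v4f32 part type returns a BUILD_VECTOR of the two source elements plus
// two undef elements, while an empty SDValue() signals to callers that no
// sensible widening exists (e.g. mismatched element types, or mixing fixed
// and scalable vectors).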

/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 std::optional<CallingConv::ID> CallConv) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const bool IsABIRegCopy = CallConv.has_value();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
      Val = Widened;
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                   ValueVT.getVectorElementType()) &&
               PartEVT.getVectorElementCount() ==
                   ValueVT.getVectorElementCount()) {

      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else if (PartEVT.isVector() &&
               PartEVT.getVectorElementType() !=
                   ValueVT.getVectorElementType() &&
               TLI.getTypeAction(*DAG.getContext(), ValueVT) ==
                   TargetLowering::TypeWidenVector) {
      // Combination of widening and promotion.
      EVT WidenVT =
          EVT::getVectorVT(*DAG.getContext(), ValueVT.getVectorElementType(),
                           PartVT.getVectorElementCount());
      SDValue Widened = widenVectorToPartType(DAG, Val, DL, WidenVT);
      Val = DAG.getAnyExtOrTrunc(Widened, DL, PartVT);
    } else {
      // Don't extract an integer from a float vector. This can happen if the
      // FP type gets softened to integer and then promoted. The promotion
      // prevents it from being picked up by the earlier bitcast case.
      if (ValueVT.getVectorElementCount().isScalar() &&
          (!ValueVT.isFloatingPoint() || !PartVT.isInteger())) {
        Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
                          DAG.getVectorIdxConstant(0, DL));
      } else {
        uint64_t ValueSize = ValueVT.getFixedSizeInBits();
        assert(PartVT.getFixedSizeInBits() > ValueSize &&
               "lossy conversion of vector to scalar type");
        EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
        Val = DAG.getBitcast(IntermediateType, Val);
        Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
      }
    }

    assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs;
  if (IsABIRegCopy) {
    NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
        *DAG.getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
        RegisterVT);
  } else {
    NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
  }

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  assert(IntermediateVT.isScalableVector() == ValueVT.isScalableVector() &&
         "Mixing scalable and fixed vectors when copying in parts");

  std::optional<ElementCount> DestEltCnt;

  if (IntermediateVT.isVector())
    DestEltCnt = IntermediateVT.getVectorElementCount() * NumIntermediates;
  else
    DestEltCnt = ElementCount::getFixed(NumIntermediates);

  EVT BuiltVectorTy = EVT::getVectorVT(
      *DAG.getContext(), IntermediateVT.getScalarType(), *DestEltCnt);

  if (ValueVT == BuiltVectorTy) {
    // Nothing to do.
  } else if (ValueVT.getSizeInBits() == BuiltVectorTy.getSizeInBits()) {
    // Bitconvert vector->vector case.
    Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
  } else {
    if (BuiltVectorTy.getVectorElementType().bitsGT(
            ValueVT.getVectorElementType())) {
      // Integer promotion.
      ValueVT = EVT::getVectorVT(*DAG.getContext(),
                                 BuiltVectorTy.getVectorElementType(),
                                 ValueVT.getVectorElementCount());
      Val = DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
    }

    if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy)) {
      Val = Widened;
    }
  }

  assert(Val.getValueType() == BuiltVectorTy && "Unexpected vector value type");

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector()) {
      // This does something sensible for scalable vectors - see the
      // definition of EXTRACT_SUBVECTOR for further details.
      unsigned IntermediateNumElts = IntermediateVT.getVectorMinNumElements();
      Ops[i] =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                      DAG.getVectorIdxConstant(i * IntermediateNumElts, DL));
    } else {
      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
                           DAG.getVectorIdxConstant(i, DL));
    }
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each value into
    // legal parts.
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
                     CallConv);
  }
}

RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
                           EVT valuevt, std::optional<CallingConv::ID> CC)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
      RegCount(1, regs.size()), CallConv(CC) {}

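// For example (an illustrative sketch): on a target with 32-bit registers,
// Ty == i64 gives ValueVTs = {i64}, RegisterVT == i32 and NumRegs == 2, so
// the constructor below records two consecutive virtual registers in Regs
// and an entry of 2 in RegCount for the single value.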
RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, unsigned Reg, Type *Ty,
                           std::optional<CallingConv::ID> CC) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  CallConv = CC;

  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs =
        isABIMangled()
            ? TLI.getNumRegistersForCallingConv(Context, *CC, ValueVT)
            : TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT =
        isABIMangled()
            ? TLI.getRegisterTypeForCallingConv(Context, *CC, ValueVT)
            : TLI.getRegisterType(Context, ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    RegCount.push_back(NumRegs);
    Reg += NumRegs;
  }
}

SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      const SDLoc &dl, SDValue &Chain,
                                      SDValue *Glue, const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = RegCount[Value];
    MVT RegisterVT = isABIMangled()
                         ? TLI.getRegisterTypeForCallingConv(
                               *DAG.getContext(), *CallConv, RegVTs[Value])
                         : RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Glue) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Glue);
        *Glue = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!Register::isVirtualRegister(Regs[Part + i]) ||
          !RegisterVT.isInteger())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
        FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getScalarSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();

      if (NumZeroBits == RegSize) {
        // The current value is a zero.
        // Emit it as an explicit constant so that it is easier for
        // subsequent optimizations to kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent.  For
      // now, just use the tightest assertzext/assertsext possible.
      bool isSExt;
      EVT FromVT(MVT::Other);
      if (NumZeroBits) {
        FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
        isSExt = false;
      } else if (NumSignBits > 1) {
        FromVT =
            EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
        isSExt = true;
      } else {
        continue;
      }
      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
                                     RegisterVT, ValueVT, V, CallConv);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 const SDLoc &dl, SDValue &Chain, SDValue *Glue,
                                 const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumParts = RegCount[Value];

    MVT RegisterVT = isABIMangled()
                         ? TLI.getRegisterTypeForCallingConv(
                               *DAG.getContext(), *CallConv, RegVTs[Value])
                         : RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
                   NumParts, RegisterVT, V, CallConv, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Glue) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Glue);
      *Glue = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Glue)
    // If NumRegs > 1 && Glue is used then the use of the last CopyToReg is
    // flagged to it. That is the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    // c1, f1 = CopyToReg
    // c2, f2 = CopyToReg
    // c3     = TokenFactor c1, c2
    // ...
    //        = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

void RegsForValue::AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching,
                                        unsigned MatchingIdx, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  InlineAsm::Flag Flag(Code, Regs.size());
  if (HasMatching)
    Flag.setMatchingOp(MatchingIdx);
  else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word.  That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag.setRegClass(RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  if (Code == InlineAsm::Kind::Clobber) {
    // Clobbers should always have a 1:1 mapping with registers, and may
    // reference registers that have illegal (e.g. vector) types. Hence, we
    // shouldn't try to apply any sort of splitting logic to them.
    assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
           "No 1:1 mapping from clobbers to regs?");
    Register SP = TLI.getStackPointerRegisterToSaveRestore();
    (void)SP;
    for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
      Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
      assert(
          (Regs[I] != SP ||
           DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
          "If we clobbered the stack pointer, MFI should know about it.");
    }
    return;
  }

  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    MVT RegisterVT = RegVTs[Value];
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value],
                                           RegisterVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
    }
  }
}

SmallVector<std::pair<unsigned, TypeSize>, 4>
RegsForValue::getRegsAndSizes() const {
  SmallVector<std::pair<unsigned, TypeSize>, 4> OutVec;
  unsigned I = 0;
  for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
    unsigned RegCount = std::get<0>(CountAndVT);
    MVT RegisterVT = std::get<1>(CountAndVT);
    TypeSize RegisterSize = RegisterVT.getSizeInBits();
    for (unsigned E = I + RegCount; I != E; ++I)
      OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
  }
  return OutVec;
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
                               AssumptionCache *ac,
                               const TargetLibraryInfo *li) {
  AA = aa;
  AC = ac;
  GFI = gfi;
  LibInfo = li;
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
  SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
  AssignmentTrackingEnabled = isAssignmentTrackingEnabled(
      *DAG.getMachineFunction().getFunction().getParent());
}

void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

// Update DAG root to include dependencies on Pending chains.
SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
  SDValue Root = DAG.getRoot();

  if (Pending.empty())
    return Root;

  // Add the current root to Pending, unless we already indirectly
  // depend on it.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = Pending.size();
    for (; i != e; ++i) {
      assert(Pending[i].getNode()->getNumOperands() > 1);
      if (Pending[i].getNode()->getOperand(0) == Root)
        break;  // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      Pending.push_back(Root);
  }

  if (Pending.size() == 1)
    Root = Pending[0];
  else
    Root = DAG.getTokenFactor(getCurSDLoc(), Pending);

  DAG.setRoot(Root);
  Pending.clear();
  return Root;
}

SDValue SelectionDAGBuilder::getMemoryRoot() {
  return updateRoot(PendingLoads);
}

SDValue SelectionDAGBuilder::getRoot() {
  // Chain up all pending constrained intrinsics together with all
  // pending loads, by simply appending them to PendingLoads and
  // then calling getMemoryRoot().
  PendingLoads.reserve(PendingLoads.size() +
                       PendingConstrainedFP.size() +
                       PendingConstrainedFPStrict.size());
  PendingLoads.append(PendingConstrainedFP.begin(),
                      PendingConstrainedFP.end());
  PendingLoads.append(PendingConstrainedFPStrict.begin(),
                      PendingConstrainedFPStrict.end());
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  return getMemoryRoot();
}

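// A sketch of how the three roots differ: getMemoryRoot() orders a new node
// only after pending loads; getRoot() (above) additionally orders it after
// all pending constrained-FP nodes; getControlRoot() (below) also flushes
// PendingExports, including the fpexcept.strict nodes that must not be
// reordered across control flow.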
SDValue SelectionDAGBuilder::getControlRoot() {
  // We need to emit pending fpexcept.strict constrained intrinsics,
  // so append them to the PendingExports list.
  PendingExports.append(PendingConstrainedFPStrict.begin(),
                        PendingConstrainedFPStrict.end());
  PendingConstrainedFPStrict.clear();
  return updateRoot(PendingExports);
}

void SelectionDAGBuilder::visit(const Instruction &I) {
  // Set up outgoing PHI node register values before emitting the terminator.
  if (I.isTerminator()) {
    HandlePHINodesInSuccessorBlocks(I.getParent());
  }

  // Add SDDbgValue nodes for any var locs here. Do so before updating
  // SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}.
  if (FunctionVarLocs const *FnVarLocs = DAG.getFunctionVarLocs()) {
    for (auto It = FnVarLocs->locs_begin(&I), End = FnVarLocs->locs_end(&I);
         It != End; ++It) {
      auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
      dropDanglingDebugInfo(Var, It->Expr);
      if (It->Values.isKillLocation(It->Expr)) {
        handleKillDebugValue(Var, It->Expr, It->DL, SDNodeOrder);
        continue;
      }
      SmallVector<Value *> Values(It->Values.location_ops());
      if (!handleDebugValue(Values, Var, It->Expr, It->DL, SDNodeOrder,
                            It->Values.hasArgList()))
        addDanglingDebugInfo(It, SDNodeOrder);
    }
  }

  // Increase the SDNodeOrder if dealing with a non-debug instruction.
  if (!isa<DbgInfoIntrinsic>(I))
    ++SDNodeOrder;

  CurInst = &I;

  // Set inserted listener only if required.
  bool NodeInserted = false;
  std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
  MDNode *PCSectionsMD = I.getMetadata(LLVMContext::MD_pcsections);
  if (PCSectionsMD) {
    InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
        DAG, [&](SDNode *) { NodeInserted = true; });
  }

  visit(I.getOpcode(), I);

  if (!I.isTerminator() && !HasTailCall &&
      !isa<GCStatepointInst>(I)) // statepoints handle their exports internally
    CopyToExportRegsIfNeeded(&I);

  // Handle metadata.
  if (PCSectionsMD) {
    auto It = NodeMap.find(&I);
    if (It != NodeMap.end()) {
      DAG.addPCSections(It->second.getNode(), PCSectionsMD);
    } else if (NodeInserted) {
      // This should not happen; if it does, don't let it go unnoticed so we can
      // fix it. Relevant visit*() function is probably missing a setValue().
      errs() << "warning: losing !pcsections metadata ["
             << I.getModule()->getName() << "]\n";
      LLVM_DEBUG(I.dump());
      assert(false);
    }
  }

  CurInst = nullptr;
}

void SelectionDAGBuilder::visitPHI(const PHINode &) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}

void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: llvm_unreachable("Unknown instruction type encountered!");
    // Build the switch statement using the Instruction.def file.
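    // An illustrative expansion of one such case (a sketch; the actual list
    // is generated from Instruction.def):
    //   case Instruction::Add: visitAdd((const BinaryOperator &)I); break;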
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/IR/Instruction.def"
  }
}
1230 
1231 static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG,
1232                                             DILocalVariable *Variable,
1233                                             DebugLoc DL, unsigned Order,
1234                                             RawLocationWrapper Values,
1235                                             DIExpression *Expression) {
1236   if (!Values.hasArgList())
1237     return false;
1238   // For variadic dbg_values we will now insert an undef.
1239   // FIXME: We can potentially recover these!
1240   SmallVector<SDDbgOperand, 2> Locs;
1241   for (const Value *V : Values.location_ops()) {
1242     auto *Undef = UndefValue::get(V->getType());
1243     Locs.push_back(SDDbgOperand::fromConst(Undef));
1244   }
1245   SDDbgValue *SDV = DAG.getDbgValueList(Variable, Expression, Locs, {},
1246                                         /*IsIndirect=*/false, DL, Order,
1247                                         /*IsVariadic=*/true);
1248   DAG.AddDbgValue(SDV, /*isParameter=*/false);
1249   return true;
1250 }
1251 
1252 void SelectionDAGBuilder::addDanglingDebugInfo(const VarLocInfo *VarLoc,
1253                                                unsigned Order) {
1254   if (!handleDanglingVariadicDebugInfo(
1255           DAG,
1256           const_cast<DILocalVariable *>(DAG.getFunctionVarLocs()
1257                                             ->getVariable(VarLoc->VariableID)
1258                                             .getVariable()),
1259           VarLoc->DL, Order, VarLoc->Values, VarLoc->Expr)) {
1260     DanglingDebugInfoMap[VarLoc->Values.getVariableLocationOp(0)].emplace_back(
1261         VarLoc, Order);
1262   }
1263 }
1264 
1265 void SelectionDAGBuilder::addDanglingDebugInfo(const DbgValueInst *DI,
1266                                                unsigned Order) {
1267   // We treat variadic dbg_values differently at this stage.
1268   if (!handleDanglingVariadicDebugInfo(
1269           DAG, DI->getVariable(), DI->getDebugLoc(), Order,
1270           DI->getWrappedLocation(), DI->getExpression())) {
1271     // TODO: Dangling debug info will eventually either be resolved or produce
1272     // an Undef DBG_VALUE. However in the resolution case, a gap may appear
1273     // between the original dbg.value location and its resolved DBG_VALUE,
1274     // which we should ideally fill with an extra Undef DBG_VALUE.
1275     assert(DI->getNumVariableLocationOps() == 1 &&
1276            "DbgValueInst without an ArgList should have a single location "
1277            "operand.");
1278     DanglingDebugInfoMap[DI->getValue(0)].emplace_back(DI, Order);
1279   }
1280 }
1281 
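     /// Drop any dangling debug info queued for \p Variable whose expression
     /// fragment overlaps \p Expr, giving each entry a final chance to be
     /// salvaged before it is erased.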
1282 void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
1283                                                 const DIExpression *Expr) {
1284   auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1285     DIVariable *DanglingVariable = DDI.getVariable(DAG.getFunctionVarLocs());
1286     DIExpression *DanglingExpr = DDI.getExpression();
1287     if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
1288       LLVM_DEBUG(dbgs() << "Dropping dangling debug info for " << printDDI(DDI)
1289                         << "\n");
1290       return true;
1291     }
1292     return false;
1293   };
1294 
1295   for (auto &DDIMI : DanglingDebugInfoMap) {
1296     DanglingDebugInfoVector &DDIV = DDIMI.second;
1297 
1298     // If debug info is to be dropped, run it through final checks to see
1299     // whether it can be salvaged.
1300     for (auto &DDI : DDIV)
1301       if (isMatchingDbgValue(DDI))
1302         salvageUnresolvedDbgValue(DDI);
1303 
1304     erase_if(DDIV, isMatchingDbgValue);
1305   }
1306 }
1307 
1308 // resolveDanglingDebugInfo - If we saw an earlier dbg_value referring to V,
1309 // generate the debug data structures now that we've seen its definition.
1310 void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
1311                                                    SDValue Val) {
1312   auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1313   if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1314     return;
1315 
1316   DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1317   for (auto &DDI : DDIV) {
1318     DebugLoc DL = DDI.getDebugLoc();
1319     unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
1320     unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1321     DILocalVariable *Variable = DDI.getVariable(DAG.getFunctionVarLocs());
1322     DIExpression *Expr = DDI.getExpression();
1323     assert(Variable->isValidLocationForIntrinsic(DL) &&
1324            "Expected inlined-at fields to agree");
1325     SDDbgValue *SDV;
1326     if (Val.getNode()) {
1327       // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
1328       // FuncArgumentDbgValue (it would be hoisted to the function entry, and if
1329       // we couldn't resolve it directly when examining the DbgValue intrinsic
1330       // in the first place we should not be more successful here). Unless we
1331     // have some test case that proves this to be correct, we should avoid
1332       // calling EmitFuncArgumentDbgValue here.
1333       if (!EmitFuncArgumentDbgValue(V, Variable, Expr, DL,
1334                                     FuncArgumentDbgValueKind::Value, Val)) {
1335         LLVM_DEBUG(dbgs() << "Resolve dangling debug info for " << printDDI(DDI)
1336                           << "\n");
1337         LLVM_DEBUG(dbgs() << "  By mapping to:\n    "; Val.dump());
1338         // Increase the SDNodeOrder for the DbgValue here to make sure it is
1339         // inserted after the definition of Val when emitting the instructions
1340         // after ISel. An alternative could be to teach
1341         // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
1342         LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
1343                    << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
1344                    << ValSDNodeOrder << "\n");
1345         SDV = getDbgValue(Val, Variable, Expr, DL,
1346                           std::max(DbgSDNodeOrder, ValSDNodeOrder));
1347         DAG.AddDbgValue(SDV, false);
1348       } else
1349         LLVM_DEBUG(dbgs() << "Resolved dangling debug info for "
1350                           << printDDI(DDI) << " in EmitFuncArgumentDbgValue\n");
1351     } else {
1352       LLVM_DEBUG(dbgs() << "Dropping debug info for " << printDDI(DDI) << "\n");
1353       auto Undef = UndefValue::get(V->getType());
1354       auto SDV =
1355           DAG.getConstantDbgValue(Variable, Expr, Undef, DL, DbgSDNodeOrder);
1356       DAG.AddDbgValue(SDV, false);
1357     }
1358   }
1359   DDIV.clear();
1360 }
1361 
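     /// Make a last attempt to encode a dangling dbg_value. Try the value as-is
     /// first, then repeatedly salvage back through the instruction that computes
     /// it; if no salvaged form can be encoded either, emit an undef DBG_VALUE to
     /// terminate any earlier location of the variable.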
1362 void SelectionDAGBuilder::salvageUnresolvedDbgValue(DanglingDebugInfo &DDI) {
1363   // TODO: For the variadic implementation, instead of only checking the fail
1364   // state of `handleDebugValue`, we need to know specifically which values were
1365   // invalid, so that we attempt to salvage only those values when processing
1366   // a DIArgList.
1367   Value *V = DDI.getVariableLocationOp(0);
1368   Value *OrigV = V;
1369   DILocalVariable *Var = DDI.getVariable(DAG.getFunctionVarLocs());
1370   DIExpression *Expr = DDI.getExpression();
1371   DebugLoc DL = DDI.getDebugLoc();
1372   unsigned SDOrder = DDI.getSDNodeOrder();
1373 
1374   // Currently we consider only dbg.value intrinsics -- we tell the salvager
1375   // that DW_OP_stack_value is desired.
1376   bool StackValue = true;
1377 
1378   // Can this Value be encoded without any further work?
1379   if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false))
1380     return;
1381 
1382   // Attempt to salvage back through as many instructions as possible. Bail if
1383   // a non-instruction is seen, such as a constant expression or global
1384   // variable. FIXME: Further work could recover those too.
1385   while (isa<Instruction>(V)) {
1386     Instruction &VAsInst = *cast<Instruction>(V);
1387     // Temporary "0", awaiting real implementation.
1388     SmallVector<uint64_t, 16> Ops;
1389     SmallVector<Value *, 4> AdditionalValues;
1390     V = salvageDebugInfoImpl(VAsInst, Expr->getNumLocationOperands(), Ops,
1391                              AdditionalValues);
1392     // If we cannot salvage any further, and haven't yet found a suitable debug
1393     // expression, bail out.
1394     if (!V)
1395       break;
1396 
1397     // TODO: If AdditionalValues isn't empty, then the salvage can only be
1398     // represented with a DBG_VALUE_LIST, so we give up. When we have support
1399     // here for variadic dbg_values, remove that condition.
1400     if (!AdditionalValues.empty())
1401       break;
1402 
1403     // New value and expr now represent this debug info.
1404     Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, StackValue);
1405 
1406     // Some kind of simplification occurred: check whether the operand of the
1407     // salvaged debug expression can be encoded in this DAG.
1408     if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false)) {
1409       LLVM_DEBUG(
1410           dbgs() << "Salvaged debug location info for:\n  " << *Var << "\n"
1411                  << *OrigV << "\nBy stripping back to:\n  " << *V << "\n");
1412       return;
1413     }
1414   }
1415 
1416   // This was the final opportunity to salvage this debug information, and it
1417   // couldn't be done. Place an undef DBG_VALUE at this location to terminate
1418   // any earlier variable location.
1419   assert(OrigV && "V shouldn't be null");
1420   auto *Undef = UndefValue::get(OrigV->getType());
1421   auto *SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
1422   DAG.AddDbgValue(SDV, false);
1423   LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n  " << printDDI(DDI)
1424                     << "\n");
1425 }
1426 
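     /// Lower a dbg_value that kills a variable's location: the expression is
     /// converted to an undef expression and paired with a poison location
     /// operand.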
1427 void SelectionDAGBuilder::handleKillDebugValue(DILocalVariable *Var,
1428                                                DIExpression *Expr,
1429                                                DebugLoc DbgLoc,
1430                                                unsigned Order) {
1431   Value *Poison = PoisonValue::get(Type::getInt1Ty(*Context));
1432   DIExpression *NewExpr =
1433       const_cast<DIExpression *>(DIExpression::convertToUndefExpression(Expr));
1434   handleDebugValue(Poison, Var, NewExpr, DbgLoc, Order,
1435                    /*IsVariadic*/ false);
1436 }
1437 
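     /// Try to encode each value in \p Values as an SDDbgOperand (constant, frame
     /// index, SDNode result, or virtual register) and emit an SDDbgValue covering
     /// all of them; a value spread over several registers is emitted as one
     /// fragment per register. Returns false if some value cannot be encoded yet,
     /// in which case the caller should leave the debug info dangling.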
1438 bool SelectionDAGBuilder::handleDebugValue(ArrayRef<const Value *> Values,
1439                                            DILocalVariable *Var,
1440                                            DIExpression *Expr, DebugLoc DbgLoc,
1441                                            unsigned Order, bool IsVariadic) {
1442   if (Values.empty())
1443     return true;
1444   SmallVector<SDDbgOperand> LocationOps;
1445   SmallVector<SDNode *> Dependencies;
1446   for (const Value *V : Values) {
1447     // Constant value.
1448     if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
1449         isa<ConstantPointerNull>(V)) {
1450       LocationOps.emplace_back(SDDbgOperand::fromConst(V));
1451       continue;
1452     }
1453 
1454     // Look through IntToPtr constants.
1455     if (auto *CE = dyn_cast<ConstantExpr>(V))
1456       if (CE->getOpcode() == Instruction::IntToPtr) {
1457         LocationOps.emplace_back(SDDbgOperand::fromConst(CE->getOperand(0)));
1458         continue;
1459       }
1460 
1461     // If the Value is a frame index, we can create a FrameIndex debug value
1462     // without relying on the DAG at all.
1463     if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1464       auto SI = FuncInfo.StaticAllocaMap.find(AI);
1465       if (SI != FuncInfo.StaticAllocaMap.end()) {
1466         LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(SI->second));
1467         continue;
1468       }
1469     }
1470 
1471     // Do not use getValue() in here; we don't want to generate code at
1472     // this point if it hasn't been done yet.
1473     SDValue N = NodeMap[V];
1474     if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
1475       N = UnusedArgNodeMap[V];
1476     if (N.getNode()) {
1477       // Only emit func arg dbg value for non-variadic dbg.values for now.
1478       if (!IsVariadic &&
1479           EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1480                                    FuncArgumentDbgValueKind::Value, N))
1481         return true;
1482       if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
1483         // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can
1484         // describe stack slot locations.
1485         //
1486         // Consider "int x = 0; int *px = &x;". There are two kinds of
1487         // interesting debug values here after optimization:
1488         //
1489         //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
1490         //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
1491         //
1492         // Both describe the direct values of their associated variables.
1493         Dependencies.push_back(N.getNode());
1494         LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(FISDN->getIndex()));
1495         continue;
1496       }
1497       LocationOps.emplace_back(
1498           SDDbgOperand::fromNode(N.getNode(), N.getResNo()));
1499       continue;
1500     }
1501 
1502     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1503     // Special rules apply for the first dbg.values of parameter variables in a
1504     // function: identify them by the fact that they reference Argument values,
1505     // that the variable is marked as a parameter, and that there is no
1506     // inlined-at location. We need to let them dangle until they get an SDNode.
1507     bool IsParamOfFunc =
1508         isa<Argument>(V) && Var->isParameter() && !DbgLoc.getInlinedAt();
1509     if (IsParamOfFunc)
1510       return false;
1511 
1512     // The value is not used in this block yet (or it would have an SDNode).
1513     // We still want the value to appear for the user if possible -- if it has
1514     // an associated VReg, we can refer to that instead.
1515     auto VMI = FuncInfo.ValueMap.find(V);
1516     if (VMI != FuncInfo.ValueMap.end()) {
1517       unsigned Reg = VMI->second;
1518       // If this is a PHI node, it may be split up into several MI PHI nodes
1519       // (in FunctionLoweringInfo::set).
1520       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
1521                        V->getType(), std::nullopt);
1522       if (RFV.occupiesMultipleRegs()) {
1523         // FIXME: We could potentially support variadic dbg_values here.
1524         if (IsVariadic)
1525           return false;
1526         unsigned Offset = 0;
1527         unsigned BitsToDescribe = 0;
1528         if (auto VarSize = Var->getSizeInBits())
1529           BitsToDescribe = *VarSize;
1530         if (auto Fragment = Expr->getFragmentInfo())
1531           BitsToDescribe = Fragment->SizeInBits;
1532         for (const auto &RegAndSize : RFV.getRegsAndSizes()) {
1533           // Bail out if all bits are described already.
1534           if (Offset >= BitsToDescribe)
1535             break;
1536           // TODO: handle scalable vectors.
1537           unsigned RegisterSize = RegAndSize.second;
1538           unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
1539                                       ? BitsToDescribe - Offset
1540                                       : RegisterSize;
1541           auto FragmentExpr = DIExpression::createFragmentExpression(
1542               Expr, Offset, FragmentSize);
1543           if (!FragmentExpr)
1544             continue;
1545           SDDbgValue *SDV = DAG.getVRegDbgValue(
1546               Var, *FragmentExpr, RegAndSize.first, false, DbgLoc, SDNodeOrder);
1547           DAG.AddDbgValue(SDV, false);
1548           Offset += RegisterSize;
1549         }
1550         return true;
1551       }
1552       // We can use simple vreg locations for variadic dbg_values as well.
1553       LocationOps.emplace_back(SDDbgOperand::fromVReg(Reg));
1554       continue;
1555     }
1556     // We failed to create an SDDbgOperand for V.
1557     return false;
1558   }
1559 
1560   // We have created an SDDbgOperand for each Value in Values.
1561   // FIXME: Should this use Order instead of SDNodeOrder?
1562   assert(!LocationOps.empty());
1563   SDDbgValue *SDV = DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1564                                         /*IsIndirect=*/false, DbgLoc,
1565                                         SDNodeOrder, IsVariadic);
1566   DAG.AddDbgValue(SDV, /*isParameter=*/false);
1567   return true;
1568 }
1569 
1570 void SelectionDAGBuilder::resolveOrClearDbgInfo() {
1571   // Try to fixup any remaining dangling debug info -- and drop it if we can't.
1572   for (auto &Pair : DanglingDebugInfoMap)
1573     for (auto &DDI : Pair.second)
1574       salvageUnresolvedDbgValue(DDI);
1575   clearDanglingDebugInfo();
1576 }
1577 
1578 /// getCopyFromRegs - If there was a virtual register allocated for the value V,
1579 /// emit a CopyFromReg of the specified type Ty; otherwise return an empty SDValue().
1580 SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
1581   DenseMap<const Value *, Register>::iterator It = FuncInfo.ValueMap.find(V);
1582   SDValue Result;
1583 
1584   if (It != FuncInfo.ValueMap.end()) {
1585     Register InReg = It->second;
1586 
1587     RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
1588                      DAG.getDataLayout(), InReg, Ty,
1589                      std::nullopt); // This is not an ABI copy.
1590     SDValue Chain = DAG.getEntryNode();
1591     Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
1592                                  V);
1593     resolveDanglingDebugInfo(V, Result);
1594   }
1595 
1596   return Result;
1597 }
1598 
1599 /// getValue - Return an SDValue for the given Value.
1600 SDValue SelectionDAGBuilder::getValue(const Value *V) {
1601   // If we already have an SDValue for this value, use it. It's important
1602   // to do this first, so that we don't create a CopyFromReg if we already
1603   // have a regular SDValue.
1604   SDValue &N = NodeMap[V];
1605   if (N.getNode()) return N;
1606 
1607   // If there's a virtual register allocated and initialized for this
1608   // value, use it.
1609   if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1610     return copyFromReg;
1611 
1612   // Otherwise create a new SDValue and remember it.
1613   SDValue Val = getValueImpl(V);
1614   NodeMap[V] = Val;
1615   resolveDanglingDebugInfo(V, Val);
1616   return Val;
1617 }
1618 
1619 /// getNonRegisterValue - Return an SDValue for the given Value, but
1620 /// don't look in FuncInfo.ValueMap for a virtual register.
1621 SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1622   // If we already have an SDValue for this value, use it.
1623   SDValue &N = NodeMap[V];
1624   if (N.getNode()) {
1625     if (isIntOrFPConstant(N)) {
1626       // Remove the debug location from the node as the node is about to be used
1627       // in a location which may differ from the original debug location.  This
1628       // is relevant to Constant and ConstantFP nodes because they can appear
1629       // as constant expressions inside PHI nodes.
1630       N->setDebugLoc(DebugLoc());
1631     }
1632     return N;
1633   }
1634 
1635   // Otherwise create a new SDValue and remember it.
1636   SDValue Val = getValueImpl(V);
1637   NodeMap[V] = Val;
1638   resolveDanglingDebugInfo(V, Val);
1639   return Val;
1640 }
1641 
1642 /// getValueImpl - Helper function for getValue and getNonRegisterValue.
1643 /// Create an SDValue for the given value.
1644 SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1645   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1646 
1647   if (const Constant *C = dyn_cast<Constant>(V)) {
1648     EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1649 
1650     if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1651       return DAG.getConstant(*CI, getCurSDLoc(), VT);
1652 
1653     if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1654       return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1655 
1656     if (isa<ConstantPointerNull>(C)) {
1657       unsigned AS = V->getType()->getPointerAddressSpace();
1658       return DAG.getConstant(0, getCurSDLoc(),
1659                              TLI.getPointerTy(DAG.getDataLayout(), AS));
1660     }
1661 
1662     if (match(C, m_VScale()))
1663       return DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1));
1664 
1665     if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1666       return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1667 
1668     if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1669       return DAG.getUNDEF(VT);
1670 
1671     if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1672       visit(CE->getOpcode(), *CE);
1673       SDValue N1 = NodeMap[V];
1674       assert(N1.getNode() && "visit didn't populate the NodeMap!");
1675       return N1;
1676     }
1677 
1678     if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1679       SmallVector<SDValue, 4> Constants;
1680       for (const Use &U : C->operands()) {
1681         SDNode *Val = getValue(U).getNode();
1682         // If the operand is an empty aggregate, there are no values.
1683         if (!Val) continue;
1684         // Add each leaf value from the operand to the Constants list
1685         // to form a flattened list of all the values.
1686         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1687           Constants.push_back(SDValue(Val, i));
1688       }
1689 
1690       return DAG.getMergeValues(Constants, getCurSDLoc());
1691     }
1692 
1693     if (const ConstantDataSequential *CDS =
1694           dyn_cast<ConstantDataSequential>(C)) {
1695       SmallVector<SDValue, 4> Ops;
1696       for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1697         SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1698         // Add each leaf value from the element to the Ops list
1699         // to form a flattened list of all the values.
1700         for (unsigned j = 0, ee = Val->getNumValues(); j != ee; ++j)
1701           Ops.push_back(SDValue(Val, j));
1702       }
1703 
1704       if (isa<ArrayType>(CDS->getType()))
1705         return DAG.getMergeValues(Ops, getCurSDLoc());
1706       return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1707     }
1708 
1709     if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1710       assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1711              "Unknown struct or array constant!");
1712 
1713       SmallVector<EVT, 4> ValueVTs;
1714       ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1715       unsigned NumElts = ValueVTs.size();
1716       if (NumElts == 0)
1717         return SDValue(); // empty struct
1718       SmallVector<SDValue, 4> Constants(NumElts);
1719       for (unsigned i = 0; i != NumElts; ++i) {
1720         EVT EltVT = ValueVTs[i];
1721         if (isa<UndefValue>(C))
1722           Constants[i] = DAG.getUNDEF(EltVT);
1723         else if (EltVT.isFloatingPoint())
1724           Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1725         else
1726           Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1727       }
1728 
1729       return DAG.getMergeValues(Constants, getCurSDLoc());
1730     }
1731 
1732     if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1733       return DAG.getBlockAddress(BA, VT);
1734 
1735     if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(C))
1736       return getValue(Equiv->getGlobalValue());
1737 
1738     if (const auto *NC = dyn_cast<NoCFIValue>(C))
1739       return getValue(NC->getGlobalValue());
1740 
1741     VectorType *VecTy = cast<VectorType>(V->getType());
1742 
1743     // Now that we know the number and type of the elements, get that number of
1744     // elements into the Ops array based on what kind of constant it is.
1745     if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1746       SmallVector<SDValue, 16> Ops;
1747       unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1748       for (unsigned i = 0; i != NumElements; ++i)
1749         Ops.push_back(getValue(CV->getOperand(i)));
1750 
1751       return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1752     }
1753 
1754     if (isa<ConstantAggregateZero>(C)) {
1755       EVT EltVT =
1756           TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1757 
1758       SDValue Op;
1759       if (EltVT.isFloatingPoint())
1760         Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1761       else
1762         Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1763 
1764       return NodeMap[V] = DAG.getSplat(VT, getCurSDLoc(), Op);
1765     }
1766 
1767     llvm_unreachable("Unknown vector constant");
1768   }
1769 
1770   // If this is a static alloca, generate it as the frame index instead of
1771   // as a computation.
1772   if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1773     DenseMap<const AllocaInst*, int>::iterator SI =
1774       FuncInfo.StaticAllocaMap.find(AI);
1775     if (SI != FuncInfo.StaticAllocaMap.end())
1776       return DAG.getFrameIndex(
1777           SI->second, TLI.getValueType(DAG.getDataLayout(), AI->getType()));
1778   }
1779 
1780   // If this is an instruction which fast-isel has deferred, select it now.
1781   if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1782     Register InReg = FuncInfo.InitializeRegForValue(Inst);
1783 
1784     RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1785                      Inst->getType(), std::nullopt);
1786     SDValue Chain = DAG.getEntryNode();
1787     return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1788   }
1789 
1790   if (const MetadataAsValue *MD = dyn_cast<MetadataAsValue>(V))
1791     return DAG.getMDNode(cast<MDNode>(MD->getMetadata()));
1792 
1793   if (const auto *BB = dyn_cast<BasicBlock>(V))
1794     return DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
1795 
1796   llvm_unreachable("Can't get register for value!");
1797 }
1798 
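     // A catchpad emits no code of its own; it only marks the machine basic block
     // as an EH scope entry (unless the personality is SEH) and, for MSVC C++ and
     // CoreCLR, as a funclet entry that needs a prologue.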
1799 void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
1800   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1801   bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
1802   bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
1803   bool IsSEH = isAsynchronousEHPersonality(Pers);
1804   MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
1805   if (!IsSEH)
1806     CatchPadMBB->setIsEHScopeEntry();
1807   // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
1808   if (IsMSVCCXX || IsCoreCLR)
1809     CatchPadMBB->setIsEHFuncletEntry();
1810 }
1811 
1812 void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
1813   // Update machine-CFG edge.
1814   MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
1815   FuncInfo.MBB->addSuccessor(TargetMBB);
1816   TargetMBB->setIsEHCatchretTarget(true);
1817   DAG.getMachineFunction().setHasEHCatchret(true);
1818 
1819   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1820   bool IsSEH = isAsynchronousEHPersonality(Pers);
1821   if (IsSEH) {
1822     // If this is not a fall-through branch or optimizations are switched off,
1823     // emit the branch.
1824     if (TargetMBB != NextBlock(FuncInfo.MBB) ||
1825         TM.getOptLevel() == CodeGenOptLevel::None)
1826       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
1827                               getControlRoot(), DAG.getBasicBlock(TargetMBB)));
1828     return;
1829   }
1830 
1831   // Figure out the funclet membership for the catchret's successor.
1832   // This will be used by the FuncletLayout pass to determine how to order the
1833   // BB's.
1834   // A 'catchret' returns to the outer scope's color.
1835   Value *ParentPad = I.getCatchSwitchParentPad();
1836   const BasicBlock *SuccessorColor;
1837   if (isa<ConstantTokenNone>(ParentPad))
1838     SuccessorColor = &FuncInfo.Fn->getEntryBlock();
1839   else
1840     SuccessorColor = cast<Instruction>(ParentPad)->getParent();
1841   assert(SuccessorColor && "No parent funclet for catchret!");
1842   MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
1843   assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
1844 
1845   // Create the terminator node.
1846   SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
1847                             getControlRoot(), DAG.getBasicBlock(TargetMBB),
1848                             DAG.getBasicBlock(SuccessorColorMBB));
1849   DAG.setRoot(Ret);
1850 }
1851 
1852 void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
1853   // Don't emit any special code for the cleanuppad instruction. It just marks
1854   // the start of an EH scope/funclet.
1855   FuncInfo.MBB->setIsEHScopeEntry();
1856   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1857   if (Pers != EHPersonality::Wasm_CXX) {
1858     FuncInfo.MBB->setIsEHFuncletEntry();
1859     FuncInfo.MBB->setIsCleanupFuncletEntry();
1860   }
1861 }
1862 
1863 // In wasm EH, even though a catchpad may not catch an exception if a tag does
1864 // not match, it is OK to add only the first unwind destination catchpad to the
1865 // successors, because there will be at least one invoke instruction within the
1866 // catch scope that points to the next unwind destination, if one exists, so
1867 // CFGSort cannot mess up the BB sorting order.
1868 // (All catchpads with 'catch (type)' clauses have a 'llvm.rethrow' intrinsic
1869 // call within them, and catchpads consisting only of 'catch (...)' have a
1870 // '__cxa_end_catch' call within them, both of which generate invokes in case
1871 // the next unwind destination exists, i.e., the next unwind destination is not
1872 // the caller.)
1873 //
1874 // Having at most one EH pad successor is also simpler and helps later
1875 // transformations.
1876 //
1877 // For example,
1878 // current:
1879 //   invoke void @foo to ... unwind label %catch.dispatch
1880 // catch.dispatch:
1881 //   %0 = catchswitch within ... [label %catch.start] unwind label %next
1882 // catch.start:
1883 //   ...
1884 //   ... in this BB or some other child BB dominated by this BB there will be an
1885 //   invoke that points to 'next' BB as an unwind destination
1886 //
1887 // next: ; We don't need to add this to 'current' BB's successors
1888 //   ...
1889 static void findWasmUnwindDestinations(
1890     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1891     BranchProbability Prob,
1892     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1893         &UnwindDests) {
1894   while (EHPadBB) {
1895     const Instruction *Pad = EHPadBB->getFirstNonPHI();
1896     if (isa<CleanupPadInst>(Pad)) {
1897       // Stop on cleanup pads.
1898       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1899       UnwindDests.back().first->setIsEHScopeEntry();
1900       break;
1901     } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1902       // Add the catchpad handlers to the possible destinations. We don't
1903       // continue to the unwind destination of the catchswitch for wasm.
1904       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1905         UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1906         UnwindDests.back().first->setIsEHScopeEntry();
1907       }
1908       break;
1909     } else {
1910       continue;
1911     }
1912   }
1913 }
1914 
1915 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
1916 /// many places it could ultimately go. In the IR, we have a single unwind
1917 /// destination, but in the machine CFG, we enumerate all the possible blocks.
1918 /// This function skips over imaginary basic blocks that hold catchswitch
1919 /// instructions, and finds all the "real" machine
1920 /// basic block destinations. As those destinations may not be successors of
1921 /// EHPadBB, here we also calculate the edge probability to those destinations.
1922 /// The passed-in Prob is the edge probability to EHPadBB.
1923 static void findUnwindDestinations(
1924     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1925     BranchProbability Prob,
1926     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1927         &UnwindDests) {
1928   EHPersonality Personality =
1929     classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1930   bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
1931   bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
1932   bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
1933   bool IsSEH = isAsynchronousEHPersonality(Personality);
1934 
1935   if (IsWasmCXX) {
1936     findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests);
1937     assert(UnwindDests.size() <= 1 &&
1938            "There should be at most one unwind destination for wasm");
1939     return;
1940   }
1941 
1942   while (EHPadBB) {
1943     const Instruction *Pad = EHPadBB->getFirstNonPHI();
1944     BasicBlock *NewEHPadBB = nullptr;
1945     if (isa<LandingPadInst>(Pad)) {
1946       // Stop on landingpads. They are not funclets.
1947       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1948       break;
1949     } else if (isa<CleanupPadInst>(Pad)) {
1950       // Stop on cleanup pads. Cleanups are always funclet entries for all known
1951       // personalities.
1952       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1953       UnwindDests.back().first->setIsEHScopeEntry();
1954       UnwindDests.back().first->setIsEHFuncletEntry();
1955       break;
1956     } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1957       // Add the catchpad handlers to the possible destinations.
1958       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1959         UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1960         // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
1961         if (IsMSVCCXX || IsCoreCLR)
1962           UnwindDests.back().first->setIsEHFuncletEntry();
1963         if (!IsSEH)
1964           UnwindDests.back().first->setIsEHScopeEntry();
1965       }
1966       NewEHPadBB = CatchSwitch->getUnwindDest();
1967     } else {
1968       continue;
1969     }
1970 
1971     BranchProbabilityInfo *BPI = FuncInfo.BPI;
1972     if (BPI && NewEHPadBB)
1973       Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
1974     EHPadBB = NewEHPadBB;
1975   }
1976 }
1977 
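     // Lower a 'cleanupret': record every reachable unwind destination (with its
     // edge probability) as a machine-CFG successor, then emit a CLEANUPRET node.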
1978 void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
1979   // Update successor info.
1980   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
1981   auto UnwindDest = I.getUnwindDest();
1982   BranchProbabilityInfo *BPI = FuncInfo.BPI;
1983   BranchProbability UnwindDestProb =
1984       (BPI && UnwindDest)
1985           ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
1986           : BranchProbability::getZero();
1987   findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
1988   for (auto &UnwindDest : UnwindDests) {
1989     UnwindDest.first->setIsEHPad();
1990     addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
1991   }
1992   FuncInfo.MBB->normalizeSuccProbs();
1993 
1994   // Create the terminator node.
1995   SDValue Ret =
1996       DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
1997   DAG.setRoot(Ret);
1998 }
1999 
2000 void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
2001   report_fatal_error("visitCatchSwitch not yet implemented!");
2002 }
2003 
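     // Lower a 'ret'. If the return value cannot be lowered in registers, store
     // it through the sret demote register; otherwise split it into legal
     // register-sized parts in Outs/OutVals (appending a swifterror register if
     // needed) and let the target's LowerReturn produce the final chain.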
2004 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
2005   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2006   auto &DL = DAG.getDataLayout();
2007   SDValue Chain = getControlRoot();
2008   SmallVector<ISD::OutputArg, 8> Outs;
2009   SmallVector<SDValue, 8> OutVals;
2010 
2011   // Calls to @llvm.experimental.deoptimize don't generate a return value, so
2012   // lower
2013   //
2014   //   %val = call <ty> @llvm.experimental.deoptimize()
2015   //   ret <ty> %val
2016   //
2017   // differently.
2018   if (I.getParent()->getTerminatingDeoptimizeCall()) {
2019     LowerDeoptimizingReturn();
2020     return;
2021   }
2022 
2023   if (!FuncInfo.CanLowerReturn) {
2024     unsigned DemoteReg = FuncInfo.DemoteRegister;
2025     const Function *F = I.getParent()->getParent();
2026 
2027     // Emit a store of the return value through the virtual register.
2028     // Leave Outs empty so that LowerReturn won't try to load return
2029     // registers the usual way.
2030     SmallVector<EVT, 1> PtrValueVTs;
2031     ComputeValueVTs(TLI, DL,
2032                     PointerType::get(F->getContext(),
2033                                      DAG.getDataLayout().getAllocaAddrSpace()),
2034                     PtrValueVTs);
2035 
2036     SDValue RetPtr =
2037         DAG.getCopyFromReg(Chain, getCurSDLoc(), DemoteReg, PtrValueVTs[0]);
2038     SDValue RetOp = getValue(I.getOperand(0));
2039 
2040     SmallVector<EVT, 4> ValueVTs, MemVTs;
2041     SmallVector<uint64_t, 4> Offsets;
2042     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
2043                     &Offsets, 0);
2044     unsigned NumValues = ValueVTs.size();
2045 
2046     SmallVector<SDValue, 4> Chains(NumValues);
2047     Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType());
2048     for (unsigned i = 0; i != NumValues; ++i) {
2049       // An aggregate return value cannot wrap around the address space, so
2050       // offsets to its parts don't wrap either.
2051       SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr,
2052                                            TypeSize::Fixed(Offsets[i]));
2053 
2054       SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
2055       if (MemVTs[i] != ValueVTs[i])
2056         Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]);
2057       Chains[i] = DAG.getStore(
2058           Chain, getCurSDLoc(), Val,
2059           // FIXME: better loc info would be nice.
2060           Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()),
2061           commonAlignment(BaseAlign, Offsets[i]));
2062     }
2063 
2064     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
2065                         MVT::Other, Chains);
2066   } else if (I.getNumOperands() != 0) {
2067     SmallVector<EVT, 4> ValueVTs;
2068     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
2069     unsigned NumValues = ValueVTs.size();
2070     if (NumValues) {
2071       SDValue RetOp = getValue(I.getOperand(0));
2072 
2073       const Function *F = I.getParent()->getParent();
2074 
2075       bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
2076           I.getOperand(0)->getType(), F->getCallingConv(),
2077           /*IsVarArg*/ false, DL);
2078 
2079       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
2080       if (F->getAttributes().hasRetAttr(Attribute::SExt))
2081         ExtendKind = ISD::SIGN_EXTEND;
2082       else if (F->getAttributes().hasRetAttr(Attribute::ZExt))
2083         ExtendKind = ISD::ZERO_EXTEND;
2084 
2085       LLVMContext &Context = F->getContext();
2086       bool RetInReg = F->getAttributes().hasRetAttr(Attribute::InReg);
2087 
2088       for (unsigned j = 0; j != NumValues; ++j) {
2089         EVT VT = ValueVTs[j];
2090 
2091         if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
2092           VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
2093 
2094         CallingConv::ID CC = F->getCallingConv();
2095 
2096         unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
2097         MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
2098         SmallVector<SDValue, 4> Parts(NumParts);
2099         getCopyToParts(DAG, getCurSDLoc(),
2100                        SDValue(RetOp.getNode(), RetOp.getResNo() + j),
2101                        &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
2102 
2103         // 'inreg' on function refers to return value
2104         ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2105         if (RetInReg)
2106           Flags.setInReg();
2107 
2108         if (I.getOperand(0)->getType()->isPointerTy()) {
2109           Flags.setPointer();
2110           Flags.setPointerAddrSpace(
2111               cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
2112         }
2113 
2114         if (NeedsRegBlock) {
2115           Flags.setInConsecutiveRegs();
2116           if (j == NumValues - 1)
2117             Flags.setInConsecutiveRegsLast();
2118         }
2119 
2120         // Propagate extension type if any
2121         if (ExtendKind == ISD::SIGN_EXTEND)
2122           Flags.setSExt();
2123         else if (ExtendKind == ISD::ZERO_EXTEND)
2124           Flags.setZExt();
2125 
2126         for (unsigned i = 0; i < NumParts; ++i) {
2127           Outs.push_back(ISD::OutputArg(Flags,
2128                                         Parts[i].getValueType().getSimpleVT(),
2129                                         VT, /*isfixed=*/true, 0, 0));
2130           OutVals.push_back(Parts[i]);
2131         }
2132       }
2133     }
2134   }
2135 
2136   // Push the swifterror virtual register in as the last element of Outs. This
2137   // makes sure the swifterror virtual register will be returned in the
2138   // swifterror physical register.
2139   const Function *F = I.getParent()->getParent();
2140   if (TLI.supportSwiftError() &&
2141       F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2142     assert(SwiftError.getFunctionArg() && "Need a swift error argument");
2143     ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2144     Flags.setSwiftError();
2145     Outs.push_back(ISD::OutputArg(
2146         Flags, /*vt=*/TLI.getPointerTy(DL), /*argvt=*/EVT(TLI.getPointerTy(DL)),
2147         /*isfixed=*/true, /*origidx=*/1, /*partOffs=*/0));
2148     // Create SDNode for the swifterror virtual register.
2149     OutVals.push_back(
2150         DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
2151                             &I, FuncInfo.MBB, SwiftError.getFunctionArg()),
2152                         EVT(TLI.getPointerTy(DL))));
2153   }
2154 
2155   bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
2156   CallingConv::ID CallConv =
2157     DAG.getMachineFunction().getFunction().getCallingConv();
2158   Chain = DAG.getTargetLoweringInfo().LowerReturn(
2159       Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
2160 
2161   // Verify that the target's LowerReturn behaved as expected.
2162   assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
2163          "LowerReturn didn't return a valid chain!");
2164 
2165   // Update the DAG with the new chain value resulting from return lowering.
2166   DAG.setRoot(Chain);
2167 }
2168 
2169 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
2170 /// created for it, emit nodes to copy the value into the virtual
2171 /// registers.
2172 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
2173   // Skip empty types
2174   if (V->getType()->isEmptyTy())
2175     return;
2176 
2177   DenseMap<const Value *, Register>::iterator VMI = FuncInfo.ValueMap.find(V);
2178   if (VMI != FuncInfo.ValueMap.end()) {
2179     assert((!V->use_empty() || isa<CallBrInst>(V)) &&
2180            "Unused value assigned virtual registers!");
2181     CopyValueToVirtualRegister(V, VMI->second);
2182   }
2183 }
2184 
2185 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
2186 /// the current basic block, add it to ValueMap now so that we'll get a
2187 /// CopyTo/FromReg.
2188 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
2189   // No need to export constants.
2190   if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
2191 
2192   // Already exported?
2193   if (FuncInfo.isExportedInst(V)) return;
2194 
2195   Register Reg = FuncInfo.InitializeRegForValue(V);
2196   CopyValueToVirtualRegister(V, Reg);
2197 }
2198 
2199 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
2200                                                      const BasicBlock *FromBB) {
2201   // The operands of the setcc have to be in this block.  We don't know
2202   // how to export them from some other block.
2203   if (const Instruction *VI = dyn_cast<Instruction>(V)) {
2204     // Can export from current BB.
2205     if (VI->getParent() == FromBB)
2206       return true;
2207 
2208     // Is already exported, noop.
2209     return FuncInfo.isExportedInst(V);
2210   }
2211 
2212   // If this is an argument, we can export it if the BB is the entry block or
2213   // if it is already exported.
2214   if (isa<Argument>(V)) {
2215     if (FromBB->isEntryBlock())
2216       return true;
2217 
2218     // Otherwise, can only export this if it is already exported.
2219     return FuncInfo.isExportedInst(V);
2220   }
2221 
2222   // Otherwise, constants can always be exported.
2223   return true;
2224 }
2225 
2226 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
2227 BranchProbability
2228 SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
2229                                         const MachineBasicBlock *Dst) const {
2230   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2231   const BasicBlock *SrcBB = Src->getBasicBlock();
2232   const BasicBlock *DstBB = Dst->getBasicBlock();
2233   if (!BPI) {
2234     // If BPI is not available, set the default probability as 1 / N, where N is
2235     // the number of successors.
2236     auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
2237     return BranchProbability(1, SuccSize);
2238   }
2239   return BPI->getEdgeProbability(SrcBB, DstBB);
2240 }
2241 
2242 void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
2243                                                MachineBasicBlock *Dst,
2244                                                BranchProbability Prob) {
2245   if (!FuncInfo.BPI)
2246     Src->addSuccessorWithoutProb(Dst);
2247   else {
2248     if (Prob.isUnknown())
2249       Prob = getEdgeProbability(Src, Dst);
2250     Src->addSuccessor(Dst, Prob);
2251   }
2252 }
2253 
2254 static bool InBlock(const Value *V, const BasicBlock *BB) {
2255   if (const Instruction *I = dyn_cast<Instruction>(V))
2256     return I->getParent() == BB;
2257   return true;
2258 }
2259 
2260 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
2261 /// This function emits a branch and is used at the leaves of an OR or an
2262 /// AND operator tree.
2263 void
2264 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
2265                                                   MachineBasicBlock *TBB,
2266                                                   MachineBasicBlock *FBB,
2267                                                   MachineBasicBlock *CurBB,
2268                                                   MachineBasicBlock *SwitchBB,
2269                                                   BranchProbability TProb,
2270                                                   BranchProbability FProb,
2271                                                   bool InvertCond) {
2272   const BasicBlock *BB = CurBB->getBasicBlock();
2273 
2274   // If the leaf of the tree is a comparison, merge the condition into
2275   // the caseblock.
2276   if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
2277     // The operands of the cmp have to be in this block.  We don't know
2278     // how to export them from some other block.  If this is the first block
2279     // of the sequence, no exporting is needed.
2280     if (CurBB == SwitchBB ||
2281         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
2282          isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
2283       ISD::CondCode Condition;
2284       if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
2285         ICmpInst::Predicate Pred =
2286             InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2287         Condition = getICmpCondCode(Pred);
2288       } else {
2289         const FCmpInst *FC = cast<FCmpInst>(Cond);
2290         FCmpInst::Predicate Pred =
2291             InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2292         Condition = getFCmpCondCode(Pred);
2293         if (TM.Options.NoNaNsFPMath)
2294           Condition = getFCmpCodeWithoutNaN(Condition);
2295       }
2296 
2297       CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
2298                    TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2299       SL->SwitchCases.push_back(CB);
2300       return;
2301     }
2302   }
2303 
2304   // Create a CaseBlock record representing this branch.
2305   ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
2306   CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
2307                nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2308   SL->SwitchCases.push_back(CB);
2309 }
2310 
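     /// Recursively decompose a condition that is a (possibly inverted) tree of
     /// AND/OR operations into a sequence of CaseBlocks, creating one new block
     /// per internal node and splitting the incoming branch probabilities across
     /// the emitted branches; leaves are handed to EmitBranchForMergedCondition.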
2311 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
2312                                                MachineBasicBlock *TBB,
2313                                                MachineBasicBlock *FBB,
2314                                                MachineBasicBlock *CurBB,
2315                                                MachineBasicBlock *SwitchBB,
2316                                                Instruction::BinaryOps Opc,
2317                                                BranchProbability TProb,
2318                                                BranchProbability FProb,
2319                                                bool InvertCond) {
2320   // Skip over a NOT that is not part of the tree, and remember to invert the
2321   // op and operands at the next level.
2322   Value *NotCond;
2323   if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
2324       InBlock(NotCond, CurBB->getBasicBlock())) {
2325     FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
2326                          !InvertCond);
2327     return;
2328   }
2329 
2330   const Instruction *BOp = dyn_cast<Instruction>(Cond);
2331   const Value *BOpOp0, *BOpOp1;
2332   // Compute the effective opcode for Cond, taking into account whether it needs
2333   // to be inverted, e.g.
2334   //   and (not (or A, B)), C
2335   // gets lowered as
2336   //   and (and (not A, not B), C)
2337   Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
2338   if (BOp) {
2339     BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
2340                ? Instruction::And
2341                : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
2342                       ? Instruction::Or
2343                       : (Instruction::BinaryOps)0);
2344     if (InvertCond) {
2345       if (BOpc == Instruction::And)
2346         BOpc = Instruction::Or;
2347       else if (BOpc == Instruction::Or)
2348         BOpc = Instruction::And;
2349     }
2350   }
2351 
2352   // If this node is not part of the or/and tree, emit it as a branch.
2353   // Note that all nodes in the tree should have the same opcode.
2354   bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
2355   if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
2356       !InBlock(BOpOp0, CurBB->getBasicBlock()) ||
2357       !InBlock(BOpOp1, CurBB->getBasicBlock())) {
2358     EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
2359                                  TProb, FProb, InvertCond);
2360     return;
2361   }
2362 
2363   //  Create TmpBB after CurBB.
2364   MachineFunction::iterator BBI(CurBB);
2365   MachineFunction &MF = DAG.getMachineFunction();
2366   MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
2367   CurBB->getParent()->insert(++BBI, TmpBB);
2368 
2369   if (Opc == Instruction::Or) {
2370     // Codegen X | Y as:
2371     // BB1:
2372     //   jmp_if_X TBB
2373     //   jmp TmpBB
2374     // TmpBB:
2375     //   jmp_if_Y TBB
2376     //   jmp FBB
2377     //
2378 
2379     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2380     // The requirement is that
2381     //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
2382     //     = TrueProb for original BB.
2383     // Assuming the original probabilities are A and B, one choice is to set
2384     // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
2385     // A/(1+B) and 2B/(1+B). This choice assumes that
2386     //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
2387     // Another choice is to assume TrueProb for BB1 equals to TrueProb for
2388     // TmpBB, but the math is more complicated.
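         // For instance, with TProb = 3/4 and FProb = 1/4, BB1 gets probabilities
         // {3/8, 5/8} and TmpBB (after normalizing {3/8, 1/4}) gets {3/5, 2/5};
         // indeed 3/8 + 5/8 * 3/5 = 3/4 recovers the original TrueProb.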
2389 
2390     auto NewTrueProb = TProb / 2;
2391     auto NewFalseProb = TProb / 2 + FProb;
2392     // Emit the LHS condition.
2393     FindMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
2394                          NewFalseProb, InvertCond);
2395 
2396     // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
2397     SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
2398     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2399     // Emit the RHS condition into TmpBB.
2400     FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2401                          Probs[1], InvertCond);
2402   } else {
2403     assert(Opc == Instruction::And && "Unknown merge op!");
2404     // Codegen X & Y as:
2405     // BB1:
2406     //   jmp_if_X TmpBB
2407     //   jmp FBB
2408     // TmpBB:
2409     //   jmp_if_Y TBB
2410     //   jmp FBB
2411     //
2412     //  This requires creation of TmpBB after CurBB.
2413 
2414     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2415     // The requirement is that
2416     //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
2417     //     = FalseProb for original BB.
2418     // Assuming the original probabilities are A and B, one choice is to set
2419     // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
2420     // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
2421     // TrueProb for BB1 * FalseProb for TmpBB.
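         // For instance, with TProb = 1/2 and FProb = 1/2, BB1 gets probabilities
         // {3/4, 1/4} and TmpBB (after normalizing {1/2, 1/4}) gets {2/3, 1/3};
         // indeed 1/4 + 3/4 * 1/3 = 1/2 recovers the original FalseProb.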
2422 
2423     auto NewTrueProb = TProb + FProb / 2;
2424     auto NewFalseProb = FProb / 2;
2425     // Emit the LHS condition.
2426     FindMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
2427                          NewFalseProb, InvertCond);
2428 
2429     // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
2430     SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
2431     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2432     // Emit the RHS condition into TmpBB.
2433     FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2434                          Probs[1], InvertCond);
2435   }
2436 }
2437 
2438 /// If the set of cases should be emitted as a series of branches, return true.
2439 /// If we should emit this as a bunch of and/or'd together conditions, return
2440 /// false.
2441 bool
2442 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
2443   if (Cases.size() != 2) return true;
2444 
2445   // If this is two comparisons of the same values or'd or and'd together, they
2446   // will get folded into a single comparison, so don't emit two blocks.
2447   if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2448        Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2449       (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2450        Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2451     return false;
2452   }
2453 
2454   // Handle: (X != null) | (Y != null) --> (X|Y) != 0
2455   // Handle: (X == null) & (Y == null) --> (X|Y) == 0
2456   if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2457       Cases[0].CC == Cases[1].CC &&
2458       isa<Constant>(Cases[0].CmpRHS) &&
2459       cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2460     if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2461       return false;
2462     if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2463       return false;
2464   }
2465 
2466   return true;
2467 }
2468 
2469 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
2470   MachineBasicBlock *BrMBB = FuncInfo.MBB;
2471 
2472   // Update machine-CFG edges.
2473   MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
2474 
2475   if (I.isUnconditional()) {
2476     // Update machine-CFG edges.
2477     BrMBB->addSuccessor(Succ0MBB);
2478 
2479     // If this is not a fall-through branch or optimizations are switched off,
2480     // emit the branch.
2481     if (Succ0MBB != NextBlock(BrMBB) ||
2482         TM.getOptLevel() == CodeGenOptLevel::None) {
2483       auto Br = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
2484                             getControlRoot(), DAG.getBasicBlock(Succ0MBB));
2485       setValue(&I, Br);
2486       DAG.setRoot(Br);
2487     }
2488 
2489     return;
2490   }
2491 
2492   // If this condition is one of the special cases we handle, do special stuff
2493   // now.
2494   const Value *CondVal = I.getCondition();
2495   MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
2496 
2497   // If this is a series of conditions that are or'd or and'd together, emit
2498   // this as a sequence of branches instead of setcc's with and/or operations.
2499   // As long as jumps are not expensive (with exceptions for multi-use logic
2500   // ops, unpredictable branches, and vector extracts, because those jumps are
2501   // likely expensive for any target), this should improve performance.
2502   // For example, instead of something like:
2503   //     cmp A, B
2504   //     C = seteq
2505   //     cmp D, E
2506   //     F = setle
2507   //     or C, F
2508   //     jnz foo
2509   // Emit:
2510   //     cmp A, B
2511   //     je foo
2512   //     cmp D, E
2513   //     jle foo
2514   const Instruction *BOp = dyn_cast<Instruction>(CondVal);
2515   if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2516       BOp->hasOneUse() && !I.hasMetadata(LLVMContext::MD_unpredictable)) {
2517     Value *Vec;
2518     const Value *BOp0, *BOp1;
2519     Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
2520     if (match(BOp, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
2521       Opcode = Instruction::And;
2522     else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
2523       Opcode = Instruction::Or;
2524 
2525     if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
2526                     match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
2527       FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, Opcode,
2528                            getEdgeProbability(BrMBB, Succ0MBB),
2529                            getEdgeProbability(BrMBB, Succ1MBB),
2530                            /*InvertCond=*/false);
2531       // If the compares in later blocks need to use values not currently
2532       // exported from this block, export them now.  This block should always
2533       // be the first entry.
2534       assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
2535 
2536       // Allow some cases to be rejected.
2537       if (ShouldEmitAsBranches(SL->SwitchCases)) {
2538         for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
2539           ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
2540           ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
2541         }
2542 
2543         // Emit the branch for this block.
2544         visitSwitchCase(SL->SwitchCases[0], BrMBB);
2545         SL->SwitchCases.erase(SL->SwitchCases.begin());
2546         return;
2547       }
2548 
      // Okay, we decided not to do this; remove any inserted MBBs and clear
      // SwitchCases.
2551       for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
2552         FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);
2553 
2554       SL->SwitchCases.clear();
2555     }
2556   }
2557 
2558   // Create a CaseBlock record representing this branch.
2559   CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
2560                nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());
2561 
2562   // Use visitSwitchCase to actually insert the fast branch sequence for this
2563   // cond branch.
2564   visitSwitchCase(CB, BrMBB);
2565 }
2566 
2567 /// visitSwitchCase - Emits the necessary code to represent a single node in
2568 /// the binary search tree resulting from lowering a switch instruction.
2569 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
2570                                           MachineBasicBlock *SwitchBB) {
2571   SDValue Cond;
2572   SDValue CondLHS = getValue(CB.CmpLHS);
2573   SDLoc dl = CB.DL;
2574 
2575   if (CB.CC == ISD::SETTRUE) {
2576     // Branch or fall through to TrueBB.
2577     addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2578     SwitchBB->normalizeSuccProbs();
2579     if (CB.TrueBB != NextBlock(SwitchBB)) {
2580       DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
2581                               DAG.getBasicBlock(CB.TrueBB)));
2582     }
2583     return;
2584   }
2585 
2586   auto &TLI = DAG.getTargetLoweringInfo();
2587   EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());
2588 
2589   // Build the setcc now.
2590   if (!CB.CmpMHS) {
2591     // Fold "(X == true)" to X and "(X == false)" to !X to
2592     // handle common cases produced by branch lowering.
2593     if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
2594         CB.CC == ISD::SETEQ)
2595       Cond = CondLHS;
2596     else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
2597              CB.CC == ISD::SETEQ) {
2598       SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
2599       Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
2600     } else {
2601       SDValue CondRHS = getValue(CB.CmpRHS);
2602 
2603       // If a pointer's DAG type is larger than its memory type then the DAG
2604       // values are zero-extended. This breaks signed comparisons so truncate
2605       // back to the underlying type before doing the compare.
2606       if (CondLHS.getValueType() != MemVT) {
2607         CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
2608         CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
2609       }
2610       Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
2611     }
2612   } else {
2613     assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
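    // A case range [Low, High] is lowered with the standard range-check idiom:
    // unless Low is the smallest possible value (in which case one signed
    // compare against High suffices), emit (X - Low) <=u (High - Low) so that
    // a single unsigned comparison covers the whole range. For example, the
    // range [5, 12] becomes (X - 5) <=u 7.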
2614 
2615     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2616     const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2617 
2618     SDValue CmpOp = getValue(CB.CmpMHS);
2619     EVT VT = CmpOp.getValueType();
2620 
2621     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2622       Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2623                           ISD::SETLE);
2624     } else {
2625       SDValue SUB = DAG.getNode(ISD::SUB, dl,
2626                                 VT, CmpOp, DAG.getConstant(Low, dl, VT));
2627       Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2628                           DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2629     }
2630   }
2631 
2632   // Update successor info
2633   addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
  // TrueBB and FalseBB are always different unless the incoming IR is
  // degenerate (both branch destinations are the same block). This only
  // happens when running llc on unusual, hand-written IR.
2636   if (CB.TrueBB != CB.FalseBB)
2637     addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2638   SwitchBB->normalizeSuccProbs();
2639 
  // If the true block is the next block, invert the condition so that we can
  // fall through to it instead of branching to the false block.
2642   if (CB.TrueBB == NextBlock(SwitchBB)) {
2643     std::swap(CB.TrueBB, CB.FalseBB);
2644     SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2645     Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2646   }
2647 
2648   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2649                                MVT::Other, getControlRoot(), Cond,
2650                                DAG.getBasicBlock(CB.TrueBB));
2651 
2652   setValue(CurInst, BrCond);
2653 
2654   // Insert the false branch. Do this even if it's a fall through branch,
2655   // this makes it easier to do DAG optimizations which require inverting
2656   // the branch condition.
2657   BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2658                        DAG.getBasicBlock(CB.FalseBB));
2659 
2660   DAG.setRoot(BrCond);
2661 }
2662 
2663 /// visitJumpTable - Emit JumpTable node in the current MBB
2664 void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
2665   // Emit the code for the jump table
2666   assert(JT.Reg != -1U && "Should lower JT Header first!");
2667   EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2668   SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
2669                                      JT.Reg, PTy);
2670   SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
2671   SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
2672                                     MVT::Other, Index.getValue(1),
2673                                     Table, Index);
2674   DAG.setRoot(BrJumpTable);
2675 }
2676 
/// visitJumpTableHeader - This function emits the necessary code to produce an
/// index into the JumpTable from the value being switched on.
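/// For example (roughly, in pseudocode), a switch on i32 %x with cases 10..14
/// emits:
///   %idx = sub i32 %x, 10            ; subtract the lowest case value
///   brcond (%idx >u 4), %default     ; range check against Last - First
/// and the jump table block then performs a BR_JT indexed by %idx.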
2679 void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
2680                                                JumpTableHeader &JTH,
2681                                                MachineBasicBlock *SwitchBB) {
2682   SDLoc dl = getCurSDLoc();
2683 
2684   // Subtract the lowest switch case value from the value being switched on.
2685   SDValue SwitchOp = getValue(JTH.SValue);
2686   EVT VT = SwitchOp.getValueType();
2687   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2688                             DAG.getConstant(JTH.First, dl, VT));
2689 
2690   // The SDNode we just created, which holds the value being switched on minus
2691   // the smallest case value, needs to be copied to a virtual register so it
2692   // can be used as an index into the jump table in a subsequent basic block.
  // This value may be smaller or larger than the target's pointer type, and
  // therefore may require extension or truncation.
2695   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2696   SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
2697 
2698   unsigned JumpTableReg =
2699       FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
2700   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
2701                                     JumpTableReg, SwitchOp);
2702   JT.Reg = JumpTableReg;
2703 
2704   if (!JTH.FallthroughUnreachable) {
2705     // Emit the range check for the jump table, and branch to the default block
2706     // for the switch statement if the value being switched on exceeds the
2707     // largest case in the switch.
2708     SDValue CMP = DAG.getSetCC(
2709         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2710                                    Sub.getValueType()),
2711         Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
2712 
2713     SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2714                                  MVT::Other, CopyTo, CMP,
2715                                  DAG.getBasicBlock(JT.Default));
2716 
2717     // Avoid emitting unnecessary branches to the next block.
2718     if (JT.MBB != NextBlock(SwitchBB))
2719       BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2720                            DAG.getBasicBlock(JT.MBB));
2721 
2722     DAG.setRoot(BrCond);
2723   } else {
2724     // Avoid emitting unnecessary branches to the next block.
2725     if (JT.MBB != NextBlock(SwitchBB))
2726       DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
2727                               DAG.getBasicBlock(JT.MBB)));
2728     else
2729       DAG.setRoot(CopyTo);
2730   }
2731 }
2732 
/// Create a LOAD_STACK_GUARD node, and let it carry the target-specific global
/// variable if one exists.
2735 static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
2736                                  SDValue &Chain) {
2737   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2738   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2739   EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2740   MachineFunction &MF = DAG.getMachineFunction();
2741   Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
2742   MachineSDNode *Node =
2743       DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
2744   if (Global) {
2745     MachinePointerInfo MPInfo(Global);
2746     auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
2747                  MachineMemOperand::MODereferenceable;
2748     MachineMemOperand *MemRef = MF.getMachineMemOperand(
2749         MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlign(PtrTy));
2750     DAG.setNodeMemRefs(Node, {MemRef});
2751   }
2752   if (PtrTy != PtrMemTy)
2753     return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy);
2754   return SDValue(Node, 0);
2755 }
2756 
/// Codegen a new tail for a stack protector check ParentMBB which has had its
/// tail spliced into a stack protector check success BB.
2759 ///
2760 /// For a high level explanation of how this fits into the stack protector
2761 /// generation see the comment on the declaration of class
2762 /// StackProtectorDescriptor.
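///
/// Roughly, in pseudocode, the emitted tail is:
///   GuardVal = <volatile load from the stack protector slot>
///   Guard    = <load of the reference stack guard value>
///   brcond (Guard != GuardVal), FailureMBB
///   br SuccessMBB
/// unless the target provides a guard check function, in which case that
/// function is simply called with GuardVal as its argument.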
2763 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
2764                                                   MachineBasicBlock *ParentBB) {
2765 
2766   // First create the loads to the guard/stack slot for the comparison.
2767   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2768   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2769   EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2770 
2771   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
2772   int FI = MFI.getStackProtectorIndex();
2773 
2774   SDValue Guard;
2775   SDLoc dl = getCurSDLoc();
2776   SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
2777   const Module &M = *ParentBB->getParent()->getFunction().getParent();
2778   Align Align =
2779       DAG.getDataLayout().getPrefTypeAlign(PointerType::get(M.getContext(), 0));
2780 
2781   // Generate code to load the content of the guard slot.
2782   SDValue GuardVal = DAG.getLoad(
2783       PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
2784       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
2785       MachineMemOperand::MOVolatile);
2786 
2787   if (TLI.useStackGuardXorFP())
2788     GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
2789 
  // Retrieve the guard check function; it is nullptr if the instrumentation is
  // inlined.
2791   if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
2792     // The target provides a guard check function to validate the guard value.
2793     // Generate a call to that function with the content of the guard slot as
2794     // argument.
2795     FunctionType *FnTy = GuardCheckFn->getFunctionType();
2796     assert(FnTy->getNumParams() == 1 && "Invalid function signature");
2797 
2798     TargetLowering::ArgListTy Args;
2799     TargetLowering::ArgListEntry Entry;
2800     Entry.Node = GuardVal;
2801     Entry.Ty = FnTy->getParamType(0);
2802     if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
2803       Entry.IsInReg = true;
2804     Args.push_back(Entry);
2805 
2806     TargetLowering::CallLoweringInfo CLI(DAG);
2807     CLI.setDebugLoc(getCurSDLoc())
2808         .setChain(DAG.getEntryNode())
2809         .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
2810                    getValue(GuardCheckFn), std::move(Args));
2811 
2812     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
2813     DAG.setRoot(Result.second);
2814     return;
2815   }
2816 
2817   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
2818   // Otherwise, emit a volatile load to retrieve the stack guard value.
2819   SDValue Chain = DAG.getEntryNode();
2820   if (TLI.useLoadStackGuardNode()) {
2821     Guard = getLoadStackGuard(DAG, dl, Chain);
2822   } else {
2823     const Value *IRGuard = TLI.getSDagStackGuard(M);
2824     SDValue GuardPtr = getValue(IRGuard);
2825 
2826     Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
2827                         MachinePointerInfo(IRGuard, 0), Align,
2828                         MachineMemOperand::MOVolatile);
2829   }
2830 
  // Perform the comparison via a setcc node.
2832   SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
2833                                                         *DAG.getContext(),
2834                                                         Guard.getValueType()),
2835                              Guard, GuardVal, ISD::SETNE);
2836 
  // If the guard and stack slot values are not equal, branch to the failure
  // MBB.
2838   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2839                                MVT::Other, GuardVal.getOperand(0),
2840                                Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
2841   // Otherwise branch to success MBB.
2842   SDValue Br = DAG.getNode(ISD::BR, dl,
2843                            MVT::Other, BrCond,
2844                            DAG.getBasicBlock(SPD.getSuccessMBB()));
2845 
2846   DAG.setRoot(Br);
2847 }
2848 
2849 /// Codegen the failure basic block for a stack protector check.
2850 ///
2851 /// A failure stack protector machine basic block consists simply of a call to
2852 /// __stack_chk_fail().
2853 ///
2854 /// For a high level explanation of how this fits into the stack protector
2855 /// generation see the comment on the declaration of class
2856 /// StackProtectorDescriptor.
2857 void
2858 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
2859   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2860   TargetLowering::MakeLibCallOptions CallOptions;
2861   CallOptions.setDiscardResult(true);
2862   SDValue Chain =
2863       TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
2864                       std::nullopt, CallOptions, getCurSDLoc())
2865           .second;
2866   // On PS4/PS5, the "return address" must still be within the calling
2867   // function, even if it's at the very end, so emit an explicit TRAP here.
2868   // Passing 'true' for doesNotReturn above won't generate the trap for us.
2869   if (TM.getTargetTriple().isPS())
2870     Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
2871   // WebAssembly needs an unreachable instruction after a non-returning call,
2872   // because the function return type can be different from __stack_chk_fail's
2873   // return type (void).
2874   if (TM.getTargetTriple().isWasm())
2875     Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
2876 
2877   DAG.setRoot(Chain);
2878 }
2879 
/// visitBitTestHeader - This function emits the necessary code to produce a
/// value suitable for "bit tests".
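/// For example, a switch whose cases relative to the minimum value are
/// {0, 3, 5} can be decided with one mask test, roughly
/// ((1 << (X - First)) & 0b101001) != 0, once X is known to be in range. This
/// function emits the subtraction and range check; the mask tests themselves
/// are emitted by visitBitTestCase.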
2882 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
2883                                              MachineBasicBlock *SwitchBB) {
2884   SDLoc dl = getCurSDLoc();
2885 
2886   // Subtract the minimum value.
2887   SDValue SwitchOp = getValue(B.SValue);
2888   EVT VT = SwitchOp.getValueType();
2889   SDValue RangeSub =
2890       DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT));
2891 
2892   // Determine the type of the test operands.
2893   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2894   bool UsePtrType = false;
2895   if (!TLI.isTypeLegal(VT)) {
2896     UsePtrType = true;
2897   } else {
2898     for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
2899       if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
        // Switch case ranges are encoded into a series of masks.
        // Just use the pointer type; it's guaranteed to fit.
2902         UsePtrType = true;
2903         break;
2904       }
2905   }
2906   SDValue Sub = RangeSub;
2907   if (UsePtrType) {
2908     VT = TLI.getPointerTy(DAG.getDataLayout());
2909     Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
2910   }
2911 
2912   B.RegVT = VT.getSimpleVT();
2913   B.Reg = FuncInfo.CreateReg(B.RegVT);
2914   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
2915 
2916   MachineBasicBlock* MBB = B.Cases[0].ThisBB;
2917 
2918   if (!B.FallthroughUnreachable)
2919     addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
2920   addSuccessorWithProb(SwitchBB, MBB, B.Prob);
2921   SwitchBB->normalizeSuccProbs();
2922 
2923   SDValue Root = CopyTo;
2924   if (!B.FallthroughUnreachable) {
2925     // Conditional branch to the default block.
2926     SDValue RangeCmp = DAG.getSetCC(dl,
2927         TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2928                                RangeSub.getValueType()),
2929         RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()),
2930         ISD::SETUGT);
2931 
2932     Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
2933                        DAG.getBasicBlock(B.Default));
2934   }
2935 
2936   // Avoid emitting unnecessary branches to the next block.
2937   if (MBB != NextBlock(SwitchBB))
2938     Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB));
2939 
2940   DAG.setRoot(Root);
2941 }
2942 
/// visitBitTestCase - This function produces one "bit test".
2944 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
2945                                            MachineBasicBlock* NextMBB,
2946                                            BranchProbability BranchProbToNext,
2947                                            unsigned Reg,
2948                                            BitTestCase &B,
2949                                            MachineBasicBlock *SwitchBB) {
2950   SDLoc dl = getCurSDLoc();
2951   MVT VT = BB.RegVT;
2952   SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
2953   SDValue Cmp;
2954   unsigned PopCount = llvm::popcount(B.Mask);
2955   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2956   if (PopCount == 1) {
2957     // Testing for a single bit; just compare the shift count with what it
2958     // would need to be to shift a 1 bit in that position.
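    // For example, if Mask == 0x20 the cluster tests exactly bit 5, so we
    // branch iff ShiftOp equals 5.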
2959     Cmp = DAG.getSetCC(
2960         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2961         ShiftOp, DAG.getConstant(llvm::countr_zero(B.Mask), dl, VT),
2962         ISD::SETEQ);
2963   } else if (PopCount == BB.Range) {
    // There is only one zero bit in the range; test for it directly.
2965     Cmp = DAG.getSetCC(
2966         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2967         ShiftOp, DAG.getConstant(llvm::countr_one(B.Mask), dl, VT), ISD::SETNE);
2968   } else {
    // Make the desired shift: SwitchVal = 1 << ShiftOp.
2970     SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
2971                                     DAG.getConstant(1, dl, VT), ShiftOp);
2972 
2973     // Emit bit tests and jumps
2974     SDValue AndOp = DAG.getNode(ISD::AND, dl,
2975                                 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
2976     Cmp = DAG.getSetCC(
2977         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2978         AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
2979   }
2980 
2981   // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
2982   addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
2983   // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
2984   addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
2985   // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
2986   // one as they are relative probabilities (and thus work more like weights),
2987   // and hence we need to normalize them to let the sum of them become one.
2988   SwitchBB->normalizeSuccProbs();
2989 
2990   SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
2991                               MVT::Other, getControlRoot(),
2992                               Cmp, DAG.getBasicBlock(B.TargetBB));
2993 
2994   // Avoid emitting unnecessary branches to the next block.
2995   if (NextMBB != NextBlock(SwitchBB))
2996     BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
2997                         DAG.getBasicBlock(NextMBB));
2998 
2999   DAG.setRoot(BrAnd);
3000 }
3001 
3002 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
3003   MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
3004 
  // Retrieve the successors. Look through artificial IR-level blocks like
  // catchswitch to find them.
3007   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
3008   const BasicBlock *EHPadBB = I.getSuccessor(1);
3009   MachineBasicBlock *EHPadMBB = FuncInfo.MBBMap[EHPadBB];
3010 
3011   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
3012   // have to do anything here to lower funclet bundles.
3013   assert(!I.hasOperandBundlesOtherThan(
3014              {LLVMContext::OB_deopt, LLVMContext::OB_gc_transition,
3015               LLVMContext::OB_gc_live, LLVMContext::OB_funclet,
3016               LLVMContext::OB_cfguardtarget,
3017               LLVMContext::OB_clang_arc_attachedcall}) &&
3018          "Cannot lower invokes with arbitrary operand bundles yet!");
3019 
3020   const Value *Callee(I.getCalledOperand());
3021   const Function *Fn = dyn_cast<Function>(Callee);
3022   if (isa<InlineAsm>(Callee))
3023     visitInlineAsm(I, EHPadBB);
3024   else if (Fn && Fn->isIntrinsic()) {
3025     switch (Fn->getIntrinsicID()) {
3026     default:
3027       llvm_unreachable("Cannot invoke this intrinsic");
3028     case Intrinsic::donothing:
3029       // Ignore invokes to @llvm.donothing: jump directly to the next BB.
3030     case Intrinsic::seh_try_begin:
3031     case Intrinsic::seh_scope_begin:
3032     case Intrinsic::seh_try_end:
3033     case Intrinsic::seh_scope_end:
3034       if (EHPadMBB)
        // This block is referenced by the EH table, so mark it address-taken
        // to keep the destructor funclet from being removed by optimizations.
        EHPadMBB->setMachineBlockAddressTaken();
3038       break;
3039     case Intrinsic::experimental_patchpoint_void:
3040     case Intrinsic::experimental_patchpoint_i64:
3041       visitPatchpoint(I, EHPadBB);
3042       break;
3043     case Intrinsic::experimental_gc_statepoint:
3044       LowerStatepoint(cast<GCStatepointInst>(I), EHPadBB);
3045       break;
3046     case Intrinsic::wasm_rethrow: {
3047       // This is usually done in visitTargetIntrinsic, but this intrinsic is
3048       // special because it can be invoked, so we manually lower it to a DAG
3049       // node here.
3050       SmallVector<SDValue, 8> Ops;
3051       Ops.push_back(getRoot()); // inchain
3052       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3053       Ops.push_back(
3054           DAG.getTargetConstant(Intrinsic::wasm_rethrow, getCurSDLoc(),
3055                                 TLI.getPointerTy(DAG.getDataLayout())));
3056       SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
3057       DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
3058       break;
3059     }
3060     }
3061   } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
3062     // Currently we do not lower any intrinsic calls with deopt operand bundles.
3063     // Eventually we will support lowering the @llvm.experimental.deoptimize
3064     // intrinsic, and right now there are no plans to support other intrinsics
3065     // with deopt state.
3066     LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
3067   } else {
3068     LowerCallTo(I, getValue(Callee), false, false, EHPadBB);
3069   }
3070 
3071   // If the value of the invoke is used outside of its defining block, make it
3072   // available as a virtual register.
  // We already took care of the exported value for the statepoint instruction
  // during the call to LowerStatepoint.
3075   if (!isa<GCStatepointInst>(I)) {
3076     CopyToExportRegsIfNeeded(&I);
3077   }
3078 
3079   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
3080   BranchProbabilityInfo *BPI = FuncInfo.BPI;
3081   BranchProbability EHPadBBProb =
3082       BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
3083           : BranchProbability::getZero();
3084   findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
3085 
3086   // Update successor info.
3087   addSuccessorWithProb(InvokeMBB, Return);
3088   for (auto &UnwindDest : UnwindDests) {
3089     UnwindDest.first->setIsEHPad();
3090     addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3091   }
3092   InvokeMBB->normalizeSuccProbs();
3093 
3094   // Drop into normal successor.
3095   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
3096                           DAG.getBasicBlock(Return)));
3097 }
3098 
3099 void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
3100   MachineBasicBlock *CallBrMBB = FuncInfo.MBB;
3101 
3102   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
3103   // have to do anything here to lower funclet bundles.
3104   assert(!I.hasOperandBundlesOtherThan(
3105              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
3106          "Cannot lower callbrs with arbitrary operand bundles yet!");
3107 
3108   assert(I.isInlineAsm() && "Only know how to handle inlineasm callbr");
3109   visitInlineAsm(I);
3110   CopyToExportRegsIfNeeded(&I);
3111 
3112   // Retrieve successors.
3113   SmallPtrSet<BasicBlock *, 8> Dests;
3114   Dests.insert(I.getDefaultDest());
3115   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getDefaultDest()];
3116 
3117   // Update successor info.
3118   addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne());
3119   for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
3120     BasicBlock *Dest = I.getIndirectDest(i);
3121     MachineBasicBlock *Target = FuncInfo.MBBMap[Dest];
3122     Target->setIsInlineAsmBrIndirectTarget();
3123     Target->setMachineBlockAddressTaken();
3124     Target->setLabelMustBeEmitted();
3125     // Don't add duplicate machine successors.
3126     if (Dests.insert(Dest).second)
3127       addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero());
3128   }
3129   CallBrMBB->normalizeSuccProbs();
3130 
3131   // Drop into default successor.
3132   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
3133                           MVT::Other, getControlRoot(),
3134                           DAG.getBasicBlock(Return)));
3135 }
3136 
3137 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
3138   llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
3139 }
3140 
3141 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
3142   assert(FuncInfo.MBB->isEHPad() &&
3143          "Call to landingpad not in landing pad!");
3144 
3145   // If there aren't registers to copy the values into (e.g., during SjLj
3146   // exceptions), then don't bother to create these DAG nodes.
3147   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3148   const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
3149   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
3150       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
3151     return;
3152 
3153   // If landingpad's return type is token type, we don't create DAG nodes
3154   // for its exception pointer and selector value. The extraction of exception
3155   // pointer or selector value from token type landingpads is not currently
3156   // supported.
3157   if (LP.getType()->isTokenTy())
3158     return;
3159 
3160   SmallVector<EVT, 2> ValueVTs;
3161   SDLoc dl = getCurSDLoc();
3162   ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
3163   assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
3164 
3165   // Get the two live-in registers as SDValues. The physregs have already been
3166   // copied into virtual registers.
3167   SDValue Ops[2];
3168   if (FuncInfo.ExceptionPointerVirtReg) {
3169     Ops[0] = DAG.getZExtOrTrunc(
3170         DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3171                            FuncInfo.ExceptionPointerVirtReg,
3172                            TLI.getPointerTy(DAG.getDataLayout())),
3173         dl, ValueVTs[0]);
3174   } else {
3175     Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
3176   }
3177   Ops[1] = DAG.getZExtOrTrunc(
3178       DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3179                          FuncInfo.ExceptionSelectorVirtReg,
3180                          TLI.getPointerTy(DAG.getDataLayout())),
3181       dl, ValueVTs[1]);
3182 
3183   // Merge into one.
3184   SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
3185                             DAG.getVTList(ValueVTs), Ops);
3186   setValue(&LP, Res);
3187 }
3188 
3189 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
3190                                            MachineBasicBlock *Last) {
3191   // Update JTCases.
3192   for (JumpTableBlock &JTB : SL->JTCases)
3193     if (JTB.first.HeaderBB == First)
3194       JTB.first.HeaderBB = Last;
3195 
3196   // Update BitTestCases.
3197   for (BitTestBlock &BTB : SL->BitTestCases)
3198     if (BTB.Parent == First)
3199       BTB.Parent = Last;
3200 }
3201 
3202 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
3203   MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
3204 
3205   // Update machine-CFG edges with unique successors.
3206   SmallSet<BasicBlock*, 32> Done;
3207   for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
3208     BasicBlock *BB = I.getSuccessor(i);
3209     bool Inserted = Done.insert(BB).second;
3210     if (!Inserted)
3211         continue;
      continue;
3213     MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
3214     addSuccessorWithProb(IndirectBrMBB, Succ);
3215   }
3216   IndirectBrMBB->normalizeSuccProbs();
3217 
3218   DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
3219                           MVT::Other, getControlRoot(),
3220                           getValue(I.getAddress())));
3221 }
3222 
3223 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
3224   if (!DAG.getTarget().Options.TrapUnreachable)
3225     return;
3226 
  // We may be able to ignore an unreachable that follows a noreturn call.
3228   if (DAG.getTarget().Options.NoTrapAfterNoreturn) {
3229     const BasicBlock &BB = *I.getParent();
3230     if (&I != &BB.front()) {
3231       BasicBlock::const_iterator PredI =
3232         std::prev(BasicBlock::const_iterator(&I));
3233       if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
3234         if (Call->doesNotReturn())
3235           return;
3236       }
3237     }
3238   }
3239 
3240   DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
3241 }
3242 
3243 void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
3244   SDNodeFlags Flags;
3245   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3246     Flags.copyFMF(*FPOp);
3247 
3248   SDValue Op = getValue(I.getOperand(0));
3249   SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(),
3250                                     Op, Flags);
3251   setValue(&I, UnNodeValue);
3252 }
3253 
3254 void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
3255   SDNodeFlags Flags;
3256   if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
3257     Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3258     Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3259   }
3260   if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
3261     Flags.setExact(ExactOp->isExact());
3262   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3263     Flags.copyFMF(*FPOp);
3264 
3265   SDValue Op1 = getValue(I.getOperand(0));
3266   SDValue Op2 = getValue(I.getOperand(1));
3267   SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
3268                                      Op1, Op2, Flags);
3269   setValue(&I, BinNodeValue);
3270 }
3271 
3272 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
3273   SDValue Op1 = getValue(I.getOperand(0));
3274   SDValue Op2 = getValue(I.getOperand(1));
3275 
3276   EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
3277       Op1.getValueType(), DAG.getDataLayout());
3278 
3279   // Coerce the shift amount to the right type if we can. This exposes the
3280   // truncate or zext to optimization early.
3281   if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
3282     assert(ShiftTy.getSizeInBits() >= Log2_32_Ceil(Op1.getValueSizeInBits()) &&
3283            "Unexpected shift type");
3284     Op2 = DAG.getZExtOrTrunc(Op2, getCurSDLoc(), ShiftTy);
3285   }
3286 
3287   bool nuw = false;
3288   bool nsw = false;
3289   bool exact = false;
3290 
3291   if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
3292 
3293     if (const OverflowingBinaryOperator *OFBinOp =
3294             dyn_cast<const OverflowingBinaryOperator>(&I)) {
3295       nuw = OFBinOp->hasNoUnsignedWrap();
3296       nsw = OFBinOp->hasNoSignedWrap();
3297     }
3298     if (const PossiblyExactOperator *ExactOp =
3299             dyn_cast<const PossiblyExactOperator>(&I))
3300       exact = ExactOp->isExact();
3301   }
3302   SDNodeFlags Flags;
3303   Flags.setExact(exact);
3304   Flags.setNoSignedWrap(nsw);
3305   Flags.setNoUnsignedWrap(nuw);
3306   SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
3307                             Flags);
3308   setValue(&I, Res);
3309 }
3310 
3311 void SelectionDAGBuilder::visitSDiv(const User &I) {
3312   SDValue Op1 = getValue(I.getOperand(0));
3313   SDValue Op2 = getValue(I.getOperand(1));
3314 
3315   SDNodeFlags Flags;
3316   Flags.setExact(isa<PossiblyExactOperator>(&I) &&
3317                  cast<PossiblyExactOperator>(&I)->isExact());
3318   setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
3319                            Op2, Flags));
3320 }
3321 
3322 void SelectionDAGBuilder::visitICmp(const User &I) {
3323   ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
3324   if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
3325     predicate = IC->getPredicate();
3326   else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
3327     predicate = ICmpInst::Predicate(IC->getPredicate());
3328   SDValue Op1 = getValue(I.getOperand(0));
3329   SDValue Op2 = getValue(I.getOperand(1));
3330   ISD::CondCode Opcode = getICmpCondCode(predicate);
3331 
3332   auto &TLI = DAG.getTargetLoweringInfo();
3333   EVT MemVT =
3334       TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3335 
3336   // If a pointer's DAG type is larger than its memory type then the DAG values
3337   // are zero-extended. This breaks signed comparisons so truncate back to the
3338   // underlying type before doing the compare.
3339   if (Op1.getValueType() != MemVT) {
3340     Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT);
3341     Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT);
3342   }
3343 
3344   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3345                                                         I.getType());
3346   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
3347 }
3348 
3349 void SelectionDAGBuilder::visitFCmp(const User &I) {
3350   FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
3351   if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
3352     predicate = FC->getPredicate();
3353   else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
3354     predicate = FCmpInst::Predicate(FC->getPredicate());
3355   SDValue Op1 = getValue(I.getOperand(0));
3356   SDValue Op2 = getValue(I.getOperand(1));
3357 
3358   ISD::CondCode Condition = getFCmpCondCode(predicate);
3359   auto *FPMO = cast<FPMathOperator>(&I);
3360   if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
3361     Condition = getFCmpCodeWithoutNaN(Condition);
3362 
3363   SDNodeFlags Flags;
3364   Flags.copyFMF(*FPMO);
3365   SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
3366 
3367   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3368                                                         I.getType());
3369   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
3370 }
3371 
// Check that every user of the select's condition is itself a select; only
// then can the comparison producing the condition be folded away when we
// lower to a min/max node.
3374 static bool hasOnlySelectUsers(const Value *Cond) {
3375   return llvm::all_of(Cond->users(), [](const Value *V) {
3376     return isa<SelectInst>(V);
3377   });
3378 }
3379 
3380 void SelectionDAGBuilder::visitSelect(const User &I) {
3381   SmallVector<EVT, 4> ValueVTs;
3382   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
3383                   ValueVTs);
3384   unsigned NumValues = ValueVTs.size();
3385   if (NumValues == 0) return;
3386 
3387   SmallVector<SDValue, 4> Values(NumValues);
3388   SDValue Cond     = getValue(I.getOperand(0));
3389   SDValue LHSVal   = getValue(I.getOperand(1));
3390   SDValue RHSVal   = getValue(I.getOperand(2));
3391   SmallVector<SDValue, 1> BaseOps(1, Cond);
3392   ISD::NodeType OpCode =
3393       Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
3394 
3395   bool IsUnaryAbs = false;
3396   bool Negate = false;
3397 
3398   SDNodeFlags Flags;
3399   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3400     Flags.copyFMF(*FPOp);
3401 
3402   Flags.setUnpredictable(
3403       cast<SelectInst>(I).getMetadata(LLVMContext::MD_unpredictable));
3404 
3405   // Min/max matching is only viable if all output VTs are the same.
3406   if (all_equal(ValueVTs)) {
3407     EVT VT = ValueVTs[0];
3408     LLVMContext &Ctx = *DAG.getContext();
3409     auto &TLI = DAG.getTargetLoweringInfo();
3410 
3411     // We care about the legality of the operation after it has been type
3412     // legalized.
3413     while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal)
3414       VT = TLI.getTypeToTransformTo(Ctx, VT);
3415 
3416     // If the vselect is legal, assume we want to leave this as a vector setcc +
3417     // vselect. Otherwise, if this is going to be scalarized, we want to see if
3418     // min/max is legal on the scalar type.
3419     bool UseScalarMinMax = VT.isVector() &&
3420       !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
3421 
3422     // ValueTracking's select pattern matching does not account for -0.0,
3423     // so we can't lower to FMINIMUM/FMAXIMUM because those nodes specify that
3424     // -0.0 is less than +0.0.
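    // For example, (select (icmp ult %a, %b), %a, %b) is matched below as
    // SPF_UMIN and lowered to a single ISD::UMIN node when that is legal.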
3425     Value *LHS, *RHS;
3426     auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
3427     ISD::NodeType Opc = ISD::DELETED_NODE;
3428     switch (SPR.Flavor) {
3429     case SPF_UMAX:    Opc = ISD::UMAX; break;
3430     case SPF_UMIN:    Opc = ISD::UMIN; break;
3431     case SPF_SMAX:    Opc = ISD::SMAX; break;
3432     case SPF_SMIN:    Opc = ISD::SMIN; break;
3433     case SPF_FMINNUM:
3434       switch (SPR.NaNBehavior) {
3435       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3436       case SPNB_RETURNS_NAN: break;
3437       case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
3438       case SPNB_RETURNS_ANY:
3439         if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT) ||
3440             (UseScalarMinMax &&
3441              TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType())))
3442           Opc = ISD::FMINNUM;
3443         break;
3444       }
3445       break;
3446     case SPF_FMAXNUM:
3447       switch (SPR.NaNBehavior) {
3448       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3449       case SPNB_RETURNS_NAN: break;
3450       case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
3451       case SPNB_RETURNS_ANY:
3452         if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT) ||
3453             (UseScalarMinMax &&
3454              TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType())))
3455           Opc = ISD::FMAXNUM;
3456         break;
3457       }
3458       break;
3459     case SPF_NABS:
3460       Negate = true;
3461       [[fallthrough]];
3462     case SPF_ABS:
3463       IsUnaryAbs = true;
3464       Opc = ISD::ABS;
3465       break;
3466     default: break;
3467     }
3468 
3469     if (!IsUnaryAbs && Opc != ISD::DELETED_NODE &&
3470         (TLI.isOperationLegalOrCustom(Opc, VT) ||
3471          (UseScalarMinMax &&
3472           TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
3473         // If the underlying comparison instruction is used by any other
3474         // instruction, the consumed instructions won't be destroyed, so it is
3475         // not profitable to convert to a min/max.
3476         hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
3477       OpCode = Opc;
3478       LHSVal = getValue(LHS);
3479       RHSVal = getValue(RHS);
3480       BaseOps.clear();
3481     }
3482 
3483     if (IsUnaryAbs) {
3484       OpCode = Opc;
3485       LHSVal = getValue(LHS);
3486       BaseOps.clear();
3487     }
3488   }
3489 
3490   if (IsUnaryAbs) {
3491     for (unsigned i = 0; i != NumValues; ++i) {
3492       SDLoc dl = getCurSDLoc();
3493       EVT VT = LHSVal.getNode()->getValueType(LHSVal.getResNo() + i);
3494       Values[i] =
3495           DAG.getNode(OpCode, dl, VT, LHSVal.getValue(LHSVal.getResNo() + i));
3496       if (Negate)
3497         Values[i] = DAG.getNegative(Values[i], dl, VT);
3498     }
3499   } else {
3500     for (unsigned i = 0; i != NumValues; ++i) {
3501       SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
3502       Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3503       Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3504       Values[i] = DAG.getNode(
3505           OpCode, getCurSDLoc(),
3506           LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops, Flags);
3507     }
3508   }
3509 
3510   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3511                            DAG.getVTList(ValueVTs), Values));
3512 }
3513 
3514 void SelectionDAGBuilder::visitTrunc(const User &I) {
3515   // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3516   SDValue N = getValue(I.getOperand(0));
3517   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3518                                                         I.getType());
3519   setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
3520 }
3521 
3522 void SelectionDAGBuilder::visitZExt(const User &I) {
3523   // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
  // ZExt also can't be a cast to bool for the same reason, so there is nothing
  // much to do here.
3525   SDValue N = getValue(I.getOperand(0));
3526   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3527                                                         I.getType());
3528   setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
3529 }
3530 
3531 void SelectionDAGBuilder::visitSExt(const User &I) {
3532   // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
  // SExt also can't be a cast to bool for the same reason, so there is nothing
  // much to do here.
3534   SDValue N = getValue(I.getOperand(0));
3535   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3536                                                         I.getType());
3537   setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3538 }
3539 
3540 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
3541   // FPTrunc is never a no-op cast, no need to check
3542   SDValue N = getValue(I.getOperand(0));
3543   SDLoc dl = getCurSDLoc();
3544   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3545   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3546   setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
3547                            DAG.getTargetConstant(
3548                                0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
3549 }
3550 
3551 void SelectionDAGBuilder::visitFPExt(const User &I) {
3552   // FPExt is never a no-op cast, no need to check
3553   SDValue N = getValue(I.getOperand(0));
3554   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3555                                                         I.getType());
3556   setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3557 }
3558 
3559 void SelectionDAGBuilder::visitFPToUI(const User &I) {
3560   // FPToUI is never a no-op cast, no need to check
3561   SDValue N = getValue(I.getOperand(0));
3562   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3563                                                         I.getType());
3564   setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3565 }
3566 
3567 void SelectionDAGBuilder::visitFPToSI(const User &I) {
3568   // FPToSI is never a no-op cast, no need to check
3569   SDValue N = getValue(I.getOperand(0));
3570   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3571                                                         I.getType());
3572   setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3573 }
3574 
3575 void SelectionDAGBuilder::visitUIToFP(const User &I) {
3576   // UIToFP is never a no-op cast, no need to check
3577   SDValue N = getValue(I.getOperand(0));
3578   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3579                                                         I.getType());
3580   setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
3581 }
3582 
3583 void SelectionDAGBuilder::visitSIToFP(const User &I) {
3584   // SIToFP is never a no-op cast, no need to check
3585   SDValue N = getValue(I.getOperand(0));
3586   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3587                                                         I.getType());
3588   setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3589 }
3590 
3591 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3592   // What to do depends on the size of the integer and the size of the pointer.
3593   // We can either truncate, zero extend, or no-op, accordingly.
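  // For example, a 64-bit pointer converted to i32 is truncated, while one
  // converted to i128 is zero extended.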
3594   SDValue N = getValue(I.getOperand(0));
3595   auto &TLI = DAG.getTargetLoweringInfo();
3596   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3597                                                         I.getType());
3598   EVT PtrMemVT =
3599       TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3600   N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3601   N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT);
3602   setValue(&I, N);
3603 }
3604 
3605 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3606   // What to do depends on the size of the integer and the size of the pointer.
3607   // We can either truncate, zero extend, or no-op, accordingly.
3608   SDValue N = getValue(I.getOperand(0));
3609   auto &TLI = DAG.getTargetLoweringInfo();
3610   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3611   EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
3612   N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3613   N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT);
3614   setValue(&I, N);
3615 }
3616 
3617 void SelectionDAGBuilder::visitBitCast(const User &I) {
3618   SDValue N = getValue(I.getOperand(0));
3619   SDLoc dl = getCurSDLoc();
3620   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3621                                                         I.getType());
3622 
  // BitCast assures us that source and destination are the same size, so this
  // is either a BITCAST or a no-op.
3625   if (DestVT != N.getValueType())
3626     setValue(&I, DAG.getNode(ISD::BITCAST, dl,
3627                              DestVT, N)); // convert types.
  // Check if the original LLVM IR operand was a ConstantInt, because getValue()
  // might fold any kind of constant expression to an integer constant and that
  // is not what we are looking for. Only recognize a bitcast of a genuine
  // constant integer as an opaque constant.
  else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
    setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
                                 /*isOpaque=*/true));
3635   else
3636     setValue(&I, N);            // noop cast.
3637 }
3638 
3639 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3640   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3641   const Value *SV = I.getOperand(0);
3642   SDValue N = getValue(SV);
3643   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3644 
3645   unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3646   unsigned DestAS = I.getType()->getPointerAddressSpace();
3647 
3648   if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
3649     N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3650 
3651   setValue(&I, N);
3652 }
3653 
3654 void SelectionDAGBuilder::visitInsertElement(const User &I) {
3655   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3656   SDValue InVec = getValue(I.getOperand(0));
3657   SDValue InVal = getValue(I.getOperand(1));
3658   SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
3659                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
3660   setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
3661                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
3662                            InVec, InVal, InIdx));
3663 }
3664 
3665 void SelectionDAGBuilder::visitExtractElement(const User &I) {
3666   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3667   SDValue InVec = getValue(I.getOperand(0));
3668   SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
3669                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
3670   setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3671                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
3672                            InVec, InIdx));
3673 }
3674 
3675 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
3676   SDValue Src1 = getValue(I.getOperand(0));
3677   SDValue Src2 = getValue(I.getOperand(1));
3678   ArrayRef<int> Mask;
3679   if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
3680     Mask = SVI->getShuffleMask();
3681   else
3682     Mask = cast<ConstantExpr>(I).getShuffleMask();
3683   SDLoc DL = getCurSDLoc();
3684   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3685   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3686   EVT SrcVT = Src1.getValueType();
3687 
3688   if (all_of(Mask, [](int Elem) { return Elem == 0; }) &&
3689       VT.isScalableVector()) {
3690     // Canonical splat form of first element of first input vector.
3691     SDValue FirstElt =
3692         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT.getScalarType(), Src1,
3693                     DAG.getVectorIdxConstant(0, DL));
3694     setValue(&I, DAG.getNode(ISD::SPLAT_VECTOR, DL, VT, FirstElt));
3695     return;
3696   }
3697 
3698   // For now, we only handle splats for scalable vectors.
3699   // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation
3700   // for targets that support a SPLAT_VECTOR for non-scalable vector types.
3701   assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle");
3702 
3703   unsigned SrcNumElts = SrcVT.getVectorNumElements();
3704   unsigned MaskNumElts = Mask.size();
3705 
3706   if (SrcNumElts == MaskNumElts) {
3707     setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
3708     return;
3709   }
3710 
3711   // Normalize the shuffle vector since mask and vector length don't match.
3712   if (SrcNumElts < MaskNumElts) {
    // Mask is longer than the source vectors. We can concatenate the source
    // vectors to make their length match the mask.
3715 
3716     if (MaskNumElts % SrcNumElts == 0) {
3717       // Mask length is a multiple of the source vector length.
3718       // Check if the shuffle is some kind of concatenation of the input
3719       // vectors.
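      // For example, with <4 x i32> sources the mask <0,1,2,3,4,5,6,7> selects
      // all of Src1 followed by all of Src2 and is simply concat(Src1, Src2).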
3720       unsigned NumConcat = MaskNumElts / SrcNumElts;
3721       bool IsConcat = true;
3722       SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
3723       for (unsigned i = 0; i != MaskNumElts; ++i) {
3724         int Idx = Mask[i];
3725         if (Idx < 0)
3726           continue;
3727         // Ensure the indices in each SrcVT sized piece are sequential and that
3728         // the same source is used for the whole piece.
3729         if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
3730             (ConcatSrcs[i / SrcNumElts] >= 0 &&
3731              ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
3732           IsConcat = false;
3733           break;
3734         }
3735         // Remember which source this index came from.
3736         ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
3737       }
3738 
3739       // The shuffle is concatenating multiple vectors together. Just emit
3740       // a CONCAT_VECTORS operation.
3741       if (IsConcat) {
3742         SmallVector<SDValue, 8> ConcatOps;
3743         for (auto Src : ConcatSrcs) {
3744           if (Src < 0)
3745             ConcatOps.push_back(DAG.getUNDEF(SrcVT));
3746           else if (Src == 0)
3747             ConcatOps.push_back(Src1);
3748           else
3749             ConcatOps.push_back(Src2);
3750         }
3751         setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
3752         return;
3753       }
3754     }
3755 
3756     unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
3757     unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
3758     EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
3759                                     PaddedMaskNumElts);
3760 
3761     // Pad both vectors with undefs to make them the same length as the mask.
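    // For example, <2 x i32> sources with a 3-element mask are padded out to
    // four elements, shuffled at that width, and then the extra element is
    // trimmed off by the EXTRACT_SUBVECTOR below.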
3762     SDValue UndefVal = DAG.getUNDEF(SrcVT);
3763 
3764     SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3765     SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3766     MOps1[0] = Src1;
3767     MOps2[0] = Src2;
3768 
3769     Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
3770     Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
3771 
3772     // Readjust mask for new input vector length.
3773     SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
3774     for (unsigned i = 0; i != MaskNumElts; ++i) {
3775       int Idx = Mask[i];
3776       if (Idx >= (int)SrcNumElts)
3777         Idx -= SrcNumElts - PaddedMaskNumElts;
3778       MappedOps[i] = Idx;
3779     }
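    // For example (illustrative), with SrcNumElts = 4 and MaskNumElts = 6,
    // PaddedMaskNumElts is 8; a mask index pointing into Src2 (Idx >= 4) is
    // shifted up by 4 so that it indexes the second 8-wide concatenated input.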
3780 
3781     SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
3782 
3783     // If the concatenated vector was padded, extract a subvector with the
3784     // correct number of elements.
3785     if (MaskNumElts != PaddedMaskNumElts)
3786       Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
3787                            DAG.getVectorIdxConstant(0, DL));
3788 
3789     setValue(&I, Result);
3790     return;
3791   }
3792 
3793   if (SrcNumElts > MaskNumElts) {
3794     // Analyze the access pattern of the vector to see if we can extract
3795     // two subvectors and do the shuffle.
3796     int StartIdx[2] = { -1, -1 };  // StartIdx to extract from
3797     bool CanExtract = true;
3798     for (int Idx : Mask) {
3799       unsigned Input = 0;
3800       if (Idx < 0)
3801         continue;
3802 
3803       if (Idx >= (int)SrcNumElts) {
3804         Input = 1;
3805         Idx -= SrcNumElts;
3806       }
3807 
3808       // If all the indices come from the same MaskNumElts-sized portion of
3809       // the sources, we can use extract. Also make sure the extract wouldn't
3810       // read past the end of the source.
3811       int NewStartIdx = alignDown(Idx, MaskNumElts);
3812       if (NewStartIdx + MaskNumElts > SrcNumElts ||
3813           (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
3814         CanExtract = false;
3815       // Make sure we always update StartIdx as we use it to track if all
3816       // elements are undef.
3817       StartIdx[Input] = NewStartIdx;
3818     }
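    // For example (illustrative), with SrcNumElts = 8, MaskNumElts = 4 and
    // Mask = <4,5,6,7>, every index lies in the second 4-element chunk of
    // Src1, so StartIdx[0] = 4 and a single EXTRACT_SUBVECTOR suffices.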
3819 
3820     if (StartIdx[0] < 0 && StartIdx[1] < 0) {
3821       setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3822       return;
3823     }
3824     if (CanExtract) {
3825       // Extract appropriate subvector and generate a vector shuffle
3826       for (unsigned Input = 0; Input < 2; ++Input) {
3827         SDValue &Src = Input == 0 ? Src1 : Src2;
3828         if (StartIdx[Input] < 0)
3829           Src = DAG.getUNDEF(VT);
3830         else {
3831           Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
3832                             DAG.getVectorIdxConstant(StartIdx[Input], DL));
3833         }
3834       }
3835 
3836       // Calculate new mask.
3837       SmallVector<int, 8> MappedOps(Mask);
3838       for (int &Idx : MappedOps) {
3839         if (Idx >= (int)SrcNumElts)
3840           Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3841         else if (Idx >= 0)
3842           Idx -= StartIdx[0];
3843       }
3844 
3845       setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
3846       return;
3847     }
3848   }
3849 
3850   // We can't use either concat vectors or extract subvectors, so fall back
3851   // to replacing the shuffle with per-element extracts and a build vector.
3853   EVT EltVT = VT.getVectorElementType();
3854   SmallVector<SDValue,8> Ops;
3855   for (int Idx : Mask) {
3856     SDValue Res;
3857 
3858     if (Idx < 0) {
3859       Res = DAG.getUNDEF(EltVT);
3860     } else {
3861       SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3862       if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
3863 
3864       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src,
3865                         DAG.getVectorIdxConstant(Idx, DL));
3866     }
3867 
3868     Ops.push_back(Res);
3869   }
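  // For example (illustrative), with SrcNumElts = 8 and Mask = <0,5,-1,2>,
  // indices 0 and 5 fall in different 4-element chunks of Src1, so the
  // extract-subvector path above fails; this loop instead emits individual
  // EXTRACT_VECTOR_ELTs plus one undef, reassembled by the BUILD_VECTOR below.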
3870 
3871   setValue(&I, DAG.getBuildVector(VT, DL, Ops));
3872 }
3873 
3874 void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
3875   ArrayRef<unsigned> Indices = I.getIndices();
3876   const Value *Op0 = I.getOperand(0);
3877   const Value *Op1 = I.getOperand(1);
3878   Type *AggTy = I.getType();
3879   Type *ValTy = Op1->getType();
3880   bool IntoUndef = isa<UndefValue>(Op0);
3881   bool FromUndef = isa<UndefValue>(Op1);
3882 
3883   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
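  // For example (illustrative), for an aggregate of type {i32, {float, i64}}
  // and Indices = {1, 1}, the flattened leaf order is (i32, float, i64), so
  // LinearIndex is 2.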
3884 
3885   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3886   SmallVector<EVT, 4> AggValueVTs;
3887   ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
3888   SmallVector<EVT, 4> ValValueVTs;
3889   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3890 
3891   unsigned NumAggValues = AggValueVTs.size();
3892   unsigned NumValValues = ValValueVTs.size();
3893   SmallVector<SDValue, 4> Values(NumAggValues);
3894 
3895   // Ignore an insertvalue that produces an empty object
3896   if (!NumAggValues) {
3897     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3898     return;
3899   }
3900 
3901   SDValue Agg = getValue(Op0);
3902   unsigned i = 0;
3903   // Copy the beginning value(s) from the original aggregate.
3904   for (; i != LinearIndex; ++i)
3905     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3906                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3907   // Copy values from the inserted value(s).
3908   if (NumValValues) {
3909     SDValue Val = getValue(Op1);
3910     for (; i != LinearIndex + NumValValues; ++i)
3911       Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3912                   SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
3913   }
3914   // Copy remaining value(s) from the original aggregate.
3915   for (; i != NumAggValues; ++i)
3916     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3917                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3918 
3919   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3920                            DAG.getVTList(AggValueVTs), Values));
3921 }
3922 
3923 void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
3924   ArrayRef<unsigned> Indices = I.getIndices();
3925   const Value *Op0 = I.getOperand(0);
3926   Type *AggTy = Op0->getType();
3927   Type *ValTy = I.getType();
3928   bool OutOfUndef = isa<UndefValue>(Op0);
3929 
3930   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3931 
3932   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3933   SmallVector<EVT, 4> ValValueVTs;
3934   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3935 
3936   unsigned NumValValues = ValValueVTs.size();
3937 
3938   // Ignore an extractvalue that produces an empty object
3939   if (!NumValValues) {
3940     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3941     return;
3942   }
3943 
3944   SmallVector<SDValue, 4> Values(NumValValues);
3945 
3946   SDValue Agg = getValue(Op0);
3947   // Copy out the selected value(s).
3948   for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
3949     Values[i - LinearIndex] =
3950       OutOfUndef ?
3951         DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
3952         SDValue(Agg.getNode(), Agg.getResNo() + i);
3953 
3954   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3955                            DAG.getVTList(ValValueVTs), Values));
3956 }
3957 
3958 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
3959   Value *Op0 = I.getOperand(0);
3960   // Note that the pointer operand may be a vector of pointers. Take the scalar
3961   // element which holds a pointer.
3962   unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
3963   SDValue N = getValue(Op0);
3964   SDLoc dl = getCurSDLoc();
3965   auto &TLI = DAG.getTargetLoweringInfo();
3966 
3967   // Normalize Vector GEP - all scalar operands should be converted to the
3968   // splat vector.
3969   bool IsVectorGEP = I.getType()->isVectorTy();
3970   ElementCount VectorElementCount =
3971       IsVectorGEP ? cast<VectorType>(I.getType())->getElementCount()
3972                   : ElementCount::getFixed(0);
3973 
3974   if (IsVectorGEP && !N.getValueType().isVector()) {
3975     LLVMContext &Context = *DAG.getContext();
3976     EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorElementCount);
3977     N = DAG.getSplat(VT, dl, N);
3978   }
3979 
3980   for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
3981        GTI != E; ++GTI) {
3982     const Value *Idx = GTI.getOperand();
3983     if (StructType *StTy = GTI.getStructTypeOrNull()) {
3984       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
3985       if (Field) {
3986         // N = N + Offset
3987         uint64_t Offset =
3988             DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(Field);
3989 
3990         // In an inbounds GEP with an offset that is nonnegative even when
3991         // interpreted as signed, assume there is no unsigned overflow.
3992         SDNodeFlags Flags;
3993         if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
3994           Flags.setNoUnsignedWrap(true);
3995 
3996         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
3997                         DAG.getConstant(Offset, dl, N.getValueType()), Flags);
3998       }
3999     } else {
4000       // IdxSize is the width of the arithmetic according to IR semantics.
4001       // In SelectionDAG, we may prefer to do arithmetic in a wider bitwidth
4002       // (and fix up the result later).
4003       unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
4004       MVT IdxTy = MVT::getIntegerVT(IdxSize);
4005       TypeSize ElementSize =
4006           DAG.getDataLayout().getTypeAllocSize(GTI.getIndexedType());
4007       // We intentionally mask away the high bits here; ElementSize may not
4008       // fit in IdxTy.
4009       APInt ElementMul(IdxSize, ElementSize.getKnownMinValue());
4010       bool ElementScalable = ElementSize.isScalable();
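      // For example (illustrative), a GEP over i64 elements with a 64-bit
      // index width gives ElementMul = 8, so an index of 3 contributes
      // 3 * 8 = 24 bytes to the address computed below.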
4011 
4012       // If this is a scalar constant or a splat vector of constants,
4013       // handle it quickly.
4014       const auto *C = dyn_cast<Constant>(Idx);
4015       if (C && isa<VectorType>(C->getType()))
4016         C = C->getSplatValue();
4017 
4018       const auto *CI = dyn_cast_or_null<ConstantInt>(C);
4019       if (CI && CI->isZero())
4020         continue;
4021       if (CI && !ElementScalable) {
4022         APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4023         LLVMContext &Context = *DAG.getContext();
4024         SDValue OffsVal;
4025         if (IsVectorGEP)
4026           OffsVal = DAG.getConstant(
4027               Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorElementCount));
4028         else
4029           OffsVal = DAG.getConstant(Offs, dl, IdxTy);
4030 
4031         // In an inbounds GEP with an offset that is nonnegative even when
4032         // interpreted as signed, assume there is no unsigned overflow.
4033         SDNodeFlags Flags;
4034         if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
4035           Flags.setNoUnsignedWrap(true);
4036 
4037         OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType());
4038 
4039         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
4040         continue;
4041       }
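      // For example (illustrative), "getelementptr inbounds i32, ptr %p,
      // i64 5" folds in the block above to N = add nuw N, 20.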
4042 
4043       // N = N + Idx * ElementMul;
4044       SDValue IdxN = getValue(Idx);
4045 
4046       if (!IdxN.getValueType().isVector() && IsVectorGEP) {
4047         EVT VT = EVT::getVectorVT(*DAG.getContext(), IdxN.getValueType(),
4048                                   VectorElementCount);
4049         IdxN = DAG.getSplat(VT, dl, IdxN);
4050       }
4051 
4052       // If the index is smaller or larger than intptr_t, truncate or extend
4053       // it.
4054       IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
4055 
4056       if (ElementScalable) {
4057         EVT VScaleTy = N.getValueType().getScalarType();
4058         SDValue VScale = DAG.getNode(
4059             ISD::VSCALE, dl, VScaleTy,
4060             DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4061         if (IsVectorGEP)
4062           VScale = DAG.getSplatVector(N.getValueType(), dl, VScale);
4063         IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, VScale);
4064       } else {
4065         // If this is a multiply by a power of two, turn it into a shl
4066         // immediately.  This is a very common case.
4067         if (ElementMul != 1) {
4068           if (ElementMul.isPowerOf2()) {
4069             unsigned Amt = ElementMul.logBase2();
4070             IdxN = DAG.getNode(ISD::SHL, dl,
4071                                N.getValueType(), IdxN,
4072                                DAG.getConstant(Amt, dl, IdxN.getValueType()));
4073           } else {
4074             SDValue Scale = DAG.getConstant(ElementMul.getZExtValue(), dl,
4075                                             IdxN.getValueType());
4076             IdxN = DAG.getNode(ISD::MUL, dl,
4077                                N.getValueType(), IdxN, Scale);
4078           }
4079         }
4080       }
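      // For example (illustrative), ElementMul = 8 above becomes
      // IdxN << 3 rather than IdxN * 8.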
4081 
4082       N = DAG.getNode(ISD::ADD, dl,
4083                       N.getValueType(), N, IdxN);
4084     }
4085   }
4086 
4087   MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS);
4088   MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS);
4089   if (IsVectorGEP) {
4090     PtrTy = MVT::getVectorVT(PtrTy, VectorElementCount);
4091     PtrMemTy = MVT::getVectorVT(PtrMemTy, VectorElementCount);
4092   }
4093 
4094   if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds())
4095     N = DAG.getPtrExtendInReg(N, dl, PtrMemTy);
4096 
4097   setValue(&I, N);
4098 }
4099 
4100 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
4101   // If this is a fixed sized alloca in the entry block of the function,
4102   // allocate it statically on the stack.
4103   if (FuncInfo.StaticAllocaMap.count(&I))
4104     return;   // getValue will auto-populate this.
4105 
4106   SDLoc dl = getCurSDLoc();
4107   Type *Ty = I.getAllocatedType();
4108   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4109   auto &DL = DAG.getDataLayout();
4110   TypeSize TySize = DL.getTypeAllocSize(Ty);
4111   MaybeAlign Alignment = std::max(DL.getPrefTypeAlign(Ty), I.getAlign());
4112 
4113   SDValue AllocSize = getValue(I.getArraySize());
4114 
4115   EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout(), I.getAddressSpace());
4116   if (AllocSize.getValueType() != IntPtr)
4117     AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4118 
4119   if (TySize.isScalable())
4120     AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
4121                             DAG.getVScale(dl, IntPtr,
4122                                           APInt(IntPtr.getScalarSizeInBits(),
4123                                                 TySize.getKnownMinValue())));
4124   else
4125     AllocSize =
4126         DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
4127                     DAG.getConstant(TySize.getFixedValue(), dl, IntPtr));
4128 
4129   // Handle alignment.  If the requested alignment is less than or equal to
4130   // the stack alignment, ignore it.  If it is greater than the stack
4131   // alignment, we note it in the DYNAMIC_STACKALLOC node.
4132   Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign();
4133   if (*Alignment <= StackAlign)
4134     Alignment = std::nullopt;
4135 
4136   const uint64_t StackAlignMask = StackAlign.value() - 1U;
4137   // Round the size of the allocation up to the stack alignment size
4138   // by adding SA-1 to the size. This doesn't overflow because we're computing
4139   // an address inside an alloca.
4140   SDNodeFlags Flags;
4141   Flags.setNoUnsignedWrap(true);
4142   AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
4143                           DAG.getConstant(StackAlignMask, dl, IntPtr), Flags);
4144 
4145   // Mask out the low bits for alignment purposes.
4146   AllocSize = DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
4147                           DAG.getConstant(~StackAlignMask, dl, IntPtr));
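  // For example (illustrative), with a 16-byte stack alignment an AllocSize
  // of 40 becomes (40 + 15) & ~15 = 48.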
4148 
4149   SDValue Ops[] = {
4150       getRoot(), AllocSize,
4151       DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4152   SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
4153   SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
4154   setValue(&I, DSA);
4155   DAG.setRoot(DSA.getValue(1));
4156 
4157   assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
4158 }
4159 
4160 static const MDNode *getRangeMetadata(const Instruction &I) {
4161   // If !noundef is not present, then !range violation results in a poison
4162   // value rather than immediate undefined behavior. In theory, transferring
4163   // these annotations to SDAG is fine, but in practice there are key SDAG
4164   // transforms that are known not to be poison-safe, such as folding logical
4165   // and/or to bitwise and/or. For now, only transfer !range if !noundef is
4166   // also present.
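  // For example (illustrative), a load such as
  //   %v = load i8, ptr %p, !range !0, !noundef !1  ; !0 = !{i8 0, i8 2}
  // keeps its range metadata, while the same load without !noundef does not.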
4167   if (!I.hasMetadata(LLVMContext::MD_noundef))
4168     return nullptr;
4169   return I.getMetadata(LLVMContext::MD_range);
4170 }
4171 
4172 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
4173   if (I.isAtomic())
4174     return visitAtomicLoad(I);
4175 
4176   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4177   const Value *SV = I.getOperand(0);
4178   if (TLI.supportSwiftError()) {
4179     // Swifterror values can come from either a function parameter with
4180     // swifterror attribute or an alloca with swifterror attribute.
4181     if (const Argument *Arg = dyn_cast<Argument>(SV)) {
4182       if (Arg->hasSwiftErrorAttr())
4183         return visitLoadFromSwiftError(I);
4184     }
4185 
4186     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
4187       if (Alloca->isSwiftError())
4188         return visitLoadFromSwiftError(I);
4189     }
4190   }
4191 
4192   SDValue Ptr = getValue(SV);
4193 
4194   Type *Ty = I.getType();
4195   SmallVector<EVT, 4> ValueVTs, MemVTs;
4196   SmallVector<TypeSize, 4> Offsets;
4197   ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets, 0);
4198   unsigned NumValues = ValueVTs.size();
4199   if (NumValues == 0)
4200     return;
4201 
4202   Align Alignment = I.getAlign();
4203   AAMDNodes AAInfo = I.getAAMetadata();
4204   const MDNode *Ranges = getRangeMetadata(I);
4205   bool isVolatile = I.isVolatile();
4206   MachineMemOperand::Flags MMOFlags =
4207       TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
4208 
4209   SDValue Root;
4210   bool ConstantMemory = false;
4211   if (isVolatile)
4212     // Serialize volatile loads with other side effects.
4213     Root = getRoot();
4214   else if (NumValues > MaxParallelChains)
4215     Root = getMemoryRoot();
4216   else if (AA &&
4217            AA->pointsToConstantMemory(MemoryLocation(
4218                SV,
4219                LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4220                AAInfo))) {
4221     // Do not serialize (non-volatile) loads of constant memory with anything.
4222     Root = DAG.getEntryNode();
4223     ConstantMemory = true;
4224     MMOFlags |= MachineMemOperand::MOInvariant;
4225   } else {
4226     // Do not serialize non-volatile loads against each other.
4227     Root = DAG.getRoot();
4228   }
4229 
4230   SDLoc dl = getCurSDLoc();
4231 
4232   if (isVolatile)
4233     Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
4234 
4235   SmallVector<SDValue, 4> Values(NumValues);
4236   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4237 
4238   unsigned ChainI = 0;
4239   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4240     // Serializing loads here may result in excessive register pressure, and
4241     // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
4242     // could recover a bit by hoisting nodes upward in the chain by recognizing
4243     // they are side-effect free or do not alias. The optimizer should really
4244     // avoid this case by converting large object/array copies to llvm.memcpy
4245     // (MaxParallelChains should always remain as a failsafe).
4246     if (ChainI == MaxParallelChains) {
4247       assert(PendingLoads.empty() && "PendingLoads must be serialized first");
4248       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4249                                   ArrayRef(Chains.data(), ChainI));
4250       Root = Chain;
4251       ChainI = 0;
4252     }
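    // For example (illustrative, assuming MaxParallelChains is 64), a load
    // that expands to 100 values emits a TokenFactor over the first 64 chains
    // here and threads it through as the new Root before continuing.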
4253 
4254     // TODO: MachinePointerInfo only supports a fixed length offset.
4255     MachinePointerInfo PtrInfo =
4256         !Offsets[i].isScalable() || Offsets[i].isZero()
4257             ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue())
4258             : MachinePointerInfo();
4259 
4260     SDValue A = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4261     SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A, PtrInfo, Alignment,
4262                             MMOFlags, AAInfo, Ranges);
4263     Chains[ChainI] = L.getValue(1);
4264 
4265     if (MemVTs[i] != ValueVTs[i])
4266       L = DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]);
4267 
4268     Values[i] = L;
4269   }
4270 
4271   if (!ConstantMemory) {
4272     SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4273                                 ArrayRef(Chains.data(), ChainI));
4274     if (isVolatile)
4275       DAG.setRoot(Chain);
4276     else
4277       PendingLoads.push_back(Chain);
4278   }
4279 
4280   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
4281                            DAG.getVTList(ValueVTs), Values));
4282 }
4283 
4284 void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
4285   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4286          "call visitStoreToSwiftError when backend supports swifterror");
4287 
4288   SmallVector<EVT, 4> ValueVTs;
4289   SmallVector<uint64_t, 4> Offsets;
4290   const Value *SrcV = I.getOperand(0);
4291   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4292                   SrcV->getType(), ValueVTs, &Offsets, 0);
4293   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4294          "expect a single EVT for swifterror");
4295 
4296   SDValue Src = getValue(SrcV);
4297   // Create a virtual register, then update the virtual register.
4298   Register VReg =
4299       SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand());
4300   // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
4301   // Chain can be getRoot or getControlRoot.
4302   SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
4303                                       SDValue(Src.getNode(), Src.getResNo()));
4304   DAG.setRoot(CopyNode);
4305 }
4306 
4307 void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
4308   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4309          "call visitLoadFromSwiftError when backend supports swifterror");
4310 
4311   assert(!I.isVolatile() &&
4312          !I.hasMetadata(LLVMContext::MD_nontemporal) &&
4313          !I.hasMetadata(LLVMContext::MD_invariant_load) &&
4314          "Support volatile, non temporal, invariant for load_from_swift_error");
4315 
4316   const Value *SV = I.getOperand(0);
4317   Type *Ty = I.getType();
4318   assert(
4319       (!AA ||
4320        !AA->pointsToConstantMemory(MemoryLocation(
4321            SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4322            I.getAAMetadata()))) &&
4323       "load_from_swift_error should not be constant memory");
4324 
4325   SmallVector<EVT, 4> ValueVTs;
4326   SmallVector<uint64_t, 4> Offsets;
4327   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
4328                   ValueVTs, &Offsets, 0);
4329   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4330          "expect a single EVT for swifterror");
4331 
4332   // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
4333   SDValue L = DAG.getCopyFromReg(
4334       getRoot(), getCurSDLoc(),
4335       SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]);
4336 
4337   setValue(&I, L);
4338 }
4339 
4340 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
4341   if (I.isAtomic())
4342     return visitAtomicStore(I);
4343 
4344   const Value *SrcV = I.getOperand(0);
4345   const Value *PtrV = I.getOperand(1);
4346 
4347   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4348   if (TLI.supportSwiftError()) {
4349     // Swifterror values can come from either a function parameter with
4350     // swifterror attribute or an alloca with swifterror attribute.
4351     if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4352       if (Arg->hasSwiftErrorAttr())
4353         return visitStoreToSwiftError(I);
4354     }
4355 
4356     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4357       if (Alloca->isSwiftError())
4358         return visitStoreToSwiftError(I);
4359     }
4360   }
4361 
4362   SmallVector<EVT, 4> ValueVTs, MemVTs;
4363   SmallVector<TypeSize, 4> Offsets;
4364   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4365                   SrcV->getType(), ValueVTs, &MemVTs, &Offsets, 0);
4366   unsigned NumValues = ValueVTs.size();
4367   if (NumValues == 0)
4368     return;
4369 
4370   // Get the lowered operands. Note that we do this after
4371   // checking if NumValues is zero, because with zero values
4372   // the operands won't have values in the map.
4373   SDValue Src = getValue(SrcV);
4374   SDValue Ptr = getValue(PtrV);
4375 
4376   SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot();
4377   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4378   SDLoc dl = getCurSDLoc();
4379   Align Alignment = I.getAlign();
4380   AAMDNodes AAInfo = I.getAAMetadata();
4381 
4382   auto MMOFlags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4383 
4384   unsigned ChainI = 0;
4385   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4386     // See visitLoad comments.
4387     if (ChainI == MaxParallelChains) {
4388       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4389                                   ArrayRef(Chains.data(), ChainI));
4390       Root = Chain;
4391       ChainI = 0;
4392     }
4393 
4394     // TODO: MachinePointerInfo only supports a fixed length offset.
4395     MachinePointerInfo PtrInfo =
4396         !Offsets[i].isScalable() || Offsets[i].isZero()
4397             ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue())
4398             : MachinePointerInfo();
4399 
4400     SDValue Add = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4401     SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i);
4402     if (MemVTs[i] != ValueVTs[i])
4403       Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4404     SDValue St =
4405         DAG.getStore(Root, dl, Val, Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4406     Chains[ChainI] = St;
4407   }
4408 
4409   SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4410                                   ArrayRef(Chains.data(), ChainI));
4411   setValue(&I, StoreNode);
4412   DAG.setRoot(StoreNode);
4413 }
4414 
4415 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
4416                                            bool IsCompressing) {
4417   SDLoc sdl = getCurSDLoc();
4418 
4419   auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4420                                MaybeAlign &Alignment) {
4421     // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
4422     Src0 = I.getArgOperand(0);
4423     Ptr = I.getArgOperand(1);
4424     Alignment = cast<ConstantInt>(I.getArgOperand(2))->getMaybeAlignValue();
4425     Mask = I.getArgOperand(3);
4426   };
4427   auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4428                                     MaybeAlign &Alignment) {
4429     // llvm.masked.compressstore.*(Src0, Ptr, Mask)
4430     Src0 = I.getArgOperand(0);
4431     Ptr = I.getArgOperand(1);
4432     Mask = I.getArgOperand(2);
4433     Alignment = std::nullopt;
4434   };
4435 
4436   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4437   MaybeAlign Alignment;
4438   if (IsCompressing)
4439     getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4440   else
4441     getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4442 
4443   SDValue Ptr = getValue(PtrOperand);
4444   SDValue Src0 = getValue(Src0Operand);
4445   SDValue Mask = getValue(MaskOperand);
4446   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4447 
4448   EVT VT = Src0.getValueType();
4449   if (!Alignment)
4450     Alignment = DAG.getEVTAlign(VT);
4451 
4452   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4453       MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
4454       MemoryLocation::UnknownSize, *Alignment, I.getAAMetadata());
4455   SDValue StoreNode =
4456       DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
4457                          ISD::UNINDEXED, false /* Truncating */, IsCompressing);
4458   DAG.setRoot(StoreNode);
4459   setValue(&I, StoreNode);
4460 }
4461 
4462 // Get a uniform base for the Gather/Scatter intrinsic.
4463 // The first argument of the Gather/Scatter intrinsic is a vector of pointers.
4464 // We try to represent it as a base pointer + vector of indices.
4465 // Usually, the vector of pointers comes from a 'getelementptr' instruction.
4466 // The first operand of the GEP may be a single pointer or a vector of pointers.
4467 // Example:
4468 //   %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
4469 //  or
4470 //   %gep.ptr = getelementptr i32, i32* %ptr,        <8 x i32> %ind
4471 // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
4472 //
4473 // When the first GEP operand is a single pointer, it is the uniform base we
4474 // are looking for. If the first operand of the GEP is a splat vector, we
4475 // extract the splat value and use it as the uniform base.
4476 // In all other cases the function returns 'false'.
4477 static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index,
4478                            ISD::MemIndexType &IndexType, SDValue &Scale,
4479                            SelectionDAGBuilder *SDB, const BasicBlock *CurBB,
4480                            uint64_t ElemSize) {
4481   SelectionDAG& DAG = SDB->DAG;
4482   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4483   const DataLayout &DL = DAG.getDataLayout();
4484 
4485   assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
4486 
4487   // Handle splat constant pointer.
4488   if (auto *C = dyn_cast<Constant>(Ptr)) {
4489     C = C->getSplatValue();
4490     if (!C)
4491       return false;
4492 
4493     Base = SDB->getValue(C);
4494 
4495     ElementCount NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
4496     EVT VT = EVT::getVectorVT(*DAG.getContext(), TLI.getPointerTy(DL), NumElts);
4497     Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT);
4498     IndexType = ISD::SIGNED_SCALED;
4499     Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4500     return true;
4501   }
4502 
4503   const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4504   if (!GEP || GEP->getParent() != CurBB)
4505     return false;
4506 
4507   if (GEP->getNumOperands() != 2)
4508     return false;
4509 
4510   const Value *BasePtr = GEP->getPointerOperand();
4511   const Value *IndexVal = GEP->getOperand(GEP->getNumOperands() - 1);
4512 
4513   // Make sure the base is scalar and the index is a vector.
4514   if (BasePtr->getType()->isVectorTy() || !IndexVal->getType()->isVectorTy())
4515     return false;
4516 
4517   TypeSize ScaleVal = DL.getTypeAllocSize(GEP->getResultElementType());
4518   if (ScaleVal.isScalable())
4519     return false;
4520 
4521   // Target may not support the required addressing mode.
4522   if (ScaleVal != 1 &&
4523       !TLI.isLegalScaleForGatherScatter(ScaleVal.getFixedValue(), ElemSize))
4524     return false;
4525 
4526   Base = SDB->getValue(BasePtr);
4527   Index = SDB->getValue(IndexVal);
4528   IndexType = ISD::SIGNED_SCALED;
4529 
4530   Scale =
4531       DAG.getTargetConstant(ScaleVal, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4532   return true;
4533 }
4534 
4535 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
4536   SDLoc sdl = getCurSDLoc();
4537 
4538   // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
4539   const Value *Ptr = I.getArgOperand(1);
4540   SDValue Src0 = getValue(I.getArgOperand(0));
4541   SDValue Mask = getValue(I.getArgOperand(3));
4542   EVT VT = Src0.getValueType();
4543   Align Alignment = cast<ConstantInt>(I.getArgOperand(2))
4544                         ->getMaybeAlignValue()
4545                         .value_or(DAG.getEVTAlign(VT.getScalarType()));
4546   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4547 
4548   SDValue Base;
4549   SDValue Index;
4550   ISD::MemIndexType IndexType;
4551   SDValue Scale;
4552   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4553                                     I.getParent(), VT.getScalarStoreSize());
4554 
4555   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4556   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4557       MachinePointerInfo(AS), MachineMemOperand::MOStore,
4558       // TODO: Make MachineMemOperands aware of scalable
4559       // vectors.
4560       MemoryLocation::UnknownSize, Alignment, I.getAAMetadata());
4561   if (!UniformBase) {
4562     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4563     Index = getValue(Ptr);
4564     IndexType = ISD::SIGNED_SCALED;
4565     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4566   }
4567 
4568   EVT IdxVT = Index.getValueType();
4569   EVT EltTy = IdxVT.getVectorElementType();
4570   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
4571     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
4572     Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
4573   }
4574 
4575   SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
4576   SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
4577                                          Ops, MMO, IndexType, false);
4578   DAG.setRoot(Scatter);
4579   setValue(&I, Scatter);
4580 }
4581 
4582 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
4583   SDLoc sdl = getCurSDLoc();
4584 
4585   auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4586                               MaybeAlign &Alignment) {
4587     // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
4588     Ptr = I.getArgOperand(0);
4589     Alignment = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
4590     Mask = I.getArgOperand(2);
4591     Src0 = I.getArgOperand(3);
4592   };
4593   auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4594                                  MaybeAlign &Alignment) {
4595     // @llvm.masked.expandload.*(Ptr, Mask, Src0)
4596     Ptr = I.getArgOperand(0);
4597     Alignment = std::nullopt;
4598     Mask = I.getArgOperand(1);
4599     Src0 = I.getArgOperand(2);
4600   };
4601 
4602   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4603   MaybeAlign Alignment;
4604   if (IsExpanding)
4605     getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4606   else
4607     getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4608 
4609   SDValue Ptr = getValue(PtrOperand);
4610   SDValue Src0 = getValue(Src0Operand);
4611   SDValue Mask = getValue(MaskOperand);
4612   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4613 
4614   EVT VT = Src0.getValueType();
4615   if (!Alignment)
4616     Alignment = DAG.getEVTAlign(VT);
4617 
4618   AAMDNodes AAInfo = I.getAAMetadata();
4619   const MDNode *Ranges = getRangeMetadata(I);
4620 
4621   // Do not serialize masked loads of constant memory with anything.
4622   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
4623   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
4624 
4625   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
4626 
4627   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4628       MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
4629       MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
4630 
4631   SDValue Load =
4632       DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
4633                         ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding);
4634   if (AddToChain)
4635     PendingLoads.push_back(Load.getValue(1));
4636   setValue(&I, Load);
4637 }
4638 
4639 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
4640   SDLoc sdl = getCurSDLoc();
4641 
4642   // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
4643   const Value *Ptr = I.getArgOperand(0);
4644   SDValue Src0 = getValue(I.getArgOperand(3));
4645   SDValue Mask = getValue(I.getArgOperand(2));
4646 
4647   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4648   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4649   Align Alignment = cast<ConstantInt>(I.getArgOperand(1))
4650                         ->getMaybeAlignValue()
4651                         .value_or(DAG.getEVTAlign(VT.getScalarType()));
4652 
4653   const MDNode *Ranges = getRangeMetadata(I);
4654 
4655   SDValue Root = DAG.getRoot();
4656   SDValue Base;
4657   SDValue Index;
4658   ISD::MemIndexType IndexType;
4659   SDValue Scale;
4660   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4661                                     I.getParent(), VT.getScalarStoreSize());
4662   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4663   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4664       MachinePointerInfo(AS), MachineMemOperand::MOLoad,
4665       // TODO: Make MachineMemOperands aware of scalable
4666       // vectors.
4667       MemoryLocation::UnknownSize, Alignment, I.getAAMetadata(), Ranges);
4668 
4669   if (!UniformBase) {
4670     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4671     Index = getValue(Ptr);
4672     IndexType = ISD::SIGNED_SCALED;
4673     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4674   }
4675 
4676   EVT IdxVT = Index.getValueType();
4677   EVT EltTy = IdxVT.getVectorElementType();
4678   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
4679     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
4680     Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
4681   }
4682 
4683   SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
4684   SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
4685                                        Ops, MMO, IndexType, ISD::NON_EXTLOAD);
4686 
4687   PendingLoads.push_back(Gather.getValue(1));
4688   setValue(&I, Gather);
4689 }
4690 
4691 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
4692   SDLoc dl = getCurSDLoc();
4693   AtomicOrdering SuccessOrdering = I.getSuccessOrdering();
4694   AtomicOrdering FailureOrdering = I.getFailureOrdering();
4695   SyncScope::ID SSID = I.getSyncScopeID();
4696 
4697   SDValue InChain = getRoot();
4698 
4699   MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
4700   SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
4701 
4702   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4703   auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4704 
4705   MachineFunction &MF = DAG.getMachineFunction();
4706   MachineMemOperand *MMO = MF.getMachineMemOperand(
4707       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4708       DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, SuccessOrdering,
4709       FailureOrdering);
4710 
4711   SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
4712                                    dl, MemVT, VTs, InChain,
4713                                    getValue(I.getPointerOperand()),
4714                                    getValue(I.getCompareOperand()),
4715                                    getValue(I.getNewValOperand()), MMO);
4716 
4717   SDValue OutChain = L.getValue(2);
4718 
4719   setValue(&I, L);
4720   DAG.setRoot(OutChain);
4721 }
4722 
4723 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
4724   SDLoc dl = getCurSDLoc();
4725   ISD::NodeType NT;
4726   switch (I.getOperation()) {
4727   default: llvm_unreachable("Unknown atomicrmw operation");
4728   case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
4729   case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
4730   case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
4731   case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
4732   case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
4733   case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
4734   case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
4735   case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
4736   case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
4737   case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
4738   case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
4739   case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
4740   case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
4741   case AtomicRMWInst::FMax: NT = ISD::ATOMIC_LOAD_FMAX; break;
4742   case AtomicRMWInst::FMin: NT = ISD::ATOMIC_LOAD_FMIN; break;
4743   case AtomicRMWInst::UIncWrap:
4744     NT = ISD::ATOMIC_LOAD_UINC_WRAP;
4745     break;
4746   case AtomicRMWInst::UDecWrap:
4747     NT = ISD::ATOMIC_LOAD_UDEC_WRAP;
4748     break;
4749   }
4750   AtomicOrdering Ordering = I.getOrdering();
4751   SyncScope::ID SSID = I.getSyncScopeID();
4752 
4753   SDValue InChain = getRoot();
4754 
4755   auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
4756   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4757   auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4758 
4759   MachineFunction &MF = DAG.getMachineFunction();
4760   MachineMemOperand *MMO = MF.getMachineMemOperand(
4761       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4762       DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, Ordering);
4763 
4764   SDValue L =
4765     DAG.getAtomic(NT, dl, MemVT, InChain,
4766                   getValue(I.getPointerOperand()), getValue(I.getValOperand()),
4767                   MMO);
4768 
4769   SDValue OutChain = L.getValue(1);
4770 
4771   setValue(&I, L);
4772   DAG.setRoot(OutChain);
4773 }
4774 
4775 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
4776   SDLoc dl = getCurSDLoc();
4777   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4778   SDValue Ops[3];
4779   Ops[0] = getRoot();
4780   Ops[1] = DAG.getTargetConstant((unsigned)I.getOrdering(), dl,
4781                                  TLI.getFenceOperandTy(DAG.getDataLayout()));
4782   Ops[2] = DAG.getTargetConstant(I.getSyncScopeID(), dl,
4783                                  TLI.getFenceOperandTy(DAG.getDataLayout()));
4784   SDValue N = DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops);
4785   setValue(&I, N);
4786   DAG.setRoot(N);
4787 }
4788 
4789 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
4790   SDLoc dl = getCurSDLoc();
4791   AtomicOrdering Order = I.getOrdering();
4792   SyncScope::ID SSID = I.getSyncScopeID();
4793 
4794   SDValue InChain = getRoot();
4795 
4796   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4797   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4798   EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
4799 
4800   if (!TLI.supportsUnalignedAtomics() &&
4801       I.getAlign().value() < MemVT.getSizeInBits() / 8)
4802     report_fatal_error("Cannot generate unaligned atomic load");
4803 
4804   auto Flags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
4805 
4806   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4807       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4808       I.getAlign(), AAMDNodes(), nullptr, SSID, Order);
4809 
4810   InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
4811 
4812   SDValue Ptr = getValue(I.getPointerOperand());
4813 
4814   if (TLI.lowerAtomicLoadAsLoadSDNode(I)) {
4815     // TODO: Once this is better exercised by tests, it should be merged with
4816     // the normal path for loads to prevent future divergence.
4817     SDValue L = DAG.getLoad(MemVT, dl, InChain, Ptr, MMO);
4818     if (MemVT != VT)
4819       L = DAG.getPtrExtOrTrunc(L, dl, VT);
4820 
4821     setValue(&I, L);
4822     SDValue OutChain = L.getValue(1);
4823     if (!I.isUnordered())
4824       DAG.setRoot(OutChain);
4825     else
4826       PendingLoads.push_back(OutChain);
4827     return;
4828   }
4829 
4830   SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, MemVT, InChain,
4831                             Ptr, MMO);
4832 
4833   SDValue OutChain = L.getValue(1);
4834   if (MemVT != VT)
4835     L = DAG.getPtrExtOrTrunc(L, dl, VT);
4836 
4837   setValue(&I, L);
4838   DAG.setRoot(OutChain);
4839 }
4840 
4841 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
4842   SDLoc dl = getCurSDLoc();
4843 
4844   AtomicOrdering Ordering = I.getOrdering();
4845   SyncScope::ID SSID = I.getSyncScopeID();
4846 
4847   SDValue InChain = getRoot();
4848 
4849   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4850   EVT MemVT =
4851       TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
4852 
4853   if (!TLI.supportsUnalignedAtomics() &&
4854       I.getAlign().value() < MemVT.getSizeInBits() / 8)
4855     report_fatal_error("Cannot generate unaligned atomic store");
4856 
4857   auto Flags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4858 
4859   MachineFunction &MF = DAG.getMachineFunction();
4860   MachineMemOperand *MMO = MF.getMachineMemOperand(
4861       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4862       I.getAlign(), AAMDNodes(), nullptr, SSID, Ordering);
4863 
4864   SDValue Val = getValue(I.getValueOperand());
4865   if (Val.getValueType() != MemVT)
4866     Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT);
4867   SDValue Ptr = getValue(I.getPointerOperand());
4868 
4869   if (TLI.lowerAtomicStoreAsStoreSDNode(I)) {
4870     // TODO: Once this is better exercised by tests, it should be merged with
4871     // the normal path for stores to prevent future divergence.
4872     SDValue S = DAG.getStore(InChain, dl, Val, Ptr, MMO);
4873     setValue(&I, S);
4874     DAG.setRoot(S);
4875     return;
4876   }
4877   SDValue OutChain =
4878       DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain, Val, Ptr, MMO);
4879 
4880   setValue(&I, OutChain);
4881   DAG.setRoot(OutChain);
4882 }
4883 
4884 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
4885 /// node.
4886 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
4887                                                unsigned Intrinsic) {
4888   // Ignore the callsite's attributes. A specific call site may be marked with
4889   // readnone, but the lowering code will expect the chain based on the
4890   // definition.
4891   const Function *F = I.getCalledFunction();
4892   bool HasChain = !F->doesNotAccessMemory();
4893   bool OnlyLoad = HasChain && F->onlyReadsMemory();
4894 
4895   // Build the operand list.
4896   SmallVector<SDValue, 8> Ops;
4897   if (HasChain) {  // If this intrinsic has side-effects, chainify it.
4898     if (OnlyLoad) {
4899       // We don't need to serialize loads against other loads.
4900       Ops.push_back(DAG.getRoot());
4901     } else {
4902       Ops.push_back(getRoot());
4903     }
4904   }
4905 
4906   // Info is set by getTgtMemIntrinsic
4907   TargetLowering::IntrinsicInfo Info;
4908   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4909   bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
4910                                                DAG.getMachineFunction(),
4911                                                Intrinsic);
4912 
4913   // Add the intrinsic ID as an integer operand if the node will be a generic
4913   // INTRINSIC_* node, which expects the ID operand.
4914   if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
4915       Info.opc == ISD::INTRINSIC_W_CHAIN)
4916     Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
4917                                         TLI.getPointerTy(DAG.getDataLayout())));
4918 
4919   // Add all operands of the call to the operand list.
4920   for (unsigned i = 0, e = I.arg_size(); i != e; ++i) {
4921     const Value *Arg = I.getArgOperand(i);
4922     if (!I.paramHasAttr(i, Attribute::ImmArg)) {
4923       Ops.push_back(getValue(Arg));
4924       continue;
4925     }
4926 
4927     // Use TargetConstant instead of a regular constant for immarg.
4928     EVT VT = TLI.getValueType(DAG.getDataLayout(), Arg->getType(), true);
4929     if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
4930       assert(CI->getBitWidth() <= 64 &&
4931              "large intrinsic immediates not handled");
4932       Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT));
4933     } else {
4934       Ops.push_back(
4935           DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT));
4936     }
4937   }
4938 
4939   SmallVector<EVT, 4> ValueVTs;
4940   ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
4941 
4942   if (HasChain)
4943     ValueVTs.push_back(MVT::Other);
4944 
4945   SDVTList VTs = DAG.getVTList(ValueVTs);
4946 
4947   // Propagate fast-math-flags from IR to node(s).
4948   SDNodeFlags Flags;
4949   if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
4950     Flags.copyFMF(*FPMO);
4951   SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
4952 
4953   // Create the node.
4954   SDValue Result;
4955   // In some cases, custom collection of operands from CallInst I may be needed.
4956   TLI.CollectTargetIntrinsicOperands(I, Ops, DAG);
4957   if (IsTgtIntrinsic) {
4958     // This is a target intrinsic that touches memory
4959     //
4960     // TODO: We currently just fall back to address space 0 if getTgtMemIntrinsic
4961     //       didn't yield anything useful.
4962     MachinePointerInfo MPI;
4963     if (Info.ptrVal)
4964       MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
4965     else if (Info.fallbackAddressSpace)
4966       MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
4967     Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs, Ops,
4968                                      Info.memVT, MPI, Info.align, Info.flags,
4969                                      Info.size, I.getAAMetadata());
4970   } else if (!HasChain) {
4971     Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
4972   } else if (!I.getType()->isVoidTy()) {
4973     Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
4974   } else {
4975     Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
4976   }
4977 
4978   if (HasChain) {
4979     SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
4980     if (OnlyLoad)
4981       PendingLoads.push_back(Chain);
4982     else
4983       DAG.setRoot(Chain);
4984   }
4985 
4986   if (!I.getType()->isVoidTy()) {
4987     if (!isa<VectorType>(I.getType()))
4988       Result = lowerRangeToAssertZExt(DAG, I, Result);
4989 
4990     MaybeAlign Alignment = I.getRetAlign();
4991 
4992     // Insert `assertalign` node if there's an alignment.
4993     if (InsertAssertAlign && Alignment) {
4994       Result =
4995           DAG.getAssertAlign(getCurSDLoc(), Result, Alignment.valueOrOne());
4996     }
4997 
4998     setValue(&I, Result);
4999   }
5000 }
5001 
5002 /// GetSignificand - Get the significand and build it into a floating-point
5003 /// number in the range [1.0, 2.0), i.e. with an unbiased exponent of 0:
5004 ///
5005 ///   Op = (Op & 0x007fffff) | 0x3f800000;
5006 ///
5007 /// where Op is the i32 bit pattern of the floating-point value.
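///
/// For example (illustrative), Op = 0x40490fdb (3.14159274f) yields
/// 0x3fc90fdb (1.57079637f), i.e. the significand of pi scaled into
/// [1.0, 2.0).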
5008 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
5009   SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
5010                            DAG.getConstant(0x007fffff, dl, MVT::i32));
5011   SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
5012                            DAG.getConstant(0x3f800000, dl, MVT::i32));
5013   return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
5014 }
5015 
5016 /// GetExponent - Get the exponent:
5017 ///
5018 ///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
5019 ///
5020 /// where Op is the i32 bit pattern of the floating-point value.
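///
/// For example (illustrative), Op = 0x40490fdb (3.14159274f) has a biased
/// exponent field of 128, so the result is (float)(128 - 127) = 1.0f.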
5021 static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
5022                            const TargetLowering &TLI, const SDLoc &dl) {
5023   SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
5024                            DAG.getConstant(0x7f800000, dl, MVT::i32));
5025   SDValue t1 = DAG.getNode(
5026       ISD::SRL, dl, MVT::i32, t0,
5027       DAG.getConstant(23, dl,
5028                       TLI.getShiftAmountTy(MVT::i32, DAG.getDataLayout())));
5029   SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
5030                            DAG.getConstant(127, dl, MVT::i32));
5031   return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
5032 }
5033 
5034 /// getF32Constant - Get 32-bit floating point constant.
5035 static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
5036                               const SDLoc &dl) {
5037   return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
5038                            MVT::f32);
5039 }
5040 
5041 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
5042                                        SelectionDAG &DAG) {
5043   // TODO: What fast-math-flags should be set on the floating-point nodes?
5044 
5045   //   IntegerPartOfX = (int32_t)t0;
5046   SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
5047 
5048   //   FractionalPartOfX = t0 - (float)IntegerPartOfX;
5049   SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
5050   SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
5051 
5052   //   IntegerPartOfX <<= 23;
5053   IntegerPartOfX =
5054       DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
5055                   DAG.getConstant(23, dl,
5056                                   DAG.getTargetLoweringInfo().getShiftAmountTy(
5057                                       MVT::i32, DAG.getDataLayout())));
5058 
5059   SDValue TwoToFractionalPartOfX;
5060   if (LimitFloatPrecision <= 6) {
5061     // For floating-point precision of 6:
5062     //
5063     //   TwoToFractionalPartOfX =
5064     //     0.997535578f +
5065     //       (0.735607626f + 0.252464424f * x) * x;
5066     //
5067     // error 0.0144103317, which is 6 bits
5068     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5069                              getF32Constant(DAG, 0x3e814304, dl));
5070     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5071                              getF32Constant(DAG, 0x3f3c50c8, dl));
5072     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5073     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5074                                          getF32Constant(DAG, 0x3f7f5e7e, dl));
5075   } else if (LimitFloatPrecision <= 12) {
5076     // For floating-point precision of 12:
5077     //
5078     //   TwoToFractionalPartOfX =
5079     //     0.999892986f +
5080     //       (0.696457318f +
5081     //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
5082     //
5083     // error 0.000107046256, which is 13 to 14 bits
5084     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5085                              getF32Constant(DAG, 0x3da235e3, dl));
5086     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5087                              getF32Constant(DAG, 0x3e65b8f3, dl));
5088     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5089     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5090                              getF32Constant(DAG, 0x3f324b07, dl));
5091     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5092     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5093                                          getF32Constant(DAG, 0x3f7ff8fd, dl));
5094   } else { // LimitFloatPrecision <= 18
5095     // For floating-point precision of 18:
5096     //
5097     //   TwoToFractionalPartOfX =
5098     //     0.999999982f +
5099     //       (0.693148872f +
5100     //         (0.240227044f +
5101     //           (0.554906021e-1f +
5102     //             (0.961591928e-2f +
5103     //               (0.136028312e-2f + 0.157059148e-3f * x) * x) * x) * x) * x) * x;
5104     // error 2.47208000*10^(-7), which is better than 18 bits
5105     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5106                              getF32Constant(DAG, 0x3924b03e, dl));
5107     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5108                              getF32Constant(DAG, 0x3ab24b87, dl));
5109     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5110     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5111                              getF32Constant(DAG, 0x3c1d8c17, dl));
5112     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5113     SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5114                              getF32Constant(DAG, 0x3d634a1d, dl));
5115     SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5116     SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5117                              getF32Constant(DAG, 0x3e75fe14, dl));
5118     SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5119     SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
5120                               getF32Constant(DAG, 0x3f317234, dl));
5121     SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
5122     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
5123                                          getF32Constant(DAG, 0x3f800000, dl));
5124   }
5125 
5126   // Add the exponent into the result in integer domain.
5127   SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
5128   return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
5129                      DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
5130 }
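
// The routine above exploits 2^x = 2^IntegerPartOfX * 2^FractionalPartOfX:
// the polynomial approximates 2^f for f in [0,1), whose IEEE-754 exponent
// field is zero, so adding IntegerPartOfX << 23 in the integer domain scales
// the result by 2^IntegerPartOfX. Hand-worked sketch for x = 2.5
// (illustrative): IntegerPartOfX = 2, the polynomial gives ~1.41421 (2^0.5),
// and the integer add yields ~5.65685 = 2^2.5.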
5131 
5132 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
5133 /// limited-precision mode.
5134 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5135                          const TargetLowering &TLI, SDNodeFlags Flags) {
5136   if (Op.getValueType() == MVT::f32 &&
5137       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5138 
5139     // Put the exponent in the right bit position for later addition to the
5140     // final result:
5141     //
5142     // t0 = Op * log2(e)
5143 
5144     // TODO: What fast-math-flags should be set here?
5145     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
5146                              DAG.getConstantFP(numbers::log2ef, dl, MVT::f32));
5147     return getLimitedPrecisionExp2(t0, dl, DAG);
5148   }
5149 
5150   // No special expansion.
5151   return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op, Flags);
5152 }
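
// Sanity check of the reduction above (illustrative): exp(x) = 2^(x*log2(e)),
// so for x = 1, t0 = 1.4426950f and 2^1.4426950 = 2.7182818, which is e.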
5153 
5154 /// expandLog - Lower a log intrinsic. Handles the special sequences for
5155 /// limited-precision mode.
5156 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5157                          const TargetLowering &TLI, SDNodeFlags Flags) {
5158   // TODO: What fast-math-flags should be set on the floating-point nodes?
5159 
5160   if (Op.getValueType() == MVT::f32 &&
5161       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5162     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5163 
5164     // Scale the exponent by log(2).
5165     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5166     SDValue LogOfExponent =
5167         DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5168                     DAG.getConstantFP(numbers::ln2f, dl, MVT::f32));
5169 
5170     // Get the significand and build it into a floating-point number with
5171     // exponent of 1.
5172     SDValue X = GetSignificand(DAG, Op1, dl);
5173 
5174     SDValue LogOfMantissa;
5175     if (LimitFloatPrecision <= 6) {
5176       // For floating-point precision of 6:
5177       //
5178       //   LogOfMantissa =
5179       //     -1.1609546f +
5180       //       (1.4034025f - 0.23903021f * x) * x;
5181       //
5182       // error 0.0034276066, which is better than 8 bits
5183       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5184                                getF32Constant(DAG, 0xbe74c456, dl));
5185       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5186                                getF32Constant(DAG, 0x3fb3a2b1, dl));
5187       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5188       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5189                                   getF32Constant(DAG, 0x3f949a29, dl));
5190     } else if (LimitFloatPrecision <= 12) {
5191       // For floating-point precision of 12:
5192       //
5193       //   LogOfMantissa =
5194       //     -1.7417939f +
5195       //       (2.8212026f +
5196       //         (-1.4699568f +
5197       //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
5198       //
5199       // error 0.000061011436, which is 14 bits
5200       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5201                                getF32Constant(DAG, 0xbd67b6d6, dl));
5202       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5203                                getF32Constant(DAG, 0x3ee4f4b8, dl));
5204       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5205       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5206                                getF32Constant(DAG, 0x3fbc278b, dl));
5207       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5208       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5209                                getF32Constant(DAG, 0x40348e95, dl));
5210       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5211       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5212                                   getF32Constant(DAG, 0x3fdef31a, dl));
5213     } else { // LimitFloatPrecision <= 18
5214       // For floating-point precision of 18:
5215       //
5216       //   LogOfMantissa =
5217       //     -2.1072184f +
5218       //       (4.2372794f +
5219       //         (-3.7029485f +
5220       //           (2.2781945f +
5221       //             (-0.87823314f +
5222       //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x) * x;
5223       //
5224       // error 0.0000023660568, which is better than 18 bits
5225       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5226                                getF32Constant(DAG, 0xbc91e5ac, dl));
5227       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5228                                getF32Constant(DAG, 0x3e4350aa, dl));
5229       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5230       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5231                                getF32Constant(DAG, 0x3f60d3e3, dl));
5232       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5233       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5234                                getF32Constant(DAG, 0x4011cdf0, dl));
5235       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5236       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5237                                getF32Constant(DAG, 0x406cfd1c, dl));
5238       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5239       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5240                                getF32Constant(DAG, 0x408797cb, dl));
5241       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5242       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5243                                   getF32Constant(DAG, 0x4006dcab, dl));
5244     }
5245 
5246     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
5247   }
5248 
5249   // No special expansion.
5250   return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op, Flags);
5251 }
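
// The split above uses ln(x) = E*ln(2) + ln(M) for x = 2^E * M, M in [1,2).
// Worked example (illustrative): for x = 8.0f, E = 3 and M = 1.0, giving
// 3 * 0.6931472 + 0 = 2.0794415 = ln(8).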
5252 
5253 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
5254 /// limited-precision mode.
5255 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5256                           const TargetLowering &TLI, SDNodeFlags Flags) {
5257   // TODO: What fast-math-flags should be set on the floating-point nodes?
5258 
5259   if (Op.getValueType() == MVT::f32 &&
5260       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5261     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5262 
5263     // Get the exponent.
5264     SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
5265 
5266     // Get the significand and build it into a floating-point number with
5267     // exponent of 1.
5268     SDValue X = GetSignificand(DAG, Op1, dl);
5269 
5270     // Different possible minimax approximations of significand in
5271     // floating-point for various degrees of accuracy over [1,2].
5272     SDValue Log2ofMantissa;
5273     if (LimitFloatPrecision <= 6) {
5274       // For floating-point precision of 6:
5275       //
5276       //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
5277       //
5278       // error 0.0049451742, which is more than 7 bits
5279       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5280                                getF32Constant(DAG, 0xbeb08fe0, dl));
5281       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5282                                getF32Constant(DAG, 0x40019463, dl));
5283       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5284       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5285                                    getF32Constant(DAG, 0x3fd6633d, dl));
5286     } else if (LimitFloatPrecision <= 12) {
5287       // For floating-point precision of 12:
5288       //
5289       //   Log2ofMantissa =
5290       //     -2.51285454f +
5291       //       (4.07009056f +
5292       //         (-2.12067489f +
5293       //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
5294       //
5295       // error 0.0000876136000, which is better than 13 bits
5296       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5297                                getF32Constant(DAG, 0xbda7262e, dl));
5298       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5299                                getF32Constant(DAG, 0x3f25280b, dl));
5300       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5301       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5302                                getF32Constant(DAG, 0x4007b923, dl));
5303       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5304       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5305                                getF32Constant(DAG, 0x40823e2f, dl));
5306       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5307       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5308                                    getF32Constant(DAG, 0x4020d29c, dl));
5309     } else { // LimitFloatPrecision <= 18
5310       // For floating-point precision of 18:
5311       //
5312       //   Log2ofMantissa =
5313       //     -3.0400495f +
5314       //       (6.1129976f +
5315       //         (-5.3420409f +
5316       //           (3.2865683f +
5317       //             (-1.2669343f +
5318       //               (0.27515199f -
5319       //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
5320       //
5321       // error 0.0000018516, which is better than 18 bits
5322       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5323                                getF32Constant(DAG, 0xbcd2769e, dl));
5324       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5325                                getF32Constant(DAG, 0x3e8ce0b9, dl));
5326       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5327       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5328                                getF32Constant(DAG, 0x3fa22ae7, dl));
5329       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5330       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5331                                getF32Constant(DAG, 0x40525723, dl));
5332       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5333       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5334                                getF32Constant(DAG, 0x40aaf200, dl));
5335       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5336       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5337                                getF32Constant(DAG, 0x40c39dad, dl));
5338       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5339       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5340                                    getF32Constant(DAG, 0x4042902c, dl));
5341     }
5342 
5343     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5344   }
5345 
5346   // No special expansion.
5347   return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op, Flags);
5348 }
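
// Same decomposition as expandLog but without the ln(2) scale:
// log2(x) = E + log2(M). Illustrative example: for x = 10.0f, E = 3 and
// M = 1.25, so the result is 3 + 0.3219281 = 3.3219281 = log2(10).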
5349 
5350 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
5351 /// limited-precision mode.
5352 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5353                            const TargetLowering &TLI, SDNodeFlags Flags) {
5354   // TODO: What fast-math-flags should be set on the floating-point nodes?
5355 
5356   if (Op.getValueType() == MVT::f32 &&
5357       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5358     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5359 
5360     // Scale the exponent by log10(2) [0.30102999f].
5361     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5362     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5363                                         getF32Constant(DAG, 0x3e9a209a, dl));
5364 
5365     // Get the significand and build it into a floating-point number with
5366     // exponent of 1.
5367     SDValue X = GetSignificand(DAG, Op1, dl);
5368 
5369     SDValue Log10ofMantissa;
5370     if (LimitFloatPrecision <= 6) {
5371       // For floating-point precision of 6:
5372       //
5373       //   Log10ofMantissa =
5374       //     -0.50419619f +
5375       //       (0.60948995f - 0.10380950f * x) * x;
5376       //
5377       // error 0.0014886165, which is 6 bits
5378       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5379                                getF32Constant(DAG, 0xbdd49a13, dl));
5380       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5381                                getF32Constant(DAG, 0x3f1c0789, dl));
5382       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5383       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5384                                     getF32Constant(DAG, 0x3f011300, dl));
5385     } else if (LimitFloatPrecision <= 12) {
5386       // For floating-point precision of 12:
5387       //
5388       //   Log10ofMantissa =
5389       //     -0.64831180f +
5390       //       (0.91751397f +
5391       //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
5392       //
5393       // error 0.00019228036, which is better than 12 bits
5394       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5395                                getF32Constant(DAG, 0x3d431f31, dl));
5396       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5397                                getF32Constant(DAG, 0x3ea21fb2, dl));
5398       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5399       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5400                                getF32Constant(DAG, 0x3f6ae232, dl));
5401       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5402       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5403                                     getF32Constant(DAG, 0x3f25f7c3, dl));
5404     } else { // LimitFloatPrecision <= 18
5405       // For floating-point precision of 18:
5406       //
5407       //   Log10ofMantissa =
5408       //     -0.84299375f +
5409       //       (1.5327582f +
5410       //         (-1.0688956f +
5411       //           (0.49102474f +
5412       //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
5413       //
5414       // error 0.0000037995730, which is better than 18 bits
5415       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5416                                getF32Constant(DAG, 0x3c5d51ce, dl));
5417       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5418                                getF32Constant(DAG, 0x3e00685a, dl));
5419       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5420       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5421                                getF32Constant(DAG, 0x3efb6798, dl));
5422       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5423       SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5424                                getF32Constant(DAG, 0x3f88d192, dl));
5425       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5426       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5427                                getF32Constant(DAG, 0x3fc4316c, dl));
5428       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5429       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
5430                                     getF32Constant(DAG, 0x3f57ce70, dl));
5431     }
5432 
5433     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5434   }
5435 
5436   // No special expansion.
5437   return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op, Flags);
5438 }
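
// Here log10(x) = E*log10(2) + log10(M). Illustrative example: for
// x = 1000.0f, E = 9 and M = 1.953125, giving 9 * 0.30103 + 0.2907347 ~= 3.0.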
5439 
5440 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
5441 /// limited-precision mode.
5442 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5443                           const TargetLowering &TLI, SDNodeFlags Flags) {
5444   if (Op.getValueType() == MVT::f32 &&
5445       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
5446     return getLimitedPrecisionExp2(Op, dl, DAG);
5447 
5448   // No special expansion.
5449   return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op, Flags);
5450 }
5451 
5452 /// expandPow - Lower a pow intrinsic. Handles the special sequence for
5453 /// limited-precision mode when the base is exactly 10.0f.
5454 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
5455                          SelectionDAG &DAG, const TargetLowering &TLI,
5456                          SDNodeFlags Flags) {
5457   bool IsExp10 = false;
5458   if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
5459       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5460     if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
5461       APFloat Ten(10.0f);
5462       IsExp10 = LHSC->isExactlyValue(Ten);
5463     }
5464   }
5465 
5466   // TODO: What fast-math-flags should be set on the FMUL node?
5467   if (IsExp10) {
5468     // Put the exponent in the right bit position for later addition to the
5469     // final result:
5470     //
5471     //   #define LOG2OF10 3.3219281f
5472     //   t0 = Op * LOG2OF10;
5473     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
5474                              getF32Constant(DAG, 0x40549a78, dl));
5475     return getLimitedPrecisionExp2(t0, dl, DAG);
5476   }
5477 
5478   // No special expansion.
5479   return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS, Flags);
5480 }
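
// Sketch of the exp10 reduction above (illustrative): 10^x = 2^(x*log2(10)),
// and 0x40549a78 is the f32 bit pattern of log2(10) = 3.3219281.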
5481 
5482 /// ExpandPowI - Expand a llvm.powi intrinsic.
5483 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
5484                           SelectionDAG &DAG) {
5485   // If RHS is a constant, we can expand this out to a multiplication tree if
5486   // it's beneficial on the target, otherwise we end up lowering to a call to
5487   // __powidf2 (for example).
5488   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5489     unsigned Val = RHSC->getSExtValue();
5490 
5491     // powi(x, 0) -> 1.0
5492     if (Val == 0)
5493       return DAG.getConstantFP(1.0, DL, LHS.getValueType());
5494 
5495     if (DAG.getTargetLoweringInfo().isBeneficialToExpandPowI(
5496             Val, DAG.shouldOptForSize())) {
5497       // Get the exponent as a positive value.
5498       if ((int)Val < 0)
5499         Val = -Val;
5500       // We use the simple binary decomposition method to generate the multiply
5501       // sequence.  There are more optimal ways to do this (for example,
5502       // powi(x,15) generates one more multiply than it should), but this has
5503       // the benefit of being both really simple and much better than a libcall.
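      //
      // Illustrative example (not from the source): powi(x, 11), with
      // 11 = 0b1011, accumulates Res = x * x^2 * x^8 = x^11 while CurSquare
      // steps through x, x^2, x^4, x^8 by repeated squaring.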
5504       SDValue Res; // Logically starts equal to 1.0
5505       SDValue CurSquare = LHS;
5506       // TODO: Intrinsics should have fast-math-flags that propagate to these
5507       // nodes.
5508       while (Val) {
5509         if (Val & 1) {
5510           if (Res.getNode())
5511             Res =
5512                 DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
5513           else
5514             Res = CurSquare; // 1.0*CurSquare.
5515         }
5516 
5517         CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
5518                                 CurSquare, CurSquare);
5519         Val >>= 1;
5520       }
5521 
5522       // If the original was negative, invert the result, producing 1/(x*x*x).
5523       if (RHSC->getSExtValue() < 0)
5524         Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
5525                           DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
5526       return Res;
5527     }
5528   }
5529 
5530   // Otherwise, expand to a libcall.
5531   return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
5532 }
5533 
5534 static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
5535                             SDValue LHS, SDValue RHS, SDValue Scale,
5536                             SelectionDAG &DAG, const TargetLowering &TLI) {
5537   EVT VT = LHS.getValueType();
5538   bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
5539   bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
5540   LLVMContext &Ctx = *DAG.getContext();
5541 
5542   // If the type is legal but the operation isn't, this node might survive all
5543   // the way to operation legalization. If we end up there and we do not have
5544   // the ability to widen the type (if VT*2 is not legal), we cannot expand the
5545   // node.
5546 
5547   // Coax the legalizer into expanding the node during type legalization instead
5548   // by bumping the size by one bit. This will force it to Promote, enabling the
5549   // early expansion and avoiding the need to expand later.
5550 
5551   // We don't have to do this if Scale is 0; that can always be expanded, unless
5552   // it's a saturating signed operation. Those can experience true integer
5553   // division overflow, a case which we must avoid.
5554 
5555   // FIXME: We wouldn't have to do this (or any of the early
5556   // expansion/promotion) if it was possible to expand a libcall of an
5557   // illegal type during operation legalization. But it's not, so things
5558   // get a bit hacky.
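
  // As a concrete sketch (illustrative): an i32 sdiv.fix.sat with a nonzero
  // scale on a target without native support is widened here to i33, the LHS
  // is shifted up by one bit to preserve the saturation width, the i33 node
  // is emitted (and later promoted and expanded during type legalization),
  // and the result is shifted back down and truncated to i32.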
5559   unsigned ScaleInt = cast<ConstantSDNode>(Scale)->getZExtValue();
5560   if ((ScaleInt > 0 || (Saturating && Signed)) &&
5561       (TLI.isTypeLegal(VT) ||
5562        (VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
5563     TargetLowering::LegalizeAction Action = TLI.getFixedPointOperationAction(
5564         Opcode, VT, ScaleInt);
5565     if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) {
5566       EVT PromVT;
5567       if (VT.isScalarInteger())
5568         PromVT = EVT::getIntegerVT(Ctx, VT.getSizeInBits() + 1);
5569       else if (VT.isVector()) {
5570         PromVT = VT.getVectorElementType();
5571         PromVT = EVT::getIntegerVT(Ctx, PromVT.getSizeInBits() + 1);
5572         PromVT = EVT::getVectorVT(Ctx, PromVT, VT.getVectorElementCount());
5573       } else
5574         llvm_unreachable("Wrong VT for DIVFIX?");
5575       LHS = DAG.getExtOrTrunc(Signed, LHS, DL, PromVT);
5576       RHS = DAG.getExtOrTrunc(Signed, RHS, DL, PromVT);
5577       EVT ShiftTy = TLI.getShiftAmountTy(PromVT, DAG.getDataLayout());
5578       // For saturating operations, we need to shift up the LHS to get the
5579       // proper saturation width, and then shift down again afterwards.
5580       if (Saturating)
5581         LHS = DAG.getNode(ISD::SHL, DL, PromVT, LHS,
5582                           DAG.getConstant(1, DL, ShiftTy));
5583       SDValue Res = DAG.getNode(Opcode, DL, PromVT, LHS, RHS, Scale);
5584       if (Saturating)
5585         Res = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, PromVT, Res,
5586                           DAG.getConstant(1, DL, ShiftTy));
5587       return DAG.getZExtOrTrunc(Res, DL, VT);
5588     }
5589   }
5590 
5591   return DAG.getNode(Opcode, DL, VT, LHS, RHS, Scale);
5592 }
5593 
5594 // getUnderlyingArgRegs - Find underlying registers used for a truncated,
5595 // bitcasted, or split argument. Returns a list of <Register, size in bits>.
5596 static void
5597 getUnderlyingArgRegs(SmallVectorImpl<std::pair<unsigned, TypeSize>> &Regs,
5598                      const SDValue &N) {
5599   switch (N.getOpcode()) {
5600   case ISD::CopyFromReg: {
5601     SDValue Op = N.getOperand(1);
5602     Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
5603                       Op.getValueType().getSizeInBits());
5604     return;
5605   }
5606   case ISD::BITCAST:
5607   case ISD::AssertZext:
5608   case ISD::AssertSext:
5609   case ISD::TRUNCATE:
5610     getUnderlyingArgRegs(Regs, N.getOperand(0));
5611     return;
5612   case ISD::BUILD_PAIR:
5613   case ISD::BUILD_VECTOR:
5614   case ISD::CONCAT_VECTORS:
5615     for (SDValue Op : N->op_values())
5616       getUnderlyingArgRegs(Regs, Op);
5617     return;
5618   default:
5619     return;
5620   }
5621 }
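
// For example (illustrative), an i64 argument split across two 32-bit
// registers typically reaches here as BUILD_PAIR(CopyFromReg(r0),
// CopyFromReg(r1)) and yields the two entries <r0, 32> and <r1, 32>.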
5622 
5623 /// If the DbgValueInst is a dbg_value of a function argument, create the
5624 /// corresponding DBG_VALUE machine instruction for it now.  At the end of
5625 /// instruction selection, they will be inserted into the entry BB.
5626 /// We don't currently support this for variadic dbg_values, as they shouldn't
5627 /// appear for function arguments or in the prologue.
5628 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
5629     const Value *V, DILocalVariable *Variable, DIExpression *Expr,
5630     DILocation *DL, FuncArgumentDbgValueKind Kind, const SDValue &N) {
5631   const Argument *Arg = dyn_cast<Argument>(V);
5632   if (!Arg)
5633     return false;
5634 
5635   MachineFunction &MF = DAG.getMachineFunction();
5636   const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
5637 
5638   // Helper to create DBG_INSTR_REFs or DBG_VALUEs, depending on what kind
5639   // we've been asked to pursue.
5640   auto MakeVRegDbgValue = [&](Register Reg, DIExpression *FragExpr,
5641                               bool Indirect) {
5642     if (Reg.isVirtual() && MF.useDebugInstrRef()) {
5643       // For VRegs, in instruction referencing mode, create a DBG_INSTR_REF
5644       // pointing at the VReg, which will be patched up later.
5645       auto &Inst = TII->get(TargetOpcode::DBG_INSTR_REF);
5646       SmallVector<MachineOperand, 1> MOs({MachineOperand::CreateReg(
5647           /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
5648           /* isKill */ false, /* isDead */ false,
5649           /* isUndef */ false, /* isEarlyClobber */ false,
5650           /* SubReg */ 0, /* isDebug */ true)});
5651 
5652       auto *NewDIExpr = FragExpr;
5653       // We don't have an "Indirect" field in DBG_INSTR_REF, fold that into
5654       // the DIExpression.
5655       if (Indirect)
5656         NewDIExpr = DIExpression::prepend(FragExpr, DIExpression::DerefBefore);
5657       SmallVector<uint64_t, 2> Ops({dwarf::DW_OP_LLVM_arg, 0});
5658       NewDIExpr = DIExpression::prependOpcodes(NewDIExpr, Ops);
5659       return BuildMI(MF, DL, Inst, false, MOs, Variable, NewDIExpr);
5660     } else {
5661       // Create a completely standard DBG_VALUE.
5662       auto &Inst = TII->get(TargetOpcode::DBG_VALUE);
5663       return BuildMI(MF, DL, Inst, Indirect, Reg, Variable, FragExpr);
5664     }
5665   };
5666 
5667   if (Kind == FuncArgumentDbgValueKind::Value) {
5668     // ArgDbgValues are hoisted to the beginning of the entry block. So we
5669     // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
5670     // the entry block.
5671     bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
5672     if (!IsInEntryBlock)
5673       return false;
5674 
5675     // ArgDbgValues are hoisted to the beginning of the entry block.  So we
5676     // should only emit as ArgDbgValue if the dbg.value intrinsic describes a
5677     // variable that also is a param.
5678     //
5679     // Although, if we are at the top of the entry block already, we can still
5680     // emit using ArgDbgValue. This might catch some situations when the
5681     // dbg.value refers to an argument that isn't used in the entry block, so
5682     // any CopyToReg node would be optimized out and the only way to express
5683     // this DBG_VALUE is by using the physical reg (or FI) as done in this
5684     // method.
5685     //
5686     // In short: emit as ArgDbgValue only if the Variable is an argument to
5687     // the current function and the dbg.value intrinsic is in the entry block.
5688     bool VariableIsFunctionInputArg = Variable->isParameter() &&
5689         !DL->getInlinedAt();
5690     bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
5691     if (!IsInPrologue && !VariableIsFunctionInputArg)
5692       return false;
5693 
5694     // Here we assume that a function argument on IR level only can be used to
5695     // describe one input parameter on source level. If we for example have
5696     // source code like this
5697     //
5698     //    struct A { long x, y; };
5699     //    void foo(struct A a, long b) {
5700     //      ...
5701     //      b = a.x;
5702     //      ...
5703     //    }
5704     //
5705     // and IR like this
5706     //
5707     //  define void @foo(i32 %a1, i32 %a2, i32 %b)  {
5708     //  entry:
5709     //    call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
5710     //    call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
5711     //    call void @llvm.dbg.value(metadata i32 %b, "b",
5712     //    ...
5713     //    call void @llvm.dbg.value(metadata i32 %a1, "b"
5714     //    ...
5715     //
5716     // then the last dbg.value is describing a parameter "b" using a value that
5717     // is an argument. But since we have already used %a1 to describe a parameter,
5718     // we should not handle that last dbg.value here (that would result in an
5719     // incorrect hoisting of the DBG_VALUE to the function entry).
5720     // Notice that we allow one dbg.value per IR-level argument, to accommodate
5721     // the situation with fragments above.
5722     if (VariableIsFunctionInputArg) {
5723       unsigned ArgNo = Arg->getArgNo();
5724       if (ArgNo >= FuncInfo.DescribedArgs.size())
5725         FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
5726       else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
5727         return false;
5728       FuncInfo.DescribedArgs.set(ArgNo);
5729     }
5730   }
5731 
5732   bool IsIndirect = false;
5733   std::optional<MachineOperand> Op;
5734   // Some arguments' frame index is recorded during argument lowering.
5735   int FI = FuncInfo.getArgumentFrameIndex(Arg);
5736   if (FI != std::numeric_limits<int>::max())
5737     Op = MachineOperand::CreateFI(FI);
5738 
5739   SmallVector<std::pair<unsigned, TypeSize>, 8> ArgRegsAndSizes;
5740   if (!Op && N.getNode()) {
5741     getUnderlyingArgRegs(ArgRegsAndSizes, N);
5742     Register Reg;
5743     if (ArgRegsAndSizes.size() == 1)
5744       Reg = ArgRegsAndSizes.front().first;
5745 
5746     if (Reg && Reg.isVirtual()) {
5747       MachineRegisterInfo &RegInfo = MF.getRegInfo();
5748       Register PR = RegInfo.getLiveInPhysReg(Reg);
5749       if (PR)
5750         Reg = PR;
5751     }
5752     if (Reg) {
5753       Op = MachineOperand::CreateReg(Reg, false);
5754       IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
5755     }
5756   }
5757 
5758   if (!Op && N.getNode()) {
5759     // Check if frame index is available.
5760     SDValue LCandidate = peekThroughBitcasts(N);
5761     if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode()))
5762       if (FrameIndexSDNode *FINode =
5763           dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
5764         Op = MachineOperand::CreateFI(FINode->getIndex());
5765   }
5766 
5767   if (!Op) {
5768     // Create a DBG_VALUE for each register in SplitRegs, covering the value.
5769     auto splitMultiRegDbgValue = [&](ArrayRef<std::pair<unsigned, TypeSize>>
5770                                          SplitRegs) {
5771       unsigned Offset = 0;
5772       for (const auto &RegAndSize : SplitRegs) {
5773         // If the expression is already a fragment, the current register
5774         // offset+size might extend beyond the fragment. In this case, only
5775         // the register bits that are inside the fragment are relevant.
5776         int RegFragmentSizeInBits = RegAndSize.second;
5777         if (auto ExprFragmentInfo = Expr->getFragmentInfo()) {
5778           uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
5779           // The register is entirely outside the expression fragment,
5780           // so is irrelevant for debug info.
5781           if (Offset >= ExprFragmentSizeInBits)
5782             break;
5783           // The register is partially outside the expression fragment, only
5784           // the low bits within the fragment are relevant for debug info.
5785           if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
5786             RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset;
5787           }
5788         }
5789 
5790         auto FragmentExpr = DIExpression::createFragmentExpression(
5791             Expr, Offset, RegFragmentSizeInBits);
5792         Offset += RegAndSize.second;
5793         // If a valid fragment expression cannot be created, the variable's
5794         // correct value cannot be determined and so it is set as Undef.
5795         if (!FragmentExpr) {
5796           SDDbgValue *SDV = DAG.getConstantDbgValue(
5797               Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder);
5798           DAG.AddDbgValue(SDV, false);
5799           continue;
5800         }
5801         MachineInstr *NewMI =
5802             MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
5803                              Kind != FuncArgumentDbgValueKind::Value);
5804         FuncInfo.ArgDbgValues.push_back(NewMI);
5805       }
5806     };
5807 
5808     // Check if ValueMap has reg number.
5809     DenseMap<const Value *, Register>::const_iterator
5810       VMI = FuncInfo.ValueMap.find(V);
5811     if (VMI != FuncInfo.ValueMap.end()) {
5812       const auto &TLI = DAG.getTargetLoweringInfo();
5813       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
5814                        V->getType(), std::nullopt);
5815       if (RFV.occupiesMultipleRegs()) {
5816         splitMultiRegDbgValue(RFV.getRegsAndSizes());
5817         return true;
5818       }
5819 
5820       Op = MachineOperand::CreateReg(VMI->second, false);
5821       IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
5822     } else if (ArgRegsAndSizes.size() > 1) {
5823       // This was split due to the calling convention, and no virtual register
5824       // mapping exists for the value.
5825       splitMultiRegDbgValue(ArgRegsAndSizes);
5826       return true;
5827     }
5828   }
5829 
5830   if (!Op)
5831     return false;
5832 
5833   assert(Variable->isValidLocationForIntrinsic(DL) &&
5834          "Expected inlined-at fields to agree");
5835   MachineInstr *NewMI = nullptr;
5836 
5837   if (Op->isReg())
5838     NewMI = MakeVRegDbgValue(Op->getReg(), Expr, IsIndirect);
5839   else
5840     NewMI = BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), true, *Op,
5841                     Variable, Expr);
5842 
5843   // Record it in ArgDbgValues for insertion at the start of the entry block.
5844   FuncInfo.ArgDbgValues.push_back(NewMI);
5845   return true;
5846 }
5847 
5848 /// Return the appropriate SDDbgValue based on N.
5849 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
5850                                              DILocalVariable *Variable,
5851                                              DIExpression *Expr,
5852                                              const DebugLoc &dl,
5853                                              unsigned DbgSDNodeOrder) {
5854   if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
5855     // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
5856     // stack slot locations.
5857     //
5858     // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
5859     // debug values here after optimization:
5860     //
5861     //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
5862     //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
5863     //
5864     // Both describe the direct values of their associated variables.
5865     return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
5866                                      /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5867   }
5868   return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
5869                          /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5870 }
5871 
5872 static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
5873   switch (Intrinsic) {
5874   case Intrinsic::smul_fix:
5875     return ISD::SMULFIX;
5876   case Intrinsic::umul_fix:
5877     return ISD::UMULFIX;
5878   case Intrinsic::smul_fix_sat:
5879     return ISD::SMULFIXSAT;
5880   case Intrinsic::umul_fix_sat:
5881     return ISD::UMULFIXSAT;
5882   case Intrinsic::sdiv_fix:
5883     return ISD::SDIVFIX;
5884   case Intrinsic::udiv_fix:
5885     return ISD::UDIVFIX;
5886   case Intrinsic::sdiv_fix_sat:
5887     return ISD::SDIVFIXSAT;
5888   case Intrinsic::udiv_fix_sat:
5889     return ISD::UDIVFIXSAT;
5890   default:
5891     llvm_unreachable("Unhandled fixed point intrinsic");
5892   }
5893 }
5894 
5895 void SelectionDAGBuilder::lowerCallToExternalSymbol(const CallInst &I,
5896                                                     const char *FunctionName) {
5897   assert(FunctionName && "FunctionName must not be nullptr");
5898   SDValue Callee = DAG.getExternalSymbol(
5899       FunctionName,
5900       DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
5901   LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
5902 }
5903 
5904 /// Given a @llvm.call.preallocated.setup, return the corresponding
5905 /// preallocated call.
5906 static const CallBase *FindPreallocatedCall(const Value *PreallocatedSetup) {
5907   assert(cast<CallBase>(PreallocatedSetup)
5908                  ->getCalledFunction()
5909                  ->getIntrinsicID() == Intrinsic::call_preallocated_setup &&
5910          "expected call_preallocated_setup Value");
5911   for (const auto *U : PreallocatedSetup->users()) {
5912     auto *UseCall = cast<CallBase>(U);
5913     const Function *Fn = UseCall->getCalledFunction();
5914     if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) {
5915       return UseCall;
5916     }
5917   }
5918   llvm_unreachable("expected corresponding call to preallocated setup/arg");
5919 }
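
// In IR terms (a rough sketch; function names are illustrative), the setup
// token ties the pieces together:
//   %t = call token @llvm.call.preallocated.setup(i32 1)
//   %a = call ptr @llvm.call.preallocated.arg(token %t, i32 0) preallocated(i32)
//   call void @use(ptr preallocated(i32) %a) ["preallocated"(token %t)]
// Given %t, FindPreallocatedCall returns the final call to @use.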
5920 
5921 /// If DI is a debug value with an EntryValue expression, lower it using the
5922 /// corresponding physical register of the associated Argument value
5923 /// (guaranteed to exist by the verifier).
5924 bool SelectionDAGBuilder::visitEntryValueDbgValue(const DbgValueInst &DI) {
5925   DILocalVariable *Variable = DI.getVariable();
5926   DIExpression *Expr = DI.getExpression();
5927   if (!Expr->isEntryValue() || !hasSingleElement(DI.getValues()))
5928     return false;
5929 
5930   // These properties are guaranteed by the verifier.
5931   Argument *Arg = cast<Argument>(DI.getValue(0));
5932   assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync));
5933 
5934   auto ArgIt = FuncInfo.ValueMap.find(Arg);
5935   if (ArgIt == FuncInfo.ValueMap.end()) {
5936     LLVM_DEBUG(
5937         dbgs() << "Dropping dbg.value: expression is entry_value but "
5938                   "couldn't find an associated register for the Argument\n");
5939     return true;
5940   }
5941   Register ArgVReg = ArgIt->getSecond();
5942 
5943   for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
5944     if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
5945       SDDbgValue *SDV =
5946           DAG.getVRegDbgValue(Variable, Expr, PhysReg, false /*IsIndirect*/,
5947                               DI.getDebugLoc(), SDNodeOrder);
5948       DAG.AddDbgValue(SDV, false /*treat as dbg.declare byval parameter*/);
5949       return true;
5950     }
5951   LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
5952                        "couldn't find a physical register\n");
5953   return true;
5954 }
5955 
5956 /// Lower the call to the specified intrinsic function.
5957 void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
5958                                              unsigned Intrinsic) {
5959   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5960   SDLoc sdl = getCurSDLoc();
5961   DebugLoc dl = getCurDebugLoc();
5962   SDValue Res;
5963 
5964   SDNodeFlags Flags;
5965   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
5966     Flags.copyFMF(*FPOp);
5967 
5968   switch (Intrinsic) {
5969   default:
5970     // By default, turn this into a target intrinsic node.
5971     visitTargetIntrinsic(I, Intrinsic);
5972     return;
5973   case Intrinsic::vscale: {
5974     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5975     setValue(&I, DAG.getVScale(sdl, VT, APInt(VT.getSizeInBits(), 1)));
5976     return;
5977   }
5978   case Intrinsic::vastart:  visitVAStart(I); return;
5979   case Intrinsic::vaend:    visitVAEnd(I); return;
5980   case Intrinsic::vacopy:   visitVACopy(I); return;
5981   case Intrinsic::returnaddress:
5982     setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
5983                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
5984                              getValue(I.getArgOperand(0))));
5985     return;
5986   case Intrinsic::addressofreturnaddress:
5987     setValue(&I,
5988              DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
5989                          TLI.getValueType(DAG.getDataLayout(), I.getType())));
5990     return;
5991   case Intrinsic::sponentry:
5992     setValue(&I,
5993              DAG.getNode(ISD::SPONENTRY, sdl,
5994                          TLI.getValueType(DAG.getDataLayout(), I.getType())));
5995     return;
5996   case Intrinsic::frameaddress:
5997     setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
5998                              TLI.getFrameIndexTy(DAG.getDataLayout()),
5999                              getValue(I.getArgOperand(0))));
6000     return;
6001   case Intrinsic::read_volatile_register:
6002   case Intrinsic::read_register: {
6003     Value *Reg = I.getArgOperand(0);
6004     SDValue Chain = getRoot();
6005     SDValue RegName =
6006         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6007     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6008     Res = DAG.getNode(ISD::READ_REGISTER, sdl,
6009       DAG.getVTList(VT, MVT::Other), Chain, RegName);
6010     setValue(&I, Res);
6011     DAG.setRoot(Res.getValue(1));
6012     return;
6013   }
6014   case Intrinsic::write_register: {
6015     Value *Reg = I.getArgOperand(0);
6016     Value *RegValue = I.getArgOperand(1);
6017     SDValue Chain = getRoot();
6018     SDValue RegName =
6019         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6020     DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
6021                             RegName, getValue(RegValue)));
6022     return;
6023   }
6024   case Intrinsic::memcpy: {
6025     const auto &MCI = cast<MemCpyInst>(I);
6026     SDValue Op1 = getValue(I.getArgOperand(0));
6027     SDValue Op2 = getValue(I.getArgOperand(1));
6028     SDValue Op3 = getValue(I.getArgOperand(2));
6029     // @llvm.memcpy defines 0 and 1 to both mean no alignment.
6030     Align DstAlign = MCI.getDestAlign().valueOrOne();
6031     Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6032     Align Alignment = std::min(DstAlign, SrcAlign);
6033     bool isVol = MCI.isVolatile();
6034     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6035     // FIXME: Support passing different dest/src alignments to the memcpy DAG
6036     // node.
6037     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6038     SDValue MC = DAG.getMemcpy(
6039         Root, sdl, Op1, Op2, Op3, Alignment, isVol,
6040         /* AlwaysInline */ false, isTC, MachinePointerInfo(I.getArgOperand(0)),
6041         MachinePointerInfo(I.getArgOperand(1)), I.getAAMetadata(), AA);
6042     updateDAGForMaybeTailCall(MC);
6043     return;
6044   }
6045   case Intrinsic::memcpy_inline: {
6046     const auto &MCI = cast<MemCpyInlineInst>(I);
6047     SDValue Dst = getValue(I.getArgOperand(0));
6048     SDValue Src = getValue(I.getArgOperand(1));
6049     SDValue Size = getValue(I.getArgOperand(2));
6050     assert(isa<ConstantSDNode>(Size) && "memcpy_inline needs constant size");
6051     // @llvm.memcpy.inline defines 0 and 1 to both mean no alignment.
6052     Align DstAlign = MCI.getDestAlign().valueOrOne();
6053     Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6054     Align Alignment = std::min(DstAlign, SrcAlign);
6055     bool isVol = MCI.isVolatile();
6056     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6057     // FIXME: Support passing different dest/src alignments to the memcpy DAG
6058     // node.
6059     SDValue MC = DAG.getMemcpy(
6060         getRoot(), sdl, Dst, Src, Size, Alignment, isVol,
6061         /* AlwaysInline */ true, isTC, MachinePointerInfo(I.getArgOperand(0)),
6062         MachinePointerInfo(I.getArgOperand(1)), I.getAAMetadata(), AA);
6063     updateDAGForMaybeTailCall(MC);
6064     return;
6065   }
6066   case Intrinsic::memset: {
6067     const auto &MSI = cast<MemSetInst>(I);
6068     SDValue Op1 = getValue(I.getArgOperand(0));
6069     SDValue Op2 = getValue(I.getArgOperand(1));
6070     SDValue Op3 = getValue(I.getArgOperand(2));
6071     // @llvm.memset defines 0 and 1 to both mean no alignment.
6072     Align Alignment = MSI.getDestAlign().valueOrOne();
6073     bool isVol = MSI.isVolatile();
6074     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6075     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6076     SDValue MS = DAG.getMemset(
6077         Root, sdl, Op1, Op2, Op3, Alignment, isVol, /* AlwaysInline */ false,
6078         isTC, MachinePointerInfo(I.getArgOperand(0)), I.getAAMetadata());
6079     updateDAGForMaybeTailCall(MS);
6080     return;
6081   }
6082   case Intrinsic::memset_inline: {
6083     const auto &MSII = cast<MemSetInlineInst>(I);
6084     SDValue Dst = getValue(I.getArgOperand(0));
6085     SDValue Value = getValue(I.getArgOperand(1));
6086     SDValue Size = getValue(I.getArgOperand(2));
6087     assert(isa<ConstantSDNode>(Size) && "memset_inline needs constant size");
6088     // @llvm.memset.inline defines 0 and 1 to both mean no alignment.
6089     Align DstAlign = MSII.getDestAlign().valueOrOne();
6090     bool isVol = MSII.isVolatile();
6091     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6092     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6093     SDValue MC = DAG.getMemset(Root, sdl, Dst, Value, Size, DstAlign, isVol,
6094                                /* AlwaysInline */ true, isTC,
6095                                MachinePointerInfo(I.getArgOperand(0)),
6096                                I.getAAMetadata());
6097     updateDAGForMaybeTailCall(MC);
6098     return;
6099   }
6100   case Intrinsic::memmove: {
6101     const auto &MMI = cast<MemMoveInst>(I);
6102     SDValue Op1 = getValue(I.getArgOperand(0));
6103     SDValue Op2 = getValue(I.getArgOperand(1));
6104     SDValue Op3 = getValue(I.getArgOperand(2));
6105     // @llvm.memmove defines 0 and 1 to both mean no alignment.
6106     Align DstAlign = MMI.getDestAlign().valueOrOne();
6107     Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6108     Align Alignment = std::min(DstAlign, SrcAlign);
6109     bool isVol = MMI.isVolatile();
6110     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6111     // FIXME: Support passing different dest/src alignments to the memmove DAG
6112     // node.
6113     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6114     SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
6115                                 isTC, MachinePointerInfo(I.getArgOperand(0)),
6116                                 MachinePointerInfo(I.getArgOperand(1)),
6117                                 I.getAAMetadata(), AA);
6118     updateDAGForMaybeTailCall(MM);
6119     return;
6120   }
6121   case Intrinsic::memcpy_element_unordered_atomic: {
6122     const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
6123     SDValue Dst = getValue(MI.getRawDest());
6124     SDValue Src = getValue(MI.getRawSource());
6125     SDValue Length = getValue(MI.getLength());
6126 
6127     Type *LengthTy = MI.getLength()->getType();
6128     unsigned ElemSz = MI.getElementSizeInBytes();
6129     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6130     SDValue MC =
6131         DAG.getAtomicMemcpy(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6132                             isTC, MachinePointerInfo(MI.getRawDest()),
6133                             MachinePointerInfo(MI.getRawSource()));
6134     updateDAGForMaybeTailCall(MC);
6135     return;
6136   }
6137   case Intrinsic::memmove_element_unordered_atomic: {
6138     auto &MI = cast<AtomicMemMoveInst>(I);
6139     SDValue Dst = getValue(MI.getRawDest());
6140     SDValue Src = getValue(MI.getRawSource());
6141     SDValue Length = getValue(MI.getLength());
6142 
6143     Type *LengthTy = MI.getLength()->getType();
6144     unsigned ElemSz = MI.getElementSizeInBytes();
6145     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6146     SDValue MC =
6147         DAG.getAtomicMemmove(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6148                              isTC, MachinePointerInfo(MI.getRawDest()),
6149                              MachinePointerInfo(MI.getRawSource()));
6150     updateDAGForMaybeTailCall(MC);
6151     return;
6152   }
6153   case Intrinsic::memset_element_unordered_atomic: {
6154     auto &MI = cast<AtomicMemSetInst>(I);
6155     SDValue Dst = getValue(MI.getRawDest());
6156     SDValue Val = getValue(MI.getValue());
6157     SDValue Length = getValue(MI.getLength());
6158 
6159     Type *LengthTy = MI.getLength()->getType();
6160     unsigned ElemSz = MI.getElementSizeInBytes();
6161     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6162     SDValue MC =
6163         DAG.getAtomicMemset(getRoot(), sdl, Dst, Val, Length, LengthTy, ElemSz,
6164                             isTC, MachinePointerInfo(MI.getRawDest()));
6165     updateDAGForMaybeTailCall(MC);
6166     return;
6167   }
6168   case Intrinsic::call_preallocated_setup: {
6169     const CallBase *PreallocatedCall = FindPreallocatedCall(&I);
6170     SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6171     SDValue Res = DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
6172                               getRoot(), SrcValue);
6173     setValue(&I, Res);
6174     DAG.setRoot(Res);
6175     return;
6176   }
6177   case Intrinsic::call_preallocated_arg: {
6178     const CallBase *PreallocatedCall = FindPreallocatedCall(I.getOperand(0));
6179     SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6180     SDValue Ops[3];
6181     Ops[0] = getRoot();
6182     Ops[1] = SrcValue;
6183     Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
6184                                    MVT::i32); // arg index
6185     SDValue Res = DAG.getNode(
6186         ISD::PREALLOCATED_ARG, sdl,
6187         DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Ops);
6188     setValue(&I, Res);
6189     DAG.setRoot(Res.getValue(1));
6190     return;
6191   }
6192   case Intrinsic::dbg_declare: {
6193     const auto &DI = cast<DbgDeclareInst>(I);
6194     // Debug intrinsics are handled separately in assignment tracking mode.
6195     // Some intrinsics are handled right after Argument lowering.
6196     if (AssignmentTrackingEnabled ||
6197         FuncInfo.PreprocessedDbgDeclares.count(&DI))
6198       return;
6199     // Assume dbg.declare cannot currently use DIArgList, i.e.
6200     // it is non-variadic.
6201     assert(!DI.hasArgList() && "Only dbg.value should currently use DIArgList");
6202     DILocalVariable *Variable = DI.getVariable();
6203     DIExpression *Expression = DI.getExpression();
6204     dropDanglingDebugInfo(Variable, Expression);
6205     assert(Variable && "Missing variable");
6206     LLVM_DEBUG(dbgs() << "SelectionDAG visiting debug intrinsic: " << DI
6207                       << "\n");
6208     // Check if address has undef value.
6209     const Value *Address = DI.getVariableLocationOp(0);
6210     if (!Address || isa<UndefValue>(Address) ||
6211         (Address->use_empty() && !isa<Argument>(Address))) {
6212       LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI
6213                         << " (bad/undef/unused-arg address)\n");
6214       return;
6215     }
6216 
6217     bool isParameter = Variable->isParameter() || isa<Argument>(Address);
6218 
6219     SDValue &N = NodeMap[Address];
6220     if (!N.getNode() && isa<Argument>(Address))
6221       // Check unused arguments map.
6222       N = UnusedArgNodeMap[Address];
6223     SDDbgValue *SDV;
6224     if (N.getNode()) {
6225       if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
6226         Address = BCI->getOperand(0);
6227       // Parameters are handled specially.
6228       auto FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
6229       if (isParameter && FINode) {
6230         // Byval parameter. We have a frame index at this point.
6231         SDV =
6232             DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
6233                                       /*IsIndirect*/ true, dl, SDNodeOrder);
6234       } else if (isa<Argument>(Address)) {
6235         // Address is an argument, so try to emit its dbg value using
6236         // virtual register info from the FuncInfo.ValueMap.
6237         EmitFuncArgumentDbgValue(Address, Variable, Expression, dl,
6238                                  FuncArgumentDbgValueKind::Declare, N);
6239         return;
6240       } else {
6241         SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
6242                               true, dl, SDNodeOrder);
6243       }
6244       DAG.AddDbgValue(SDV, isParameter);
6245     } else {
6246       // If Address is an argument then try to emit its dbg value using
6247       // virtual register info from the FuncInfo.ValueMap.
6248       if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl,
6249                                     FuncArgumentDbgValueKind::Declare, N)) {
6250         LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI
6251                           << " (could not emit func-arg dbg_value)\n");
6252       }
6253     }
6254     return;
6255   }
6256   case Intrinsic::dbg_label: {
6257     const DbgLabelInst &DI = cast<DbgLabelInst>(I);
6258     DILabel *Label = DI.getLabel();
6259     assert(Label && "Missing label");
6260 
6261     SDDbgLabel *SDV;
6262     SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
6263     DAG.AddDbgLabel(SDV);
6264     return;
6265   }
6266   case Intrinsic::dbg_assign: {
6267     // Debug intrinsics are handled separately in assignment tracking mode.
6268     if (AssignmentTrackingEnabled)
6269       return;
6270     // If assignment tracking hasn't been enabled then fall through and treat
6271     // the dbg.assign as a dbg.value.
6272     [[fallthrough]];
6273   }
6274   case Intrinsic::dbg_value: {
6275     // Debug intrinsics are handled separately in assignment tracking mode.
6276     if (AssignmentTrackingEnabled)
6277       return;
6278     const DbgValueInst &DI = cast<DbgValueInst>(I);
6279     assert(DI.getVariable() && "Missing variable");
6280 
6281     DILocalVariable *Variable = DI.getVariable();
6282     DIExpression *Expression = DI.getExpression();
6283     dropDanglingDebugInfo(Variable, Expression);
6284 
6285     if (visitEntryValueDbgValue(DI))
6286       return;
6287 
6288     if (DI.isKillLocation()) {
6289       handleKillDebugValue(Variable, Expression, DI.getDebugLoc(), SDNodeOrder);
6290       return;
6291     }
6292 
6293     SmallVector<Value *, 4> Values(DI.getValues());
6294     if (Values.empty())
6295       return;
6296 
6297     bool IsVariadic = DI.hasArgList();
6298     if (!handleDebugValue(Values, Variable, Expression, DI.getDebugLoc(),
6299                           SDNodeOrder, IsVariadic))
6300       addDanglingDebugInfo(&DI, SDNodeOrder);
6301     return;
6302   }
6303 
6304   case Intrinsic::eh_typeid_for: {
6305     // Find the type id for the given typeinfo.
6306     GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
6307     unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
6308     Res = DAG.getConstant(TypeID, sdl, MVT::i32);
6309     setValue(&I, Res);
6310     return;
6311   }
6312 
6313   case Intrinsic::eh_return_i32:
6314   case Intrinsic::eh_return_i64:
6315     DAG.getMachineFunction().setCallsEHReturn(true);
6316     DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
6317                             MVT::Other,
6318                             getControlRoot(),
6319                             getValue(I.getArgOperand(0)),
6320                             getValue(I.getArgOperand(1))));
6321     return;
6322   case Intrinsic::eh_unwind_init:
6323     DAG.getMachineFunction().setCallsUnwindInit(true);
6324     return;
6325   case Intrinsic::eh_dwarf_cfa:
6326     setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
6327                              TLI.getPointerTy(DAG.getDataLayout()),
6328                              getValue(I.getArgOperand(0))));
6329     return;
6330   case Intrinsic::eh_sjlj_callsite: {
6331     MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
6332     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(0));
6333     assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
6334 
6335     MMI.setCurrentCallSite(CI->getZExtValue());
6336     return;
6337   }
6338   case Intrinsic::eh_sjlj_functioncontext: {
6339     // Get and store the index of the function context.
6340     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
6341     AllocaInst *FnCtx =
6342       cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
6343     int FI = FuncInfo.StaticAllocaMap[FnCtx];
6344     MFI.setFunctionContextIndex(FI);
6345     return;
6346   }
6347   case Intrinsic::eh_sjlj_setjmp: {
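         // EH_SJLJ_SETJMP produces the i32 setjmp result plus a chain; the chain
         // becomes the new root so the call stays ordered with side effects.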
6348     SDValue Ops[2];
6349     Ops[0] = getRoot();
6350     Ops[1] = getValue(I.getArgOperand(0));
6351     SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
6352                              DAG.getVTList(MVT::i32, MVT::Other), Ops);
6353     setValue(&I, Op.getValue(0));
6354     DAG.setRoot(Op.getValue(1));
6355     return;
6356   }
6357   case Intrinsic::eh_sjlj_longjmp:
6358     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
6359                             getRoot(), getValue(I.getArgOperand(0))));
6360     return;
6361   case Intrinsic::eh_sjlj_setup_dispatch:
6362     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
6363                             getRoot()));
6364     return;
6365   case Intrinsic::masked_gather:
6366     visitMaskedGather(I);
6367     return;
6368   case Intrinsic::masked_load:
6369     visitMaskedLoad(I);
6370     return;
6371   case Intrinsic::masked_scatter:
6372     visitMaskedScatter(I);
6373     return;
6374   case Intrinsic::masked_store:
6375     visitMaskedStore(I);
6376     return;
6377   case Intrinsic::masked_expandload:
6378     visitMaskedLoad(I, true /* IsExpanding */);
6379     return;
6380   case Intrinsic::masked_compressstore:
6381     visitMaskedStore(I, true /* IsCompressing */);
6382     return;
6383   case Intrinsic::powi:
6384     setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
6385                             getValue(I.getArgOperand(1)), DAG));
6386     return;
6387   case Intrinsic::log:
6388     setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6389     return;
6390   case Intrinsic::log2:
6391     setValue(&I,
6392              expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6393     return;
6394   case Intrinsic::log10:
6395     setValue(&I,
6396              expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6397     return;
6398   case Intrinsic::exp:
6399     setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6400     return;
6401   case Intrinsic::exp2:
6402     setValue(&I,
6403              expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6404     return;
6405   case Intrinsic::pow:
6406     setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
6407                            getValue(I.getArgOperand(1)), DAG, TLI, Flags));
6408     return;
6409   case Intrinsic::sqrt:
6410   case Intrinsic::fabs:
6411   case Intrinsic::sin:
6412   case Intrinsic::cos:
6413   case Intrinsic::exp10:
6414   case Intrinsic::floor:
6415   case Intrinsic::ceil:
6416   case Intrinsic::trunc:
6417   case Intrinsic::rint:
6418   case Intrinsic::nearbyint:
6419   case Intrinsic::round:
6420   case Intrinsic::roundeven:
6421   case Intrinsic::canonicalize: {
6422     unsigned Opcode;
6423     switch (Intrinsic) {
6424     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6425     case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
6426     case Intrinsic::fabs:      Opcode = ISD::FABS;       break;
6427     case Intrinsic::sin:       Opcode = ISD::FSIN;       break;
6428     case Intrinsic::cos:       Opcode = ISD::FCOS;       break;
6429     case Intrinsic::exp10:     Opcode = ISD::FEXP10;     break;
6430     case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
6431     case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
6432     case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
6433     case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
6434     case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
6435     case Intrinsic::round:     Opcode = ISD::FROUND;     break;
6436     case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN; break;
6437     case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
6438     }
6439 
6440     setValue(&I, DAG.getNode(Opcode, sdl,
6441                              getValue(I.getArgOperand(0)).getValueType(),
6442                              getValue(I.getArgOperand(0)), Flags));
6443     return;
6444   }
6445   case Intrinsic::lround:
6446   case Intrinsic::llround:
6447   case Intrinsic::lrint:
6448   case Intrinsic::llrint: {
6449     unsigned Opcode;
6450     switch (Intrinsic) {
6451     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6452     case Intrinsic::lround:  Opcode = ISD::LROUND;  break;
6453     case Intrinsic::llround: Opcode = ISD::LLROUND; break;
6454     case Intrinsic::lrint:   Opcode = ISD::LRINT;   break;
6455     case Intrinsic::llrint:  Opcode = ISD::LLRINT;  break;
6456     }
6457 
6458     EVT RetVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6459     setValue(&I, DAG.getNode(Opcode, sdl, RetVT,
6460                              getValue(I.getArgOperand(0))));
6461     return;
6462   }
6463   case Intrinsic::minnum:
6464     setValue(&I, DAG.getNode(ISD::FMINNUM, sdl,
6465                              getValue(I.getArgOperand(0)).getValueType(),
6466                              getValue(I.getArgOperand(0)),
6467                              getValue(I.getArgOperand(1)), Flags));
6468     return;
6469   case Intrinsic::maxnum:
6470     setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl,
6471                              getValue(I.getArgOperand(0)).getValueType(),
6472                              getValue(I.getArgOperand(0)),
6473                              getValue(I.getArgOperand(1)), Flags));
6474     return;
6475   case Intrinsic::minimum:
6476     setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
6477                              getValue(I.getArgOperand(0)).getValueType(),
6478                              getValue(I.getArgOperand(0)),
6479                              getValue(I.getArgOperand(1)), Flags));
6480     return;
6481   case Intrinsic::maximum:
6482     setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
6483                              getValue(I.getArgOperand(0)).getValueType(),
6484                              getValue(I.getArgOperand(0)),
6485                              getValue(I.getArgOperand(1)), Flags));
6486     return;
6487   case Intrinsic::copysign:
6488     setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
6489                              getValue(I.getArgOperand(0)).getValueType(),
6490                              getValue(I.getArgOperand(0)),
6491                              getValue(I.getArgOperand(1)), Flags));
6492     return;
6493   case Intrinsic::ldexp:
6494     setValue(&I, DAG.getNode(ISD::FLDEXP, sdl,
6495                              getValue(I.getArgOperand(0)).getValueType(),
6496                              getValue(I.getArgOperand(0)),
6497                              getValue(I.getArgOperand(1)), Flags));
6498     return;
6499   case Intrinsic::frexp: {
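         // frexp returns both the fraction and the exponent, so the aggregate
         // return type splits into two result VTs for the FFREXP node.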
6500     SmallVector<EVT, 2> ValueVTs;
6501     ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
6502     SDVTList VTs = DAG.getVTList(ValueVTs);
6503     setValue(&I,
6504              DAG.getNode(ISD::FFREXP, sdl, VTs, getValue(I.getArgOperand(0))));
6505     return;
6506   }
6507   case Intrinsic::arithmetic_fence: {
6508     setValue(&I, DAG.getNode(ISD::ARITH_FENCE, sdl,
6509                              getValue(I.getArgOperand(0)).getValueType(),
6510                              getValue(I.getArgOperand(0)), Flags));
6511     return;
6512   }
6513   case Intrinsic::fma:
6514     setValue(&I, DAG.getNode(
6515                      ISD::FMA, sdl, getValue(I.getArgOperand(0)).getValueType(),
6516                      getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)),
6517                      getValue(I.getArgOperand(2)), Flags));
6518     return;
6519 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
6520   case Intrinsic::INTRINSIC:
6521 #include "llvm/IR/ConstrainedOps.def"
6522     visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
6523     return;
6524 #define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6525 #include "llvm/IR/VPIntrinsics.def"
6526     visitVectorPredicationIntrinsic(cast<VPIntrinsic>(I));
6527     return;
6528   case Intrinsic::fptrunc_round: {
6529     // Get the last argument, the rounding-mode metadata, and convert it to
6530     // an integer constant.
6531     Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(1))->getMetadata();
6532     std::optional<RoundingMode> RoundMode =
6533         convertStrToRoundingMode(cast<MDString>(MD)->getString());
6534 
6535     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6536 
6537     // Propagate fast-math-flags from IR to node(s).
6538     SDNodeFlags Flags;
6539     Flags.copyFMF(*cast<FPMathOperator>(&I));
6540     SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
6541 
6542     SDValue Result;
6543     Result = DAG.getNode(
6544         ISD::FPTRUNC_ROUND, sdl, VT, getValue(I.getArgOperand(0)),
6545         DAG.getTargetConstant((int)*RoundMode, sdl,
6546                               TLI.getPointerTy(DAG.getDataLayout())));
6547     setValue(&I, Result);
6548 
6549     return;
6550   }
6551   case Intrinsic::fmuladd: {
6552     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6553     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
6554         TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
6555       setValue(&I, DAG.getNode(ISD::FMA, sdl,
6556                                getValue(I.getArgOperand(0)).getValueType(),
6557                                getValue(I.getArgOperand(0)),
6558                                getValue(I.getArgOperand(1)),
6559                                getValue(I.getArgOperand(2)), Flags));
6560     } else {
6561       // TODO: Intrinsic calls should have fast-math-flags.
6562       SDValue Mul = DAG.getNode(
6563           ISD::FMUL, sdl, getValue(I.getArgOperand(0)).getValueType(),
6564           getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), Flags);
6565       SDValue Add = DAG.getNode(ISD::FADD, sdl,
6566                                 getValue(I.getArgOperand(0)).getValueType(),
6567                                 Mul, getValue(I.getArgOperand(2)), Flags);
6568       setValue(&I, Add);
6569     }
6570     return;
6571   }
6572   case Intrinsic::convert_to_fp16:
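         // Round the operand to f16, then bitcast the bits into the i16 result.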
6573     setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
6574                              DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
6575                                          getValue(I.getArgOperand(0)),
6576                                          DAG.getTargetConstant(0, sdl,
6577                                                                MVT::i32))));
6578     return;
6579   case Intrinsic::convert_from_fp16:
6580     setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
6581                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
6582                              DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
6583                                          getValue(I.getArgOperand(0)))));
6584     return;
6585   case Intrinsic::fptosi_sat: {
6586     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6587     setValue(&I, DAG.getNode(ISD::FP_TO_SINT_SAT, sdl, VT,
6588                              getValue(I.getArgOperand(0)),
6589                              DAG.getValueType(VT.getScalarType())));
6590     return;
6591   }
6592   case Intrinsic::fptoui_sat: {
6593     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6594     setValue(&I, DAG.getNode(ISD::FP_TO_UINT_SAT, sdl, VT,
6595                              getValue(I.getArgOperand(0)),
6596                              DAG.getValueType(VT.getScalarType())));
6597     return;
6598   }
6599   case Intrinsic::set_rounding:
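         // SET_ROUNDING only produces a chain; making it the root keeps later FP
         // operations ordered after the rounding-mode change.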
6600     Res = DAG.getNode(ISD::SET_ROUNDING, sdl, MVT::Other,
6601                       {getRoot(), getValue(I.getArgOperand(0))});
6602     setValue(&I, Res);
6603     DAG.setRoot(Res.getValue(0));
6604     return;
6605   case Intrinsic::is_fpclass: {
6606     const DataLayout DLayout = DAG.getDataLayout();
6607     EVT DestVT = TLI.getValueType(DLayout, I.getType());
6608     EVT ArgVT = TLI.getValueType(DLayout, I.getArgOperand(0)->getType());
6609     FPClassTest Test = static_cast<FPClassTest>(
6610         cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
6611     MachineFunction &MF = DAG.getMachineFunction();
6612     const Function &F = MF.getFunction();
6613     SDValue Op = getValue(I.getArgOperand(0));
6614     SDNodeFlags Flags;
6615     Flags.setNoFPExcept(
6616         !F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
6617     // If ISD::IS_FPCLASS should be expanded, do it right now, because the
6618     // expansion can use illegal types. Expanding early allows these types
6619     // to be legalized prior to selection.
6620     if (!TLI.isOperationLegalOrCustom(ISD::IS_FPCLASS, ArgVT)) {
6621       SDValue Result = TLI.expandIS_FPCLASS(DestVT, Op, Test, Flags, sdl, DAG);
6622       setValue(&I, Result);
6623       return;
6624     }
6625 
6626     SDValue Check = DAG.getTargetConstant(Test, sdl, MVT::i32);
6627     SDValue V = DAG.getNode(ISD::IS_FPCLASS, sdl, DestVT, {Op, Check}, Flags);
6628     setValue(&I, V);
6629     return;
6630   }
6631   case Intrinsic::get_fpenv: {
6632     const DataLayout DLayout = DAG.getDataLayout();
6633     EVT EnvVT = TLI.getValueType(DLayout, I.getType());
6634     Align TempAlign = DAG.getEVTAlign(EnvVT);
6635     SDValue Chain = getRoot();
6636     // Use GET_FPENV if it is legal or custom. Otherwise use a memory-based
6637     // node and temporary storage on the stack.
6638     if (TLI.isOperationLegalOrCustom(ISD::GET_FPENV, EnvVT)) {
6639       Res = DAG.getNode(
6640           ISD::GET_FPENV, sdl,
6641           DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
6642                         MVT::Other),
6643           Chain);
6644     } else {
6645       SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
6646       int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
6647       auto MPI =
6648           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
6649       MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
6650           MPI, MachineMemOperand::MOStore, MemoryLocation::UnknownSize,
6651           TempAlign);
6652       Chain = DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
6653       Res = DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
6654     }
6655     setValue(&I, Res);
6656     DAG.setRoot(Res.getValue(1));
6657     return;
6658   }
6659   case Intrinsic::set_fpenv: {
6660     const DataLayout DLayout = DAG.getDataLayout();
6661     SDValue Env = getValue(I.getArgOperand(0));
6662     EVT EnvVT = Env.getValueType();
6663     Align TempAlign = DAG.getEVTAlign(EnvVT);
6664     SDValue Chain = getRoot();
6665     // If SET_FPENV is custom or legal, use it. Otherwise load the
6666     // environment from memory.
6667     if (TLI.isOperationLegalOrCustom(ISD::SET_FPENV, EnvVT)) {
6668       Chain = DAG.getNode(ISD::SET_FPENV, sdl, MVT::Other, Chain, Env);
6669     } else {
6670       // Allocate stack space, copy the environment bits into it, and use
6671       // this memory in SET_FPENV_MEM.
6672       SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
6673       int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
6674       auto MPI =
6675           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
6676       Chain = DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
6677                            MachineMemOperand::MOStore);
6678       MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
6679           MPI, MachineMemOperand::MOLoad, MemoryLocation::UnknownSize,
6680           TempAlign);
6681       Chain = DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
6682     }
6683     DAG.setRoot(Chain);
6684     return;
6685   }
6686   case Intrinsic::reset_fpenv:
6687     DAG.setRoot(DAG.getNode(ISD::RESET_FPENV, sdl, MVT::Other, getRoot()));
6688     return;
6689   case Intrinsic::get_fpmode:
6690     Res = DAG.getNode(
6691         ISD::GET_FPMODE, sdl,
6692         DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
6693                       MVT::Other),
6694         DAG.getRoot());
6695     setValue(&I, Res);
6696     DAG.setRoot(Res.getValue(1));
6697     return;
6698   case Intrinsic::set_fpmode:
6699     Res = DAG.getNode(ISD::SET_FPMODE, sdl, MVT::Other, {DAG.getRoot()},
6700                       getValue(I.getArgOperand(0)));
6701     DAG.setRoot(Res);
6702     return;
6703   case Intrinsic::reset_fpmode: {
6704     Res = DAG.getNode(ISD::RESET_FPMODE, sdl, MVT::Other, getRoot());
6705     DAG.setRoot(Res);
6706     return;
6707   }
6708   case Intrinsic::pcmarker: {
6709     SDValue Tmp = getValue(I.getArgOperand(0));
6710     DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
6711     return;
6712   }
6713   case Intrinsic::readcyclecounter: {
6714     SDValue Op = getRoot();
6715     Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
6716                       DAG.getVTList(MVT::i64, MVT::Other), Op);
6717     setValue(&I, Res);
6718     DAG.setRoot(Res.getValue(1));
6719     return;
6720   }
6721   case Intrinsic::bitreverse:
6722     setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
6723                              getValue(I.getArgOperand(0)).getValueType(),
6724                              getValue(I.getArgOperand(0))));
6725     return;
6726   case Intrinsic::bswap:
6727     setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
6728                              getValue(I.getArgOperand(0)).getValueType(),
6729                              getValue(I.getArgOperand(0))));
6730     return;
6731   case Intrinsic::cttz: {
6732     SDValue Arg = getValue(I.getArgOperand(0));
6733     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6734     EVT Ty = Arg.getValueType();
6735     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
6736                              sdl, Ty, Arg));
6737     return;
6738   }
6739   case Intrinsic::ctlz: {
6740     SDValue Arg = getValue(I.getArgOperand(0));
6741     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6742     EVT Ty = Arg.getValueType();
6743     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
6744                              sdl, Ty, Arg));
6745     return;
6746   }
6747   case Intrinsic::ctpop: {
6748     SDValue Arg = getValue(I.getArgOperand(0));
6749     EVT Ty = Arg.getValueType();
6750     setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
6751     return;
6752   }
6753   case Intrinsic::fshl:
6754   case Intrinsic::fshr: {
6755     bool IsFSHL = Intrinsic == Intrinsic::fshl;
6756     SDValue X = getValue(I.getArgOperand(0));
6757     SDValue Y = getValue(I.getArgOperand(1));
6758     SDValue Z = getValue(I.getArgOperand(2));
6759     EVT VT = X.getValueType();
6760 
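         // A funnel shift with both inputs equal is a rotate, so emit the rotate
         // node directly in that case.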
6761     if (X == Y) {
6762       auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
6763       setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
6764     } else {
6765       auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
6766       setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
6767     }
6768     return;
6769   }
6770   case Intrinsic::sadd_sat: {
6771     SDValue Op1 = getValue(I.getArgOperand(0));
6772     SDValue Op2 = getValue(I.getArgOperand(1));
6773     setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6774     return;
6775   }
6776   case Intrinsic::uadd_sat: {
6777     SDValue Op1 = getValue(I.getArgOperand(0));
6778     SDValue Op2 = getValue(I.getArgOperand(1));
6779     setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6780     return;
6781   }
6782   case Intrinsic::ssub_sat: {
6783     SDValue Op1 = getValue(I.getArgOperand(0));
6784     SDValue Op2 = getValue(I.getArgOperand(1));
6785     setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6786     return;
6787   }
6788   case Intrinsic::usub_sat: {
6789     SDValue Op1 = getValue(I.getArgOperand(0));
6790     SDValue Op2 = getValue(I.getArgOperand(1));
6791     setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6792     return;
6793   }
6794   case Intrinsic::sshl_sat: {
6795     SDValue Op1 = getValue(I.getArgOperand(0));
6796     SDValue Op2 = getValue(I.getArgOperand(1));
6797     setValue(&I, DAG.getNode(ISD::SSHLSAT, sdl, Op1.getValueType(), Op1, Op2));
6798     return;
6799   }
6800   case Intrinsic::ushl_sat: {
6801     SDValue Op1 = getValue(I.getArgOperand(0));
6802     SDValue Op2 = getValue(I.getArgOperand(1));
6803     setValue(&I, DAG.getNode(ISD::USHLSAT, sdl, Op1.getValueType(), Op1, Op2));
6804     return;
6805   }
6806   case Intrinsic::smul_fix:
6807   case Intrinsic::umul_fix:
6808   case Intrinsic::smul_fix_sat:
6809   case Intrinsic::umul_fix_sat: {
6810     SDValue Op1 = getValue(I.getArgOperand(0));
6811     SDValue Op2 = getValue(I.getArgOperand(1));
6812     SDValue Op3 = getValue(I.getArgOperand(2));
6813     setValue(&I, DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6814                              Op1.getValueType(), Op1, Op2, Op3));
6815     return;
6816   }
6817   case Intrinsic::sdiv_fix:
6818   case Intrinsic::udiv_fix:
6819   case Intrinsic::sdiv_fix_sat:
6820   case Intrinsic::udiv_fix_sat: {
6821     SDValue Op1 = getValue(I.getArgOperand(0));
6822     SDValue Op2 = getValue(I.getArgOperand(1));
6823     SDValue Op3 = getValue(I.getArgOperand(2));
6824     setValue(&I, expandDivFix(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6825                               Op1, Op2, Op3, DAG, TLI));
6826     return;
6827   }
6828   case Intrinsic::smax: {
6829     SDValue Op1 = getValue(I.getArgOperand(0));
6830     SDValue Op2 = getValue(I.getArgOperand(1));
6831     setValue(&I, DAG.getNode(ISD::SMAX, sdl, Op1.getValueType(), Op1, Op2));
6832     return;
6833   }
6834   case Intrinsic::smin: {
6835     SDValue Op1 = getValue(I.getArgOperand(0));
6836     SDValue Op2 = getValue(I.getArgOperand(1));
6837     setValue(&I, DAG.getNode(ISD::SMIN, sdl, Op1.getValueType(), Op1, Op2));
6838     return;
6839   }
6840   case Intrinsic::umax: {
6841     SDValue Op1 = getValue(I.getArgOperand(0));
6842     SDValue Op2 = getValue(I.getArgOperand(1));
6843     setValue(&I, DAG.getNode(ISD::UMAX, sdl, Op1.getValueType(), Op1, Op2));
6844     return;
6845   }
6846   case Intrinsic::umin: {
6847     SDValue Op1 = getValue(I.getArgOperand(0));
6848     SDValue Op2 = getValue(I.getArgOperand(1));
6849     setValue(&I, DAG.getNode(ISD::UMIN, sdl, Op1.getValueType(), Op1, Op2));
6850     return;
6851   }
6852   case Intrinsic::abs: {
6853     // TODO: Preserve "int min is poison" arg in SDAG?
6854     SDValue Op1 = getValue(I.getArgOperand(0));
6855     setValue(&I, DAG.getNode(ISD::ABS, sdl, Op1.getValueType(), Op1));
6856     return;
6857   }
6858   case Intrinsic::stacksave: {
6859     SDValue Op = getRoot();
6860     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6861     Res = DAG.getNode(ISD::STACKSAVE, sdl, DAG.getVTList(VT, MVT::Other), Op);
6862     setValue(&I, Res);
6863     DAG.setRoot(Res.getValue(1));
6864     return;
6865   }
6866   case Intrinsic::stackrestore:
6867     Res = getValue(I.getArgOperand(0));
6868     DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
6869     return;
6870   case Intrinsic::get_dynamic_area_offset: {
6871     SDValue Op = getRoot();
6872     EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
6873     EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6874     // The result type for @llvm.get.dynamic.area.offset should match the
6875     // target's PtrTy.
6876     if (PtrTy.getFixedSizeInBits() < ResTy.getFixedSizeInBits())
6877       report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
6878                          " intrinsic!");
6879     Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
6880                       Op);
6881     DAG.setRoot(Op);
6882     setValue(&I, Res);
6883     return;
6884   }
6885   case Intrinsic::stackguard: {
6886     MachineFunction &MF = DAG.getMachineFunction();
6887     const Module &M = *MF.getFunction().getParent();
6888     SDValue Chain = getRoot();
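         // Targets either provide a LOAD_STACK_GUARD pseudo or expose the guard
         // as a global that is loaded directly (volatile, so the load is not
         // optimized away).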
6889     if (TLI.useLoadStackGuardNode()) {
6890       Res = getLoadStackGuard(DAG, sdl, Chain);
6891     } else {
6892       EVT PtrTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6893       const Value *Global = TLI.getSDagStackGuard(M);
6894       Align Align = DAG.getDataLayout().getPrefTypeAlign(Global->getType());
6895       Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
6896                         MachinePointerInfo(Global, 0), Align,
6897                         MachineMemOperand::MOVolatile);
6898     }
6899     if (TLI.useStackGuardXorFP())
6900       Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
6901     DAG.setRoot(Chain);
6902     setValue(&I, Res);
6903     return;
6904   }
6905   case Intrinsic::stackprotector: {
6906     // Emit code into the DAG to store the stack guard onto the stack.
6907     MachineFunction &MF = DAG.getMachineFunction();
6908     MachineFrameInfo &MFI = MF.getFrameInfo();
6909     SDValue Src, Chain = getRoot();
6910 
6911     if (TLI.useLoadStackGuardNode())
6912       Src = getLoadStackGuard(DAG, sdl, Chain);
6913     else
6914       Src = getValue(I.getArgOperand(0));   // The guard's value.
6915 
6916     AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
6917 
6918     int FI = FuncInfo.StaticAllocaMap[Slot];
6919     MFI.setStackProtectorIndex(FI);
6920     EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
6921 
6922     SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
6923 
6924     // Store the stack protector onto the stack.
6925     Res = DAG.getStore(
6926         Chain, sdl, Src, FIN,
6927         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
6928         MaybeAlign(), MachineMemOperand::MOVolatile);
6929     setValue(&I, Res);
6930     DAG.setRoot(Res);
6931     return;
6932   }
6933   case Intrinsic::objectsize:
6934     llvm_unreachable("llvm.objectsize.* should have been lowered already");
6935 
6936   case Intrinsic::is_constant:
6937     llvm_unreachable("llvm.is.constant.* should have been lowered already");
6938 
6939   case Intrinsic::annotation:
6940   case Intrinsic::ptr_annotation:
6941   case Intrinsic::launder_invariant_group:
6942   case Intrinsic::strip_invariant_group:
6943     // Drop the intrinsic, but forward the value.
6944     setValue(&I, getValue(I.getOperand(0)));
6945     return;
6946 
6947   case Intrinsic::assume:
6948   case Intrinsic::experimental_noalias_scope_decl:
6949   case Intrinsic::var_annotation:
6950   case Intrinsic::sideeffect:
6951     // Discard annotate attributes, noalias scope declarations, assumptions, and
6952     // artificial side-effects.
6953     return;
6954 
6955   case Intrinsic::codeview_annotation: {
6956     // Emit a label associated with this metadata.
6957     MachineFunction &MF = DAG.getMachineFunction();
6958     MCSymbol *Label =
6959         MF.getMMI().getContext().createTempSymbol("annotation", true);
6960     Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
6961     MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
6962     Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
6963     DAG.setRoot(Res);
6964     return;
6965   }
6966 
6967   case Intrinsic::init_trampoline: {
6968     const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
6969 
6970     SDValue Ops[6];
6971     Ops[0] = getRoot();
6972     Ops[1] = getValue(I.getArgOperand(0));
6973     Ops[2] = getValue(I.getArgOperand(1));
6974     Ops[3] = getValue(I.getArgOperand(2));
6975     Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
6976     Ops[5] = DAG.getSrcValue(F);
6977 
6978     Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
6979 
6980     DAG.setRoot(Res);
6981     return;
6982   }
6983   case Intrinsic::adjust_trampoline:
6984     setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
6985                              TLI.getPointerTy(DAG.getDataLayout()),
6986                              getValue(I.getArgOperand(0))));
6987     return;
6988   case Intrinsic::gcroot: {
6989     assert(DAG.getMachineFunction().getFunction().hasGC() &&
6990            "only valid in functions with gc specified, enforced by Verifier");
6991     assert(GFI && "implied by previous");
6992     const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
6993     const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
6994 
6995     FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
6996     GFI->addStackRoot(FI->getIndex(), TypeMap);
6997     return;
6998   }
6999   case Intrinsic::gcread:
7000   case Intrinsic::gcwrite:
7001     llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
7002   case Intrinsic::get_rounding:
7003     Res = DAG.getNode(ISD::GET_ROUNDING, sdl, {MVT::i32, MVT::Other}, getRoot());
7004     setValue(&I, Res);
7005     DAG.setRoot(Res.getValue(1));
7006     return;
7007 
7008   case Intrinsic::expect:
7009     // Just replace __builtin_expect(exp, c) with EXP.
7010     setValue(&I, getValue(I.getArgOperand(0)));
7011     return;
7012 
7013   case Intrinsic::ubsantrap:
7014   case Intrinsic::debugtrap:
7015   case Intrinsic::trap: {
7016     StringRef TrapFuncName =
7017         I.getAttributes().getFnAttr("trap-func-name").getValueAsString();
7018     if (TrapFuncName.empty()) {
7019       switch (Intrinsic) {
7020       case Intrinsic::trap:
7021         DAG.setRoot(DAG.getNode(ISD::TRAP, sdl, MVT::Other, getRoot()));
7022         break;
7023       case Intrinsic::debugtrap:
7024         DAG.setRoot(DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other, getRoot()));
7025         break;
7026       case Intrinsic::ubsantrap:
7027         DAG.setRoot(DAG.getNode(
7028             ISD::UBSANTRAP, sdl, MVT::Other, getRoot(),
7029             DAG.getTargetConstant(
7030                 cast<ConstantInt>(I.getArgOperand(0))->getZExtValue(), sdl,
7031                 MVT::i32)));
7032         break;
7033       default: llvm_unreachable("unknown trap intrinsic");
7034       }
7035       return;
7036     }
7037     TargetLowering::ArgListTy Args;
7038     if (Intrinsic == Intrinsic::ubsantrap) {
7039       Args.push_back(TargetLoweringBase::ArgListEntry());
7040       Args[0].Val = I.getArgOperand(0);
7041       Args[0].Node = getValue(Args[0].Val);
7042       Args[0].Ty = Args[0].Val->getType();
7043     }
7044 
7045     TargetLowering::CallLoweringInfo CLI(DAG);
7046     CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
7047         CallingConv::C, I.getType(),
7048         DAG.getExternalSymbol(TrapFuncName.data(),
7049                               TLI.getPointerTy(DAG.getDataLayout())),
7050         std::move(Args));
7051 
7052     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
7053     DAG.setRoot(Result.second);
7054     return;
7055   }
7056 
7057   case Intrinsic::uadd_with_overflow:
7058   case Intrinsic::sadd_with_overflow:
7059   case Intrinsic::usub_with_overflow:
7060   case Intrinsic::ssub_with_overflow:
7061   case Intrinsic::umul_with_overflow:
7062   case Intrinsic::smul_with_overflow: {
7063     ISD::NodeType Op;
7064     switch (Intrinsic) {
7065     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
7066     case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
7067     case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
7068     case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
7069     case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
7070     case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
7071     case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
7072     }
7073     SDValue Op1 = getValue(I.getArgOperand(0));
7074     SDValue Op2 = getValue(I.getArgOperand(1));
7075 
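         // The node returns the arithmetic result plus an overflow flag; for
         // vector operands the overflow type is a vector of i1 with the same
         // element count.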
7076     EVT ResultVT = Op1.getValueType();
7077     EVT OverflowVT = MVT::i1;
7078     if (ResultVT.isVector())
7079       OverflowVT = EVT::getVectorVT(
7080           *Context, OverflowVT, ResultVT.getVectorElementCount());
7081 
7082     SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT);
7083     setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
7084     return;
7085   }
7086   case Intrinsic::prefetch: {
7087     SDValue Ops[5];
7088     unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7089     auto Flags = rw == 0 ? MachineMemOperand::MOLoad
                              : MachineMemOperand::MOStore;
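         // Operands: chain, then the pointer, rw, locality, and cache-type
         // arguments of llvm.prefetch.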
7090     Ops[0] = DAG.getRoot();
7091     Ops[1] = getValue(I.getArgOperand(0));
7092     Ops[2] = getValue(I.getArgOperand(1));
7093     Ops[3] = getValue(I.getArgOperand(2));
7094     Ops[4] = getValue(I.getArgOperand(3));
7095     SDValue Result = DAG.getMemIntrinsicNode(
7096         ISD::PREFETCH, sdl, DAG.getVTList(MVT::Other), Ops,
7097         EVT::getIntegerVT(*Context, 8), MachinePointerInfo(I.getArgOperand(0)),
7098         /* align */ std::nullopt, Flags);
7099 
7100     // Chain the prefetch in parallel with any pending loads, to stay out of
7101     // the way of later optimizations.
7102     PendingLoads.push_back(Result);
7103     Result = getRoot();
7104     DAG.setRoot(Result);
7105     return;
7106   }
7107   case Intrinsic::lifetime_start:
7108   case Intrinsic::lifetime_end: {
7109     bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
7110     // Stack coloring is not enabled at -O0; discard region information.
7111     if (TM.getOptLevel() == CodeGenOptLevel::None)
7112       return;
7113 
7114     const int64_t ObjectSize =
7115         cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
7116     Value *const ObjectPtr = I.getArgOperand(1);
7117     SmallVector<const Value *, 4> Allocas;
7118     getUnderlyingObjects(ObjectPtr, Allocas);
7119 
7120     for (const Value *Alloca : Allocas) {
7121       const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(Alloca);
7122 
7123       // Could not find an Alloca.
7124       if (!LifetimeObject)
7125         continue;
7126 
7127       // First check that the Alloca is static, otherwise it won't have a
7128       // valid frame index.
7129       auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
7130       if (SI == FuncInfo.StaticAllocaMap.end())
7131         return;
7132 
7133       const int FrameIndex = SI->second;
7134       int64_t Offset;
7135       if (GetPointerBaseWithConstantOffset(
7136               ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject)
7137         Offset = -1; // Cannot determine offset from alloca to lifetime object.
7138       Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize,
7139                                 Offset);
7140       DAG.setRoot(Res);
7141     }
7142     return;
7143   }
7144   case Intrinsic::pseudoprobe: {
7145     auto Guid = cast<ConstantInt>(I.getArgOperand(0))->getZExtValue();
7146     auto Index = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7147     auto Attr = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
7148     Res = DAG.getPseudoProbeNode(sdl, getRoot(), Guid, Index, Attr);
7149     DAG.setRoot(Res);
7150     return;
7151   }
7152   case Intrinsic::invariant_start:
7153     // Discard region information.
7154     setValue(&I,
7155              DAG.getUNDEF(TLI.getValueType(DAG.getDataLayout(), I.getType())));
7156     return;
7157   case Intrinsic::invariant_end:
7158     // Discard region information.
7159     return;
7160   case Intrinsic::clear_cache:
7161     // FunctionName may be null.
7162     if (const char *FunctionName = TLI.getClearCacheBuiltinName())
7163       lowerCallToExternalSymbol(I, FunctionName);
7164     return;
7165   case Intrinsic::donothing:
7166   case Intrinsic::seh_try_begin:
7167   case Intrinsic::seh_scope_begin:
7168   case Intrinsic::seh_try_end:
7169   case Intrinsic::seh_scope_end:
7170     // ignore
7171     return;
7172   case Intrinsic::experimental_stackmap:
7173     visitStackmap(I);
7174     return;
7175   case Intrinsic::experimental_patchpoint_void:
7176   case Intrinsic::experimental_patchpoint_i64:
7177     visitPatchpoint(I);
7178     return;
7179   case Intrinsic::experimental_gc_statepoint:
7180     LowerStatepoint(cast<GCStatepointInst>(I));
7181     return;
7182   case Intrinsic::experimental_gc_result:
7183     visitGCResult(cast<GCResultInst>(I));
7184     return;
7185   case Intrinsic::experimental_gc_relocate:
7186     visitGCRelocate(cast<GCRelocateInst>(I));
7187     return;
7188   case Intrinsic::instrprof_cover:
7189     llvm_unreachable("instrprof failed to lower a cover");
7190   case Intrinsic::instrprof_increment:
7191     llvm_unreachable("instrprof failed to lower an increment");
7192   case Intrinsic::instrprof_timestamp:
7193     llvm_unreachable("instrprof failed to lower a timestamp");
7194   case Intrinsic::instrprof_value_profile:
7195     llvm_unreachable("instrprof failed to lower a value profiling call");
7196   case Intrinsic::localescape: {
7197     MachineFunction &MF = DAG.getMachineFunction();
7198     const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
7199 
7200     // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
7201     // is the same on all targets.
7202     for (unsigned Idx = 0, E = I.arg_size(); Idx < E; ++Idx) {
7203       Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
7204       if (isa<ConstantPointerNull>(Arg))
7205         continue; // Skip null pointers. They represent a hole in index space.
7206       AllocaInst *Slot = cast<AllocaInst>(Arg);
7207       assert(FuncInfo.StaticAllocaMap.count(Slot) &&
7208              "can only escape static allocas");
7209       int FI = FuncInfo.StaticAllocaMap[Slot];
7210       MCSymbol *FrameAllocSym =
7211           MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
7212               GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx);
7213       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
7214               TII->get(TargetOpcode::LOCAL_ESCAPE))
7215           .addSym(FrameAllocSym)
7216           .addFrameIndex(FI);
7217     }
7218 
7219     return;
7220   }
7221 
7222   case Intrinsic::localrecover: {
7223     // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
7224     MachineFunction &MF = DAG.getMachineFunction();
7225 
7226     // Get the symbol that defines the frame offset.
7227     auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
7228     auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
7229     unsigned IdxVal =
7230         unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7231     MCSymbol *FrameAllocSym =
7232         MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
7233             GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);
7234 
7235     Value *FP = I.getArgOperand(1);
7236     SDValue FPVal = getValue(FP);
7237     EVT PtrVT = FPVal.getValueType();
7238 
7239     // Create a MCSymbol for the label to avoid any target lowering
7240     // that would make this PC relative.
7241     SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
7242     SDValue OffsetVal =
7243         DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
7244 
7245     // Add the offset to the FP.
7246     SDValue Add = DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
7247     setValue(&I, Add);
7248 
7249     return;
7250   }
7251 
7252   case Intrinsic::eh_exceptionpointer:
7253   case Intrinsic::eh_exceptioncode: {
7254     // Get the exception pointer vreg, copy from it, and resize it to fit.
7255     const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
7256     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
7257     const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
7258     unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
7259     SDValue N = DAG.getCopyFromReg(DAG.getEntryNode(), sdl, VReg, PtrVT);
7260     if (Intrinsic == Intrinsic::eh_exceptioncode)
7261       N = DAG.getZExtOrTrunc(N, sdl, MVT::i32);
7262     setValue(&I, N);
7263     return;
7264   }
7265   case Intrinsic::xray_customevent: {
7266     // Here we want to make sure that the intrinsic behaves as if it has a
7267     // specific calling convention.
7268     const auto &Triple = DAG.getTarget().getTargetTriple();
7269     if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
7270       return;
7271 
7272     SmallVector<SDValue, 8> Ops;
7273 
7274     // We always want the arguments in registers.
7275     SDValue LogEntryVal = getValue(I.getArgOperand(0));
7276     SDValue StrSizeVal = getValue(I.getArgOperand(1));
7277     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7278     SDValue Chain = getRoot();
7279     Ops.push_back(LogEntryVal);
7280     Ops.push_back(StrSizeVal);
7281     Ops.push_back(Chain);
7282 
7283     // We need to enforce the calling convention for the callsite, so that
7284     // argument ordering is enforced correctly, and that register allocation can
7285     // see that some registers may be assumed clobbered and have to preserve
7286     // them across calls to the intrinsic.
7287     MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
7288                                            sdl, NodeTys, Ops);
7289     SDValue patchableNode = SDValue(MN, 0);
7290     DAG.setRoot(patchableNode);
7291     setValue(&I, patchableNode);
7292     return;
7293   }
7294   case Intrinsic::xray_typedevent: {
7295     // Here we want to make sure that the intrinsic behaves as if it has a
7296     // specific calling convention.
7297     const auto &Triple = DAG.getTarget().getTargetTriple();
7298     if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
7299       return;
7300 
7301     SmallVector<SDValue, 8> Ops;
7302 
7303     // We always want the arguments in registers.
7304     // It's unclear how manipulating the selection DAG here forces callers to
7305     // provide arguments in registers instead of on the stack.
7306     SDValue LogTypeId = getValue(I.getArgOperand(0));
7307     SDValue LogEntryVal = getValue(I.getArgOperand(1));
7308     SDValue StrSizeVal = getValue(I.getArgOperand(2));
7309     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7310     SDValue Chain = getRoot();
7311     Ops.push_back(LogTypeId);
7312     Ops.push_back(LogEntryVal);
7313     Ops.push_back(StrSizeVal);
7314     Ops.push_back(Chain);
7315 
7316     // We need to enforce the calling convention for the callsite, so that
7317     // argument ordering is enforced correctly, and that register allocation can
7318     // see that some registers may be assumed clobbered and have to preserve
7319     // them across calls to the intrinsic.
7320     MachineSDNode *MN = DAG.getMachineNode(
7321         TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys, Ops);
7322     SDValue patchableNode = SDValue(MN, 0);
7323     DAG.setRoot(patchableNode);
7324     setValue(&I, patchableNode);
7325     return;
7326   }
7327   case Intrinsic::experimental_deoptimize:
7328     LowerDeoptimizeCall(&I);
7329     return;
7330   case Intrinsic::experimental_stepvector:
7331     visitStepVector(I);
7332     return;
7333   case Intrinsic::vector_reduce_fadd:
7334   case Intrinsic::vector_reduce_fmul:
7335   case Intrinsic::vector_reduce_add:
7336   case Intrinsic::vector_reduce_mul:
7337   case Intrinsic::vector_reduce_and:
7338   case Intrinsic::vector_reduce_or:
7339   case Intrinsic::vector_reduce_xor:
7340   case Intrinsic::vector_reduce_smax:
7341   case Intrinsic::vector_reduce_smin:
7342   case Intrinsic::vector_reduce_umax:
7343   case Intrinsic::vector_reduce_umin:
7344   case Intrinsic::vector_reduce_fmax:
7345   case Intrinsic::vector_reduce_fmin:
7346   case Intrinsic::vector_reduce_fmaximum:
7347   case Intrinsic::vector_reduce_fminimum:
7348     visitVectorReduce(I, Intrinsic);
7349     return;
7350 
7351   case Intrinsic::icall_branch_funnel: {
7352     SmallVector<SDValue, 16> Ops;
7353     Ops.push_back(getValue(I.getArgOperand(0)));
7354 
7355     int64_t Offset;
7356     auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7357         I.getArgOperand(1), Offset, DAG.getDataLayout()));
7358     if (!Base)
7359       report_fatal_error(
7360           "llvm.icall.branch.funnel operand must be a GlobalValue");
7361     Ops.push_back(DAG.getTargetGlobalAddress(Base, sdl, MVT::i64, 0));
7362 
7363     struct BranchFunnelTarget {
7364       int64_t Offset;
7365       SDValue Target;
7366     };
7367     SmallVector<BranchFunnelTarget, 8> Targets;
7368 
7369     for (unsigned Op = 1, N = I.arg_size(); Op != N; Op += 2) {
7370       auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7371           I.getArgOperand(Op), Offset, DAG.getDataLayout()));
7372       if (ElemBase != Base)
7373         report_fatal_error("all llvm.icall.branch.funnel operands must refer "
7374                            "to the same GlobalValue");
7375 
7376       SDValue Val = getValue(I.getArgOperand(Op + 1));
7377       auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
7378       if (!GA)
7379         report_fatal_error(
7380             "llvm.icall.branch.funnel operand must be a GlobalValue");
7381       Targets.push_back({Offset, DAG.getTargetGlobalAddress(
7382                                      GA->getGlobal(), sdl, Val.getValueType(),
7383                                      GA->getOffset())});
7384     }
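         // Sort the targets by offset so the machine node receives them in
         // increasing order.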
7385     llvm::sort(Targets,
7386                [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
7387                  return T1.Offset < T2.Offset;
7388                });
7389 
7390     for (auto &T : Targets) {
7391       Ops.push_back(DAG.getTargetConstant(T.Offset, sdl, MVT::i32));
7392       Ops.push_back(T.Target);
7393     }
7394 
7395     Ops.push_back(DAG.getRoot()); // Chain
7396     SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl,
7397                                  MVT::Other, Ops),
7398               0);
7399     DAG.setRoot(N);
7400     setValue(&I, N);
7401     HasTailCall = true;
7402     return;
7403   }
7404 
7405   case Intrinsic::wasm_landingpad_index:
7406     // The information this intrinsic contained has been transferred to
7407     // MachineFunction in SelectionDAGISel::PrepareEHLandingPad. We can safely
7408     // delete it now.
7409     return;
7410 
7411   case Intrinsic::aarch64_settag:
7412   case Intrinsic::aarch64_settag_zero: {
7413     const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7414     bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero;
7415     SDValue Val = TSI.EmitTargetCodeForSetTag(
7416         DAG, sdl, getRoot(), getValue(I.getArgOperand(0)),
7417         getValue(I.getArgOperand(1)), MachinePointerInfo(I.getArgOperand(0)),
7418         ZeroMemory);
7419     DAG.setRoot(Val);
7420     setValue(&I, Val);
7421     return;
7422   }
7423   case Intrinsic::ptrmask: {
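         // llvm.ptrmask lowers to a plain AND once the mask is resized to the
         // pointer width.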
7424     SDValue Ptr = getValue(I.getOperand(0));
7425     SDValue Const = getValue(I.getOperand(1));
7426 
7427     EVT PtrVT = Ptr.getValueType();
7428     setValue(&I, DAG.getNode(ISD::AND, sdl, PtrVT, Ptr,
7429                              DAG.getZExtOrTrunc(Const, sdl, PtrVT)));
7430     return;
7431   }
7432   case Intrinsic::threadlocal_address: {
7433     setValue(&I, getValue(I.getOperand(0)));
7434     return;
7435   }
7436   case Intrinsic::get_active_lane_mask: {
7437     EVT CCVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7438     SDValue Index = getValue(I.getOperand(0));
7439     EVT ElementVT = Index.getValueType();
7440 
7441     if (!TLI.shouldExpandGetActiveLaneMask(CCVT, ElementVT)) {
7442       visitTargetIntrinsic(I, Intrinsic);
7443       return;
7444     }
7445 
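         // Generic expansion: lane i is active iff Index + i < TripCount. The
         // saturating add keeps the induction vector from wrapping past the
         // maximum value.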
7446     SDValue TripCount = getValue(I.getOperand(1));
7447     EVT VecTy = EVT::getVectorVT(*DAG.getContext(), ElementVT,
7448                                  CCVT.getVectorElementCount());
7449 
7450     SDValue VectorIndex = DAG.getSplat(VecTy, sdl, Index);
7451     SDValue VectorTripCount = DAG.getSplat(VecTy, sdl, TripCount);
7452     SDValue VectorStep = DAG.getStepVector(sdl, VecTy);
7453     SDValue VectorInduction = DAG.getNode(
7454         ISD::UADDSAT, sdl, VecTy, VectorIndex, VectorStep);
7455     SDValue SetCC = DAG.getSetCC(sdl, CCVT, VectorInduction,
7456                                  VectorTripCount, ISD::CondCode::SETULT);
7457     setValue(&I, SetCC);
7458     return;
7459   }
7460   case Intrinsic::experimental_get_vector_length: {
7461     assert(cast<ConstantInt>(I.getOperand(1))->getSExtValue() > 0 &&
7462            "Expected positive VF");
7463     unsigned VF = cast<ConstantInt>(I.getOperand(1))->getZExtValue();
7464     bool IsScalable = cast<ConstantInt>(I.getOperand(2))->isOne();
7465 
7466     SDValue Count = getValue(I.getOperand(0));
7467     EVT CountVT = Count.getValueType();
7468 
7469     if (!TLI.shouldExpandGetVectorLength(CountVT, VF, IsScalable)) {
7470       visitTargetIntrinsic(I, Intrinsic);
7471       return;
7472     }
7473 
7474     // Expand to a umin between the trip count and the maximum number of
7475     // elements the type can hold.
7476     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7477 
7478     // Extend the trip count to at least the result VT.
7479     if (CountVT.bitsLT(VT)) {
7480       Count = DAG.getNode(ISD::ZERO_EXTEND, sdl, VT, Count);
7481       CountVT = VT;
7482     }
7483 
7484     SDValue MaxEVL = DAG.getElementCount(sdl, CountVT,
7485                                          ElementCount::get(VF, IsScalable));
7486 
7487     SDValue UMin = DAG.getNode(ISD::UMIN, sdl, CountVT, Count, MaxEVL);
7488     // Clip to the result type if needed.
7489     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, sdl, VT, UMin);
7490 
7491     setValue(&I, Trunc);
7492     return;
7493   }
7494   case Intrinsic::vector_insert: {
7495     SDValue Vec = getValue(I.getOperand(0));
7496     SDValue SubVec = getValue(I.getOperand(1));
7497     SDValue Index = getValue(I.getOperand(2));
7498 
7499     // The intrinsic's index type is i64, but the SDNode requires an index type
7500     // suitable for the target. Convert the index as required.
7501     MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
7502     if (Index.getValueType() != VectorIdxTy)
7503       Index = DAG.getVectorIdxConstant(
7504           cast<ConstantSDNode>(Index)->getZExtValue(), sdl);
7505 
7506     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7507     setValue(&I, DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, ResultVT, Vec, SubVec,
7508                              Index));
7509     return;
7510   }
7511   case Intrinsic::vector_extract: {
7512     SDValue Vec = getValue(I.getOperand(0));
7513     SDValue Index = getValue(I.getOperand(1));
7514     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7515 
7516     // The intrinsic's index type is i64, but the SDNode requires an index type
7517     // suitable for the target. Convert the index as required.
7518     MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
7519     if (Index.getValueType() != VectorIdxTy)
7520       Index = DAG.getVectorIdxConstant(
7521           cast<ConstantSDNode>(Index)->getZExtValue(), sdl);
7522 
7523     setValue(&I,
7524              DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, ResultVT, Vec, Index));
7525     return;
7526   }
7527   case Intrinsic::experimental_vector_reverse:
7528     visitVectorReverse(I);
7529     return;
7530   case Intrinsic::experimental_vector_splice:
7531     visitVectorSplice(I);
7532     return;
7533   case Intrinsic::callbr_landingpad:
7534     visitCallBrLandingPad(I);
7535     return;
7536   case Intrinsic::experimental_vector_interleave2:
7537     visitVectorInterleave(I);
7538     return;
7539   case Intrinsic::experimental_vector_deinterleave2:
7540     visitVectorDeinterleave(I);
7541     return;
7542   }
7543 }
7544 
7545 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
7546     const ConstrainedFPIntrinsic &FPI) {
7547   SDLoc sdl = getCurSDLoc();
7548 
7549   // We do not need to serialize constrained FP intrinsics against
7550   // each other or against (nonvolatile) loads, so they can be
7551   // chained like loads.
7552   SDValue Chain = DAG.getRoot();
7553   SmallVector<SDValue, 4> Opers;
7554   Opers.push_back(Chain);
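       // The incoming chain is always operand 0; the FP operands follow in order.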
7555   if (FPI.isUnaryOp()) {
7556     Opers.push_back(getValue(FPI.getArgOperand(0)));
7557   } else if (FPI.isTernaryOp()) {
7558     Opers.push_back(getValue(FPI.getArgOperand(0)));
7559     Opers.push_back(getValue(FPI.getArgOperand(1)));
7560     Opers.push_back(getValue(FPI.getArgOperand(2)));
7561   } else {
7562     Opers.push_back(getValue(FPI.getArgOperand(0)));
7563     Opers.push_back(getValue(FPI.getArgOperand(1)));
7564   }
7565 
7566   auto pushOutChain = [this](SDValue Result, fp::ExceptionBehavior EB) {
7567     assert(Result.getNode()->getNumValues() == 2);
7568 
7569     // Push node to the appropriate list so that future instructions can be
7570     // chained up correctly.
7571     SDValue OutChain = Result.getValue(1);
7572     switch (EB) {
7573     case fp::ExceptionBehavior::ebIgnore:
7574       // The only reason why ebIgnore nodes still need to be chained is that
7575       // they might depend on the current rounding mode, and therefore must
7576       // not be moved across instructions that may change that mode.
7577       [[fallthrough]];
7578     case fp::ExceptionBehavior::ebMayTrap:
7579       // These must not be moved across calls or instructions that may change
7580       // floating-point exception masks.
7581       PendingConstrainedFP.push_back(OutChain);
7582       break;
7583     case fp::ExceptionBehavior::ebStrict:
7584       // These must not be moved across calls or instructions that may change
7585       // floating-point exception masks or read floating-point exception flags.
7586       // In addition, they cannot be optimized out even if unused.
7587       PendingConstrainedFPStrict.push_back(OutChain);
7588       break;
7589     }
7590   };
7591 
7592   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7593   EVT VT = TLI.getValueType(DAG.getDataLayout(), FPI.getType());
7594   SDVTList VTs = DAG.getVTList(VT, MVT::Other);
7595   fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();
7596 
7597   SDNodeFlags Flags;
7598   if (EB == fp::ExceptionBehavior::ebIgnore)
7599     Flags.setNoFPExcept(true);
7600 
7601   if (auto *FPOp = dyn_cast<FPMathOperator>(&FPI))
7602     Flags.copyFMF(*FPOp);
7603 
7604   unsigned Opcode;
7605   switch (FPI.getIntrinsicID()) {
7606   default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
7607 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
7608   case Intrinsic::INTRINSIC:                                                   \
7609     Opcode = ISD::STRICT_##DAGN;                                               \
7610     break;
7611 #include "llvm/IR/ConstrainedOps.def"
7612   case Intrinsic::experimental_constrained_fmuladd: {
7613     Opcode = ISD::STRICT_FMA;
7614     // Break fmuladd into fmul and fadd.
7615     if (TM.Options.AllowFPOpFusion == FPOpFusion::Strict ||
7616         !TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
7617       Opers.pop_back();
7618       SDValue Mul = DAG.getNode(ISD::STRICT_FMUL, sdl, VTs, Opers, Flags);
7619       pushOutChain(Mul, EB);
7620       Opcode = ISD::STRICT_FADD;
7621       Opers.clear();
7622       Opers.push_back(Mul.getValue(1));
7623       Opers.push_back(Mul.getValue(0));
7624       Opers.push_back(getValue(FPI.getArgOperand(2)));
7625     }
7626     break;
7627   }
7628   }
7629 
7630   // A few strict DAG nodes carry additional operands that are not
7631   // set up by the default code above.
7632   switch (Opcode) {
7633   default: break;
7634   case ISD::STRICT_FP_ROUND:
7635     Opers.push_back(
7636         DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())));
7637     break;
7638   case ISD::STRICT_FSETCC:
7639   case ISD::STRICT_FSETCCS: {
7640     auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
7641     ISD::CondCode Condition = getFCmpCondCode(FPCmp->getPredicate());
7642     if (TM.Options.NoNaNsFPMath)
7643       Condition = getFCmpCodeWithoutNaN(Condition);
7644     Opers.push_back(DAG.getCondCode(Condition));
7645     break;
7646   }
7647   }
7648 
7649   SDValue Result = DAG.getNode(Opcode, sdl, VTs, Opers, Flags);
7650   pushOutChain(Result, EB);
7651 
7652   SDValue FPResult = Result.getValue(0);
7653   setValue(&FPI, FPResult);
7654 }
7655 
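// Mapping sketch (illustrative): VPIntrinsics.def pairs each VP intrinsic
// with a VP SDNode, e.g. llvm.vp.add -> ISD::VP_ADD. Two cases need extra
// logic below: vp.ctlz/vp.cttz, where an immediate argument selects the
// *_ZERO_UNDEF variant, and the sequential FP reductions, which may relax to
// the cheaper unordered form when the call allows reassociation.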
7656 static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin) {
7657   std::optional<unsigned> ResOPC;
7658   switch (VPIntrin.getIntrinsicID()) {
7659   case Intrinsic::vp_ctlz: {
7660     bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
7661     ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
7662     break;
7663   }
7664   case Intrinsic::vp_cttz: {
7665     bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
7666     ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
7667     break;
7668   }
7669 #define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD)                                    \
7670   case Intrinsic::VPID:                                                        \
7671     ResOPC = ISD::VPSD;                                                        \
7672     break;
7673 #include "llvm/IR/VPIntrinsics.def"
7674   }
7675 
7676   if (!ResOPC)
7677     llvm_unreachable(
7678         "Inconsistency: no SDNode available for this VPIntrinsic!");
7679 
7680   if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
7681       *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
7682     if (VPIntrin.getFastMathFlags().allowReassoc())
7683       return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
7684                                                 : ISD::VP_REDUCE_FMUL;
7685   }
7686 
7687   return *ResOPC;
7688 }
7689 
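// Example (a sketch, not from this file): a predicated load such as
//   %v = call <4 x i32> @llvm.vp.load.v4i32.p0(ptr %p, <4 x i1> %m, i32 %evl)
// arrives here with OpValues = {pointer, mask, EVL} and becomes a VP_LOAD
// node; its chain result joins PendingLoads unless alias analysis proves the
// pointer refers to constant memory.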
7690 void SelectionDAGBuilder::visitVPLoad(
7691     const VPIntrinsic &VPIntrin, EVT VT,
7692     const SmallVectorImpl<SDValue> &OpValues) {
7693   SDLoc DL = getCurSDLoc();
7694   Value *PtrOperand = VPIntrin.getArgOperand(0);
7695   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7696   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7697   const MDNode *Ranges = getRangeMetadata(VPIntrin);
7698   SDValue LD;
7699   // Do not serialize variable-length loads of constant memory with
7700   // anything.
7701   if (!Alignment)
7702     Alignment = DAG.getEVTAlign(VT);
7703   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
7704   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
7705   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
7706   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7707       MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
7708       MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
7709   LD = DAG.getLoadVP(VT, DL, InChain, OpValues[0], OpValues[1], OpValues[2],
7710                      MMO, false /*IsExpanding */);
7711   if (AddToChain)
7712     PendingLoads.push_back(LD.getValue(1));
7713   setValue(&VPIntrin, LD);
7714 }
7715 
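// Addressing sketch (illustrative): for a gather like
//   call <4 x double> @llvm.vp.gather.v4f64.v4p0(<4 x ptr> %ptrs, ...)
// getUniformBase() tries to split %ptrs into base + index * scale so the
// target can keep a scalar base register; when that fails, the whole pointer
// vector is used as the index with a zero base and unit scale.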
7716 void SelectionDAGBuilder::visitVPGather(
7717     const VPIntrinsic &VPIntrin, EVT VT,
7718     const SmallVectorImpl<SDValue> &OpValues) {
7719   SDLoc DL = getCurSDLoc();
7720   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7721   Value *PtrOperand = VPIntrin.getArgOperand(0);
7722   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7723   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7724   const MDNode *Ranges = getRangeMetadata(VPIntrin);
7725   SDValue LD;
7726   if (!Alignment)
7727     Alignment = DAG.getEVTAlign(VT.getScalarType());
7728   unsigned AS =
7729     PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
7730   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7731      MachinePointerInfo(AS), MachineMemOperand::MOLoad,
7732      MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
7733   SDValue Base, Index, Scale;
7734   ISD::MemIndexType IndexType;
7735   bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
7736                                     this, VPIntrin.getParent(),
7737                                     VT.getScalarStoreSize());
7738   if (!UniformBase) {
7739     Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
7740     Index = getValue(PtrOperand);
7741     IndexType = ISD::SIGNED_SCALED;
7742     Scale = DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
7743   }
7744   EVT IdxVT = Index.getValueType();
7745   EVT EltTy = IdxVT.getVectorElementType();
7746   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
7747     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
7748     Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
7749   }
7750   LD = DAG.getGatherVP(
7751       DAG.getVTList(VT, MVT::Other), VT, DL,
7752       {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
7753       IndexType);
7754   PendingLoads.push_back(LD.getValue(1));
7755   setValue(&VPIntrin, LD);
7756 }
7757 
7758 void SelectionDAGBuilder::visitVPStore(
7759     const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
7760   SDLoc DL = getCurSDLoc();
7761   Value *PtrOperand = VPIntrin.getArgOperand(1);
7762   EVT VT = OpValues[0].getValueType();
7763   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7764   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7765   SDValue ST;
7766   if (!Alignment)
7767     Alignment = DAG.getEVTAlign(VT);
7768   SDValue Ptr = OpValues[1];
7769   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
7770   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7771       MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
7772       MemoryLocation::UnknownSize, *Alignment, AAInfo);
7773   ST = DAG.getStoreVP(getMemoryRoot(), DL, OpValues[0], Ptr, Offset,
7774                       OpValues[2], OpValues[3], VT, MMO, ISD::UNINDEXED,
7775                       /* IsTruncating */ false, /*IsCompressing*/ false);
7776   DAG.setRoot(ST);
7777   setValue(&VPIntrin, ST);
7778 }
7779 
7780 void SelectionDAGBuilder::visitVPScatter(
7781     const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
7782   SDLoc DL = getCurSDLoc();
7783   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7784   Value *PtrOperand = VPIntrin.getArgOperand(1);
7785   EVT VT = OpValues[0].getValueType();
7786   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7787   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7788   SDValue ST;
7789   if (!Alignment)
7790     Alignment = DAG.getEVTAlign(VT.getScalarType());
7791   unsigned AS =
7792       PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
7793   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7794       MachinePointerInfo(AS), MachineMemOperand::MOStore,
7795       MemoryLocation::UnknownSize, *Alignment, AAInfo);
7796   SDValue Base, Index, Scale;
7797   ISD::MemIndexType IndexType;
7798   bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
7799                                     this, VPIntrin.getParent(),
7800                                     VT.getScalarStoreSize());
7801   if (!UniformBase) {
7802     Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
7803     Index = getValue(PtrOperand);
7804     IndexType = ISD::SIGNED_SCALED;
7805     Scale =
7806       DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
7807   }
7808   EVT IdxVT = Index.getValueType();
7809   EVT EltTy = IdxVT.getVectorElementType();
7810   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
7811     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
7812     Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
7813   }
7814   ST = DAG.getScatterVP(DAG.getVTList(MVT::Other), VT, DL,
7815                         {getMemoryRoot(), OpValues[0], Base, Index, Scale,
7816                          OpValues[2], OpValues[3]},
7817                         MMO, IndexType);
7818   DAG.setRoot(ST);
7819   setValue(&VPIntrin, ST);
7820 }
7821 
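// Illustrative example (not from the original source): the strided variant
//   call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(
//            ptr %p, i64 %stride, <4 x i1> %m, i32 %evl)
// passes {pointer, stride, mask, EVL} in OpValues and lowers to an
// EXPERIMENTAL_VP_STRIDED_LOAD node.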
7822 void SelectionDAGBuilder::visitVPStridedLoad(
7823     const VPIntrinsic &VPIntrin, EVT VT,
7824     const SmallVectorImpl<SDValue> &OpValues) {
7825   SDLoc DL = getCurSDLoc();
7826   Value *PtrOperand = VPIntrin.getArgOperand(0);
7827   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7828   if (!Alignment)
7829     Alignment = DAG.getEVTAlign(VT.getScalarType());
7830   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7831   const MDNode *Ranges = getRangeMetadata(VPIntrin);
7832   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
7833   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
7834   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
7835   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7836       MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
7837       MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
7838 
7839   SDValue LD = DAG.getStridedLoadVP(VT, DL, InChain, OpValues[0], OpValues[1],
7840                                     OpValues[2], OpValues[3], MMO,
7841                                     false /*IsExpanding*/);
7842 
7843   if (AddToChain)
7844     PendingLoads.push_back(LD.getValue(1));
7845   setValue(&VPIntrin, LD);
7846 }
7847 
7848 void SelectionDAGBuilder::visitVPStridedStore(
7849     const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
7850   SDLoc DL = getCurSDLoc();
7851   Value *PtrOperand = VPIntrin.getArgOperand(1);
7852   EVT VT = OpValues[0].getValueType();
7853   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7854   if (!Alignment)
7855     Alignment = DAG.getEVTAlign(VT.getScalarType());
7856   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7857   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7858       MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
7859       MemoryLocation::UnknownSize, *Alignment, AAInfo);
7860 
7861   SDValue ST = DAG.getStridedStoreVP(
7862       getMemoryRoot(), DL, OpValues[0], OpValues[1],
7863       DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
7864       OpValues[4], VT, MMO, ISD::UNINDEXED, /*IsTruncating*/ false,
7865       /*IsCompressing*/ false);
7866 
7867   DAG.setRoot(ST);
7868   setValue(&VPIntrin, ST);
7869 }
7870 
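// Example (a sketch under the usual VP operand layout): a predicated compare
//   %c = call <4 x i1> @llvm.vp.icmp.v4i32(<4 x i32> %a, <4 x i32> %b,
//            metadata !"slt", <4 x i1> %m, i32 %evl)
// keeps its predicate in operand #2, which is why the mask and EVL are read
// from operands #3 and #4 below before a VP_SETCC node is built.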
7871 void SelectionDAGBuilder::visitVPCmp(const VPCmpIntrinsic &VPIntrin) {
7872   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7873   SDLoc DL = getCurSDLoc();
7874 
7875   ISD::CondCode Condition;
7876   CmpInst::Predicate CondCode = VPIntrin.getPredicate();
7877   bool IsFP = VPIntrin.getOperand(0)->getType()->isFPOrFPVectorTy();
7878   if (IsFP) {
7879     // FIXME: Regular fcmps are FPMathOperators which may have fast-math (nnan)
7880     // flags, but calls that don't return floating-point types can't be
7881     // FPMathOperators, like vp.fcmp. This affects constrained fcmp too.
7882     Condition = getFCmpCondCode(CondCode);
7883     if (TM.Options.NoNaNsFPMath)
7884       Condition = getFCmpCodeWithoutNaN(Condition);
7885   } else {
7886     Condition = getICmpCondCode(CondCode);
7887   }
7888 
7889   SDValue Op1 = getValue(VPIntrin.getOperand(0));
7890   SDValue Op2 = getValue(VPIntrin.getOperand(1));
7891   // Operand #2 is the condition code.
7892   SDValue MaskOp = getValue(VPIntrin.getOperand(3));
7893   SDValue EVL = getValue(VPIntrin.getOperand(4));
7894   MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
7895   assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
7896          "Unexpected target EVL type");
7897   EVL = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, EVL);
7898 
7899   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
7900                                                         VPIntrin.getType());
7901   setValue(&VPIntrin,
7902            DAG.getSetCCVP(DL, DestVT, Op1, Op2, Condition, MaskOp, EVL));
7903 }
7904 
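// Note (illustrative): the EVL argument is i32 in IR, but each target picks
// its preferred type via getVPExplicitVectorLengthTy(); a 64-bit target may
// request i64, in which case the EVL operand is zero-extended below before
// the VP node is created.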
7905 void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
7906     const VPIntrinsic &VPIntrin) {
7907   SDLoc DL = getCurSDLoc();
7908   unsigned Opcode = getISDForVPIntrinsic(VPIntrin);
7909 
7910   auto IID = VPIntrin.getIntrinsicID();
7911 
7912   if (const auto *CmpI = dyn_cast<VPCmpIntrinsic>(&VPIntrin))
7913     return visitVPCmp(*CmpI);
7914 
7915   SmallVector<EVT, 4> ValueVTs;
7916   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7917   ComputeValueVTs(TLI, DAG.getDataLayout(), VPIntrin.getType(), ValueVTs);
7918   SDVTList VTs = DAG.getVTList(ValueVTs);
7919 
7920   auto EVLParamPos = VPIntrinsic::getVectorLengthParamPos(IID);
7921 
7922   MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
7923   assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
7924          "Unexpected target EVL type");
7925 
7926   // Request operands.
7927   SmallVector<SDValue, 7> OpValues;
7928   for (unsigned I = 0; I < VPIntrin.arg_size(); ++I) {
7929     auto Op = getValue(VPIntrin.getArgOperand(I));
7930     if (I == EVLParamPos)
7931       Op = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, Op);
7932     OpValues.push_back(Op);
7933   }
7934 
7935   switch (Opcode) {
7936   default: {
7937     SDNodeFlags SDFlags;
7938     if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
7939       SDFlags.copyFMF(*FPMO);
7940     SDValue Result = DAG.getNode(Opcode, DL, VTs, OpValues, SDFlags);
7941     setValue(&VPIntrin, Result);
7942     break;
7943   }
7944   case ISD::VP_LOAD:
7945     visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
7946     break;
7947   case ISD::VP_GATHER:
7948     visitVPGather(VPIntrin, ValueVTs[0], OpValues);
7949     break;
7950   case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
7951     visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
7952     break;
7953   case ISD::VP_STORE:
7954     visitVPStore(VPIntrin, OpValues);
7955     break;
7956   case ISD::VP_SCATTER:
7957     visitVPScatter(VPIntrin, OpValues);
7958     break;
7959   case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
7960     visitVPStridedStore(VPIntrin, OpValues);
7961     break;
7962   case ISD::VP_FMULADD: {
7963     assert(OpValues.size() == 5 && "Unexpected number of operands");
7964     SDNodeFlags SDFlags;
7965     if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
7966       SDFlags.copyFMF(*FPMO);
7967     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
7968         TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), ValueVTs[0])) {
7969       setValue(&VPIntrin, DAG.getNode(ISD::VP_FMA, DL, VTs, OpValues, SDFlags));
7970     } else {
7971       SDValue Mul = DAG.getNode(
7972           ISD::VP_FMUL, DL, VTs,
7973           {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
7974       SDValue Add =
7975           DAG.getNode(ISD::VP_FADD, DL, VTs,
7976                       {Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
7977       setValue(&VPIntrin, Add);
7978     }
7979     break;
7980   }
7981   case ISD::VP_IS_FPCLASS: {
7982     const DataLayout DLayout = DAG.getDataLayout();
7983     EVT DestVT = TLI.getValueType(DLayout, VPIntrin.getType());
7984     auto Constant = cast<ConstantSDNode>(OpValues[1])->getZExtValue();
7985     SDValue Check = DAG.getTargetConstant(Constant, DL, MVT::i32);
7986     SDValue V = DAG.getNode(ISD::VP_IS_FPCLASS, DL, DestVT,
7987                             {OpValues[0], Check, OpValues[2], OpValues[3]});
7988     setValue(&VPIntrin, V);
7989     return;
7990   }
7991   case ISD::VP_INTTOPTR: {
7992     SDValue N = OpValues[0];
7993     EVT DestVT = TLI.getValueType(DAG.getDataLayout(), VPIntrin.getType());
7994     EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), VPIntrin.getType());
7995     N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
7996                                OpValues[2]);
7997     N = DAG.getVPZExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
7998                              OpValues[2]);
7999     setValue(&VPIntrin, N);
8000     break;
8001   }
8002   case ISD::VP_PTRTOINT: {
8003     SDValue N = OpValues[0];
8004     EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8005                                                           VPIntrin.getType());
8006     EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(),
8007                                        VPIntrin.getOperand(0)->getType());
8008     N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
8009                                OpValues[2]);
8010     N = DAG.getVPZExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
8011                              OpValues[2]);
8012     setValue(&VPIntrin, N);
8013     break;
8014   }
8015   case ISD::VP_ABS:
8016   case ISD::VP_CTLZ:
8017   case ISD::VP_CTLZ_ZERO_UNDEF:
8018   case ISD::VP_CTTZ:
8019   case ISD::VP_CTTZ_ZERO_UNDEF: {
8020     SDValue Result =
8021         DAG.getNode(Opcode, DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8022     setValue(&VPIntrin, Result);
8023     break;
8024   }
8025   }
8026 }
8027 
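// Shape of the result (a sketch): an invoke's call sequence is bracketed as
//   EH_LABEL <begin>   ; lowerStartEH
//   CALL ...
//   EH_LABEL <end>     ; lowerEndEH
// and the [begin, end) range is recorded so the exception table can map the
// call site to its landing pad.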
8028 SDValue SelectionDAGBuilder::lowerStartEH(SDValue Chain,
8029                                           const BasicBlock *EHPadBB,
8030                                           MCSymbol *&BeginLabel) {
8031   MachineFunction &MF = DAG.getMachineFunction();
8032   MachineModuleInfo &MMI = MF.getMMI();
8033 
8034   // Insert a label before the invoke call to mark the try range.  This can be
8035   // used to detect deletion of the invoke via the MachineModuleInfo.
8036   BeginLabel = MMI.getContext().createTempSymbol();
8037 
8038   // For SjLj, keep track of which landing pads go with which invokes
8039   // so as to maintain the ordering of pads in the LSDA.
8040   unsigned CallSiteIndex = MMI.getCurrentCallSite();
8041   if (CallSiteIndex) {
8042     MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
8043     LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);
8044 
8045     // Now that the call site is handled, stop tracking it.
8046     MMI.setCurrentCallSite(0);
8047   }
8048 
8049   return DAG.getEHLabel(getCurSDLoc(), Chain, BeginLabel);
8050 }
8051 
8052 SDValue SelectionDAGBuilder::lowerEndEH(SDValue Chain, const InvokeInst *II,
8053                                         const BasicBlock *EHPadBB,
8054                                         MCSymbol *BeginLabel) {
8055   assert(BeginLabel && "BeginLabel should've been set");
8056 
8057   MachineFunction &MF = DAG.getMachineFunction();
8058   MachineModuleInfo &MMI = MF.getMMI();
8059 
8060   // Insert a label at the end of the invoke call to mark the try range.  This
8061   // can be used to detect deletion of the invoke via the MachineModuleInfo.
8062   MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
8063   Chain = DAG.getEHLabel(getCurSDLoc(), Chain, EndLabel);
8064 
8065   // Inform MachineModuleInfo of range.
8066   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
8067   // Some platforms (e.g. wasm) use funclet-style IR but do not actually use
8068   // outlined funclets or their LSDA info style.
8069   if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
8070     assert(II && "II should've been set");
8071     WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
8072     EHInfo->addIPToStateRange(II, BeginLabel, EndLabel);
8073   } else if (!isScopedEHPersonality(Pers)) {
8074     assert(EHPadBB);
8075     MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
8076   }
8077 
8078   return Chain;
8079 }
8080 
8081 std::pair<SDValue, SDValue>
8082 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
8083                                     const BasicBlock *EHPadBB) {
8084   MCSymbol *BeginLabel = nullptr;
8085 
8086   if (EHPadBB) {
8087     // Both PendingLoads and PendingExports must be flushed here;
8088     // this call might not return.
8089     (void)getRoot();
8090     DAG.setRoot(lowerStartEH(getControlRoot(), EHPadBB, BeginLabel));
8091     CLI.setChain(getRoot());
8092   }
8093 
8094   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8095   std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
8096 
8097   assert((CLI.IsTailCall || Result.second.getNode()) &&
8098          "Non-null chain expected with non-tail call!");
8099   assert((Result.second.getNode() || !Result.first.getNode()) &&
8100          "Null value expected with tail call!");
8101 
8102   if (!Result.second.getNode()) {
8103     // As a special case, a null chain means that a tail call has been emitted
8104     // and the DAG root is already updated.
8105     HasTailCall = true;
8106 
8107     // Since there's no actual continuation from this block, nothing can be
8108     // relying on us setting vregs for them.
8109     PendingExports.clear();
8110   } else {
8111     DAG.setRoot(Result.second);
8112   }
8113 
8114   if (EHPadBB) {
8115     DAG.setRoot(lowerEndEH(getRoot(), cast_or_null<InvokeInst>(CLI.CB), EHPadBB,
8116                            BeginLabel));
8117   }
8118 
8119   return Result;
8120 }
8121 
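// Hypothetical example of the attribute check below: a caller defined as
//   define void @f() "disable-tail-calls"="true" { ... }
// has isTailCall cleared here, unless the call was marked musttail, which
// must be honored regardless of the attribute.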
8122 void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
8123                                       bool isTailCall,
8124                                       bool isMustTailCall,
8125                                       const BasicBlock *EHPadBB) {
8126   auto &DL = DAG.getDataLayout();
8127   FunctionType *FTy = CB.getFunctionType();
8128   Type *RetTy = CB.getType();
8129 
8130   TargetLowering::ArgListTy Args;
8131   Args.reserve(CB.arg_size());
8132 
8133   const Value *SwiftErrorVal = nullptr;
8134   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8135 
8136   if (isTailCall) {
8137     // Avoid emitting tail calls in functions with the disable-tail-calls
8138     // attribute.
8139     auto *Caller = CB.getParent()->getParent();
8140     if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() ==
8141         "true" && !isMustTailCall)
8142       isTailCall = false;
8143 
8144     // We can't tail call inside a function with a swifterror argument. Lowering
8145     // does not support this yet; the value would have to be moved into the
8146     // swifterror register before the call.
8147     if (TLI.supportSwiftError() &&
8148         Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
8149       isTailCall = false;
8150   }
8151 
8152   for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
8153     TargetLowering::ArgListEntry Entry;
8154     const Value *V = *I;
8155 
8156     // Skip empty types
8157     if (V->getType()->isEmptyTy())
8158       continue;
8159 
8160     SDValue ArgNode = getValue(V);
8161     Entry.Node = ArgNode; Entry.Ty = V->getType();
8162 
8163     Entry.setAttributes(&CB, I - CB.arg_begin());
8164 
8165     // Use swifterror virtual register as input to the call.
8166     if (Entry.IsSwiftError && TLI.supportSwiftError()) {
8167       SwiftErrorVal = V;
8168       // We find the virtual register for the actual swifterror argument.
8169       // Instead of using the Value, we use the virtual register instead.
8170       Entry.Node =
8171           DAG.getRegister(SwiftError.getOrCreateVRegUseAt(&CB, FuncInfo.MBB, V),
8172                           EVT(TLI.getPointerTy(DL)));
8173     }
8174 
8175     Args.push_back(Entry);
8176 
8177     // If we have an explicit sret argument that is an Instruction (i.e., it
8178     // might point to function-local memory), we can't meaningfully tail-call.
8179     if (Entry.IsSRet && isa<Instruction>(V))
8180       isTailCall = false;
8181   }
8182 
8183   // If call site has a cfguardtarget operand bundle, create and add an
8184   // additional ArgListEntry.
8185   if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_cfguardtarget)) {
8186     TargetLowering::ArgListEntry Entry;
8187     Value *V = Bundle->Inputs[0];
8188     SDValue ArgNode = getValue(V);
8189     Entry.Node = ArgNode;
8190     Entry.Ty = V->getType();
8191     Entry.IsCFGuardTarget = true;
8192     Args.push_back(Entry);
8193   }
8194 
8195   // Check if target-independent constraints permit a tail call here.
8196   // Target-dependent constraints are checked within TLI->LowerCallTo.
8197   if (isTailCall && !isInTailCallPosition(CB, DAG.getTarget()))
8198     isTailCall = false;
8199 
8200   // Disable tail calls if there is a swifterror argument. Targets have not
8201   // been updated to support tail calls.
8202   if (TLI.supportSwiftError() && SwiftErrorVal)
8203     isTailCall = false;
8204 
8205   ConstantInt *CFIType = nullptr;
8206   if (CB.isIndirectCall()) {
8207     if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_kcfi)) {
8208       if (!TLI.supportKCFIBundles())
8209         report_fatal_error(
8210             "Target doesn't support calls with kcfi operand bundles.");
8211       CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
8212       assert(CFIType->getType()->isIntegerTy(32) && "Invalid CFI type");
8213     }
8214   }
8215 
8216   TargetLowering::CallLoweringInfo CLI(DAG);
8217   CLI.setDebugLoc(getCurSDLoc())
8218       .setChain(getRoot())
8219       .setCallee(RetTy, FTy, Callee, std::move(Args), CB)
8220       .setTailCall(isTailCall)
8221       .setConvergent(CB.isConvergent())
8222       .setIsPreallocated(
8223           CB.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0)
8224       .setCFIType(CFIType);
8225   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
8226 
8227   if (Result.first.getNode()) {
8228     Result.first = lowerRangeToAssertZExt(DAG, CB, Result.first);
8229     setValue(&CB, Result.first);
8230   }
8231 
8232   // The last element of CLI.InVals has the SDValue for swifterror return.
8233   // Here we copy it to a virtual register and update SwiftErrorMap for
8234   // book-keeping.
8235   if (SwiftErrorVal && TLI.supportSwiftError()) {
8236     // Get the last element of InVals.
8237     SDValue Src = CLI.InVals.back();
8238     Register VReg =
8239         SwiftError.getOrCreateVRegDefAt(&CB, FuncInfo.MBB, SwiftErrorVal);
8240     SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
8241     DAG.setRoot(CopyNode);
8242   }
8243 }
8244 
8245 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
8246                              SelectionDAGBuilder &Builder) {
8247   // Check to see if this load can be trivially constant folded, e.g. if the
8248   // input is from a string literal.
8249   if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
8250     // Cast pointer to the type we really want to load.
8251     Type *LoadTy =
8252         Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
8253     if (LoadVT.isVector())
8254       LoadTy = FixedVectorType::get(LoadTy, LoadVT.getVectorNumElements());
8255 
8256     LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
8257                                          PointerType::getUnqual(LoadTy));
8258 
8259     if (const Constant *LoadCst =
8260             ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
8261                                          LoadTy, Builder.DAG.getDataLayout()))
8262       return Builder.getValue(LoadCst);
8263   }
8264 
8265   // Otherwise, we have to emit the load.  If the pointer is to unfoldable but
8266   // still constant memory, the input chain can be the entry node.
8267   SDValue Root;
8268   bool ConstantMemory = false;
8269 
8270   // Do not serialize (non-volatile) loads of constant memory with anything.
8271   if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) {
8272     Root = Builder.DAG.getEntryNode();
8273     ConstantMemory = true;
8274   } else {
8275     // Do not serialize non-volatile loads against each other.
8276     Root = Builder.DAG.getRoot();
8277   }
8278 
8279   SDValue Ptr = Builder.getValue(PtrVal);
8280   SDValue LoadVal =
8281       Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr,
8282                           MachinePointerInfo(PtrVal), Align(1));
8283 
8284   if (!ConstantMemory)
8285     Builder.PendingLoads.push_back(LoadVal.getValue(1));
8286   return LoadVal;
8287 }
8288 
8289 /// Record the value for an instruction that produces an integer result,
8290 /// converting the type where necessary.
8291 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
8292                                                   SDValue Value,
8293                                                   bool IsSigned) {
8294   EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8295                                                     I.getType(), true);
8296   Value = DAG.getExtOrTrunc(IsSigned, Value, getCurSDLoc(), VT);
8297   setValue(&I, Value);
8298 }
8299 
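// Worked example of the expansion attempted below (a sketch, assuming the
// target reports a fast 32-bit equality compare): when the result is only
// compared against zero,
//   memcmp(p, q, 4) != 0
// becomes two (possibly unaligned) i32 loads and a setne,
//   (setne (load i32, p), (load i32, q))
// zero-extended to the call's return type.
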
8300 /// See if we can lower a memcmp/bcmp call into an optimized form. If so, return
8301 /// true and lower it. Otherwise return false, and it will be lowered like a
8302 /// normal call.
8303 /// The caller already checked that \p I calls the appropriate LibFunc with a
8304 /// correct prototype.
8305 bool SelectionDAGBuilder::visitMemCmpBCmpCall(const CallInst &I) {
8306   const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
8307   const Value *Size = I.getArgOperand(2);
8308   const ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(getValue(Size));
8309   if (CSize && CSize->getZExtValue() == 0) {
8310     EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8311                                                           I.getType(), true);
8312     setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
8313     return true;
8314   }
8315 
8316   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8317   std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
8318       DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
8319       getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
8320   if (Res.first.getNode()) {
8321     processIntegerCallValue(I, Res.first, true);
8322     PendingLoads.push_back(Res.second);
8323     return true;
8324   }
8325 
8326   // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS)  != 0
8327   // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS)  != 0
8328   if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
8329     return false;
8330 
8331   // If the target has a fast compare for the given size, it will return a
8332   // preferred load type for that size. Require that the load VT is legal and
8333   // that the target supports unaligned loads of that type. Otherwise, return
8334   // INVALID.
8335   auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
8336     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8337     MVT LVT = TLI.hasFastEqualityCompare(NumBits);
8338     if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
8339       // TODO: Handle 5 byte compare as 4-byte + 1 byte.
8340       // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
8341       // TODO: Check alignment of src and dest ptrs.
8342       unsigned DstAS = LHS->getType()->getPointerAddressSpace();
8343       unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
8344       if (!TLI.isTypeLegal(LVT) ||
8345           !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
8346           !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
8347         LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
8348     }
8349 
8350     return LVT;
8351   };
8352 
8353   // This turns into unaligned loads. We only do this if the target natively
8354   // supports the MVT we'll be loading or if it is small enough (<= 4) that
8355   // we'll only produce a small number of byte loads.
8356   MVT LoadVT;
8357   unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
8358   switch (NumBitsToCompare) {
8359   default:
8360     return false;
8361   case 16:
8362     LoadVT = MVT::i16;
8363     break;
8364   case 32:
8365     LoadVT = MVT::i32;
8366     break;
8367   case 64:
8368   case 128:
8369   case 256:
8370     LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
8371     break;
8372   }
8373 
8374   if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
8375     return false;
8376 
8377   SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
8378   SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
8379 
8380   // Bitcast to a wide integer type if the loads are vectors.
8381   if (LoadVT.isVector()) {
8382     EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
8383     LoadL = DAG.getBitcast(CmpVT, LoadL);
8384     LoadR = DAG.getBitcast(CmpVT, LoadR);
8385   }
8386 
8387   SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
8388   processIntegerCallValue(I, Cmp, false);
8389   return true;
8390 }
8391 
8392 /// See if we can lower a memchr call into an optimized form. If so, return
8393 /// true and lower it. Otherwise return false, and it will be lowered like a
8394 /// normal call.
8395 /// The caller already checked that \p I calls the appropriate LibFunc with a
8396 /// correct prototype.
8397 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
8398   const Value *Src = I.getArgOperand(0);
8399   const Value *Char = I.getArgOperand(1);
8400   const Value *Length = I.getArgOperand(2);
8401 
8402   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8403   std::pair<SDValue, SDValue> Res =
8404     TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
8405                                 getValue(Src), getValue(Char), getValue(Length),
8406                                 MachinePointerInfo(Src));
8407   if (Res.first.getNode()) {
8408     setValue(&I, Res.first);
8409     PendingLoads.push_back(Res.second);
8410     return true;
8411   }
8412 
8413   return false;
8414 }
8415 
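// Illustrative lowering performed below: mempcpy(dst, src, n) is emitted as
// an ordinary memcpy node followed by the return-value computation dst + n,
// which is why the memcpy must not be emitted as a tail call here.
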
8416 /// See if we can lower a mempcpy call into an optimized form. If so, return
8417 /// true and lower it. Otherwise return false, and it will be lowered like a
8418 /// normal call.
8419 /// The caller already checked that \p I calls the appropriate LibFunc with a
8420 /// correct prototype.
8421 bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
8422   SDValue Dst = getValue(I.getArgOperand(0));
8423   SDValue Src = getValue(I.getArgOperand(1));
8424   SDValue Size = getValue(I.getArgOperand(2));
8425 
8426   Align DstAlign = DAG.InferPtrAlign(Dst).valueOrOne();
8427   Align SrcAlign = DAG.InferPtrAlign(Src).valueOrOne();
8428   // DAG::getMemcpy needs Alignment to be defined.
8429   Align Alignment = std::min(DstAlign, SrcAlign);
8430 
8431   SDLoc sdl = getCurSDLoc();
8432 
8433   // In the mempcpy context we need to pass in a false value for isTailCall
8434   // because the return pointer needs to be adjusted by the size of
8435   // the copied memory.
8436   SDValue Root = getMemoryRoot();
8437   SDValue MC = DAG.getMemcpy(Root, sdl, Dst, Src, Size, Alignment, false, false,
8438                              /*isTailCall=*/false,
8439                              MachinePointerInfo(I.getArgOperand(0)),
8440                              MachinePointerInfo(I.getArgOperand(1)),
8441                              I.getAAMetadata());
8442   assert(MC.getNode() != nullptr &&
8443          "** memcpy should not be lowered as TailCall in mempcpy context **");
8444   DAG.setRoot(MC);
8445 
8446   // Check if Size needs to be truncated or extended.
8447   Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
8448 
8449   // Adjust return pointer to point just past the last dst byte.
8450   SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
8451                                     Dst, Size);
8452   setValue(&I, DstPlusSize);
8453   return true;
8454 }
8455 
8456 /// See if we can lower a strcpy call into an optimized form.  If so, return
8457 /// true and lower it, otherwise return false and it will be lowered like a
8458 /// normal call.
8459 /// The caller already checked that \p I calls the appropriate LibFunc with a
8460 /// correct prototype.
8461 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
8462   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
8463 
8464   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8465   std::pair<SDValue, SDValue> Res =
8466     TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
8467                                 getValue(Arg0), getValue(Arg1),
8468                                 MachinePointerInfo(Arg0),
8469                                 MachinePointerInfo(Arg1), isStpcpy);
8470   if (Res.first.getNode()) {
8471     setValue(&I, Res.first);
8472     DAG.setRoot(Res.second);
8473     return true;
8474   }
8475 
8476   return false;
8477 }
8478 
8479 /// See if we can lower a strcmp call into an optimized form.  If so, return
8480 /// true and lower it, otherwise return false and it will be lowered like a
8481 /// normal call.
8482 /// The caller already checked that \p I calls the appropriate LibFunc with a
8483 /// correct prototype.
8484 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
8485   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
8486 
8487   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8488   std::pair<SDValue, SDValue> Res =
8489     TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
8490                                 getValue(Arg0), getValue(Arg1),
8491                                 MachinePointerInfo(Arg0),
8492                                 MachinePointerInfo(Arg1));
8493   if (Res.first.getNode()) {
8494     processIntegerCallValue(I, Res.first, true);
8495     PendingLoads.push_back(Res.second);
8496     return true;
8497   }
8498 
8499   return false;
8500 }
8501 
8502 /// See if we can lower a strlen call into an optimized form.  If so, return
8503 /// true and lower it, otherwise return false and it will be lowered like a
8504 /// normal call.
8505 /// The caller already checked that \p I calls the appropriate LibFunc with a
8506 /// correct prototype.
8507 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
8508   const Value *Arg0 = I.getArgOperand(0);
8509 
8510   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8511   std::pair<SDValue, SDValue> Res =
8512     TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
8513                                 getValue(Arg0), MachinePointerInfo(Arg0));
8514   if (Res.first.getNode()) {
8515     processIntegerCallValue(I, Res.first, false);
8516     PendingLoads.push_back(Res.second);
8517     return true;
8518   }
8519 
8520   return false;
8521 }
8522 
8523 /// See if we can lower a strnlen call into an optimized form.  If so, return
8524 /// true and lower it, otherwise return false and it will be lowered like a
8525 /// normal call.
8526 /// The caller already checked that \p I calls the appropriate LibFunc with a
8527 /// correct prototype.
8528 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
8529   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
8530 
8531   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8532   std::pair<SDValue, SDValue> Res =
8533     TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
8534                                  getValue(Arg0), getValue(Arg1),
8535                                  MachinePointerInfo(Arg0));
8536   if (Res.first.getNode()) {
8537     processIntegerCallValue(I, Res.first, false);
8538     PendingLoads.push_back(Res.second);
8539     return true;
8540   }
8541 
8542   return false;
8543 }
8544 
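// Example (a sketch): a readonly call to sinf reaches visitUnaryFloatCall
// with Opcode == ISD::FSIN and becomes a single FSIN node on the argument's
// type; the onlyReadsMemory() check below is what rules out an errno side
// effect that would make dropping the libcall unsound.
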
8545 /// See if we can lower a unary floating-point operation into an SDNode with
8546 /// the specified Opcode.  If so, return true and lower it, otherwise return
8547 /// false and it will be lowered like a normal call.
8548 /// The caller already checked that \p I calls the appropriate LibFunc with a
8549 /// correct prototype.
8550 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
8551                                               unsigned Opcode) {
8552   // We already checked this call's prototype; verify it doesn't modify errno.
8553   if (!I.onlyReadsMemory())
8554     return false;
8555 
8556   SDNodeFlags Flags;
8557   Flags.copyFMF(cast<FPMathOperator>(I));
8558 
8559   SDValue Tmp = getValue(I.getArgOperand(0));
8560   setValue(&I,
8561            DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp, Flags));
8562   return true;
8563 }
8564 
8565 /// See if we can lower a binary floating-point operation into an SDNode with
8566 /// the specified Opcode. If so, return true and lower it. Otherwise return
8567 /// false, and it will be lowered like a normal call.
8568 /// The caller already checked that \p I calls the appropriate LibFunc with a
8569 /// correct prototype.
8570 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
8571                                                unsigned Opcode) {
8572   // We already checked this call's prototype; verify it doesn't modify errno.
8573   if (!I.onlyReadsMemory())
8574     return false;
8575 
8576   SDNodeFlags Flags;
8577   Flags.copyFMF(cast<FPMathOperator>(I));
8578 
8579   SDValue Tmp0 = getValue(I.getArgOperand(0));
8580   SDValue Tmp1 = getValue(I.getArgOperand(1));
8581   EVT VT = Tmp0.getValueType();
8582   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1, Flags));
8583   return true;
8584 }
8585 
8586 void SelectionDAGBuilder::visitCall(const CallInst &I) {
8587   // Handle inline assembly differently.
8588   if (I.isInlineAsm()) {
8589     visitInlineAsm(I);
8590     return;
8591   }
8592 
8593   diagnoseDontCall(I);
8594 
8595   if (Function *F = I.getCalledFunction()) {
8596     if (F->isDeclaration()) {
8597       // Is this an LLVM intrinsic or a target-specific intrinsic?
8598       unsigned IID = F->getIntrinsicID();
8599       if (!IID)
8600         if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
8601           IID = II->getIntrinsicID(F);
8602 
8603       if (IID) {
8604         visitIntrinsicCall(I, IID);
8605         return;
8606       }
8607     }
8608 
8609     // Check for well-known libc/libm calls.  If the function is internal, it
8610     // can't be a library call.  Don't do the check if marked as nobuiltin for
8611     // some reason or the call site requires strict floating point semantics.
8612     LibFunc Func;
8613     if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
8614         F->hasName() && LibInfo->getLibFunc(*F, Func) &&
8615         LibInfo->hasOptimizedCodeGen(Func)) {
8616       switch (Func) {
8617       default: break;
8618       case LibFunc_bcmp:
8619         if (visitMemCmpBCmpCall(I))
8620           return;
8621         break;
8622       case LibFunc_copysign:
8623       case LibFunc_copysignf:
8624       case LibFunc_copysignl:
8625         // We already checked this call's prototype; verify it doesn't modify
8626         // errno.
8627         if (I.onlyReadsMemory()) {
8628           SDValue LHS = getValue(I.getArgOperand(0));
8629           SDValue RHS = getValue(I.getArgOperand(1));
8630           setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
8631                                    LHS.getValueType(), LHS, RHS));
8632           return;
8633         }
8634         break;
8635       case LibFunc_fabs:
8636       case LibFunc_fabsf:
8637       case LibFunc_fabsl:
8638         if (visitUnaryFloatCall(I, ISD::FABS))
8639           return;
8640         break;
8641       case LibFunc_fmin:
8642       case LibFunc_fminf:
8643       case LibFunc_fminl:
8644         if (visitBinaryFloatCall(I, ISD::FMINNUM))
8645           return;
8646         break;
8647       case LibFunc_fmax:
8648       case LibFunc_fmaxf:
8649       case LibFunc_fmaxl:
8650         if (visitBinaryFloatCall(I, ISD::FMAXNUM))
8651           return;
8652         break;
8653       case LibFunc_sin:
8654       case LibFunc_sinf:
8655       case LibFunc_sinl:
8656         if (visitUnaryFloatCall(I, ISD::FSIN))
8657           return;
8658         break;
8659       case LibFunc_cos:
8660       case LibFunc_cosf:
8661       case LibFunc_cosl:
8662         if (visitUnaryFloatCall(I, ISD::FCOS))
8663           return;
8664         break;
8665       case LibFunc_sqrt:
8666       case LibFunc_sqrtf:
8667       case LibFunc_sqrtl:
8668       case LibFunc_sqrt_finite:
8669       case LibFunc_sqrtf_finite:
8670       case LibFunc_sqrtl_finite:
8671         if (visitUnaryFloatCall(I, ISD::FSQRT))
8672           return;
8673         break;
8674       case LibFunc_floor:
8675       case LibFunc_floorf:
8676       case LibFunc_floorl:
8677         if (visitUnaryFloatCall(I, ISD::FFLOOR))
8678           return;
8679         break;
8680       case LibFunc_nearbyint:
8681       case LibFunc_nearbyintf:
8682       case LibFunc_nearbyintl:
8683         if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
8684           return;
8685         break;
8686       case LibFunc_ceil:
8687       case LibFunc_ceilf:
8688       case LibFunc_ceill:
8689         if (visitUnaryFloatCall(I, ISD::FCEIL))
8690           return;
8691         break;
8692       case LibFunc_rint:
8693       case LibFunc_rintf:
8694       case LibFunc_rintl:
8695         if (visitUnaryFloatCall(I, ISD::FRINT))
8696           return;
8697         break;
8698       case LibFunc_round:
8699       case LibFunc_roundf:
8700       case LibFunc_roundl:
8701         if (visitUnaryFloatCall(I, ISD::FROUND))
8702           return;
8703         break;
8704       case LibFunc_trunc:
8705       case LibFunc_truncf:
8706       case LibFunc_truncl:
8707         if (visitUnaryFloatCall(I, ISD::FTRUNC))
8708           return;
8709         break;
8710       case LibFunc_log2:
8711       case LibFunc_log2f:
8712       case LibFunc_log2l:
8713         if (visitUnaryFloatCall(I, ISD::FLOG2))
8714           return;
8715         break;
8716       case LibFunc_exp2:
8717       case LibFunc_exp2f:
8718       case LibFunc_exp2l:
8719         if (visitUnaryFloatCall(I, ISD::FEXP2))
8720           return;
8721         break;
8722       case LibFunc_exp10:
8723       case LibFunc_exp10f:
8724       case LibFunc_exp10l:
8725         if (visitUnaryFloatCall(I, ISD::FEXP10))
8726           return;
8727         break;
8728       case LibFunc_ldexp:
8729       case LibFunc_ldexpf:
8730       case LibFunc_ldexpl:
8731         if (visitBinaryFloatCall(I, ISD::FLDEXP))
8732           return;
8733         break;
8734       case LibFunc_memcmp:
8735         if (visitMemCmpBCmpCall(I))
8736           return;
8737         break;
8738       case LibFunc_mempcpy:
8739         if (visitMemPCpyCall(I))
8740           return;
8741         break;
8742       case LibFunc_memchr:
8743         if (visitMemChrCall(I))
8744           return;
8745         break;
8746       case LibFunc_strcpy:
8747         if (visitStrCpyCall(I, false))
8748           return;
8749         break;
8750       case LibFunc_stpcpy:
8751         if (visitStrCpyCall(I, true))
8752           return;
8753         break;
8754       case LibFunc_strcmp:
8755         if (visitStrCmpCall(I))
8756           return;
8757         break;
8758       case LibFunc_strlen:
8759         if (visitStrLenCall(I))
8760           return;
8761         break;
8762       case LibFunc_strnlen:
8763         if (visitStrNLenCall(I))
8764           return;
8765         break;
8766       }
8767     }
8768   }
8769 
8770   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
8771   // have to do anything here to lower funclet bundles.
8772   // CFGuardTarget bundles are lowered in LowerCallTo.
8773   assert(!I.hasOperandBundlesOtherThan(
8774              {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
8775               LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated,
8776               LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_kcfi}) &&
8777          "Cannot lower calls with arbitrary operand bundles!");
8778 
8779   SDValue Callee = getValue(I.getCalledOperand());
8780 
8781   if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
8782     LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
8783   else
8784     // Check if we can potentially perform a tail call. More detailed checking
8785     // is done within LowerCallTo, after more information about the call is
8786     // known.
8787     LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
8788 }
8789 
8790 namespace {
8791 
8792 /// AsmOperandInfo - This contains information for each constraint that we are
8793 /// lowering.
8794 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
8795 public:
8796   /// CallOperand - If this is the result output operand or a clobber
8797   /// this is null, otherwise it is the incoming operand to the CallInst.
8798   /// This gets modified as the asm is processed.
8799   SDValue CallOperand;
8800 
8801   /// AssignedRegs - If this is a register or register class operand, this
8802   /// contains the set of registers corresponding to the operand.
8803   RegsForValue AssignedRegs;
8804 
8805   explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
8806     : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
8807   }
8808 
8809   /// Whether or not this operand accesses memory.
8810   bool hasMemory(const TargetLowering &TLI) const {
8811     // Indirect operand accesses access memory.
8812     if (isIndirect)
8813       return true;
8814 
8815     for (const auto &Code : Codes)
8816       if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
8817         return true;
8818 
8819     return false;
8820   }
8821 };
8822 
8823 
8824 } // end anonymous namespace
8825 
8826 /// Make sure that the output operand \p OpInfo and its corresponding input
8827 /// operand \p MatchingOpInfo have compatible constraint types (otherwise error
8828 /// out).
8829 static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
8830                                SDISelAsmOperandInfo &MatchingOpInfo,
8831                                SelectionDAG &DAG) {
8832   if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
8833     return;
8834 
8835   const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
8836   const auto &TLI = DAG.getTargetLoweringInfo();
8837 
8838   std::pair<unsigned, const TargetRegisterClass *> MatchRC =
8839       TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
8840                                        OpInfo.ConstraintVT);
8841   std::pair<unsigned, const TargetRegisterClass *> InputRC =
8842       TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
8843                                        MatchingOpInfo.ConstraintVT);
8844   if ((OpInfo.ConstraintVT.isInteger() !=
8845        MatchingOpInfo.ConstraintVT.isInteger()) ||
8846       (MatchRC.second != InputRC.second)) {
8847     // FIXME: error out in a more elegant fashion
8848     report_fatal_error("Unsupported asm: input constraint"
8849                        " with a matching output constraint of"
8850                        " incompatible type!");
8851   }
8852   MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
8853 }
8854 
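// Strategy sketch (illustrative): a direct value feeding a memory ("m")
// constraint is given an address either by placing it in the constant pool
// (float/int/vector constants) or by creating a stack slot and storing the
// value before the asm; OpInfo.CallOperand is then rewritten to that address.
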
8855 /// Get a direct memory input to behave well as an indirect operand.
8856 /// This may introduce stores, hence the need for a \p Chain.
8857 /// \return The (possibly updated) chain.
8858 static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
8859                                         SDISelAsmOperandInfo &OpInfo,
8860                                         SelectionDAG &DAG) {
8861   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8862 
8863   // If we don't have an indirect input, put it in the constpool if we can,
8864   // otherwise spill it to a stack slot.
8865   // TODO: This isn't quite right. We need to handle these according to
8866   // the addressing mode that the constraint wants. Also, this may take
8867   // an additional register for the computation and we don't want that
8868   // either.
8869 
8870   // If the operand is a float, integer, or vector constant, spill to a
8871   // constant pool entry to get its address.
8872   const Value *OpVal = OpInfo.CallOperandVal;
8873   if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
8874       isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
8875     OpInfo.CallOperand = DAG.getConstantPool(
8876         cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
8877     return Chain;
8878   }
8879 
8880   // Otherwise, create a stack slot and emit a store to it before the asm.
8881   Type *Ty = OpVal->getType();
8882   auto &DL = DAG.getDataLayout();
8883   uint64_t TySize = DL.getTypeAllocSize(Ty);
8884   MachineFunction &MF = DAG.getMachineFunction();
8885   int SSFI = MF.getFrameInfo().CreateStackObject(
8886       TySize, DL.getPrefTypeAlign(Ty), false);
8887   SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
8888   Chain = DAG.getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
8889                             MachinePointerInfo::getFixedStack(MF, SSFI),
8890                             TLI.getMemValueType(DL, Ty));
8891   OpInfo.CallOperand = StackSlot;
8892 
8893   return Chain;
8894 }
8895 
8896 /// GetRegistersForValue - Assign registers (virtual or physical) for the
8897 /// specified operand.  We prefer to assign virtual registers, to allow the
8898 /// register allocator to handle the assignment process.  However, if the asm
8899 /// uses features that we can't model on machineinstrs, we have SDISel do the
8900 /// allocation.  This produces generally horrible, but correct, code.
8901 ///
8902 ///   OpInfo describes the operand
8903 ///   RefOpInfo describes the matching operand if any, the operand otherwise
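///
/// \return std::nullopt on success; on failure, the physical register that was
/// requested but does not satisfy the constraint (the caller diagnoses this).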
8904 static std::optional<unsigned>
8905 getRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
8906                      SDISelAsmOperandInfo &OpInfo,
8907                      SDISelAsmOperandInfo &RefOpInfo) {
8908   LLVMContext &Context = *DAG.getContext();
8909   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8910 
8911   MachineFunction &MF = DAG.getMachineFunction();
8912   SmallVector<unsigned, 4> Regs;
8913   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
8914 
8915   // No work to do for memory/address operands.
8916   if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
8917       OpInfo.ConstraintType == TargetLowering::C_Address)
8918     return std::nullopt;
8919 
8920   // If this is a constraint for a single physreg, or a constraint for a
8921   // register class, find it.
8922   unsigned AssignedReg;
8923   const TargetRegisterClass *RC;
8924   std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
8925       &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
8926   // RC is unset only on failure. Return immediately.
8927   if (!RC)
8928     return std::nullopt;
8929 
8930   // Get the actual register value type.  This is important, because the user
8931   // may have asked for (e.g.) the AX register in i32 type.  We need to
8932   // remember that AX is actually i16 to get the right extension.
8933   const MVT RegVT = *TRI.legalclasstypes_begin(*RC);
8934 
8935   if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
8936     // If this is an FP operand in an integer register (or vice versa), or more
8937     // generally if the operand value disagrees with the register class we plan
8938     // to stick it in, fix the operand type.
8939     //
8940     // If this is an input value, the bitcast to the new type is done now.
8941     // Bitcast for output value is done at the end of visitInlineAsm().
8942     if ((OpInfo.Type == InlineAsm::isOutput ||
8943          OpInfo.Type == InlineAsm::isInput) &&
8944         !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
8945       // Try to convert to the first EVT that the reg class contains.  If the
8946       // types are identical size, use a bitcast to convert (e.g. two differing
8947       // vector types).  Note: output bitcast is done at the end of
8948       // visitInlineAsm().
8949       if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
8950         // Exclude indirect inputs while they are unsupported because the code
8951         // to perform the load is missing and thus OpInfo.CallOperand still
8952         // refers to the input address rather than the pointed-to value.
8953         if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
8954           OpInfo.CallOperand =
8955               DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
8956         OpInfo.ConstraintVT = RegVT;
8957         // If the operand is an FP value and we want it in integer registers,
8958         // use the corresponding integer type. This turns an f64 value into
8959         // i64, which can be passed with two i32 values on a 32-bit machine.
8960       } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
8961         MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
8962         if (OpInfo.Type == InlineAsm::isInput)
8963           OpInfo.CallOperand =
8964               DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
8965         OpInfo.ConstraintVT = VT;
8966       }
8967     }
8968   }
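
  // Illustrative example (not from the original source): an f64 operand
  // constrained to 32-bit integer registers has ConstraintVT rewritten to i64
  // here, which getNumRegisters() below then splits into two i32 registers.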
8969 
8970   // No need to allocate a matching input constraint since the constraint it's
8971   // matching to has already been allocated.
8972   if (OpInfo.isMatchingInputConstraint())
8973     return std::nullopt;
8974 
8975   EVT ValueVT = OpInfo.ConstraintVT;
8976   if (OpInfo.ConstraintVT == MVT::Other)
8977     ValueVT = RegVT;
8978 
8979   // Initialize NumRegs.
8980   unsigned NumRegs = 1;
8981   if (OpInfo.ConstraintVT != MVT::Other)
8982     NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT, RegVT);
8983 
8984   // If this is a constraint for a specific physical register, like {r17},
8985   // assign it now.
8986 
8987   // If this is associated with a specific register, initialize the iterator
8988   // to the correct place. If virtual, make sure we have enough registers.
8989 
8990   // Initialize the iterator if necessary.
8991   TargetRegisterClass::iterator I = RC->begin();
8992   MachineRegisterInfo &RegInfo = MF.getRegInfo();
8993 
8994   // If a specific physical register was requested, make sure it is in RC.
8995   if (AssignedReg) {
8996     I = std::find(I, RC->end(), AssignedReg);
8997     if (I == RC->end()) {
8998       // RC does not contain the selected register, which indicates a
8999       // mismatch between the register and the required type/bitwidth.
9000       return {AssignedReg};
9001     }
9002   }
9003 
9004   for (; NumRegs; --NumRegs, ++I) {
9005     assert(I != RC->end() && "Ran out of registers to allocate!");
9006     Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
9007     Regs.push_back(R);
9008   }
9009 
9010   OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
9011   return std::nullopt;
9012 }
9013 
9014 static unsigned
9015 findMatchingInlineAsmOperand(unsigned OperandNo,
9016                              const std::vector<SDValue> &AsmNodeOperands) {
9017   // Scan until we find the definition we already emitted of this operand.
9018   unsigned CurOp = InlineAsm::Op_FirstOperand;
9019   for (; OperandNo; --OperandNo) {
9020     // Advance to the next operand.
9021     unsigned OpFlag =
9022         cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
9023     const InlineAsm::Flag F(OpFlag);
9024     assert(
9025         (F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isMemKind()) &&
9026         "Skipped past definitions?");
9027     CurOp += F.getNumOperandRegisters() + 1;
9028   }
9029   return CurOp;
9030 }
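
// Illustrative sketch (not from the original source): with AsmNodeOperands
// laid out as [..., Flag0, Reg0a, Reg0b, Flag1, Reg1a, ...], each iteration
// reads a flag word and skips it plus its getNumOperandRegisters() registers,
// leaving CurOp at the flag word of operand OperandNo.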
9031 
9032 namespace {
9033 
9034 class ExtraFlags {
9035   unsigned Flags = 0;
9036 
9037 public:
9038   explicit ExtraFlags(const CallBase &Call) {
9039     const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
9040     if (IA->hasSideEffects())
9041       Flags |= InlineAsm::Extra_HasSideEffects;
9042     if (IA->isAlignStack())
9043       Flags |= InlineAsm::Extra_IsAlignStack;
9044     if (Call.isConvergent())
9045       Flags |= InlineAsm::Extra_IsConvergent;
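    // getDialect() is 0 (AT&T) or 1 (Intel); multiplying by Extra_AsmDialect
    // shifts the dialect value into its bit within the extra-info word.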
9046     Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
9047   }
9048 
9049   void update(const TargetLowering::AsmOperandInfo &OpInfo) {
9050     // Ideally, we would only check against memory constraints.  However, the
9051     // meaning of an Other constraint can be target-specific and we can't easily
9052     // reason about it.  Therefore, be conservative and set MayLoad/MayStore
9053     // for Other constraints as well.
9054     if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
9055         OpInfo.ConstraintType == TargetLowering::C_Other) {
9056       if (OpInfo.Type == InlineAsm::isInput)
9057         Flags |= InlineAsm::Extra_MayLoad;
9058       else if (OpInfo.Type == InlineAsm::isOutput)
9059         Flags |= InlineAsm::Extra_MayStore;
9060       else if (OpInfo.Type == InlineAsm::isClobber)
9061         Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
9062     }
9063   }
9064 
9065   unsigned get() const { return Flags; }
9066 };
9067 
9068 } // end anonymous namespace
9069 
9070 static bool isFunction(SDValue Op) {
9071   if (Op && Op.getOpcode() == ISD::GlobalAddress) {
9072     if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
9073       auto Fn = dyn_cast_or_null<Function>(GA->getGlobal());
9074 
9075       // A normal "call" to a dllimport function (outside inline asm) forces
9076       // indirect access via its call opcode, and the asm printer then emits an
9077       // indirection marker (e.g. "*") based on that opcode. Inline asm cannot
9078       // do that today (in fact it behaves more like a "data access"), so we
9079       // ignore dllimport functions here.
9080       if (Fn && !Fn->hasDLLImportStorageClass())
9081         return true;
9082     }
9083   }
9084   return false;
9085 }
9086 
9087 /// visitInlineAsm - Handle a call to an InlineAsm object.
9088 void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
9089                                          const BasicBlock *EHPadBB) {
9090   const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
9091 
9092   /// ConstraintOperands - Information about all of the constraints.
9093   SmallVector<SDISelAsmOperandInfo, 16> ConstraintOperands;
9094 
9095   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9096   TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
9097       DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), Call);
9098 
9099   // First Pass: Calculate HasSideEffects and ExtraFlags (AlignStack,
9100   // AsmDialect, MayLoad, MayStore).
9101   bool HasSideEffect = IA->hasSideEffects();
9102   ExtraFlags ExtraInfo(Call);
9103 
9104   for (auto &T : TargetConstraints) {
9105     ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
9106     SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
9107 
9108     if (OpInfo.CallOperandVal)
9109       OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
9110 
9111     if (!HasSideEffect)
9112       HasSideEffect = OpInfo.hasMemory(TLI);
9113 
9114     // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
9115     // FIXME: Could we compute this on OpInfo rather than T?
9116 
9117     // Compute the constraint code and ConstraintType to use.
9118     TLI.ComputeConstraintToUse(T, SDValue());
9119 
9120     if (T.ConstraintType == TargetLowering::C_Immediate &&
9121         OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
9122       // We've delayed emitting a diagnostic (as we do for the "n" constraint)
9123       // because inlining could have turned the operand into an integer constant.
9124       return emitInlineAsmError(Call, "constraint '" + Twine(T.ConstraintCode) +
9125                                           "' expects an integer constant "
9126                                           "expression");
9127 
9128     ExtraInfo.update(T);
9129   }
9130 
9131   // We won't need to flush pending loads if this asm doesn't touch
9132   // memory and is nonvolatile.
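  // (getRoot() flushes PendingLoads into the chain; DAG.getRoot() does not.)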
9133   SDValue Glue, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();
9134 
9135   bool EmitEHLabels = isa<InvokeInst>(Call);
9136   if (EmitEHLabels) {
9137     assert(EHPadBB && "InvokeInst must have an EHPadBB");
9138   }
9139   bool IsCallBr = isa<CallBrInst>(Call);
9140 
9141   if (IsCallBr || EmitEHLabels) {
9142     // If this is a callbr or invoke we need to flush pending exports since
9143     // inlineasm_br and invoke are terminators.
9144     // We need to do this before nodes are glued to the inlineasm_br node.
9145     Chain = getControlRoot();
9146   }
9147 
9148   MCSymbol *BeginLabel = nullptr;
9149   if (EmitEHLabels) {
9150     Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
9151   }
9152 
9153   int OpNo = -1;
9154   SmallVector<StringRef> AsmStrs;
9155   IA->collectAsmStrs(AsmStrs);
9156 
9157   // Second pass over the constraints: compute which constraint option to use.
9158   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9159     if (OpInfo.hasArg() || OpInfo.Type == InlineAsm::isOutput)
9160       OpNo++;
9161 
9162     // If this is an output operand with a matching input operand, look up the
9163     // matching input. If their types mismatch, e.g. one is an integer, the
9164     // other is floating point, or their sizes are different, flag it as an
9165     // error.
9166     if (OpInfo.hasMatchingInput()) {
9167       SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
9168       patchMatchingInput(OpInfo, Input, DAG);
9169     }
9170 
9171     // Compute the constraint code and ConstraintType to use.
9172     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
9173 
9174     if ((OpInfo.ConstraintType == TargetLowering::C_Memory &&
9175          OpInfo.Type == InlineAsm::isClobber) ||
9176         OpInfo.ConstraintType == TargetLowering::C_Address)
9177       continue;
9178 
9179     // Under the Linux PIC model, there are four cases of value/label addressing:
9180     //
9181     // 1: Function call or label jump inside the module.
9182     // 2: Data access (such as a global or static variable) inside the module.
9183     // 3: Function call or label jump outside the module.
9184     // 4: Data access (such as a global variable) outside the module.
9185     //
9186     // Because LLVM's inline asm support is designed not to "recognize" the asm
9187     // text, it is hard to treat memory addressing differently for the same
9188     // value/address used in different instructions. For example, under the PIC
9189     // model a call may go through the PLT or be directly PC-relative, while a
9190     // lea/mov of a function address may go through the GOT.
9191     //
9192     // Here we try to "recognize" function calls for cases 1 and 3 in inline
9193     // asm, and adjust the constraint for them.
9194     //
9195     // TODO: Since inline asm discourages jumping to labels outside the module,
9196     // we do not handle jumps to function labels yet; enhance this (especially
9197     // for the PIC model) if a meaningful need arises.
9198     if (OpInfo.isIndirect && isFunction(OpInfo.CallOperand) &&
9199         TLI.isInlineAsmTargetBranch(AsmStrs, OpNo) &&
9200         TM.getCodeModel() != CodeModel::Large) {
9201       OpInfo.isIndirect = false;
9202       OpInfo.ConstraintType = TargetLowering::C_Address;
9203     }
9204 
9205     // If this is a memory input, and if the operand is not indirect, do what we
9206     // need to provide an address for the memory input.
9207     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
9208         !OpInfo.isIndirect) {
9209       assert((OpInfo.isMultipleAlternative ||
9210               (OpInfo.Type == InlineAsm::isInput)) &&
9211              "Can only indirectify direct input operands!");
9212 
9213       // Memory operands really want the address of the value.
9214       Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
9215 
9216       // There is no longer a Value* corresponding to this operand.
9217       OpInfo.CallOperandVal = nullptr;
9218 
9219       // It is now an indirect operand.
9220       OpInfo.isIndirect = true;
9221     }
9222 
9223   }
9224 
9225   // AsmNodeOperands - The operands for the ISD::INLINEASM node.
9226   std::vector<SDValue> AsmNodeOperands;
9227   AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
9228   AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
9229       IA->getAsmString().c_str(), TLI.getProgramPointerTy(DAG.getDataLayout())));
9230 
9231   // If we have a !srcloc metadata node associated with it, we want to attach
9232   // this to the ultimately generated inline asm machineinstr.  To do this, we
9233   // pass in the third operand as this (potentially null) inline asm MDNode.
9234   const MDNode *SrcLoc = Call.getMetadata("srcloc");
9235   AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
9236 
9237   // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
9238   // bits as operand 3.
9239   AsmNodeOperands.push_back(DAG.getTargetConstant(
9240       ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
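
  // A sketch of the operand layout per the InlineAsm::Op_* scheme: operand 0
  // is the input chain, 1 the asm string, 2 the !srcloc MDNode, 3 the
  // extra-info flags; per-operand flag words and their values follow, and an
  // optional glue operand is appended last.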
9241 
9242   // Third pass: Loop over operands to prepare DAG-level operands. As part of
9243   // this, assign virtual and physical registers for inputs and outputs.
9244   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9245     // Assign Registers.
9246     SDISelAsmOperandInfo &RefOpInfo =
9247         OpInfo.isMatchingInputConstraint()
9248             ? ConstraintOperands[OpInfo.getMatchedOperand()]
9249             : OpInfo;
9250     const auto RegError =
9251         getRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
9252     if (RegError) {
9253       const MachineFunction &MF = DAG.getMachineFunction();
9254       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9255       const char *RegName = TRI.getName(*RegError);
9256       emitInlineAsmError(Call, "register '" + Twine(RegName) +
9257                                    "' allocated for constraint '" +
9258                                    Twine(OpInfo.ConstraintCode) +
9259                                    "' does not match required type");
9260       return;
9261     }
9262 
9263     auto DetectWriteToReservedRegister = [&]() {
9264       const MachineFunction &MF = DAG.getMachineFunction();
9265       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9266       for (unsigned Reg : OpInfo.AssignedRegs.Regs) {
9267         if (Register::isPhysicalRegister(Reg) &&
9268             TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
9269           const char *RegName = TRI.getName(Reg);
9270           emitInlineAsmError(Call, "write to reserved register '" +
9271                                        Twine(RegName) + "'");
9272           return true;
9273         }
9274       }
9275       return false;
9276     };
9277     assert((OpInfo.ConstraintType != TargetLowering::C_Address ||
9278             (OpInfo.Type == InlineAsm::isInput &&
9279              !OpInfo.isMatchingInputConstraint())) &&
9280            "Only address as input operand is allowed.");
9281 
9282     switch (OpInfo.Type) {
9283     case InlineAsm::isOutput:
9284       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
9285         const InlineAsm::ConstraintCode ConstraintID =
9286             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
9287         assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
9288                "Failed to convert memory constraint code to constraint id.");
9289 
9290         // Add information to the INLINEASM node to know about this output.
9291         InlineAsm::Flag OpFlags(InlineAsm::Kind::Mem, 1);
9292         OpFlags.setMemConstraint(ConstraintID);
9293         AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
9294                                                         MVT::i32));
9295         AsmNodeOperands.push_back(OpInfo.CallOperand);
9296       } else {
9297         // Otherwise, this outputs to a register (directly for C_Register /
9298         // C_RegisterClass, and a target-defined fashion for
9299         // C_Immediate/C_Other). Find a register that we can use.
9300         if (OpInfo.AssignedRegs.Regs.empty()) {
9301           emitInlineAsmError(
9302               Call, "couldn't allocate output register for constraint '" +
9303                         Twine(OpInfo.ConstraintCode) + "'");
9304           return;
9305         }
9306 
9307         if (DetectWriteToReservedRegister())
9308           return;
9309 
9310         // Add information to the INLINEASM node to know that this register is
9311         // set.
9312         OpInfo.AssignedRegs.AddInlineAsmOperands(
9313             OpInfo.isEarlyClobber ? InlineAsm::Kind::RegDefEarlyClobber
9314                                   : InlineAsm::Kind::RegDef,
9315             false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
9316       }
9317       break;
9318 
9319     case InlineAsm::isInput:
9320     case InlineAsm::isLabel: {
9321       SDValue InOperandVal = OpInfo.CallOperand;
9322 
9323       if (OpInfo.isMatchingInputConstraint()) {
9324         // If this is required to match an output register we have already set,
9325         // just use its register.
9326         auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
9327                                                   AsmNodeOperands);
9328         InlineAsm::Flag Flag(
9329             cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue());
9330         if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) {
9331           if (OpInfo.isIndirect) {
9332             // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
9333             emitInlineAsmError(Call, "inline asm not supported yet: "
9334                                      "don't know how to handle tied "
9335                                      "indirect register inputs");
9336             return;
9337           }
9338 
9339           SmallVector<unsigned, 4> Regs;
9340           MachineFunction &MF = DAG.getMachineFunction();
9341           MachineRegisterInfo &MRI = MF.getRegInfo();
9342           const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9343           auto *R = cast<RegisterSDNode>(AsmNodeOperands[CurOp+1]);
9344           Register TiedReg = R->getReg();
9345           MVT RegVT = R->getSimpleValueType(0);
9346           const TargetRegisterClass *RC =
9347               TiedReg.isVirtual()     ? MRI.getRegClass(TiedReg)
9348               : RegVT != MVT::Untyped ? TLI.getRegClassFor(RegVT)
9349                                       : TRI.getMinimalPhysRegClass(TiedReg);
9350           for (unsigned i = 0, e = Flag.getNumOperandRegisters(); i != e; ++i)
9351             Regs.push_back(MRI.createVirtualRegister(RC));
9352 
9353           RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
9354 
9355           SDLoc dl = getCurSDLoc();
9356           // Use the produced MatchedRegs object to copy the input into the registers.
9357           MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue, &Call);
9358           MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, true,
9359                                            OpInfo.getMatchedOperand(), dl, DAG,
9360                                            AsmNodeOperands);
9361           break;
9362         }
9363 
9364         assert(Flag.isMemKind() && "Unknown matching constraint!");
9365         assert(Flag.getNumOperandRegisters() == 1 &&
9366                "Unexpected number of operands");
9367         // Add information to the INLINEASM node to know about this input.
9368         // See InlineAsm.h isUseOperandTiedToDef.
9369         Flag.clearMemConstraint();
9370         Flag.setMatchingOp(OpInfo.getMatchedOperand());
9371         AsmNodeOperands.push_back(DAG.getTargetConstant(
9372             Flag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
9373         AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
9374         break;
9375       }
9376 
9377       // Treat indirect 'X' constraint as memory.
9378       if (OpInfo.ConstraintType == TargetLowering::C_Other &&
9379           OpInfo.isIndirect)
9380         OpInfo.ConstraintType = TargetLowering::C_Memory;
9381 
9382       if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
9383           OpInfo.ConstraintType == TargetLowering::C_Other) {
9384         std::vector<SDValue> Ops;
9385         TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
9386                                           Ops, DAG);
9387         if (Ops.empty()) {
9388           if (OpInfo.ConstraintType == TargetLowering::C_Immediate)
9389             if (isa<ConstantSDNode>(InOperandVal)) {
9390               emitInlineAsmError(Call, "value out of range for constraint '" +
9391                                            Twine(OpInfo.ConstraintCode) + "'");
9392               return;
9393             }
9394 
9395           emitInlineAsmError(Call,
9396                              "invalid operand for inline asm constraint '" +
9397                                  Twine(OpInfo.ConstraintCode) + "'");
9398           return;
9399         }
9400 
9401         // Add information to the INLINEASM node to know about this input.
9402         InlineAsm::Flag ResOpType(InlineAsm::Kind::Imm, Ops.size());
9403         AsmNodeOperands.push_back(DAG.getTargetConstant(
9404             ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
9405         llvm::append_range(AsmNodeOperands, Ops);
9406         break;
9407       }
9408 
9409       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
9410         assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
9413         assert(InOperandVal.getValueType() ==
9414                    TLI.getPointerTy(DAG.getDataLayout()) &&
9415                "Memory operands expect pointer values");
9416 
9417         const InlineAsm::ConstraintCode ConstraintID =
9418             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
9419         assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
9420                "Failed to convert memory constraint code to constraint id.");
9421 
9422         // Add information to the INLINEASM node to know about this input.
9423         InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1);
9424         ResOpType.setMemConstraint(ConstraintID);
9425         AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
9426                                                         getCurSDLoc(),
9427                                                         MVT::i32));
9428         AsmNodeOperands.push_back(InOperandVal);
9429         break;
9430       }
9431 
9432       if (OpInfo.ConstraintType == TargetLowering::C_Address) {
9433         const InlineAsm::ConstraintCode ConstraintID =
9434             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
9435         assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
9436                "Failed to convert memory constraint code to constraint id.");
9437 
9438         InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1);
9439 
9440         SDValue AsmOp = InOperandVal;
9441         if (isFunction(InOperandVal)) {
9442           auto *GA = cast<GlobalAddressSDNode>(InOperandVal);
9443           ResOpType = InlineAsm::Flag(InlineAsm::Kind::Func, 1);
9444           AsmOp = DAG.getTargetGlobalAddress(GA->getGlobal(), getCurSDLoc(),
9445                                              InOperandVal.getValueType(),
9446                                              GA->getOffset());
9447         }
9448 
9449         // Add information to the INLINEASM node to know about this input.
9450         ResOpType.setMemConstraint(ConstraintID);
9451 
9452         AsmNodeOperands.push_back(
9453             DAG.getTargetConstant(ResOpType, getCurSDLoc(), MVT::i32));
9454 
9455         AsmNodeOperands.push_back(AsmOp);
9456         break;
9457       }
9458 
9459       assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
9460               OpInfo.ConstraintType == TargetLowering::C_Register) &&
9461              "Unknown constraint type!");
9462 
9463       // TODO: Support this.
9464       if (OpInfo.isIndirect) {
9465         emitInlineAsmError(
9466             Call, "Don't know how to handle indirect register inputs yet "
9467                   "for constraint '" +
9468                       Twine(OpInfo.ConstraintCode) + "'");
9469         return;
9470       }
9471 
9472       // Copy the input into the appropriate registers.
9473       if (OpInfo.AssignedRegs.Regs.empty()) {
9474         emitInlineAsmError(Call,
9475                            "couldn't allocate input reg for constraint '" +
9476                                Twine(OpInfo.ConstraintCode) + "'");
9477         return;
9478       }
9479 
9480       if (DetectWriteToReservedRegister())
9481         return;
9482 
9483       SDLoc dl = getCurSDLoc();
9484 
9485       OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue,
9486                                         &Call);
9487 
9488       OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, false,
9489                                                0, dl, DAG, AsmNodeOperands);
9490       break;
9491     }
9492     case InlineAsm::isClobber:
9493       // Add the clobbered value to the operand list, so that the register
9494       // allocator is aware that the physreg got clobbered.
9495       if (!OpInfo.AssignedRegs.Regs.empty())
9496         OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::Clobber,
9497                                                  false, 0, getCurSDLoc(), DAG,
9498                                                  AsmNodeOperands);
9499       break;
9500     }
9501   }
9502 
9503   // Finish up input operands.  Set the input chain and add the flag last.
9504   AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
9505   if (Glue.getNode()) AsmNodeOperands.push_back(Glue);
9506 
9507   unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
9508   Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
9509                       DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
9510   Glue = Chain.getValue(1);
9511 
9512   // Do additional work to generate outputs.
9513 
9514   SmallVector<EVT, 1> ResultVTs;
9515   SmallVector<SDValue, 1> ResultValues;
9516   SmallVector<SDValue, 8> OutChains;
9517 
9518   llvm::Type *CallResultType = Call.getType();
9519   ArrayRef<Type *> ResultTypes;
9520   if (StructType *StructResult = dyn_cast<StructType>(CallResultType))
9521     ResultTypes = StructResult->elements();
9522   else if (!CallResultType->isVoidTy())
9523     ResultTypes = ArrayRef(CallResultType);
9524 
9525   auto CurResultType = ResultTypes.begin();
9526   auto handleRegAssign = [&](SDValue V) {
9527     assert(CurResultType != ResultTypes.end() && "Unexpected value");
9528     assert((*CurResultType)->isSized() && "Unexpected unsized type");
9529     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
9530     ++CurResultType;
9531     // If the type of the inline asm call site return value differs from, but
9532     // has the same size as, the type of the asm output, bitcast it.  One
9533     // example of this is vectors with different widths / numbers of elements.
9534     // This can happen for register classes that can contain multiple value
9535     // types.  The preg or vreg allocated may not have the same VT as was
9536     // expected.
9537     //
9538     // This can also happen for a return value that disagrees with the register
9539     // class it is put in, eg. a double in a general-purpose register on a
9540     // 32-bit machine.
9541     if (ResultVT != V.getValueType() &&
9542         ResultVT.getSizeInBits() == V.getValueSizeInBits())
9543       V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
9544     else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
9545              V.getValueType().isInteger()) {
9546       // If a result value was tied to an input value, the computed result
9547       // may have a wider width than the expected result.  Extract the
9548       // relevant portion.
9549       V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
9550     }
9551     assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
9552     ResultVTs.push_back(ResultVT);
9553     ResultValues.push_back(V);
9554   };
9555 
9556   // Deal with output operands.
9557   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9558     if (OpInfo.Type == InlineAsm::isOutput) {
9559       SDValue Val;
9560       // Skip trivial output operands.
9561       if (OpInfo.AssignedRegs.Regs.empty())
9562         continue;
9563 
9564       switch (OpInfo.ConstraintType) {
9565       case TargetLowering::C_Register:
9566       case TargetLowering::C_RegisterClass:
9567         Val = OpInfo.AssignedRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
9568                                                   Chain, &Glue, &Call);
9569         break;
9570       case TargetLowering::C_Immediate:
9571       case TargetLowering::C_Other:
9572         Val = TLI.LowerAsmOutputForConstraint(Chain, Glue, getCurSDLoc(),
9573                                               OpInfo, DAG);
9574         break;
9575       case TargetLowering::C_Memory:
9576         break; // Already handled.
9577       case TargetLowering::C_Address:
9578         break; // Silence warning.
9579       case TargetLowering::C_Unknown:
9580         assert(false && "Unexpected unknown constraint");
9581       }
9582 
9583       // Indirect outputs manifest as stores. Record the output chains.
9584       if (OpInfo.isIndirect) {
9585         const Value *Ptr = OpInfo.CallOperandVal;
9586         assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
9587         SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
9588                                      MachinePointerInfo(Ptr));
9589         OutChains.push_back(Store);
9590       } else {
9591         // Generate CopyFromRegs for the associated registers.
9592         assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
9593         if (Val.getOpcode() == ISD::MERGE_VALUES) {
9594           for (const SDValue &V : Val->op_values())
9595             handleRegAssign(V);
9596         } else
9597           handleRegAssign(Val);
9598       }
9599     }
9600   }
9601 
9602   // Set results.
9603   if (!ResultValues.empty()) {
9604     assert(CurResultType == ResultTypes.end() &&
9605            "Mismatch in number of ResultTypes");
9606     assert(ResultValues.size() == ResultTypes.size() &&
9607            "Mismatch in number of output operands in asm result");
9608 
9609     SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
9610                             DAG.getVTList(ResultVTs), ResultValues);
9611     setValue(&Call, V);
9612   }
9613 
9614   // Collect store chains.
9615   if (!OutChains.empty())
9616     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
9617 
9618   if (EmitEHLabels) {
9619     Chain = lowerEndEH(Chain, cast<InvokeInst>(&Call), EHPadBB, BeginLabel);
9620   }
9621 
9622   // Only update the root if the inline assembly has a memory effect.
9623   if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr ||
9624       EmitEHLabels)
9625     DAG.setRoot(Chain);
9626 }
9627 
9628 void SelectionDAGBuilder::emitInlineAsmError(const CallBase &Call,
9629                                              const Twine &Message) {
9630   LLVMContext &Ctx = *DAG.getContext();
9631   Ctx.emitError(&Call, Message);
9632 
9633   // Make sure we leave the DAG in a valid state
9634   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9635   SmallVector<EVT, 1> ValueVTs;
9636   ComputeValueVTs(TLI, DAG.getDataLayout(), Call.getType(), ValueVTs);
9637 
9638   if (ValueVTs.empty())
9639     return;
9640 
9641   SmallVector<SDValue, 1> Ops;
9642   for (unsigned i = 0, e = ValueVTs.size(); i != e; ++i)
9643     Ops.push_back(DAG.getUNDEF(ValueVTs[i]));
9644 
9645   setValue(&Call, DAG.getMergeValues(Ops, getCurSDLoc()));
9646 }
9647 
9648 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
9649   DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
9650                           MVT::Other, getRoot(),
9651                           getValue(I.getArgOperand(0)),
9652                           DAG.getSrcValue(I.getArgOperand(0))));
9653 }
9654 
9655 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
9656   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9657   const DataLayout &DL = DAG.getDataLayout();
9658   SDValue V = DAG.getVAArg(
9659       TLI.getMemValueType(DAG.getDataLayout(), I.getType()), getCurSDLoc(),
9660       getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
9661       DL.getABITypeAlign(I.getType()).value());
9662   DAG.setRoot(V.getValue(1));
9663 
9664   if (I.getType()->isPointerTy())
9665     V = DAG.getPtrExtOrTrunc(
9666         V, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()));
9667   setValue(&I, V);
9668 }
9669 
9670 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
9671   DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
9672                           MVT::Other, getRoot(),
9673                           getValue(I.getArgOperand(0)),
9674                           DAG.getSrcValue(I.getArgOperand(0))));
9675 }
9676 
9677 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
9678   DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
9679                           MVT::Other, getRoot(),
9680                           getValue(I.getArgOperand(0)),
9681                           getValue(I.getArgOperand(1)),
9682                           DAG.getSrcValue(I.getArgOperand(0)),
9683                           DAG.getSrcValue(I.getArgOperand(1))));
9684 }
9685 
9686 SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
9687                                                     const Instruction &I,
9688                                                     SDValue Op) {
9689   const MDNode *Range = getRangeMetadata(I);
9690   if (!Range)
9691     return Op;
9692 
9693   ConstantRange CR = getConstantRangeFromMetadata(*Range);
9694   if (CR.isFullSet() || CR.isEmptySet() || CR.isUpperWrapped())
9695     return Op;
9696 
9697   APInt Lo = CR.getUnsignedMin();
9698   if (!Lo.isMinValue())
9699     return Op;
9700 
9701   APInt Hi = CR.getUnsignedMax();
9702   unsigned Bits = std::max(Hi.getActiveBits(),
9703                            static_cast<unsigned>(IntegerType::MIN_INT_BITS));
9704 
9705   EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
9706 
9707   SDLoc SL = getCurSDLoc();
9708 
9709   SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
9710                              DAG.getValueType(SmallVT));
9711   unsigned NumVals = Op.getNode()->getNumValues();
9712   if (NumVals == 1)
9713     return ZExt;
9714 
9715   SmallVector<SDValue, 4> Ops;
9716 
9717   Ops.push_back(ZExt);
9718   for (unsigned I = 1; I != NumVals; ++I)
9719     Ops.push_back(Op.getValue(I));
9720 
9721   return DAG.getMergeValues(Ops, SL);
9722 }
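
// Illustrative example (not from the original source): !range !{i64 0, i64 200}
// on an i64 call result gives Hi = 199, which has 8 active bits, so the value
// is wrapped in AssertZext with an i8 assertion type.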
9723 
9724 /// Populate a CallLoweringInfo (into \p CLI) based on the properties of
9725 /// the call being lowered.
9726 ///
9727 /// This is a helper for lowering intrinsics that follow a target calling
9728 /// convention or require stack pointer adjustment. Only a subset of the
9729 /// intrinsic's operands need to participate in the calling convention.
9730 void SelectionDAGBuilder::populateCallLoweringInfo(
9731     TargetLowering::CallLoweringInfo &CLI, const CallBase *Call,
9732     unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
9733     bool IsPatchPoint) {
9734   TargetLowering::ArgListTy Args;
9735   Args.reserve(NumArgs);
9736 
9737   // Populate the argument list.
9738   // Attributes for args start at offset 1, after the return attribute.
9739   for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
9740        ArgI != ArgE; ++ArgI) {
9741     const Value *V = Call->getOperand(ArgI);
9742 
9743     assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
9744 
9745     TargetLowering::ArgListEntry Entry;
9746     Entry.Node = getValue(V);
9747     Entry.Ty = V->getType();
9748     Entry.setAttributes(Call, ArgI);
9749     Args.push_back(Entry);
9750   }
9751 
9752   CLI.setDebugLoc(getCurSDLoc())
9753       .setChain(getRoot())
9754       .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args))
9755       .setDiscardResult(Call->use_empty())
9756       .setIsPatchPoint(IsPatchPoint)
9757       .setIsPreallocated(
9758           Call->countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
9759 }
9760 
9761 /// Add a stack map intrinsic call's live variable operands to a stackmap
9762 /// or patchpoint target node's operand list.
9763 ///
9764 /// Constants are converted to TargetConstants purely as an optimization to
9765 /// avoid constant materialization and register allocation.
9766 ///
9767 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
9768 /// generate address computation nodes, and so FinalizeISel can convert the
9769 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
9770 /// address materialization and register allocation, but may also be required
9771 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
9772 /// alloca in the entry block, then the runtime may assume that the alloca's
9773 /// StackMap location can be read immediately after compilation and that the
9774 /// location is valid at any point during execution (this is similar to the
9775 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
9776 /// only available in a register, then the runtime would need to trap when
9777 /// execution reaches the StackMap in order to read the alloca's location.
9778 static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx,
9779                                 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
9780                                 SelectionDAGBuilder &Builder) {
9781   SelectionDAG &DAG = Builder.DAG;
9782   for (unsigned I = StartIdx; I < Call.arg_size(); I++) {
9783     SDValue Op = Builder.getValue(Call.getArgOperand(I));
9784 
9785     // Things on the stack are pointer-typed, meaning that they are already
9786     // legal and can be emitted directly to target nodes.
9787     if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
9788       Ops.push_back(DAG.getTargetFrameIndex(FI->getIndex(), Op.getValueType()));
9789     } else {
9790       // Otherwise emit a target independent node to be legalised.
9791       Ops.push_back(Builder.getValue(Call.getArgOperand(I)));
9792     }
9793   }
9794 }
9795 
9796 /// Lower llvm.experimental.stackmap.
9797 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
9798   // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
9799   //                                  [live variables...])
9800 
9801   assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
9802 
9803   SDValue Chain, InGlue, Callee;
9804   SmallVector<SDValue, 32> Ops;
9805 
9806   SDLoc DL = getCurSDLoc();
9807   Callee = getValue(CI.getCalledOperand());
9808 
9809   // The stackmap intrinsic only records the live variables (the arguments
9810   // passed to it) and emits NOPs (if requested). Unlike the patchpoint
9811   // intrinsic, this won't be lowered to a function call. This means we don't
9812   // have to worry about calling conventions and target specific lowering code.
9813   // Instead we perform the call lowering right here.
9814   //
9815   // chain, flag = CALLSEQ_START(chain, 0, 0)
9816   // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
9817   // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
9818   //
9819   Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
9820   InGlue = Chain.getValue(1);
9821 
9822   // Add the STACKMAP operands, starting with DAG house-keeping.
9823   Ops.push_back(Chain);
9824   Ops.push_back(InGlue);
9825 
9826   // Add the <id>, <numShadowBytes> operands.
9827   //
9828   // These do not require legalisation, and can be emitted directly to target
9829   // constant nodes.
9830   SDValue ID = getValue(CI.getArgOperand(0));
9831   assert(ID.getValueType() == MVT::i64);
9832   SDValue IDConst = DAG.getTargetConstant(
9833       cast<ConstantSDNode>(ID)->getZExtValue(), DL, ID.getValueType());
9834   Ops.push_back(IDConst);
9835 
9836   SDValue Shad = getValue(CI.getArgOperand(1));
9837   assert(Shad.getValueType() == MVT::i32);
9838   SDValue ShadConst = DAG.getTargetConstant(
9839       cast<ConstantSDNode>(Shad)->getZExtValue(), DL, Shad.getValueType());
9840   Ops.push_back(ShadConst);
9841 
9842   // Add the live variables.
9843   addStackMapLiveVars(CI, 2, DL, Ops, *this);
9844 
9845   // Create the STACKMAP node.
9846   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
9847   Chain = DAG.getNode(ISD::STACKMAP, DL, NodeTys, Ops);
9848   InGlue = Chain.getValue(1);
9849 
9850   Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL);
9851 
9852   // Stackmaps don't generate values, so nothing goes into the NodeMap.
9853 
9854   // Set the root to the target-lowered call chain.
9855   DAG.setRoot(Chain);
9856 
9857   // Inform the Frame Information that we have a stackmap in this function.
9858   FuncInfo.MF->getFrameInfo().setHasStackMap();
9859 }
9860 
9861 /// Lower llvm.experimental.patchpoint directly to its target opcode.
9862 void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
9863                                           const BasicBlock *EHPadBB) {
9864   // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
9865   //                                                 i32 <numBytes>,
9866   //                                                 i8* <target>,
9867   //                                                 i32 <numArgs>,
9868   //                                                 [Args...],
9869   //                                                 [live variables...])
9870 
9871   CallingConv::ID CC = CB.getCallingConv();
9872   bool IsAnyRegCC = CC == CallingConv::AnyReg;
9873   bool HasDef = !CB.getType()->isVoidTy();
9874   SDLoc dl = getCurSDLoc();
9875   SDValue Callee = getValue(CB.getArgOperand(PatchPointOpers::TargetPos));
9876 
9877   // Handle immediate and symbolic callees.
9878   if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
9879     Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
9880                                    /*isTarget=*/true);
9881   else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
9882     Callee =  DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
9883                                          SDLoc(SymbolicCallee),
9884                                          SymbolicCallee->getValueType(0));
9885 
9886   // Get the real number of arguments participating in the call <numArgs>
9887   SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos));
9888   unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
9889 
9890   // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
9891   // Intrinsics include all meta-operands up to but not including CC.
9892   unsigned NumMetaOpers = PatchPointOpers::CCPos;
9893   assert(CB.arg_size() >= NumMetaOpers + NumArgs &&
9894          "Not enough arguments provided to the patchpoint intrinsic");
9895 
9896   // For AnyRegCC the arguments are lowered later on manually.
9897   unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
9898   Type *ReturnTy =
9899       IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CB.getType();
9900 
9901   TargetLowering::CallLoweringInfo CLI(DAG);
9902   populateCallLoweringInfo(CLI, &CB, NumMetaOpers, NumCallArgs, Callee,
9903                            ReturnTy, true);
9904   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
9905 
9906   SDNode *CallEnd = Result.second.getNode();
9907   if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
9908     CallEnd = CallEnd->getOperand(0).getNode();
9909 
9910   /// Get a call instruction from the call sequence chain.
9911   /// Tail calls are not allowed.
9912   assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
9913          "Expected a callseq node.");
9914   SDNode *Call = CallEnd->getOperand(0).getNode();
9915   bool HasGlue = Call->getGluedNode();
9916 
9917   // Replace the target specific call node with the patchable intrinsic.
9918   SmallVector<SDValue, 8> Ops;
9919 
9920   // Push the chain.
9921   Ops.push_back(*(Call->op_begin()));
9922 
9923   // Optionally, push the glue (if any).
9924   if (HasGlue)
9925     Ops.push_back(*(Call->op_end() - 1));
9926 
9927   // Push the register mask info.
9928   if (HasGlue)
9929     Ops.push_back(*(Call->op_end() - 2));
9930   else
9931     Ops.push_back(*(Call->op_end() - 1));
9932 
9933   // Add the <id> and <numBytes> constants.
9934   SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos));
9935   Ops.push_back(DAG.getTargetConstant(
9936                   cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
9937   SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos));
9938   Ops.push_back(DAG.getTargetConstant(
9939                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
9940                   MVT::i32));
9941 
9942   // Add the callee.
9943   Ops.push_back(Callee);
9944 
9945   // Adjust <numArgs> to account for any arguments that have been passed on the
9946   // stack instead.
9947   // Call Node: Chain, Target, {Args}, RegMask, [Glue]
9948   unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
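  // E.g. an illustrative call node (Chain, Target, A0, A1, RegMask, Glue) has
  // six operands, giving NumCallRegArgs = 6 - 4 = 2 register arguments.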
9949   NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
9950   Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
9951 
9952   // Add the calling convention
9953   Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
9954 
9955   // Add the arguments we omitted previously. The register allocator should
9956   // place these in any free register.
9957   if (IsAnyRegCC)
9958     for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
9959       Ops.push_back(getValue(CB.getArgOperand(i)));
9960 
9961   // Push the arguments from the call instruction.
9962   SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
9963   Ops.append(Call->op_begin() + 2, e);
9964 
9965   // Push live variables for the stack map.
9966   addStackMapLiveVars(CB, NumMetaOpers + NumArgs, dl, Ops, *this);
9967 
9968   SDVTList NodeTys;
9969   if (IsAnyRegCC && HasDef) {
9970     // Create the return types based on the intrinsic definition
9971     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9972     SmallVector<EVT, 3> ValueVTs;
9973     ComputeValueVTs(TLI, DAG.getDataLayout(), CB.getType(), ValueVTs);
9974     assert(ValueVTs.size() == 1 && "Expected only one return value type.");
9975 
9976     // There is always a chain and a glue type at the end
9977     ValueVTs.push_back(MVT::Other);
9978     ValueVTs.push_back(MVT::Glue);
9979     NodeTys = DAG.getVTList(ValueVTs);
9980   } else
9981     NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
9982 
9983   // Replace the target specific call node with a PATCHPOINT node.
9984   SDValue PPV = DAG.getNode(ISD::PATCHPOINT, dl, NodeTys, Ops);
9985 
9986   // Update the NodeMap.
9987   if (HasDef) {
9988     if (IsAnyRegCC)
9989       setValue(&CB, SDValue(PPV.getNode(), 0));
9990     else
9991       setValue(&CB, Result.first);
9992   }
9993 
9994   // Fixup the consumers of the intrinsic. The chain and glue may be used in the
9995   // call sequence. Furthermore the location of the chain and glue can change
9996   // when the AnyReg calling convention is used and the intrinsic returns a
9997   // value.
9998   if (IsAnyRegCC && HasDef) {
9999     SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
10000     SDValue To[] = {PPV.getValue(1), PPV.getValue(2)};
10001     DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
10002   } else
10003     DAG.ReplaceAllUsesWith(Call, PPV.getNode());
10004   DAG.DeleteNode(Call);
10005 
10006   // Inform the Frame Information that we have a patchpoint in this function.
10007   FuncInfo.MF->getFrameInfo().setHasPatchPoint();
10008 }
10009 
10010 void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
10011                                             unsigned Intrinsic) {
10012   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10013   SDValue Op1 = getValue(I.getArgOperand(0));
10014   SDValue Op2;
10015   if (I.arg_size() > 1)
10016     Op2 = getValue(I.getArgOperand(1));
10017   SDLoc dl = getCurSDLoc();
10018   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
10019   SDValue Res;
10020   SDNodeFlags SDFlags;
10021   if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
10022     SDFlags.copyFMF(*FPMO);
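
  // An illustrative contrast for the reassociation checks below:
  //   vector_reduce_fadd(%acc, %v) -> FADD(%acc, VECREDUCE_FADD(%v)) (reassoc)
  //   vector_reduce_fadd(%acc, %v) -> VECREDUCE_SEQ_FADD(%acc, %v)   (strict)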
10023 
10024   switch (Intrinsic) {
10025   case Intrinsic::vector_reduce_fadd:
10026     if (SDFlags.hasAllowReassociation())
10027       Res = DAG.getNode(ISD::FADD, dl, VT, Op1,
10028                         DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
10029                         SDFlags);
10030     else
10031       Res = DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
10032     break;
10033   case Intrinsic::vector_reduce_fmul:
10034     if (SDFlags.hasAllowReassociation())
10035       Res = DAG.getNode(ISD::FMUL, dl, VT, Op1,
10036                         DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
10037                         SDFlags);
10038     else
10039       Res = DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
10040     break;
10041   case Intrinsic::vector_reduce_add:
10042     Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
10043     break;
10044   case Intrinsic::vector_reduce_mul:
10045     Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
10046     break;
10047   case Intrinsic::vector_reduce_and:
10048     Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
10049     break;
10050   case Intrinsic::vector_reduce_or:
10051     Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
10052     break;
10053   case Intrinsic::vector_reduce_xor:
10054     Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
10055     break;
10056   case Intrinsic::vector_reduce_smax:
10057     Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
10058     break;
10059   case Intrinsic::vector_reduce_smin:
10060     Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
10061     break;
10062   case Intrinsic::vector_reduce_umax:
10063     Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
10064     break;
10065   case Intrinsic::vector_reduce_umin:
10066     Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
10067     break;
10068   case Intrinsic::vector_reduce_fmax:
10069     Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
10070     break;
10071   case Intrinsic::vector_reduce_fmin:
10072     Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
10073     break;
10074   case Intrinsic::vector_reduce_fmaximum:
10075     Res = DAG.getNode(ISD::VECREDUCE_FMAXIMUM, dl, VT, Op1, SDFlags);
10076     break;
10077   case Intrinsic::vector_reduce_fminimum:
10078     Res = DAG.getNode(ISD::VECREDUCE_FMINIMUM, dl, VT, Op1, SDFlags);
10079     break;
10080   default:
10081     llvm_unreachable("Unhandled vector reduce intrinsic");
10082   }
10083   setValue(&I, Res);
10084 }
10085 
10086 /// Returns an AttributeList representing the attributes applied to the return
10087 /// value of the given call.
10088 static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
10089   SmallVector<Attribute::AttrKind, 2> Attrs;
10090   if (CLI.RetSExt)
10091     Attrs.push_back(Attribute::SExt);
10092   if (CLI.RetZExt)
10093     Attrs.push_back(Attribute::ZExt);
10094   if (CLI.IsInReg)
10095     Attrs.push_back(Attribute::InReg);
10096 
10097   return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
10098                             Attrs);
10099 }
10100 
10101 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
10102 /// implementation, which just calls LowerCall.
10103 /// FIXME: When all targets are
10104 /// migrated to using LowerCall, this hook should be integrated into SDISel.
10105 std::pair<SDValue, SDValue>
10106 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
10107   // Handle the incoming return values from the call.
10108   CLI.Ins.clear();
10109   Type *OrigRetTy = CLI.RetTy;
10110   SmallVector<EVT, 4> RetTys;
10111   SmallVector<uint64_t, 4> Offsets;
10112   auto &DL = CLI.DAG.getDataLayout();
10113   ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets, 0);
10114 
10115   if (CLI.IsPostTypeLegalization) {
10116     // If we are lowering a libcall after legalization, split the return type.
10117     SmallVector<EVT, 4> OldRetTys;
10118     SmallVector<uint64_t, 4> OldOffsets;
10119     RetTys.swap(OldRetTys);
10120     Offsets.swap(OldOffsets);
10121 
10122     for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
10123       EVT RetVT = OldRetTys[i];
10124       uint64_t Offset = OldOffsets[i];
10125       MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
10126       unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
10127       unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
10128       RetTys.append(NumRegs, RegisterVT);
10129       for (unsigned j = 0; j != NumRegs; ++j)
10130         Offsets.push_back(Offset + j * RegisterVTByteSZ);
10131     }
10132   }
10133 
10134   SmallVector<ISD::OutputArg, 4> Outs;
10135   GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
10136 
10137   bool CanLowerReturn =
10138       this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
10139                            CLI.IsVarArg, Outs, CLI.RetTy->getContext());
10140 
10141   SDValue DemoteStackSlot;
10142   int DemoteStackIdx = -100;
10143   if (!CanLowerReturn) {
10144     // FIXME: equivalent assert?
10145     // assert(!CS.hasInAllocaArgument() &&
10146     //        "sret demotion is incompatible with inalloca");
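          // Demote the return value instead: create a stack slot in the caller
          // and pass its address as a hidden sret pointer argument. The callee
          // stores the result there, and the !CanLowerReturn path below loads
          // it back out.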
10147     uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
10148     Align Alignment = DL.getPrefTypeAlign(CLI.RetTy);
10149     MachineFunction &MF = CLI.DAG.getMachineFunction();
10150     DemoteStackIdx =
10151         MF.getFrameInfo().CreateStackObject(TySize, Alignment, false);
10152     Type *StackSlotPtrType = PointerType::get(CLI.RetTy,
10153                                               DL.getAllocaAddrSpace());
10154 
10155     DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
10156     ArgListEntry Entry;
10157     Entry.Node = DemoteStackSlot;
10158     Entry.Ty = StackSlotPtrType;
10159     Entry.IsSExt = false;
10160     Entry.IsZExt = false;
10161     Entry.IsInReg = false;
10162     Entry.IsSRet = true;
10163     Entry.IsNest = false;
10164     Entry.IsByVal = false;
10165     Entry.IsByRef = false;
10166     Entry.IsReturned = false;
10167     Entry.IsSwiftSelf = false;
10168     Entry.IsSwiftAsync = false;
10169     Entry.IsSwiftError = false;
10170     Entry.IsCFGuardTarget = false;
10171     Entry.Alignment = Alignment;
10172     CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
10173     CLI.NumFixedArgs += 1;
10174     CLI.getArgs()[0].IndirectType = CLI.RetTy;
10175     CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
10176 
10177     // sret demotion isn't compatible with tail-calls, since the sret argument
10178     // points into the caller's stack frame.
10179     CLI.IsTailCall = false;
10180   } else {
10181     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
10182         CLI.RetTy, CLI.CallConv, CLI.IsVarArg, DL);
10183     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
10184       ISD::ArgFlagsTy Flags;
10185       if (NeedsRegBlock) {
10186         Flags.setInConsecutiveRegs();
10187         if (I == RetTys.size() - 1)
10188           Flags.setInConsecutiveRegsLast();
10189       }
10190       EVT VT = RetTys[I];
10191       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
10192                                                      CLI.CallConv, VT);
10193       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
10194                                                        CLI.CallConv, VT);
10195       for (unsigned i = 0; i != NumRegs; ++i) {
10196         ISD::InputArg MyFlags;
10197         MyFlags.Flags = Flags;
10198         MyFlags.VT = RegisterVT;
10199         MyFlags.ArgVT = VT;
10200         MyFlags.Used = CLI.IsReturnValueUsed;
10201         if (CLI.RetTy->isPointerTy()) {
10202           MyFlags.Flags.setPointer();
10203           MyFlags.Flags.setPointerAddrSpace(
10204               cast<PointerType>(CLI.RetTy)->getAddressSpace());
10205         }
10206         if (CLI.RetSExt)
10207           MyFlags.Flags.setSExt();
10208         if (CLI.RetZExt)
10209           MyFlags.Flags.setZExt();
10210         if (CLI.IsInReg)
10211           MyFlags.Flags.setInReg();
10212         CLI.Ins.push_back(MyFlags);
10213       }
10214     }
10215   }
10216 
10217   // We push the swifterror return as the last element of CLI.Ins.
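        // A swifterror argument is also an implicit result: the callee may
        // write a new error value that comes back in a register, so model it
        // as an extra incoming value.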
10218   ArgListTy &Args = CLI.getArgs();
10219   if (supportSwiftError()) {
10220     for (const ArgListEntry &Arg : Args) {
10221       if (Arg.IsSwiftError) {
10222         ISD::InputArg MyFlags;
10223         MyFlags.VT = getPointerTy(DL);
10224         MyFlags.ArgVT = EVT(getPointerTy(DL));
10225         MyFlags.Flags.setSwiftError();
10226         CLI.Ins.push_back(MyFlags);
10227       }
10228     }
10229   }
10230 
10231   // Handle all of the outgoing arguments.
10232   CLI.Outs.clear();
10233   CLI.OutVals.clear();
10234   for (unsigned i = 0, e = Args.size(); i != e; ++i) {
10235     SmallVector<EVT, 4> ValueVTs;
10236     ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
10237     // FIXME: Split arguments if CLI.IsPostTypeLegalization
10238     Type *FinalType = Args[i].Ty;
10239     if (Args[i].IsByVal)
10240       FinalType = Args[i].IndirectType;
10241     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
10242         FinalType, CLI.CallConv, CLI.IsVarArg, DL);
10243     for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
10244          ++Value) {
10245       EVT VT = ValueVTs[Value];
10246       Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
10247       SDValue Op = SDValue(Args[i].Node.getNode(),
10248                            Args[i].Node.getResNo() + Value);
10249       ISD::ArgFlagsTy Flags;
10250 
10251       // Certain targets (such as MIPS) may have a different ABI alignment
10252       // for a type depending on the context. Give the target a chance to
10253       // specify the alignment it wants.
10254       const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL));
10255       Flags.setOrigAlign(OriginalAlignment);
10256 
10257       if (Args[i].Ty->isPointerTy()) {
10258         Flags.setPointer();
10259         Flags.setPointerAddrSpace(
10260             cast<PointerType>(Args[i].Ty)->getAddressSpace());
10261       }
10262       if (Args[i].IsZExt)
10263         Flags.setZExt();
10264       if (Args[i].IsSExt)
10265         Flags.setSExt();
10266       if (Args[i].IsInReg) {
10267         // If we are using vectorcall calling convention, a structure that is
10268         // passed InReg is surely an HVA (Homogeneous Vector Aggregate).
10269         if (CLI.CallConv == CallingConv::X86_VectorCall &&
10270             isa<StructType>(FinalType)) {
10271           // The first value of a structure is marked with the HvaStart flag.
10272           if (0 == Value)
10273             Flags.setHvaStart();
10274           Flags.setHva();
10275         }
10276         // Set InReg Flag
10277         Flags.setInReg();
10278       }
10279       if (Args[i].IsSRet)
10280         Flags.setSRet();
10281       if (Args[i].IsSwiftSelf)
10282         Flags.setSwiftSelf();
10283       if (Args[i].IsSwiftAsync)
10284         Flags.setSwiftAsync();
10285       if (Args[i].IsSwiftError)
10286         Flags.setSwiftError();
10287       if (Args[i].IsCFGuardTarget)
10288         Flags.setCFGuardTarget();
10289       if (Args[i].IsByVal)
10290         Flags.setByVal();
10291       if (Args[i].IsByRef)
10292         Flags.setByRef();
10293       if (Args[i].IsPreallocated) {
10294         Flags.setPreallocated();
10295         // Set the byval flag for CCAssignFn callbacks that don't know about
10296         // preallocated.  This way we can know how many bytes we should've
10297         // allocated and how many bytes a callee cleanup function will pop.  If
10298         // we port preallocated to more targets, we'll have to add custom
10299         // preallocated handling in the various CC lowering callbacks.
10300         Flags.setByVal();
10301       }
10302       if (Args[i].IsInAlloca) {
10303         Flags.setInAlloca();
10304         // Set the byval flag for CCAssignFn callbacks that don't know about
10305         // inalloca.  This way we can know how many bytes we should've allocated
10306         // and how many bytes a callee cleanup function will pop.  If we port
10307         // inalloca to more targets, we'll have to add custom inalloca handling
10308         // in the various CC lowering callbacks.
10309         Flags.setByVal();
10310       }
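            // Pick the in-memory alignment: an explicit alignment on the
            // argument wins; otherwise byval-style arguments fall back to the
            // target's byval alignment for the pointee type, and everything
            // else falls back to the ABI alignment computed above.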
10311       Align MemAlign;
10312       if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
10313         unsigned FrameSize = DL.getTypeAllocSize(Args[i].IndirectType);
10314         Flags.setByValSize(FrameSize);
10315 
10316         // Size and alignment should be passed from the FE. The BE will guess
              // if this info is not there, but there are cases it cannot get right.
10317         if (auto MA = Args[i].Alignment)
10318           MemAlign = *MA;
10319         else
10320           MemAlign = Align(getByValTypeAlignment(Args[i].IndirectType, DL));
10321       } else if (auto MA = Args[i].Alignment) {
10322         MemAlign = *MA;
10323       } else {
10324         MemAlign = OriginalAlignment;
10325       }
10326       Flags.setMemAlign(MemAlign);
10327       if (Args[i].IsNest)
10328         Flags.setNest();
10329       if (NeedsRegBlock)
10330         Flags.setInConsecutiveRegs();
10331 
10332       MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
10333                                                  CLI.CallConv, VT);
10334       unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
10335                                                         CLI.CallConv, VT);
10336       SmallVector<SDValue, 4> Parts(NumParts);
10337       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
10338 
10339       if (Args[i].IsSExt)
10340         ExtendKind = ISD::SIGN_EXTEND;
10341       else if (Args[i].IsZExt)
10342         ExtendKind = ISD::ZERO_EXTEND;
10343 
10344       // Conservatively only handle 'returned' on non-vectors that can be lowered,
10345       // for now.
10346       if (Args[i].IsReturned && !Op.getValueType().isVector() &&
10347           CanLowerReturn) {
10348         assert((CLI.RetTy == Args[i].Ty ||
10349                 (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() &&
10350                  CLI.RetTy->getPointerAddressSpace() ==
10351                      Args[i].Ty->getPointerAddressSpace())) &&
10352                RetTys.size() == NumValues && "unexpected use of 'returned'");
10353         // Before passing 'returned' to the target lowering code, ensure that
10354         // either the register MVT and the actual EVT are the same size or that
10355         // the return value and argument are extended in the same way; in these
10356         // cases it's safe to pass the argument register value unchanged as the
10357         // return register value (although it's at the target's option whether
10358         // to do so)
10359         // TODO: allow code generation to take advantage of partially preserved
10360         // registers rather than clobbering the entire register when the
10361         // parameter extension method is not compatible with the return
10362         // extension method
10363         if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
10364             (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
10365              CLI.RetZExt == Args[i].IsZExt))
10366           Flags.setReturned();
10367       }
10368 
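            // Split the value into NumParts register-sized pieces, applying
            // the extension kind chosen above.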
10369       getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, CLI.CB,
10370                      CLI.CallConv, ExtendKind);
10371 
10372       for (unsigned j = 0; j != NumParts; ++j) {
10373         // If this isn't the first piece, the alignment must be 1.
10374         // For scalable vectors the scalable part is currently handled
10375         // by individual targets, so we just use the known minimum size here.
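              // The first part of a split value is flagged 'split' and the
              // last part 'split end' so targets can tell where a multi-part
              // argument begins and ends.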
10376         ISD::OutputArg MyFlags(
10377             Flags, Parts[j].getValueType().getSimpleVT(), VT,
10378             i < CLI.NumFixedArgs, i,
10379             j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
10380         if (NumParts > 1 && j == 0)
10381           MyFlags.Flags.setSplit();
10382         else if (j != 0) {
10383           MyFlags.Flags.setOrigAlign(Align(1));
10384           if (j == NumParts - 1)
10385             MyFlags.Flags.setSplitEnd();
10386         }
10387 
10388         CLI.Outs.push_back(MyFlags);
10389         CLI.OutVals.push_back(Parts[j]);
10390       }
10391 
10392       if (NeedsRegBlock && Value == NumValues - 1)
10393         CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
10394     }
10395   }
10396 
10397   SmallVector<SDValue, 4> InVals;
10398   CLI.Chain = LowerCall(CLI, InVals);
10399 
10400   // Update CLI.InVals to use outside of this function.
10401   CLI.InVals = InVals;
10402 
10403   // Verify that the target's LowerCall behaved as expected.
10404   assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
10405          "LowerCall didn't return a valid chain!");
10406   assert((!CLI.IsTailCall || InVals.empty()) &&
10407          "LowerCall emitted a return value for a tail call!");
10408   assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
10409          "LowerCall didn't emit the correct number of values!");
10410 
10411   // For a tail call, the return value is merely live-out and there aren't
10412   // any nodes in the DAG representing it. Return a special value to
10413   // indicate that a tail call has been emitted and no more Instructions
10414   // should be processed in the current block.
10415   if (CLI.IsTailCall) {
10416     CLI.DAG.setRoot(CLI.Chain);
10417     return std::make_pair(SDValue(), SDValue());
10418   }
10419 
10420 #ifndef NDEBUG
10421   for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
10422     assert(InVals[i].getNode() && "LowerCall emitted a null value!");
10423     assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
10424            "LowerCall emitted a value with the wrong type!");
10425   }
10426 #endif
10427 
10428   SmallVector<SDValue, 4> ReturnValues;
10429   if (!CanLowerReturn) {
10430     // The instruction result is the result of loading from the
10431     // hidden sret parameter.
10432     SmallVector<EVT, 1> PVTs;
10433     Type *PtrRetTy =
10434         PointerType::get(OrigRetTy->getContext(), DL.getAllocaAddrSpace());
10435 
10436     ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
10437     assert(PVTs.size() == 1 && "Pointers should fit in one register");
10438     EVT PtrVT = PVTs[0];
10439 
10440     unsigned NumValues = RetTys.size();
10441     ReturnValues.resize(NumValues);
10442     SmallVector<SDValue, 4> Chains(NumValues);
10443 
10444     // An aggregate return value cannot wrap around the address space, so
10445     // offsets to its parts don't wrap either.
10446     SDNodeFlags Flags;
10447     Flags.setNoUnsignedWrap(true);
10448 
10449     MachineFunction &MF = CLI.DAG.getMachineFunction();
10450     Align HiddenSRetAlign = MF.getFrameInfo().getObjectAlign(DemoteStackIdx);
10451     for (unsigned i = 0; i < NumValues; ++i) {
10452       SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
10453                                     CLI.DAG.getConstant(Offsets[i], CLI.DL,
10454                                                         PtrVT), Flags);
10455       SDValue L = CLI.DAG.getLoad(
10456           RetTys[i], CLI.DL, CLI.Chain, Add,
10457           MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
10458                                             DemoteStackIdx, Offsets[i]),
10459           HiddenSRetAlign);
10460       ReturnValues[i] = L;
10461       Chains[i] = L.getValue(1);
10462     }
10463 
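          // Merge the chains of the individual loads so that users of the
          // call's chain depend on all of them.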
10464     CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
10465   } else {
10466     // Collect the legal value parts into potentially illegal values
10467     // that correspond to the original function's return values.
10468     std::optional<ISD::NodeType> AssertOp;
10469     if (CLI.RetSExt)
10470       AssertOp = ISD::AssertSext;
10471     else if (CLI.RetZExt)
10472       AssertOp = ISD::AssertZext;
10473     unsigned CurReg = 0;
10474     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
10475       EVT VT = RetTys[I];
10476       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
10477                                                      CLI.CallConv, VT);
10478       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
10479                                                        CLI.CallConv, VT);
10480 
10481       ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
10482                                               NumRegs, RegisterVT, VT, nullptr,
10483                                               CLI.CallConv, AssertOp));
10484       CurReg += NumRegs;
10485     }
10486 
10487     // For a function returning void, there is no return value. We can't create
10488     // such a node, so we just return a null return value in that case; nothing
10489     // will actually look at the value.
10490     if (ReturnValues.empty())
10491       return std::make_pair(SDValue(), CLI.Chain);
10492   }
10493 
10494   SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
10495                                 CLI.DAG.getVTList(RetTys), ReturnValues);
10496   return std::make_pair(Res, CLI.Chain);
10497 }
10498 
10499 /// Places new result values for the node in Results (their number
10500 /// and types must exactly match those of the original return values of
10501 /// the node), or leaves Results empty, which indicates that the node is not
10502 /// to be custom lowered after all.
10503 void TargetLowering::LowerOperationWrapper(SDNode *N,
10504                                            SmallVectorImpl<SDValue> &Results,
10505                                            SelectionDAG &DAG) const {
10506   SDValue Res = LowerOperation(SDValue(N, 0), DAG);
10507 
10508   if (!Res.getNode())
10509     return;
10510 
10511   // If the original node has one result, take the return value from
10512   // LowerOperation as is. It might not be result number 0.
10513   if (N->getNumValues() == 1) {
10514     Results.push_back(Res);
10515     return;
10516   }
10517 
10518   // If the original node has multiple results, then the return node should
10519   // have the same number of results.
10520   assert((N->getNumValues() == Res->getNumValues()) &&
10521       "Lowering returned the wrong number of results!");
10522 
10523   // Place the new result values based on N's result numbers.
10524   for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
10525     Results.push_back(Res.getValue(I));
10526 }
10527 
10528 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
10529   llvm_unreachable("LowerOperation not implemented for this target!");
10530 }
10531 
10532 void SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V,
10533                                                      unsigned Reg,
10534                                                      ISD::NodeType ExtendType) {
10535   SDValue Op = getNonRegisterValue(V);
10536   assert((Op.getOpcode() != ISD::CopyFromReg ||
10537           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
10538          "Copy from a reg to the same reg!");
10539   assert(!Register::isPhysicalRegister(Reg) && "Is a physreg");
10540 
10541   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10542   // If this is an InlineAsm we have to match the registers required, not the
10543   // notional registers required by the type.
10544 
10545   RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
10546                    std::nullopt); // This is not an ABI copy.
10547   SDValue Chain = DAG.getEntryNode();
10548 
10549   if (ExtendType == ISD::ANY_EXTEND) {
10550     auto PreferredExtendIt = FuncInfo.PreferredExtendType.find(V);
10551     if (PreferredExtendIt != FuncInfo.PreferredExtendType.end())
10552       ExtendType = PreferredExtendIt->second;
10553   }
10554   RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
10555   PendingExports.push_back(Chain);
10556 }
10557 
10558 #include "llvm/CodeGen/SelectionDAGISel.h"
10559 
10560 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
10561 /// entry block, return true.  Uses by a switch count as uses outside the
10562 /// entry block, since the switch may expand into multiple basic blocks.
10563 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
10564   // With FastISel active, we may be splitting blocks, so force creation
10565   // of virtual registers for all non-dead arguments.
10566   if (FastISel)
10567     return A->use_empty();
10568 
10569   const BasicBlock &Entry = A->getParent()->front();
10570   for (const User *U : A->users())
10571     if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
10572       return false;  // Use not in entry block.
10573 
10574   return true;
10575 }
10576 
10577 using ArgCopyElisionMapTy =
10578     DenseMap<const Argument *,
10579              std::pair<const AllocaInst *, const StoreInst *>>;
10580 
10581 /// Scan the entry block of the function in FuncInfo for arguments that look
10582 /// like copies into a local alloca. Record any copied arguments in
10583 /// ArgCopyElisionCandidates.
10584 static void
10585 findArgumentCopyElisionCandidates(const DataLayout &DL,
10586                                   FunctionLoweringInfo *FuncInfo,
10587                                   ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
10588   // Record the state of every static alloca used in the entry block. Argument
10589   // allocas are all used in the entry block, so we need approximately as many
10590   // entries as we have arguments.
10591   enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
10592   SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
10593   unsigned NumArgs = FuncInfo->Fn->arg_size();
10594   StaticAllocas.reserve(NumArgs * 2);
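        // Each alloca moves monotonically from Unknown to either Elidable (a
        // single argument store fully initializes it) or Clobbered (it escapes,
        // or an unanalyzable instruction may write to it).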
10595 
10596   auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
10597     if (!V)
10598       return nullptr;
10599     V = V->stripPointerCasts();
10600     const auto *AI = dyn_cast<AllocaInst>(V);
10601     if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
10602       return nullptr;
10603     auto Iter = StaticAllocas.insert({AI, Unknown});
10604     return &Iter.first->second;
10605   };
10606 
10607   // Look for stores of arguments to static allocas. Look through bitcasts and
10608   // GEPs to handle type coercions, as long as the alloca is fully initialized
10609   // by the store. Any non-store use of an alloca escapes it and any subsequent
10610   // unanalyzed store might write to it.
10611   // FIXME: Handle structs initialized with multiple stores.
10612   for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
10613     // Look for stores, and handle non-store uses conservatively.
10614     const auto *SI = dyn_cast<StoreInst>(&I);
10615     if (!SI) {
10616       // We will look through cast uses, so ignore them completely.
10617       if (I.isCast())
10618         continue;
10619       // Ignore debug info and pseudo op intrinsics, they don't escape or store
10620       // to allocas.
10621       if (I.isDebugOrPseudoInst())
10622         continue;
10623       // This is an unknown instruction. Assume it escapes or writes to all
10624       // static alloca operands.
10625       for (const Use &U : I.operands()) {
10626         if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
10627           *Info = StaticAllocaInfo::Clobbered;
10628       }
10629       continue;
10630     }
10631 
10632     // If the stored value is a static alloca, mark it as escaped.
10633     if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
10634       *Info = StaticAllocaInfo::Clobbered;
10635 
10636     // Check if the destination is a static alloca.
10637     const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
10638     StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
10639     if (!Info)
10640       continue;
10641     const AllocaInst *AI = cast<AllocaInst>(Dst);
10642 
10643     // Skip allocas that have been initialized or clobbered.
10644     if (*Info != StaticAllocaInfo::Unknown)
10645       continue;
10646 
10647     // Check if the stored value is an argument, and that this store fully
10648     // initializes the alloca.
10649     // If the argument type has padding bits we can't directly forward a pointer
10650     // as the upper bits may contain garbage.
10651     // Don't elide copies from the same argument twice.
10652     const Value *Val = SI->getValueOperand()->stripPointerCasts();
10653     const auto *Arg = dyn_cast<Argument>(Val);
10654     if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
10655         Arg->getType()->isEmptyTy() ||
10656         DL.getTypeStoreSize(Arg->getType()) !=
10657             DL.getTypeAllocSize(AI->getAllocatedType()) ||
10658         !DL.typeSizeEqualsStoreSize(Arg->getType()) ||
10659         ArgCopyElisionCandidates.count(Arg)) {
10660       *Info = StaticAllocaInfo::Clobbered;
10661       continue;
10662     }
10663 
10664     LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
10665                       << '\n');
10666 
10667     // Mark this alloca and store for argument copy elision.
10668     *Info = StaticAllocaInfo::Elidable;
10669     ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
10670 
10671     // Stop scanning if we've seen all arguments. This will happen early in -O0
10672     // builds, which is useful, because -O0 builds have large entry blocks and
10673     // many allocas.
10674     if (ArgCopyElisionCandidates.size() == NumArgs)
10675       break;
10676   }
10677 }
10678 
10679 /// Try to elide argument copies from memory into a local alloca. Succeeds if
10680 /// ArgVal is a load from a suitable fixed stack object.
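      /// On success, the argument's alloca is redirected to the fixed stack
      /// object that already holds the incoming argument, and the store that
      /// implemented the copy is recorded so instruction selection can skip it.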
10681 static void tryToElideArgumentCopy(
10682     FunctionLoweringInfo &FuncInfo, SmallVectorImpl<SDValue> &Chains,
10683     DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
10684     SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
10685     ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
10686     ArrayRef<SDValue> ArgVals, bool &ArgHasUses) {
10687   // Check if this is a load from a fixed stack object.
10688   auto *LNode = dyn_cast<LoadSDNode>(ArgVals[0]);
10689   if (!LNode)
10690     return;
10691   auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
10692   if (!FINode)
10693     return;
10694 
10695   // Check that the fixed stack object is the right size and alignment.
10696   // Look at the alignment that the user wrote on the alloca instead of looking
10697   // at the stack object.
10698   auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
10699   assert(ArgCopyIter != ArgCopyElisionCandidates.end());
10700   const AllocaInst *AI = ArgCopyIter->second.first;
10701   int FixedIndex = FINode->getIndex();
10702   int &AllocaIndex = FuncInfo.StaticAllocaMap[AI];
10703   int OldIndex = AllocaIndex;
10704   MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
10705   if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
10706     LLVM_DEBUG(
10707         dbgs() << "  argument copy elision failed due to bad fixed stack "
10708                   "object size\n");
10709     return;
10710   }
10711   Align RequiredAlignment = AI->getAlign();
10712   if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) {
10713     LLVM_DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
10714                          "greater than stack argument alignment ("
10715                       << DebugStr(RequiredAlignment) << " vs "
10716                       << DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n");
10717     return;
10718   }
10719 
10720   // Perform the elision. Delete the old stack object and replace its only use
10721   // in the variable info map. Mark the stack object as mutable.
10722   LLVM_DEBUG({
10723     dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
10724            << "  Replacing frame index " << OldIndex << " with " << FixedIndex
10725            << '\n';
10726   });
10727   MFI.RemoveStackObject(OldIndex);
10728   MFI.setIsImmutableObjectIndex(FixedIndex, false);
10729   AllocaIndex = FixedIndex;
10730   ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
10731   for (SDValue ArgVal : ArgVals)
10732     Chains.push_back(ArgVal.getValue(1));
10733 
10734   // Avoid emitting code for the store implementing the copy.
10735   const StoreInst *SI = ArgCopyIter->second.second;
10736   ElidedArgCopyInstrs.insert(SI);
10737 
10738   // Check for uses of the argument again so that we can avoid exporting ArgVal
10739   // if it is't used by anything other than the store.
10740   // if it isn't used by anything other than the store.
10741     if (U != SI) {
10742       ArgHasUses = true;
10743       break;
10744     }
10745   }
10746 }
10747 
10748 void SelectionDAGISel::LowerArguments(const Function &F) {
10749   SelectionDAG &DAG = SDB->DAG;
10750   SDLoc dl = SDB->getCurSDLoc();
10751   const DataLayout &DL = DAG.getDataLayout();
10752   SmallVector<ISD::InputArg, 16> Ins;
10753 
10754   // In Naked functions we aren't going to save any registers.
10755   if (F.hasFnAttribute(Attribute::Naked))
10756     return;
10757 
10758   if (!FuncInfo->CanLowerReturn) {
10759     // Put in an sret pointer parameter before all the other parameters.
10760     SmallVector<EVT, 1> ValueVTs;
10761     ComputeValueVTs(*TLI, DAG.getDataLayout(),
10762                     PointerType::get(F.getContext(),
10763                                      DAG.getDataLayout().getAllocaAddrSpace()),
10764                     ValueVTs);
10765 
10766     // NOTE: We assume a pointer never breaks down into more than one VT or
10767     // more than one register.
10768     ISD::ArgFlagsTy Flags;
10769     Flags.setSRet();
10770     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
10771     ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
10772                          ISD::InputArg::NoArgIndex, 0);
10773     Ins.push_back(RetArg);
10774   }
10775 
10776   // Look for stores of arguments to static allocas. Mark such arguments with a
10777   // flag to ask the target to give us the memory location of that argument if
10778   // available.
10779   ArgCopyElisionMapTy ArgCopyElisionCandidates;
10780   findArgumentCopyElisionCandidates(DL, FuncInfo.get(),
10781                                     ArgCopyElisionCandidates);
10782 
10783   // Set up the incoming argument description vector.
10784   for (const Argument &Arg : F.args()) {
10785     unsigned ArgNo = Arg.getArgNo();
10786     SmallVector<EVT, 4> ValueVTs;
10787     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
10788     bool isArgValueUsed = !Arg.use_empty();
10789     unsigned PartBase = 0;
10790     Type *FinalType = Arg.getType();
10791     if (Arg.hasAttribute(Attribute::ByVal))
10792       FinalType = Arg.getParamByValType();
10793     bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
10794         FinalType, F.getCallingConv(), F.isVarArg(), DL);
10795     for (unsigned Value = 0, NumValues = ValueVTs.size();
10796          Value != NumValues; ++Value) {
10797       EVT VT = ValueVTs[Value];
10798       Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
10799       ISD::ArgFlagsTy Flags;
10800 
10801 
10802       if (Arg.getType()->isPointerTy()) {
10803         Flags.setPointer();
10804         Flags.setPointerAddrSpace(
10805             cast<PointerType>(Arg.getType())->getAddressSpace());
10806       }
10807       if (Arg.hasAttribute(Attribute::ZExt))
10808         Flags.setZExt();
10809       if (Arg.hasAttribute(Attribute::SExt))
10810         Flags.setSExt();
10811       if (Arg.hasAttribute(Attribute::InReg)) {
10812         // If we are using vectorcall calling convention, a structure that is
10813         // passed InReg is surely an HVA (Homogeneous Vector Aggregate).
10814         if (F.getCallingConv() == CallingConv::X86_VectorCall &&
10815             isa<StructType>(Arg.getType())) {
10816           // The first value of a structure is marked with the HvaStart flag.
10817           if (0 == Value)
10818             Flags.setHvaStart();
10819           Flags.setHva();
10820         }
10821         // Set InReg Flag
10822         Flags.setInReg();
10823       }
10824       if (Arg.hasAttribute(Attribute::StructRet))
10825         Flags.setSRet();
10826       if (Arg.hasAttribute(Attribute::SwiftSelf))
10827         Flags.setSwiftSelf();
10828       if (Arg.hasAttribute(Attribute::SwiftAsync))
10829         Flags.setSwiftAsync();
10830       if (Arg.hasAttribute(Attribute::SwiftError))
10831         Flags.setSwiftError();
10832       if (Arg.hasAttribute(Attribute::ByVal))
10833         Flags.setByVal();
10834       if (Arg.hasAttribute(Attribute::ByRef))
10835         Flags.setByRef();
10836       if (Arg.hasAttribute(Attribute::InAlloca)) {
10837         Flags.setInAlloca();
10838         // Set the byval flag for CCAssignFn callbacks that don't know about
10839         // inalloca.  This way we can know how many bytes we should've allocated
10840         // and how many bytes a callee cleanup function will pop.  If we port
10841         // inalloca to more targets, we'll have to add custom inalloca handling
10842         // in the various CC lowering callbacks.
10843         Flags.setByVal();
10844       }
10845       if (Arg.hasAttribute(Attribute::Preallocated)) {
10846         Flags.setPreallocated();
10847         // Set the byval flag for CCAssignFn callbacks that don't know about
10848         // preallocated.  This way we can know how many bytes we should've
10849         // allocated and how many bytes a callee cleanup function will pop.  If
10850         // we port preallocated to more targets, we'll have to add custom
10851         // preallocated handling in the various CC lowering callbacks.
10852         Flags.setByVal();
10853       }
10854 
10855       // Certain targets (such as MIPS) may have a different ABI alignment
10856       // for a type depending on the context. Give the target a chance to
10857       // specify the alignment it wants.
10858       const Align OriginalAlignment(
10859           TLI->getABIAlignmentForCallingConv(ArgTy, DL));
10860       Flags.setOrigAlign(OriginalAlignment);
10861 
10862       Align MemAlign;
10863       Type *ArgMemTy = nullptr;
10864       if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated() ||
10865           Flags.isByRef()) {
10866         if (!ArgMemTy)
10867           ArgMemTy = Arg.getPointeeInMemoryValueType();
10868 
10869         uint64_t MemSize = DL.getTypeAllocSize(ArgMemTy);
10870 
10871         // For in-memory arguments, size and alignment should be passed from FE.
10872         // BE will guess if this info is not there but there are cases it cannot
10873         // get right.
10874         if (auto ParamAlign = Arg.getParamStackAlign())
10875           MemAlign = *ParamAlign;
10876         else if ((ParamAlign = Arg.getParamAlign()))
10877           MemAlign = *ParamAlign;
10878         else
10879           MemAlign = Align(TLI->getByValTypeAlignment(ArgMemTy, DL));
10880         if (Flags.isByRef())
10881           Flags.setByRefSize(MemSize);
10882         else
10883           Flags.setByValSize(MemSize);
10884       } else if (auto ParamAlign = Arg.getParamStackAlign()) {
10885         MemAlign = *ParamAlign;
10886       } else {
10887         MemAlign = OriginalAlignment;
10888       }
10889       Flags.setMemAlign(MemAlign);
10890 
10891       if (Arg.hasAttribute(Attribute::Nest))
10892         Flags.setNest();
10893       if (NeedsRegBlock)
10894         Flags.setInConsecutiveRegs();
10895       if (ArgCopyElisionCandidates.count(&Arg))
10896         Flags.setCopyElisionCandidate();
10897       if (Arg.hasAttribute(Attribute::Returned))
10898         Flags.setReturned();
10899 
10900       MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
10901           *CurDAG->getContext(), F.getCallingConv(), VT);
10902       unsigned NumRegs = TLI->getNumRegistersForCallingConv(
10903           *CurDAG->getContext(), F.getCallingConv(), VT);
10904       for (unsigned i = 0; i != NumRegs; ++i) {
10905         // For scalable vectors, use the minimum size; individual targets
10906         // are responsible for handling scalable vector arguments and
10907         // return values.
10908         ISD::InputArg MyFlags(
10909             Flags, RegisterVT, VT, isArgValueUsed, ArgNo,
10910             PartBase + i * RegisterVT.getStoreSize().getKnownMinValue());
10911         if (NumRegs > 1 && i == 0)
10912           MyFlags.Flags.setSplit();
10913         // If this isn't the first piece, the alignment must be 1.
10914         else if (i > 0) {
10915           MyFlags.Flags.setOrigAlign(Align(1));
10916           if (i == NumRegs - 1)
10917             MyFlags.Flags.setSplitEnd();
10918         }
10919         Ins.push_back(MyFlags);
10920       }
10921       if (NeedsRegBlock && Value == NumValues - 1)
10922         Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
10923       PartBase += VT.getStoreSize().getKnownMinValue();
10924     }
10925   }
10926 
10927   // Call the target to set up the argument values.
10928   SmallVector<SDValue, 8> InVals;
10929   SDValue NewRoot = TLI->LowerFormalArguments(
10930       DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
10931 
10932   // Verify that the target's LowerFormalArguments behaved as expected.
10933   assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
10934          "LowerFormalArguments didn't return a valid chain!");
10935   assert(InVals.size() == Ins.size() &&
10936          "LowerFormalArguments didn't emit the correct number of values!");
10937   LLVM_DEBUG({
10938     for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
10939       assert(InVals[i].getNode() &&
10940              "LowerFormalArguments emitted a null value!");
10941       assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
10942              "LowerFormalArguments emitted a value with the wrong type!");
10943     }
10944   });
10945 
10946   // Update the DAG with the new chain value resulting from argument lowering.
10947   DAG.setRoot(NewRoot);
10948 
10949   // Set up the argument values.
10950   unsigned i = 0;
10951   if (!FuncInfo->CanLowerReturn) {
10952     // Create a virtual register for the sret pointer, and put in a copy
10953     // from the sret argument into it.
10954     SmallVector<EVT, 1> ValueVTs;
10955     ComputeValueVTs(*TLI, DAG.getDataLayout(),
10956                     PointerType::get(F.getContext(),
10957                                      DAG.getDataLayout().getAllocaAddrSpace()),
10958                     ValueVTs);
10959     MVT VT = ValueVTs[0].getSimpleVT();
10960     MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
10961     std::optional<ISD::NodeType> AssertOp;
10962     SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT,
10963                                         nullptr, F.getCallingConv(), AssertOp);
10964 
10965     MachineFunction& MF = SDB->DAG.getMachineFunction();
10966     MachineRegisterInfo& RegInfo = MF.getRegInfo();
10967     Register SRetReg =
10968         RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
10969     FuncInfo->DemoteRegister = SRetReg;
10970     NewRoot =
10971         SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
10972     DAG.setRoot(NewRoot);
10973 
10974     // i indexes lowered arguments.  Bump it past the hidden sret argument.
10975     ++i;
10976   }
10977 
10978   SmallVector<SDValue, 4> Chains;
10979   DenseMap<int, int> ArgCopyElisionFrameIndexMap;
10980   for (const Argument &Arg : F.args()) {
10981     SmallVector<SDValue, 4> ArgValues;
10982     SmallVector<EVT, 4> ValueVTs;
10983     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
10984     unsigned NumValues = ValueVTs.size();
10985     if (NumValues == 0)
10986       continue;
10987 
10988     bool ArgHasUses = !Arg.use_empty();
10989 
10990     // Elide the copying store if the target loaded this argument from a
10991     // suitable fixed stack object.
10992     if (Ins[i].Flags.isCopyElisionCandidate()) {
10993       unsigned NumParts = 0;
10994       for (EVT VT : ValueVTs)
10995         NumParts += TLI->getNumRegistersForCallingConv(*CurDAG->getContext(),
10996                                                        F.getCallingConv(), VT);
10997 
10998       tryToElideArgumentCopy(*FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
10999                              ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
11000                              ArrayRef(&InVals[i], NumParts), ArgHasUses);
11001     }
11002 
11003     // If this argument is unused, remember its value anyway; it can still be
11004     // used to generate debugging information.
11005     bool isSwiftErrorArg =
11006         TLI->supportSwiftError() &&
11007         Arg.hasAttribute(Attribute::SwiftError);
11008     if (!ArgHasUses && !isSwiftErrorArg) {
11009       SDB->setUnusedArgValue(&Arg, InVals[i]);
11010 
11011       // Also remember any frame index for use in FastISel.
11012       if (FrameIndexSDNode *FI =
11013           dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
11014         FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11015     }
11016 
11017     for (unsigned Val = 0; Val != NumValues; ++Val) {
11018       EVT VT = ValueVTs[Val];
11019       MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
11020                                                       F.getCallingConv(), VT);
11021       unsigned NumParts = TLI->getNumRegistersForCallingConv(
11022           *CurDAG->getContext(), F.getCallingConv(), VT);
11023 
11024       // Even an apparent 'unused' swifterror argument needs to be returned. So
11025       // we do generate a copy for it that can be used on return from the
11026       // function.
11027       if (ArgHasUses || isSwiftErrorArg) {
11028         std::optional<ISD::NodeType> AssertOp;
11029         if (Arg.hasAttribute(Attribute::SExt))
11030           AssertOp = ISD::AssertSext;
11031         else if (Arg.hasAttribute(Attribute::ZExt))
11032           AssertOp = ISD::AssertZext;
11033 
11034         ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
11035                                              PartVT, VT, nullptr,
11036                                              F.getCallingConv(), AssertOp));
11037       }
11038 
11039       i += NumParts;
11040     }
11041 
11042     // We don't need to do anything else for unused arguments.
11043     if (ArgValues.empty())
11044       continue;
11045 
11046     // Note down frame index.
11047     if (FrameIndexSDNode *FI =
11048         dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
11049       FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11050 
11051     SDValue Res = DAG.getMergeValues(ArrayRef(ArgValues.data(), NumValues),
11052                                      SDB->getCurSDLoc());
11053 
11054     SDB->setValue(&Arg, Res);
11055     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
11056       // We want to associate the argument with the frame index, among
11057       // involved operands, that correspond to the lowest address. The
11058       // getCopyFromParts function, called earlier, is swapping the order of
11059       // the operands to BUILD_PAIR depending on endianness. The result of
11060       // that swapping is that the least significant bits of the argument will
11061       // be in the first operand of the BUILD_PAIR node, and the most
11062       // significant bits will be in the second operand.
11063       unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
11064       if (LoadSDNode *LNode =
11065           dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
11066         if (FrameIndexSDNode *FI =
11067             dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
11068           FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11069     }
11070 
11071     // Analyses past this point are naive and don't expect an assertion.
11072     if (Res.getOpcode() == ISD::AssertZext)
11073       Res = Res.getOperand(0);
11074 
11075     // Update the SwiftErrorVRegDefMap.
11076     if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
11077       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11078       if (Register::isVirtualRegister(Reg))
11079         SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
11080                                    Reg);
11081     }
11082 
11083     // If this argument is live outside of the entry block, insert a copy from
11084     // wherever we got it to the vreg that other BB's will reference it as.
11085     if (Res.getOpcode() == ISD::CopyFromReg) {
11086       // If we can, though, try to skip creating an unnecessary vreg.
11087       // FIXME: This isn't very clean... it would be nice to make this more
11088       // general.
11089       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11090       if (Register::isVirtualRegister(Reg)) {
11091         FuncInfo->ValueMap[&Arg] = Reg;
11092         continue;
11093       }
11094     }
11095     if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
11096       FuncInfo->InitializeRegForValue(&Arg);
11097       SDB->CopyToExportRegsIfNeeded(&Arg);
11098     }
11099   }
11100 
11101   if (!Chains.empty()) {
11102     Chains.push_back(NewRoot);
11103     NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
11104   }
11105 
11106   DAG.setRoot(NewRoot);
11107 
11108   assert(i == InVals.size() && "Argument register count mismatch!");
11109 
11110   // If any argument copy elisions occurred and we have debug info, update the
11111   // stale frame indices used in the dbg.declare variable info table.
11112   if (!ArgCopyElisionFrameIndexMap.empty()) {
11113     for (MachineFunction::VariableDbgInfo &VI :
11114          MF->getInStackSlotVariableDbgInfo()) {
11115       auto I = ArgCopyElisionFrameIndexMap.find(VI.getStackSlot());
11116       if (I != ArgCopyElisionFrameIndexMap.end())
11117         VI.updateStackSlot(I->second);
11118     }
11119   }
11120 
11121   // Finally, if the target has anything special to do, allow it to do so.
11122   emitFunctionEntryCode();
11123 }
11124 
11125 /// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
11126 /// ensure constants are generated when needed.  Remember the virtual registers
11127 /// that need to be added to the Machine PHI nodes as input.  We cannot just
11128 /// directly add them, because expansion might result in multiple MBB's for one
11129 /// BB.  As such, the start of the BB might correspond to a different MBB than
11130 /// the end.
11131 void
11132 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
11133   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11134 
11135   SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
11136 
11137   // Check PHI nodes in successors that expect a value to be available from this
11138   // block.
11139   for (const BasicBlock *SuccBB : successors(LLVMBB->getTerminator())) {
11140     if (!isa<PHINode>(SuccBB->begin())) continue;
11141     MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
11142 
11143     // If this terminator has multiple identical successors (common for
11144     // switches), only handle each succ once.
11145     if (!SuccsHandled.insert(SuccMBB).second)
11146       continue;
11147 
11148     MachineBasicBlock::iterator MBBI = SuccMBB->begin();
11149 
11150     // At this point we know that there is a 1-1 correspondence between LLVM PHI
11151     // nodes and Machine PHI nodes, but the incoming operands have not been
11152     // emitted yet.
11153     for (const PHINode &PN : SuccBB->phis()) {
11154       // Ignore dead PHIs.
11155       if (PN.use_empty())
11156         continue;
11157 
11158       // Skip empty types
11159       if (PN.getType()->isEmptyTy())
11160         continue;
11161 
11162       unsigned Reg;
11163       const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
11164 
11165       if (const auto *C = dyn_cast<Constant>(PHIOp)) {
11166         unsigned &RegOut = ConstantsOut[C];
11167         if (RegOut == 0) {
11168           RegOut = FuncInfo.CreateRegs(C);
11169           // We need to zero/sign extend ConstantInt phi operands to match
11170           // assumptions in FunctionLoweringInfo::ComputePHILiveOutRegInfo.
11171           ISD::NodeType ExtendType = ISD::ANY_EXTEND;
11172           if (auto *CI = dyn_cast<ConstantInt>(C))
11173             ExtendType = TLI.signExtendConstant(CI) ? ISD::SIGN_EXTEND
11174                                                     : ISD::ZERO_EXTEND;
11175           CopyValueToVirtualRegister(C, RegOut, ExtendType);
11176         }
11177         Reg = RegOut;
11178       } else {
11179         DenseMap<const Value *, Register>::iterator I =
11180           FuncInfo.ValueMap.find(PHIOp);
11181         if (I != FuncInfo.ValueMap.end())
11182           Reg = I->second;
11183         else {
11184           assert(isa<AllocaInst>(PHIOp) &&
11185                  FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
11186                  "Didn't codegen value into a register!??");
11187           Reg = FuncInfo.CreateRegs(PHIOp);
11188           CopyValueToVirtualRegister(PHIOp, Reg);
11189         }
11190       }
11191 
11192       // Remember that this register needs to be added to the machine PHI node as
11193       // the input for this MBB.
11194       SmallVector<EVT, 4> ValueVTs;
11195       ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
11196       for (EVT VT : ValueVTs) {
11197         const unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
11198         for (unsigned i = 0; i != NumRegisters; ++i)
11199           FuncInfo.PHINodesToUpdate.push_back(
11200               std::make_pair(&*MBBI++, Reg + i));
11201         Reg += NumRegisters;
11202       }
11203     }
11204   }
11205 
11206   ConstantsOut.clear();
11207 }
11208 
11209 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
11210   MachineFunction::iterator I(MBB);
11211   if (++I == FuncInfo.MF->end())
11212     return nullptr;
11213   return &*I;
11214 }
11215 
11216 /// During lowering, new call nodes can be created (such as memset).
11217 /// Those will become new roots of the current DAG, but complications arise
11218 /// when they are tail calls. In such cases, the call lowering will update
11219 /// the root, but the builder still needs to know that a tail call has been
11220 /// lowered in order to avoid generating an additional return.
11221 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
11222   // If the node is null, we do have a tail call.
11223   if (MaybeTC.getNode() != nullptr)
11224     DAG.setRoot(MaybeTC);
11225   else
11226     HasTailCall = true;
11227 }
11228 
11229 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
11230                                         MachineBasicBlock *SwitchMBB,
11231                                         MachineBasicBlock *DefaultMBB) {
11232   MachineFunction *CurMF = FuncInfo.MF;
11233   MachineBasicBlock *NextMBB = nullptr;
11234   MachineFunction::iterator BBI(W.MBB);
11235   if (++BBI != FuncInfo.MF->end())
11236     NextMBB = &*BBI;
11237 
11238   unsigned Size = W.LastCluster - W.FirstCluster + 1;
11239 
11240   BranchProbabilityInfo *BPI = FuncInfo.BPI;
11241 
11242   if (Size == 2 && W.MBB == SwitchMBB) {
11243     // If any two of the cases have the same destination, and if one value
11244     // is the same as the other, but has one bit unset that the other has set,
11245     // use bit manipulation to do two compares at once.  For example:
11246     // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
11247     // TODO: This could be extended to merge any 2 cases in switches with 3
11248     // cases.
11249     // TODO: Handle cases where W.CaseBB != SwitchBB.
11250     CaseCluster &Small = *W.FirstCluster;
11251     CaseCluster &Big = *W.LastCluster;
11252 
11253     if (Small.Low == Small.High && Big.Low == Big.High &&
11254         Small.MBB == Big.MBB) {
11255       const APInt &SmallValue = Small.Low->getValue();
11256       const APInt &BigValue = Big.Low->getValue();
11257 
11258       // Check that there is only one bit different.
11259       APInt CommonBit = BigValue ^ SmallValue;
11260       if (CommonBit.isPowerOf2()) {
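              // Setting the differing bit maps both case values to the larger
              // one (e.g. 4|2 == 6|2 == 6), so one equality test covers both.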
11261         SDValue CondLHS = getValue(Cond);
11262         EVT VT = CondLHS.getValueType();
11263         SDLoc DL = getCurSDLoc();
11264 
11265         SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
11266                                  DAG.getConstant(CommonBit, DL, VT));
11267         SDValue Cond = DAG.getSetCC(
11268             DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
11269             ISD::SETEQ);
11270 
11271         // Update successor info.
11272         // Both Small and Big will jump to Small.BB, so we sum up the
11273         // probabilities.
11274         addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
11275         if (BPI)
11276           addSuccessorWithProb(
11277               SwitchMBB, DefaultMBB,
11278               // The default destination is the first successor in IR.
11279               BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
11280         else
11281           addSuccessorWithProb(SwitchMBB, DefaultMBB);
11282 
11283         // Insert the true branch.
11284         SDValue BrCond =
11285             DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
11286                         DAG.getBasicBlock(Small.MBB));
11287         // Insert the false branch.
11288         BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
11289                              DAG.getBasicBlock(DefaultMBB));
11290 
11291         DAG.setRoot(BrCond);
11292         return;
11293       }
11294     }
11295   }
11296 
11297   if (TM.getOptLevel() != CodeGenOptLevel::None) {
11298     // Here, we order cases by probability so the most likely case will be
11299     // checked first. However, two clusters can have the same probability in
11300     // which case their relative ordering is non-deterministic. So we use Low
11301     // as a tie-breaker, since clusters are guaranteed never to overlap.
11302     llvm::sort(W.FirstCluster, W.LastCluster + 1,
11303                [](const CaseCluster &a, const CaseCluster &b) {
11304       return a.Prob != b.Prob ?
11305              a.Prob > b.Prob :
11306              a.Low->getValue().slt(b.Low->getValue());
11307     });
11308 
11309     // Rearrange the case blocks so that the last one falls through if possible
11310     // without changing the order of probabilities.
11311     for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
11312       --I;
11313       if (I->Prob > W.LastCluster->Prob)
11314         break;
11315       if (I->Kind == CC_Range && I->MBB == NextMBB) {
11316         std::swap(*I, *W.LastCluster);
11317         break;
11318       }
11319     }
11320   }
11321 
11322   // Compute total probability.
11323   BranchProbability DefaultProb = W.DefaultProb;
11324   BranchProbability UnhandledProbs = DefaultProb;
11325   for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
11326     UnhandledProbs += I->Prob;
11327 
11328   MachineBasicBlock *CurMBB = W.MBB;
11329   for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
11330     bool FallthroughUnreachable = false;
11331     MachineBasicBlock *Fallthrough;
11332     if (I == W.LastCluster) {
11333       // For the last cluster, fall through to the default destination.
11334       Fallthrough = DefaultMBB;
11335       FallthroughUnreachable = isa<UnreachableInst>(
11336           DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
11337     } else {
11338       Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
11339       CurMF->insert(BBI, Fallthrough);
11340       // Put Cond in a virtual register to make it available from the new blocks.
11341       ExportFromCurrentBlock(Cond);
11342     }
11343     UnhandledProbs -= I->Prob;
11344 
11345     switch (I->Kind) {
11346       case CC_JumpTable: {
11347         // FIXME: Optimize away range check based on pivot comparisons.
11348         JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
11349         SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
11350 
11351         // The jump block hasn't been inserted yet; insert it here.
11352         MachineBasicBlock *JumpMBB = JT->MBB;
11353         CurMF->insert(BBI, JumpMBB);
11354 
11355         auto JumpProb = I->Prob;
11356         auto FallthroughProb = UnhandledProbs;
11357 
11358         // If the default statement is a target of the jump table, we evenly
11359         // distribute the default probability to successors of CurMBB. Also
11360         // update the probability on the edge from JumpMBB to DefaultMBB.
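              // For example, with DefaultProb == 1/4, the jump edge gains 1/8,
              // the fallthrough edge gives up 1/8, and the JumpMBB -> DefaultMBB
              // edge is set to the remaining 1/8.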
11361         for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
11362                                               SE = JumpMBB->succ_end();
11363              SI != SE; ++SI) {
11364           if (*SI == DefaultMBB) {
11365             JumpProb += DefaultProb / 2;
11366             FallthroughProb -= DefaultProb / 2;
11367             JumpMBB->setSuccProbability(SI, DefaultProb / 2);
11368             JumpMBB->normalizeSuccProbs();
11369             break;
11370           }
11371         }
11372 
11373         // If the default clause is unreachable, propagate that knowledge into
11374         // JTH->FallthroughUnreachable which will use it to suppress the range
11375         // check.
11376         //
11377         // However, don't do this if we're doing branch target enforcement,
11378         // because a table branch _without_ a range check can be a tempting JOP
11379         // gadget - out-of-bounds inputs that are impossible in correct
11380         // execution become possible again if an attacker can influence the
11381         // control flow. So if an attacker doesn't already have a BTI bypass
11382         // available, we don't want them to be able to get one out of this
11383         // table branch.
11384         if (FallthroughUnreachable) {
11385           Function &CurFunc = CurMF->getFunction();
11386           bool HasBranchTargetEnforcement = false;
11387           if (CurFunc.hasFnAttribute("branch-target-enforcement")) {
11388             HasBranchTargetEnforcement =
11389                 CurFunc.getFnAttribute("branch-target-enforcement")
11390                     .getValueAsBool();
11391           } else {
11392             HasBranchTargetEnforcement =
11393                 CurMF->getMMI().getModule()->getModuleFlag(
11394                     "branch-target-enforcement");
11395           }
11396           if (!HasBranchTargetEnforcement)
11397             JTH->FallthroughUnreachable = true;
11398         }
11399 
11400         if (!JTH->FallthroughUnreachable)
11401           addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
11402         addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
11403         CurMBB->normalizeSuccProbs();
11404 
11405         // The jump table header will be inserted into our current block; it
11406         // will do the range check and fall through to our fallthrough block.
11407         JTH->HeaderBB = CurMBB;
11408         JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
11409 
11410         // If we're in the right place, emit the jump table header right now.
11411         if (CurMBB == SwitchMBB) {
11412           visitJumpTableHeader(*JT, *JTH, SwitchMBB);
11413           JTH->Emitted = true;
11414         }
11415         break;
11416       }
11417       case CC_BitTests: {
11418         // FIXME: Optimize away range check based on pivot comparisons.
11419         BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
11420 
11421         // The bit test blocks haven't been inserted yet; insert them here.
11422         for (BitTestCase &BTC : BTB->Cases)
11423           CurMF->insert(BBI, BTC.ThisBB);
11424 
11425         // Fill in fields of the BitTestBlock.
11426         BTB->Parent = CurMBB;
11427         BTB->Default = Fallthrough;
11428 
11429         BTB->DefaultProb = UnhandledProbs;
11430         // If the cases in bit test don't form a contiguous range, we evenly
11431         // distribute the probability on the edge to Fallthrough to two
11432         // successors of CurMBB.
11433         if (!BTB->ContiguousRange) {
11434           BTB->Prob += DefaultProb / 2;
11435           BTB->DefaultProb -= DefaultProb / 2;
11436         }
11437 
11438         if (FallthroughUnreachable)
11439           BTB->FallthroughUnreachable = true;
11440 
11441         // If we're in the right place, emit the bit test header right now.
11442         if (CurMBB == SwitchMBB) {
11443           visitBitTestHeader(*BTB, SwitchMBB);
11444           BTB->Emitted = true;
11445         }
11446         break;
11447       }
11448       case CC_Range: {
11449         const Value *RHS, *LHS, *MHS;
11450         ISD::CondCode CC;
11451         if (I->Low == I->High) {
11452           // Check Cond == I->Low.
11453           CC = ISD::SETEQ;
11454           LHS = Cond;
11455         RHS = I->Low;
11456           MHS = nullptr;
11457         } else {
11458           // Check I->Low <= Cond <= I->High.
11459           CC = ISD::SETLE;
11460           LHS = I->Low;
11461           MHS = Cond;
11462           RHS = I->High;
11463         }
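              // For example, 'case 5:' emits the check Cond == 5 via SETEQ,
              // while a cluster merged from 'case 5 ... 7:' emits the range
              // check 5 <= Cond <= 7, with Cond carried in MHS.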
11464 
11465         // If Fallthrough is unreachable, fold away the comparison.
11466         if (FallthroughUnreachable)
11467           CC = ISD::SETTRUE;
11468 
11469         // The false probability is the sum of all unhandled cases.
11470         CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
11471                      getCurSDLoc(), I->Prob, UnhandledProbs);
11472 
11473         if (CurMBB == SwitchMBB)
11474           visitSwitchCase(CB, SwitchMBB);
11475         else
11476           SL->SwitchCases.push_back(CB);
11477 
11478         break;
11479       }
11480     }
11481     CurMBB = Fallthrough;
11482   }
11483 }
11484 
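      // Return the rank of CC among the clusters in [First, Last]: the number
      // of clusters in that range that are ordered before CC, i.e. that have a
      // higher probability, or an equal probability and a smaller case value.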
11485 unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
11486                                               CaseClusterIt First,
11487                                               CaseClusterIt Last) {
11488   return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
11489     if (X.Prob != CC.Prob)
11490       return X.Prob > CC.Prob;
11491 
11492     // Ties are broken by comparing the case value.
11493     return X.Low->getValue().slt(CC.Low->getValue());
11494   });
11495 }
11496 
11497 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
11498                                         const SwitchWorkListItem &W,
11499                                         Value *Cond,
11500                                         MachineBasicBlock *SwitchMBB) {
11501   assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
11502          "Clusters not sorted?");
11503 
11504   assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
11505 
11506   // Balance the tree based on branch probabilities to create a near-optimal (in
11507   // terms of search time given key frequency) binary search tree. See e.g. Kurt
11508   // Mehlhorn "Nearly Optimal Binary Search Trees" (1975).
11509   CaseClusterIt LastLeft = W.FirstCluster;
11510   CaseClusterIt FirstRight = W.LastCluster;
11511   auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
11512   auto RightProb = FirstRight->Prob + W.DefaultProb / 2;
11513 
11514   // Move LastLeft and FirstRight towards each other from opposite directions to
11515   // find a partitioning of the clusters which balances the probability on both
11516   // sides. If LeftProb and RightProb are equal, alternate which side is
11517   // taken to ensure 0-probability nodes are distributed evenly.
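        // For example, given clusters with probabilities [1/8, 1/8, 1/4, 1/2]
        // (the default probability is split evenly between the two sides), the
        // loop terminates with [1/8, 1/8, 1/4] on the left and [1/2] on the
        // right, so each side carries a probability of 1/2.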
11518   unsigned I = 0;
11519   while (LastLeft + 1 < FirstRight) {
11520     if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
11521       LeftProb += (++LastLeft)->Prob;
11522     else
11523       RightProb += (--FirstRight)->Prob;
11524     I++;
11525   }
11526 
11527   while (true) {
11528     // Our binary search tree differs from a typical BST in that ours can have up
11529     // to three values in each leaf. The pivot selection above doesn't take that
11530     // into account, which means the tree might require more nodes and be less
11531     // efficient. We compensate for this here.
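          // For example, a 2/5 partition may be rebalanced to 3/4 by moving
          // the boundary cluster, but only when the move does not demote that
          // cluster's rank, so more probable clusters are still checked first.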
11532 
11533     unsigned NumLeft = LastLeft - W.FirstCluster + 1;
11534     unsigned NumRight = W.LastCluster - FirstRight + 1;
11535 
11536     if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
11537       // If one side has fewer than 3 clusters and the other has more than 3,
11538       // consider moving a cluster from the larger side to the smaller one.
11539 
11540       if (NumLeft < NumRight) {
11541         // Consider moving the first cluster on the right to the left side.
11542         CaseCluster &CC = *FirstRight;
11543         unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
11544         unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
11545         if (LeftSideRank <= RightSideRank) {
11546           // Moving the cluster to the left does not demote it.
11547           ++LastLeft;
11548           ++FirstRight;
11549           continue;
11550         }
11551       } else {
11552         assert(NumRight < NumLeft);
11553         // Consider moving the last element on the left to the right side.
11554         CaseCluster &CC = *LastLeft;
11555         unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
11556         unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
11557         if (RightSideRank <= LeftSideRank) {
11558           // Moving the cluster to the right does not demote it.
11559           --LastLeft;
11560           --FirstRight;
11561           continue;
11562         }
11563       }
11564     }
11565     break;
11566   }
11567 
11568   assert(LastLeft + 1 == FirstRight);
11569   assert(LastLeft >= W.FirstCluster);
11570   assert(FirstRight <= W.LastCluster);
11571 
11572   // Use the first element on the right as pivot since we will make less-than
11573   // comparisons against it.
11574   CaseClusterIt PivotCluster = FirstRight;
11575   assert(PivotCluster > W.FirstCluster);
11576   assert(PivotCluster <= W.LastCluster);
11577 
11578   CaseClusterIt FirstLeft = W.FirstCluster;
11579   CaseClusterIt LastRight = W.LastCluster;
11580 
11581   const ConstantInt *Pivot = PivotCluster->Low;
11582 
11583   // New blocks will be inserted immediately after the current one.
11584   MachineFunction::iterator BBI(W.MBB);
11585   ++BBI;
11586 
11587   // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
11588   // we can branch to its destination directly if it's squeezed exactly in
11589   // between the known lower bound and Pivot - 1.
11590   MachineBasicBlock *LeftMBB;
11591   if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
11592       FirstLeft->Low == W.GE &&
11593       (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
11594     LeftMBB = FirstLeft->MBB;
11595   } else {
11596     LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
11597     FuncInfo.MF->insert(BBI, LeftMBB);
11598     WorkList.push_back(
11599         {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
11600     // Put Cond in a virtual register to make it available from the new blocks.
11601     ExportFromCurrentBlock(Cond);
11602   }
11603 
11604   // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
11605   // single cluster, RHS.Low == Pivot, and we can branch to its destination
11606   // directly if RHS.High equals the current upper bound.
11607   MachineBasicBlock *RightMBB;
11608   if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
11609       W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
11610     RightMBB = FirstRight->MBB;
11611   } else {
11612     RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
11613     FuncInfo.MF->insert(BBI, RightMBB);
11614     WorkList.push_back(
11615         {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
11616     // Put Cond in a virtual register to make it available from the new blocks.
11617     ExportFromCurrentBlock(Cond);
11618   }
11619 
11620   // Create the CaseBlock record that will be used to lower the branch.
11621   CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
11622                getCurSDLoc(), LeftProb, RightProb);
11623 
11624   if (W.MBB == SwitchMBB)
11625     visitSwitchCase(CB, SwitchMBB);
11626   else
11627     SL->SwitchCases.push_back(CB);
11628 }
11629 
11630 // Scale CaseProb after peeling a case with the probability of PeeledCaseProb
11631 // from the switch statement.
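      // For example, peeling a case with probability 3/4 leaves SwitchProb at
      // 1/4, and a remaining case that had probability 1/8 of the original
      // switch is rescaled to (1/8) / (1/4) == 1/2 of the peeled switch.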
11632 static BranchProbability scaleCaseProbability(BranchProbability CaseProb,
11633                                               BranchProbability PeeledCaseProb) {
11634   if (PeeledCaseProb == BranchProbability::getOne())
11635     return BranchProbability::getZero();
11636   BranchProbability SwitchProb = PeeledCaseProb.getCompl();
11637 
11638   uint32_t Numerator = CaseProb.getNumerator();
11639   uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
11640   return BranchProbability(Numerator, std::max(Numerator, Denominator));
11641 }
11642 
11643 // Try to peel the case with the highest probability if it exceeds the
11644 // threshold. Return the current MachineBasicBlock for the switch statement
11645 // if peeling does not occur.
11646 // If peeling is performed, return the newly created MachineBasicBlock for
11647 // the peeled switch statement, update Clusters to remove the peeled case,
11648 // and set PeeledCaseProb to the BranchProbability of the peeled case.
11649 MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
11650     const SwitchInst &SI, CaseClusterVector &Clusters,
11651     BranchProbability &PeeledCaseProb) {
11652   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
11653   // Don't peel if there is only one cluster or when optimizing for size.
11654   if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
11655       TM.getOptLevel() == CodeGenOptLevel::None ||
11656       SwitchMBB->getParent()->getFunction().hasMinSize())
11657     return SwitchMBB;
11658 
11659   BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
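        // A cluster qualifies for peeling only if its probability is at least
        // SwitchPeelThreshold percent; the scan below keeps the most probable
        // qualifying cluster (later clusters win ties).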
11660   unsigned PeeledCaseIndex = 0;
11661   bool SwitchPeeled = false;
11662   for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
11663     CaseCluster &CC = Clusters[Index];
11664     if (CC.Prob < TopCaseProb)
11665       continue;
11666     TopCaseProb = CC.Prob;
11667     PeeledCaseIndex = Index;
11668     SwitchPeeled = true;
11669   }
11670   if (!SwitchPeeled)
11671     return SwitchMBB;
11672 
11673   LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
11674                     << TopCaseProb << "\n");
11675 
11676   // Record the MBB for the peeled switch statement.
11677   MachineFunction::iterator BBI(SwitchMBB);
11678   ++BBI;
11679   MachineBasicBlock *PeeledSwitchMBB =
11680       FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
11681   FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
11682 
11683   ExportFromCurrentBlock(SI.getCondition());
11684   auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
11685   SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
11686                           nullptr,   nullptr,      TopCaseProb.getCompl()};
11687   lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
11688 
11689   Clusters.erase(PeeledCaseIt);
11690   for (CaseCluster &CC : Clusters) {
11691     LLVM_DEBUG(
11692         dbgs() << "Scale the probablity for one cluster, before scaling: "
11693                << CC.Prob << "\n");
11694     CC.Prob = scaleCaseProbability(CC.Prob, TopCaseProb);
11695     LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
11696   }
11697   PeeledCaseProb = TopCaseProb;
11698   return PeeledSwitchMBB;
11699 }
11700 
11701 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
11702   // Extract cases from the switch.
11703   BranchProbabilityInfo *BPI = FuncInfo.BPI;
11704   CaseClusterVector Clusters;
11705   Clusters.reserve(SI.getNumCases());
11706   for (auto I : SI.cases()) {
11707     MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
11708     const ConstantInt *CaseVal = I.getCaseValue();
11709     BranchProbability Prob =
11710         BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
11711             : BranchProbability(1, SI.getNumCases() + 1);
11712     Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
11713   }
11714 
11715   MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];
11716 
11717   // Cluster adjacent cases with the same destination. We do this at all
11718   // optimization levels because it's cheap to do and will make codegen faster
11719   // if there are many clusters.
11720   sortAndRangeify(Clusters);
11721 
11722   // The branch probability of the peeled case.
11723   BranchProbability PeeledCaseProb = BranchProbability::getZero();
11724   MachineBasicBlock *PeeledSwitchMBB =
11725       peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
11726 
11727   // If there is only the default destination, jump there directly.
11728   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
11729   if (Clusters.empty()) {
11730     assert(PeeledSwitchMBB == SwitchMBB);
11731     SwitchMBB->addSuccessor(DefaultMBB);
11732     if (DefaultMBB != NextBlock(SwitchMBB)) {
11733       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
11734                               getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
11735     }
11736     return;
11737   }
11738 
11739   SL->findJumpTables(Clusters, &SI, DefaultMBB, DAG.getPSI(), DAG.getBFI());
11740   SL->findBitTestClusters(Clusters, &SI);
11741 
11742   LLVM_DEBUG({
11743     dbgs() << "Case clusters: ";
11744     for (const CaseCluster &C : Clusters) {
11745       if (C.Kind == CC_JumpTable)
11746         dbgs() << "JT:";
11747       if (C.Kind == CC_BitTests)
11748         dbgs() << "BT:";
11749 
11750       C.Low->getValue().print(dbgs(), true);
11751       if (C.Low != C.High) {
11752         dbgs() << '-';
11753         C.High->getValue().print(dbgs(), true);
11754       }
11755       dbgs() << ' ';
11756     }
11757     dbgs() << '\n';
11758   });
11759 
11760   assert(!Clusters.empty());
11761   SwitchWorkList WorkList;
11762   CaseClusterIt First = Clusters.begin();
11763   CaseClusterIt Last = Clusters.end() - 1;
11764   auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
11765   // Scale the branch probability for DefaultMBB if peeling occurred and
11766   // DefaultMBB was not replaced.
11767   if (PeeledCaseProb != BranchProbability::getZero() &&
11768       DefaultMBB == FuncInfo.MBBMap[SI.getDefaultDest()])
11769     DefaultProb = scaleCaseProbability(DefaultProb, PeeledCaseProb);
11770   WorkList.push_back(
11771       {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
11772 
11773   while (!WorkList.empty()) {
11774     SwitchWorkListItem W = WorkList.pop_back_val();
11775     unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
11776 
11777     if (NumClusters > 3 && TM.getOptLevel() != CodeGenOptLevel::None &&
11778         !DefaultMBB->getParent()->getFunction().hasMinSize()) {
11779       // For optimized builds, lower large range as a balanced binary tree.
11780       splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
11781       continue;
11782     }
11783 
11784     lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
11785   }
11786 }
11787 
11788 void SelectionDAGBuilder::visitStepVector(const CallInst &I) {
11789   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11790   auto DL = getCurSDLoc();
11791   EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11792   setValue(&I, DAG.getStepVector(DL, ResultVT));
11793 }
11794 
11795 void SelectionDAGBuilder::visitVectorReverse(const CallInst &I) {
11796   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11797   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11798 
11799   SDLoc DL = getCurSDLoc();
11800   SDValue V = getValue(I.getOperand(0));
11801   assert(VT == V.getValueType() && "Malformed vector.reverse!");
11802 
11803   if (VT.isScalableVector()) {
11804     setValue(&I, DAG.getNode(ISD::VECTOR_REVERSE, DL, VT, V));
11805     return;
11806   }
11807 
11808   // Use VECTOR_SHUFFLE for the fixed-length vector
11809   // to maintain existing behavior.
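        // For example, a 4-element vector gets the shuffle mask [3, 2, 1, 0].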
11810   SmallVector<int, 8> Mask;
11811   unsigned NumElts = VT.getVectorMinNumElements();
11812   for (unsigned i = 0; i != NumElts; ++i)
11813     Mask.push_back(NumElts - 1 - i);
11814 
11815   setValue(&I, DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), Mask));
11816 }
11817 
11818 void SelectionDAGBuilder::visitVectorDeinterleave(const CallInst &I) {
11819   auto DL = getCurSDLoc();
11820   SDValue InVec = getValue(I.getOperand(0));
11821   EVT OutVT =
11822       InVec.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
11823 
11824   unsigned OutNumElts = OutVT.getVectorMinNumElements();
11825 
11826   // The ISD node needs the input vector split into two equal parts.
11827   SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec,
11828                            DAG.getVectorIdxConstant(0, DL));
11829   SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec,
11830                            DAG.getVectorIdxConstant(OutNumElts, DL));
11831 
11832   // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing
11833   // legalisation and combines.
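        // For example, with OutNumElts == 4 the even mask is [0, 2, 4, 6] and
        // the odd mask is [1, 3, 5, 7], indexing into the Lo/Hi pair.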
11834   if (OutVT.isFixedLengthVector()) {
11835     SDValue Even = DAG.getVectorShuffle(OutVT, DL, Lo, Hi,
11836                                         createStrideMask(0, 2, OutNumElts));
11837     SDValue Odd = DAG.getVectorShuffle(OutVT, DL, Lo, Hi,
11838                                        createStrideMask(1, 2, OutNumElts));
11839     SDValue Res = DAG.getMergeValues({Even, Odd}, getCurSDLoc());
11840     setValue(&I, Res);
11841     return;
11842   }
11843 
11844   SDValue Res = DAG.getNode(ISD::VECTOR_DEINTERLEAVE, DL,
11845                             DAG.getVTList(OutVT, OutVT), Lo, Hi);
11846   setValue(&I, Res);
11847 }
11848 
11849 void SelectionDAGBuilder::visitVectorInterleave(const CallInst &I) {
11850   auto DL = getCurSDLoc();
11851   EVT InVT = getValue(I.getOperand(0)).getValueType();
11852   SDValue InVec0 = getValue(I.getOperand(0));
11853   SDValue InVec1 = getValue(I.getOperand(1));
11854   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11855   EVT OutVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11856 
11857   // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing
11858   // legalisation and combines.
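        // For example, with NumElts == 4 the interleave mask over the
        // concatenated pair is [0, 4, 1, 5, 2, 6, 3, 7].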
11859   if (OutVT.isFixedLengthVector()) {
11860     unsigned NumElts = InVT.getVectorMinNumElements();
11861     SDValue V = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, InVec0, InVec1);
11862     setValue(&I, DAG.getVectorShuffle(OutVT, DL, V, DAG.getUNDEF(OutVT),
11863                                       createInterleaveMask(NumElts, 2)));
11864     return;
11865   }
11866 
11867   SDValue Res = DAG.getNode(ISD::VECTOR_INTERLEAVE, DL,
11868                             DAG.getVTList(InVT, InVT), InVec0, InVec1);
11869   Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Res.getValue(0),
11870                     Res.getValue(1));
11871   setValue(&I, Res);
11872 }
11873 
11874 void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
11875   SmallVector<EVT, 4> ValueVTs;
11876   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
11877                   ValueVTs);
11878   unsigned NumValues = ValueVTs.size();
11879   if (NumValues == 0) return;
11880 
11881   SmallVector<SDValue, 4> Values(NumValues);
11882   SDValue Op = getValue(I.getOperand(0));
11883 
11884   for (unsigned i = 0; i != NumValues; ++i)
11885     Values[i] = DAG.getNode(ISD::FREEZE, getCurSDLoc(), ValueVTs[i],
11886                             SDValue(Op.getNode(), Op.getResNo() + i));
11887 
11888   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
11889                            DAG.getVTList(ValueVTs), Values));
11890 }
11891 
11892 void SelectionDAGBuilder::visitVectorSplice(const CallInst &I) {
11893   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11894   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11895 
11896   SDLoc DL = getCurSDLoc();
11897   SDValue V1 = getValue(I.getOperand(0));
11898   SDValue V2 = getValue(I.getOperand(1));
11899   int64_t Imm = cast<ConstantInt>(I.getOperand(2))->getSExtValue();
11900 
11901   // VECTOR_SHUFFLE doesn't support a scalable mask so use a dedicated node.
11902   if (VT.isScalableVector()) {
11903     MVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
11904     setValue(&I, DAG.getNode(ISD::VECTOR_SPLICE, DL, VT, V1, V2,
11905                              DAG.getConstant(Imm, DL, IdxVT)));
11906     return;
11907   }
11908 
11909   unsigned NumElts = VT.getVectorNumElements();
11910 
11911   uint64_t Idx = (NumElts + Imm) % NumElts;
11912 
11913   // Use VECTOR_SHUFFLE to maintain original behavior for fixed-length vectors.
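        // For example, with NumElts == 4: Imm == 1 gives Idx == 1 and the mask
        // [1, 2, 3, 4] (drop V1's first element, take one from V2), while
        // Imm == -1 gives Idx == 3 and the mask [3, 4, 5, 6].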
11914   SmallVector<int, 8> Mask;
11915   for (unsigned i = 0; i < NumElts; ++i)
11916     Mask.push_back(Idx + i);
11917   setValue(&I, DAG.getVectorShuffle(VT, DL, V1, V2, Mask));
11918 }
11919 
11920 // Consider the following MIR after SelectionDAG, which produces output in
11921 // phyregs in the first case or virtregs in the second case.
11922 //
11923 // INLINEASM_BR ..., implicit-def $ebx, ..., implicit-def $edx
11924 // %5:gr32 = COPY $ebx
11925 // %6:gr32 = COPY $edx
11926 // %1:gr32 = COPY %6:gr32
11927 // %0:gr32 = COPY %5:gr32
11928 //
11929 // INLINEASM_BR ..., def %5:gr32, ..., def %6:gr32
11930 // %1:gr32 = COPY %6:gr32
11931 // %0:gr32 = COPY %5:gr32
11932 //
11933 // Given %0, we'd like to return $ebx in the first case and %5 in the second.
11934 // Given %1, we'd like to return $edx in the first case and %6 in the second.
11935 //
11936 // If a callbr has outputs, it will have a single mapping in FuncInfo.ValueMap
11937 // to a single virtreg (such as %0). The remaining outputs monotonically
11938 // increase in virtreg number from there. If a callbr has no outputs, then it
11939 // should not have a corresponding callbr landingpad; in fact, the callbr
11940 // landingpad would not even be able to refer to such a callbr.
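      // For the first example above, following %0 walks %0 <- %5 <- $ebx and
      // returns $ebx; for the second, the walk stops at %5, which is defined
      // directly by the INLINEASM_BR, and returns %5.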
11941 static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg) {
11942   MachineInstr *MI = MRI.def_begin(Reg)->getParent();
11943   // There is definitely at least one copy.
11944   assert(MI->getOpcode() == TargetOpcode::COPY &&
11945          "start of copy chain MUST be COPY");
11946   Reg = MI->getOperand(1).getReg();
11947   MI = MRI.def_begin(Reg)->getParent();
11948   // There may be an optional second copy.
11949   if (MI->getOpcode() == TargetOpcode::COPY) {
11950     assert(Reg.isVirtual() && "expected COPY of virtual register");
11951     Reg = MI->getOperand(1).getReg();
11952     assert(Reg.isPhysical() && "expected COPY of physical register");
11953     MI = MRI.def_begin(Reg)->getParent();
11954   }
11955   // The chain must start at (i.e., our walk must end at) an INLINEASM_BR.
11956   assert(MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
11957          "end of copy chain MUST be INLINEASM_BR");
11958   return Reg;
11959 }
11960 
11961 // We must do this walk rather than the simpler
11962 //   setValue(&I, getCopyFromRegs(CBR, CBR->getType()));
11963 // because otherwise we will end up with copies of virtregs that are only
11964 // valid along direct edges.
11965 void SelectionDAGBuilder::visitCallBrLandingPad(const CallInst &I) {
11966   SmallVector<EVT, 8> ResultVTs;
11967   SmallVector<SDValue, 8> ResultValues;
11968   const auto *CBR =
11969       cast<CallBrInst>(I.getParent()->getUniquePredecessor()->getTerminator());
11970 
11971   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11972   const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
11973   MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
11974 
11975   unsigned InitialDef = FuncInfo.ValueMap[CBR];
11976   SDValue Chain = DAG.getRoot();
11977 
11978   // Re-parse the asm constraints string.
11979   TargetLowering::AsmOperandInfoVector TargetConstraints =
11980       TLI.ParseConstraints(DAG.getDataLayout(), TRI, *CBR);
11981   for (auto &T : TargetConstraints) {
11982     SDISelAsmOperandInfo OpInfo(T);
11983     if (OpInfo.Type != InlineAsm::isOutput)
11984       continue;
11985 
11986     // Pencil in OpInfo.ConstraintType and OpInfo.ConstraintVT based on the
11987     // individual constraint.
11988     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
11989 
11990     switch (OpInfo.ConstraintType) {
11991     case TargetLowering::C_Register:
11992     case TargetLowering::C_RegisterClass: {
11993       // Fill in OpInfo.AssignedRegs.Regs.
11994       getRegistersForValue(DAG, getCurSDLoc(), OpInfo, OpInfo);
11995 
11996       // getRegistersForValue may produce one or more registers, depending on
11997       // whether OpInfo.ConstraintVT is legal on the target.
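            // For example, an i64 output on a 32-bit target would typically be
            // assigned two 32-bit registers, each of which is traced through
            // its own copy chain below.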
11998       for (size_t i = 0, e = OpInfo.AssignedRegs.Regs.size(); i != e; ++i) {
11999         Register OriginalDef = FollowCopyChain(MRI, InitialDef++);
12000         if (Register::isPhysicalRegister(OriginalDef))
12001           FuncInfo.MBB->addLiveIn(OriginalDef);
12002         // Update the assigned registers to use the original defs.
12003         OpInfo.AssignedRegs.Regs[i] = OriginalDef;
12004       }
12005 
12006       SDValue V = OpInfo.AssignedRegs.getCopyFromRegs(
12007           DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, CBR);
12008       ResultValues.push_back(V);
12009       ResultVTs.push_back(OpInfo.ConstraintVT);
12010       break;
12011     }
12012     case TargetLowering::C_Other: {
12013       SDValue Flag;
12014       SDValue V = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
12015                                                   OpInfo, DAG);
12016       ++InitialDef;
12017       ResultValues.push_back(V);
12018       ResultVTs.push_back(OpInfo.ConstraintVT);
12019       break;
12020     }
12021     default:
12022       break;
12023     }
12024   }
12025   SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
12026                           DAG.getVTList(ResultVTs), ResultValues);
12027   setValue(&I, V);
12028 }
12029