xref: /llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp (revision 89881480030f48f83af668175b70a9798edca2fb)
1 //===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This implements the TargetLoweringBase class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/ADT/BitVector.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SmallVector.h"
16 #include "llvm/ADT/StringExtras.h"
17 #include "llvm/ADT/StringRef.h"
18 #include "llvm/ADT/Twine.h"
19 #include "llvm/Analysis/Loads.h"
20 #include "llvm/Analysis/TargetTransformInfo.h"
21 #include "llvm/CodeGen/Analysis.h"
22 #include "llvm/CodeGen/ISDOpcodes.h"
23 #include "llvm/CodeGen/MachineBasicBlock.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstr.h"
27 #include "llvm/CodeGen/MachineInstrBuilder.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineOperand.h"
30 #include "llvm/CodeGen/MachineRegisterInfo.h"
31 #include "llvm/CodeGen/RuntimeLibcalls.h"
32 #include "llvm/CodeGen/StackMaps.h"
33 #include "llvm/CodeGen/TargetLowering.h"
34 #include "llvm/CodeGen/TargetOpcodes.h"
35 #include "llvm/CodeGen/TargetRegisterInfo.h"
36 #include "llvm/CodeGen/ValueTypes.h"
37 #include "llvm/CodeGenTypes/MachineValueType.h"
38 #include "llvm/IR/Attributes.h"
39 #include "llvm/IR/CallingConv.h"
40 #include "llvm/IR/DataLayout.h"
41 #include "llvm/IR/DerivedTypes.h"
42 #include "llvm/IR/Function.h"
43 #include "llvm/IR/GlobalValue.h"
44 #include "llvm/IR/GlobalVariable.h"
45 #include "llvm/IR/IRBuilder.h"
46 #include "llvm/IR/Module.h"
47 #include "llvm/IR/Type.h"
48 #include "llvm/Support/Casting.h"
49 #include "llvm/Support/CommandLine.h"
50 #include "llvm/Support/Compiler.h"
51 #include "llvm/Support/ErrorHandling.h"
52 #include "llvm/Support/MathExtras.h"
53 #include "llvm/Target/TargetMachine.h"
54 #include "llvm/Target/TargetOptions.h"
55 #include "llvm/TargetParser/Triple.h"
56 #include "llvm/Transforms/Utils/SizeOpts.h"
57 #include <algorithm>
58 #include <cassert>
59 #include <cstdint>
60 #include <cstring>
61 #include <iterator>
62 #include <string>
63 #include <tuple>
64 #include <utility>
65 
66 using namespace llvm;
67 
68 static cl::opt<bool> JumpIsExpensiveOverride(
69     "jump-is-expensive", cl::init(false),
70     cl::desc("Do not create extra branches to split comparison logic."),
71     cl::Hidden);
72 
73 static cl::opt<unsigned> MinimumJumpTableEntries(
74     "min-jump-table-entries", cl::init(4), cl::Hidden,
75     cl::desc("Set minimum number of entries to use a jump table."));
76 
77 static cl::opt<unsigned> MaximumJumpTableSize(
78     "max-jump-table-size", cl::init(UINT_MAX), cl::Hidden,
79     cl::desc("Set maximum size of jump tables."));
80 
81 /// Minimum jump table density for normal functions.
82 static cl::opt<unsigned>
83     JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
84                      cl::desc("Minimum density for building a jump table in "
85                               "a normal function"));
86 
87 /// Minimum jump table density for -Os or -Oz functions.
88 static cl::opt<unsigned> OptsizeJumpTableDensity(
89     "optsize-jump-table-density", cl::init(40), cl::Hidden,
90     cl::desc("Minimum density for building a jump table in "
91              "an optsize function"));
92 
93 // FIXME: This option exists only to test, during development, whether strict
94 // fp operations are processed correctly, by preventing strict fp operations
95 // from being mutated into normal fp operations. Once the backend supports
96 // strict float operations, this option will be meaningless.
97 static cl::opt<bool> DisableStrictNodeMutation("disable-strictnode-mutation",
98        cl::desc("Don't mutate strict-float node to a legalize node"),
99        cl::init(false), cl::Hidden);
100 
101 static bool darwinHasSinCos(const Triple &TT) {
102   assert(TT.isOSDarwin() && "should be called with darwin triple");
103   // Don't bother with 32-bit x86.
104   if (TT.getArch() == Triple::x86)
105     return false;
106   // macOS < 10.9 has no sincos_stret.
107   if (TT.isMacOSX())
108     return !TT.isMacOSXVersionLT(10, 9) && TT.isArch64Bit();
109   // iOS < 7.0 has no sincos_stret.
110   if (TT.isiOS())
111     return !TT.isOSVersionLT(7, 0);
112   // Any other Darwin OS, such as WatchOS or TvOS, is new enough.
113   return true;
114 }
115 
116 void TargetLoweringBase::InitLibcalls(const Triple &TT) {
117 #define HANDLE_LIBCALL(code, name) \
118   setLibcallName(RTLIB::code, name);
119 #include "llvm/IR/RuntimeLibcalls.def"
120 #undef HANDLE_LIBCALL
121   // Initialize calling conventions to their default.
122   for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
123     setLibcallCallingConv((RTLIB::Libcall)LC, CallingConv::C);
124 
125   // Use the f128 variants of math functions on x86_64
126   if (TT.getArch() == Triple::ArchType::x86_64 && TT.isGNUEnvironment()) {
127     setLibcallName(RTLIB::REM_F128, "fmodf128");
128     setLibcallName(RTLIB::FMA_F128, "fmaf128");
129     setLibcallName(RTLIB::SQRT_F128, "sqrtf128");
130     setLibcallName(RTLIB::CBRT_F128, "cbrtf128");
131     setLibcallName(RTLIB::LOG_F128, "logf128");
132     setLibcallName(RTLIB::LOG_FINITE_F128, "__logf128_finite");
133     setLibcallName(RTLIB::LOG2_F128, "log2f128");
134     setLibcallName(RTLIB::LOG2_FINITE_F128, "__log2f128_finite");
135     setLibcallName(RTLIB::LOG10_F128, "log10f128");
136     setLibcallName(RTLIB::LOG10_FINITE_F128, "__log10f128_finite");
137     setLibcallName(RTLIB::EXP_F128, "expf128");
138     setLibcallName(RTLIB::EXP_FINITE_F128, "__expf128_finite");
139     setLibcallName(RTLIB::EXP2_F128, "exp2f128");
140     setLibcallName(RTLIB::EXP2_FINITE_F128, "__exp2f128_finite");
141     setLibcallName(RTLIB::EXP10_F128, "exp10f128");
142     setLibcallName(RTLIB::SIN_F128, "sinf128");
143     setLibcallName(RTLIB::COS_F128, "cosf128");
144     setLibcallName(RTLIB::TAN_F128, "tanf128");
145     setLibcallName(RTLIB::SINCOS_F128, "sincosf128");
146     setLibcallName(RTLIB::POW_F128, "powf128");
147     setLibcallName(RTLIB::POW_FINITE_F128, "__powf128_finite");
148     setLibcallName(RTLIB::CEIL_F128, "ceilf128");
149     setLibcallName(RTLIB::TRUNC_F128, "truncf128");
150     setLibcallName(RTLIB::RINT_F128, "rintf128");
151     setLibcallName(RTLIB::NEARBYINT_F128, "nearbyintf128");
152     setLibcallName(RTLIB::ROUND_F128, "roundf128");
153     setLibcallName(RTLIB::ROUNDEVEN_F128, "roundevenf128");
154     setLibcallName(RTLIB::FLOOR_F128, "floorf128");
155     setLibcallName(RTLIB::COPYSIGN_F128, "copysignf128");
156     setLibcallName(RTLIB::FMIN_F128, "fminf128");
157     setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
158     setLibcallName(RTLIB::LROUND_F128, "lroundf128");
159     setLibcallName(RTLIB::LLROUND_F128, "llroundf128");
160     setLibcallName(RTLIB::LRINT_F128, "lrintf128");
161     setLibcallName(RTLIB::LLRINT_F128, "llrintf128");
162     setLibcallName(RTLIB::LDEXP_F128, "ldexpf128");
163     setLibcallName(RTLIB::FREXP_F128, "frexpf128");
164   }
165 
166   // For IEEE quad-precision libcall names, PPC uses "kf" instead of "tf".
167   if (TT.isPPC()) {
168     setLibcallName(RTLIB::ADD_F128, "__addkf3");
169     setLibcallName(RTLIB::SUB_F128, "__subkf3");
170     setLibcallName(RTLIB::MUL_F128, "__mulkf3");
171     setLibcallName(RTLIB::DIV_F128, "__divkf3");
172     setLibcallName(RTLIB::POWI_F128, "__powikf2");
173     setLibcallName(RTLIB::FPEXT_F32_F128, "__extendsfkf2");
174     setLibcallName(RTLIB::FPEXT_F64_F128, "__extenddfkf2");
175     setLibcallName(RTLIB::FPROUND_F128_F32, "__trunckfsf2");
176     setLibcallName(RTLIB::FPROUND_F128_F64, "__trunckfdf2");
177     setLibcallName(RTLIB::FPTOSINT_F128_I32, "__fixkfsi");
178     setLibcallName(RTLIB::FPTOSINT_F128_I64, "__fixkfdi");
179     setLibcallName(RTLIB::FPTOSINT_F128_I128, "__fixkfti");
180     setLibcallName(RTLIB::FPTOUINT_F128_I32, "__fixunskfsi");
181     setLibcallName(RTLIB::FPTOUINT_F128_I64, "__fixunskfdi");
182     setLibcallName(RTLIB::FPTOUINT_F128_I128, "__fixunskfti");
183     setLibcallName(RTLIB::SINTTOFP_I32_F128, "__floatsikf");
184     setLibcallName(RTLIB::SINTTOFP_I64_F128, "__floatdikf");
185     setLibcallName(RTLIB::SINTTOFP_I128_F128, "__floattikf");
186     setLibcallName(RTLIB::UINTTOFP_I32_F128, "__floatunsikf");
187     setLibcallName(RTLIB::UINTTOFP_I64_F128, "__floatundikf");
188     setLibcallName(RTLIB::UINTTOFP_I128_F128, "__floatuntikf");
189     setLibcallName(RTLIB::OEQ_F128, "__eqkf2");
190     setLibcallName(RTLIB::UNE_F128, "__nekf2");
191     setLibcallName(RTLIB::OGE_F128, "__gekf2");
192     setLibcallName(RTLIB::OLT_F128, "__ltkf2");
193     setLibcallName(RTLIB::OLE_F128, "__lekf2");
194     setLibcallName(RTLIB::OGT_F128, "__gtkf2");
195     setLibcallName(RTLIB::UO_F128, "__unordkf2");
196   }
197 
198   // A few names are different on particular architectures or environments.
199   if (TT.isOSDarwin()) {
200     // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
201     // of the gnueabi-style __gnu_*_ieee.
202     // FIXME: What about other targets?
203     setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
204     setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
205 
206     // Some darwins have an optimized __bzero/bzero function.
207     switch (TT.getArch()) {
208     case Triple::x86:
209     case Triple::x86_64:
210       if (TT.isMacOSX() && !TT.isMacOSXVersionLT(10, 6))
211         setLibcallName(RTLIB::BZERO, "__bzero");
212       break;
213     case Triple::aarch64:
214     case Triple::aarch64_32:
215       setLibcallName(RTLIB::BZERO, "bzero");
216       break;
217     default:
218       break;
219     }
220 
221     if (darwinHasSinCos(TT)) {
222       setLibcallName(RTLIB::SINCOS_STRET_F32, "__sincosf_stret");
223       setLibcallName(RTLIB::SINCOS_STRET_F64, "__sincos_stret");
224       if (TT.isWatchABI()) {
225         setLibcallCallingConv(RTLIB::SINCOS_STRET_F32,
226                               CallingConv::ARM_AAPCS_VFP);
227         setLibcallCallingConv(RTLIB::SINCOS_STRET_F64,
228                               CallingConv::ARM_AAPCS_VFP);
229       }
230     }
231 
232     switch (TT.getOS()) {
233     case Triple::MacOSX:
234       if (TT.isMacOSXVersionLT(10, 9)) {
235         setLibcallName(RTLIB::EXP10_F32, nullptr);
236         setLibcallName(RTLIB::EXP10_F64, nullptr);
237       } else {
238         setLibcallName(RTLIB::EXP10_F32, "__exp10f");
239         setLibcallName(RTLIB::EXP10_F64, "__exp10");
240       }
241       break;
242     case Triple::IOS:
243     case Triple::TvOS:
244     case Triple::WatchOS:
245     case Triple::XROS:
246       if (!TT.isWatchOS() &&
247           (TT.isOSVersionLT(7, 0) || (TT.isOSVersionLT(9, 0) && TT.isX86()))) {
248         setLibcallName(RTLIB::EXP10_F32, nullptr);
249         setLibcallName(RTLIB::EXP10_F64, nullptr);
250       } else {
251         setLibcallName(RTLIB::EXP10_F32, "__exp10f");
252         setLibcallName(RTLIB::EXP10_F64, "__exp10");
253       }
254 
255       break;
256     default:
257       break;
258     }
259   } else {
260     setLibcallName(RTLIB::FPEXT_F16_F32, "__gnu_h2f_ieee");
261     setLibcallName(RTLIB::FPROUND_F32_F16, "__gnu_f2h_ieee");
262   }
263 
264   if (TT.isGNUEnvironment() || TT.isOSFuchsia() ||
265       (TT.isAndroid() && !TT.isAndroidVersionLT(9))) {
266     setLibcallName(RTLIB::SINCOS_F32, "sincosf");
267     setLibcallName(RTLIB::SINCOS_F64, "sincos");
268     setLibcallName(RTLIB::SINCOS_F80, "sincosl");
269     setLibcallName(RTLIB::SINCOS_F128, "sincosl");
270     setLibcallName(RTLIB::SINCOS_PPCF128, "sincosl");
271   }
272 
273   if (TT.isPS()) {
274     setLibcallName(RTLIB::SINCOS_F32, "sincosf");
275     setLibcallName(RTLIB::SINCOS_F64, "sincos");
276   }
277 
278   if (TT.isOSOpenBSD()) {
279     setLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL, nullptr);
280   }
281 
282   if (TT.isOSWindows() && !TT.isOSCygMing()) {
283     setLibcallName(RTLIB::LDEXP_F32, nullptr);
284     setLibcallName(RTLIB::LDEXP_F80, nullptr);
285     setLibcallName(RTLIB::LDEXP_F128, nullptr);
286     setLibcallName(RTLIB::LDEXP_PPCF128, nullptr);
287 
288     setLibcallName(RTLIB::FREXP_F32, nullptr);
289     setLibcallName(RTLIB::FREXP_F80, nullptr);
290     setLibcallName(RTLIB::FREXP_F128, nullptr);
291     setLibcallName(RTLIB::FREXP_PPCF128, nullptr);
292   }
293 }
294 
295 /// GetFPLibCall - Helper to return the right libcall for the given floating
296 /// point type, or UNKNOWN_LIBCALL if there is none.
297 RTLIB::Libcall RTLIB::getFPLibCall(EVT VT,
298                                    RTLIB::Libcall Call_F32,
299                                    RTLIB::Libcall Call_F64,
300                                    RTLIB::Libcall Call_F80,
301                                    RTLIB::Libcall Call_F128,
302                                    RTLIB::Libcall Call_PPCF128) {
303   return
304     VT == MVT::f32 ? Call_F32 :
305     VT == MVT::f64 ? Call_F64 :
306     VT == MVT::f80 ? Call_F80 :
307     VT == MVT::f128 ? Call_F128 :
308     VT == MVT::ppcf128 ? Call_PPCF128 :
309     RTLIB::UNKNOWN_LIBCALL;
310 }
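
// Illustrative use (not from this file): a target lowering an f64 FSIN to a
// libcall can select the right enum with
//   RTLIB::Libcall LC = RTLIB::getFPLibCall(VT, RTLIB::SIN_F32, RTLIB::SIN_F64,
//                                           RTLIB::SIN_F80, RTLIB::SIN_F128,
//                                           RTLIB::SIN_PPCF128);
// which yields RTLIB::SIN_F64 when VT is MVT::f64.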
311 
312 /// getFPEXT - Return the FPEXT_*_* value for the given types, or
313 /// UNKNOWN_LIBCALL if there is none.
314 RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
315   if (OpVT == MVT::f16) {
316     if (RetVT == MVT::f32)
317       return FPEXT_F16_F32;
318     if (RetVT == MVT::f64)
319       return FPEXT_F16_F64;
320     if (RetVT == MVT::f80)
321       return FPEXT_F16_F80;
322     if (RetVT == MVT::f128)
323       return FPEXT_F16_F128;
324   } else if (OpVT == MVT::f32) {
325     if (RetVT == MVT::f64)
326       return FPEXT_F32_F64;
327     if (RetVT == MVT::f128)
328       return FPEXT_F32_F128;
329     if (RetVT == MVT::ppcf128)
330       return FPEXT_F32_PPCF128;
331   } else if (OpVT == MVT::f64) {
332     if (RetVT == MVT::f128)
333       return FPEXT_F64_F128;
334     else if (RetVT == MVT::ppcf128)
335       return FPEXT_F64_PPCF128;
336   } else if (OpVT == MVT::f80) {
337     if (RetVT == MVT::f128)
338       return FPEXT_F80_F128;
339   } else if (OpVT == MVT::bf16) {
340     if (RetVT == MVT::f32)
341       return FPEXT_BF16_F32;
342   }
343 
344   return UNKNOWN_LIBCALL;
345 }
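
// Example: getFPEXT(MVT::f32, MVT::f64) returns FPEXT_F32_F64, which by
// default names the compiler-rt routine __extendsfdf2; pairs not listed above
// (e.g. f80 -> ppcf128) fall through to UNKNOWN_LIBCALL.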
346 
347 /// getFPROUND - Return the FPROUND_*_* value for the given types, or
348 /// UNKNOWN_LIBCALL if there is none.
349 RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
350   if (RetVT == MVT::f16) {
351     if (OpVT == MVT::f32)
352       return FPROUND_F32_F16;
353     if (OpVT == MVT::f64)
354       return FPROUND_F64_F16;
355     if (OpVT == MVT::f80)
356       return FPROUND_F80_F16;
357     if (OpVT == MVT::f128)
358       return FPROUND_F128_F16;
359     if (OpVT == MVT::ppcf128)
360       return FPROUND_PPCF128_F16;
361   } else if (RetVT == MVT::bf16) {
362     if (OpVT == MVT::f32)
363       return FPROUND_F32_BF16;
364     if (OpVT == MVT::f64)
365       return FPROUND_F64_BF16;
366   } else if (RetVT == MVT::f32) {
367     if (OpVT == MVT::f64)
368       return FPROUND_F64_F32;
369     if (OpVT == MVT::f80)
370       return FPROUND_F80_F32;
371     if (OpVT == MVT::f128)
372       return FPROUND_F128_F32;
373     if (OpVT == MVT::ppcf128)
374       return FPROUND_PPCF128_F32;
375   } else if (RetVT == MVT::f64) {
376     if (OpVT == MVT::f80)
377       return FPROUND_F80_F64;
378     if (OpVT == MVT::f128)
379       return FPROUND_F128_F64;
380     if (OpVT == MVT::ppcf128)
381       return FPROUND_PPCF128_F64;
382   } else if (RetVT == MVT::f80) {
383     if (OpVT == MVT::f128)
384       return FPROUND_F128_F80;
385   }
386 
387   return UNKNOWN_LIBCALL;
388 }
389 
390 /// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
391 /// UNKNOWN_LIBCALL if there is none.
392 RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
393   if (OpVT == MVT::f16) {
394     if (RetVT == MVT::i32)
395       return FPTOSINT_F16_I32;
396     if (RetVT == MVT::i64)
397       return FPTOSINT_F16_I64;
398     if (RetVT == MVT::i128)
399       return FPTOSINT_F16_I128;
400   } else if (OpVT == MVT::f32) {
401     if (RetVT == MVT::i32)
402       return FPTOSINT_F32_I32;
403     if (RetVT == MVT::i64)
404       return FPTOSINT_F32_I64;
405     if (RetVT == MVT::i128)
406       return FPTOSINT_F32_I128;
407   } else if (OpVT == MVT::f64) {
408     if (RetVT == MVT::i32)
409       return FPTOSINT_F64_I32;
410     if (RetVT == MVT::i64)
411       return FPTOSINT_F64_I64;
412     if (RetVT == MVT::i128)
413       return FPTOSINT_F64_I128;
414   } else if (OpVT == MVT::f80) {
415     if (RetVT == MVT::i32)
416       return FPTOSINT_F80_I32;
417     if (RetVT == MVT::i64)
418       return FPTOSINT_F80_I64;
419     if (RetVT == MVT::i128)
420       return FPTOSINT_F80_I128;
421   } else if (OpVT == MVT::f128) {
422     if (RetVT == MVT::i32)
423       return FPTOSINT_F128_I32;
424     if (RetVT == MVT::i64)
425       return FPTOSINT_F128_I64;
426     if (RetVT == MVT::i128)
427       return FPTOSINT_F128_I128;
428   } else if (OpVT == MVT::ppcf128) {
429     if (RetVT == MVT::i32)
430       return FPTOSINT_PPCF128_I32;
431     if (RetVT == MVT::i64)
432       return FPTOSINT_PPCF128_I64;
433     if (RetVT == MVT::i128)
434       return FPTOSINT_PPCF128_I128;
435   }
436   return UNKNOWN_LIBCALL;
437 }
438 
439 /// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
440 /// UNKNOWN_LIBCALL if there is none.
441 RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
442   if (OpVT == MVT::f16) {
443     if (RetVT == MVT::i32)
444       return FPTOUINT_F16_I32;
445     if (RetVT == MVT::i64)
446       return FPTOUINT_F16_I64;
447     if (RetVT == MVT::i128)
448       return FPTOUINT_F16_I128;
449   } else if (OpVT == MVT::f32) {
450     if (RetVT == MVT::i32)
451       return FPTOUINT_F32_I32;
452     if (RetVT == MVT::i64)
453       return FPTOUINT_F32_I64;
454     if (RetVT == MVT::i128)
455       return FPTOUINT_F32_I128;
456   } else if (OpVT == MVT::f64) {
457     if (RetVT == MVT::i32)
458       return FPTOUINT_F64_I32;
459     if (RetVT == MVT::i64)
460       return FPTOUINT_F64_I64;
461     if (RetVT == MVT::i128)
462       return FPTOUINT_F64_I128;
463   } else if (OpVT == MVT::f80) {
464     if (RetVT == MVT::i32)
465       return FPTOUINT_F80_I32;
466     if (RetVT == MVT::i64)
467       return FPTOUINT_F80_I64;
468     if (RetVT == MVT::i128)
469       return FPTOUINT_F80_I128;
470   } else if (OpVT == MVT::f128) {
471     if (RetVT == MVT::i32)
472       return FPTOUINT_F128_I32;
473     if (RetVT == MVT::i64)
474       return FPTOUINT_F128_I64;
475     if (RetVT == MVT::i128)
476       return FPTOUINT_F128_I128;
477   } else if (OpVT == MVT::ppcf128) {
478     if (RetVT == MVT::i32)
479       return FPTOUINT_PPCF128_I32;
480     if (RetVT == MVT::i64)
481       return FPTOUINT_PPCF128_I64;
482     if (RetVT == MVT::i128)
483       return FPTOUINT_PPCF128_I128;
484   }
485   return UNKNOWN_LIBCALL;
486 }
487 
488 /// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
489 /// UNKNOWN_LIBCALL if there is none.
490 RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
491   if (OpVT == MVT::i32) {
492     if (RetVT == MVT::f16)
493       return SINTTOFP_I32_F16;
494     if (RetVT == MVT::f32)
495       return SINTTOFP_I32_F32;
496     if (RetVT == MVT::f64)
497       return SINTTOFP_I32_F64;
498     if (RetVT == MVT::f80)
499       return SINTTOFP_I32_F80;
500     if (RetVT == MVT::f128)
501       return SINTTOFP_I32_F128;
502     if (RetVT == MVT::ppcf128)
503       return SINTTOFP_I32_PPCF128;
504   } else if (OpVT == MVT::i64) {
505     if (RetVT == MVT::f16)
506       return SINTTOFP_I64_F16;
507     if (RetVT == MVT::f32)
508       return SINTTOFP_I64_F32;
509     if (RetVT == MVT::f64)
510       return SINTTOFP_I64_F64;
511     if (RetVT == MVT::f80)
512       return SINTTOFP_I64_F80;
513     if (RetVT == MVT::f128)
514       return SINTTOFP_I64_F128;
515     if (RetVT == MVT::ppcf128)
516       return SINTTOFP_I64_PPCF128;
517   } else if (OpVT == MVT::i128) {
518     if (RetVT == MVT::f16)
519       return SINTTOFP_I128_F16;
520     if (RetVT == MVT::f32)
521       return SINTTOFP_I128_F32;
522     if (RetVT == MVT::f64)
523       return SINTTOFP_I128_F64;
524     if (RetVT == MVT::f80)
525       return SINTTOFP_I128_F80;
526     if (RetVT == MVT::f128)
527       return SINTTOFP_I128_F128;
528     if (RetVT == MVT::ppcf128)
529       return SINTTOFP_I128_PPCF128;
530   }
531   return UNKNOWN_LIBCALL;
532 }
533 
534 /// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
535 /// UNKNOWN_LIBCALL if there is none.
536 RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
537   if (OpVT == MVT::i32) {
538     if (RetVT == MVT::f16)
539       return UINTTOFP_I32_F16;
540     if (RetVT == MVT::f32)
541       return UINTTOFP_I32_F32;
542     if (RetVT == MVT::f64)
543       return UINTTOFP_I32_F64;
544     if (RetVT == MVT::f80)
545       return UINTTOFP_I32_F80;
546     if (RetVT == MVT::f128)
547       return UINTTOFP_I32_F128;
548     if (RetVT == MVT::ppcf128)
549       return UINTTOFP_I32_PPCF128;
550   } else if (OpVT == MVT::i64) {
551     if (RetVT == MVT::f16)
552       return UINTTOFP_I64_F16;
553     if (RetVT == MVT::f32)
554       return UINTTOFP_I64_F32;
555     if (RetVT == MVT::f64)
556       return UINTTOFP_I64_F64;
557     if (RetVT == MVT::f80)
558       return UINTTOFP_I64_F80;
559     if (RetVT == MVT::f128)
560       return UINTTOFP_I64_F128;
561     if (RetVT == MVT::ppcf128)
562       return UINTTOFP_I64_PPCF128;
563   } else if (OpVT == MVT::i128) {
564     if (RetVT == MVT::f16)
565       return UINTTOFP_I128_F16;
566     if (RetVT == MVT::f32)
567       return UINTTOFP_I128_F32;
568     if (RetVT == MVT::f64)
569       return UINTTOFP_I128_F64;
570     if (RetVT == MVT::f80)
571       return UINTTOFP_I128_F80;
572     if (RetVT == MVT::f128)
573       return UINTTOFP_I128_F128;
574     if (RetVT == MVT::ppcf128)
575       return UINTTOFP_I128_PPCF128;
576   }
577   return UNKNOWN_LIBCALL;
578 }
579 
580 RTLIB::Libcall RTLIB::getPOWI(EVT RetVT) {
581   return getFPLibCall(RetVT, POWI_F32, POWI_F64, POWI_F80, POWI_F128,
582                       POWI_PPCF128);
583 }
584 
585 RTLIB::Libcall RTLIB::getLDEXP(EVT RetVT) {
586   return getFPLibCall(RetVT, LDEXP_F32, LDEXP_F64, LDEXP_F80, LDEXP_F128,
587                       LDEXP_PPCF128);
588 }
589 
590 RTLIB::Libcall RTLIB::getFREXP(EVT RetVT) {
591   return getFPLibCall(RetVT, FREXP_F32, FREXP_F64, FREXP_F80, FREXP_F128,
592                       FREXP_PPCF128);
593 }
594 
595 RTLIB::Libcall RTLIB::getOutlineAtomicHelper(const Libcall (&LC)[5][4],
596                                              AtomicOrdering Order,
597                                              uint64_t MemSize) {
598   unsigned ModeN, ModelN;
599   switch (MemSize) {
600   case 1:
601     ModeN = 0;
602     break;
603   case 2:
604     ModeN = 1;
605     break;
606   case 4:
607     ModeN = 2;
608     break;
609   case 8:
610     ModeN = 3;
611     break;
612   case 16:
613     ModeN = 4;
614     break;
615   default:
616     return RTLIB::UNKNOWN_LIBCALL;
617   }
618 
619   switch (Order) {
620   case AtomicOrdering::Monotonic:
621     ModelN = 0;
622     break;
623   case AtomicOrdering::Acquire:
624     ModelN = 1;
625     break;
626   case AtomicOrdering::Release:
627     ModelN = 2;
628     break;
629   case AtomicOrdering::AcquireRelease:
630   case AtomicOrdering::SequentiallyConsistent:
631     ModelN = 3;
632     break;
633   default:
634     return UNKNOWN_LIBCALL;
635   }
636 
637   return LC[ModeN][ModelN];
638 }
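
// Illustrative walk-through of the indexing above: with a table built from
// LCALL5(OUTLINE_ATOMIC_LDADD) (see getOUTLINE_ATOMIC below), a 4-byte acquire
// operation maps to ModeN == 2 and ModelN == 1, so LC[2][1] selects
// OUTLINE_ATOMIC_LDADD4_ACQ.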
639 
640 RTLIB::Libcall RTLIB::getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order,
641                                         MVT VT) {
642   if (!VT.isScalarInteger())
643     return UNKNOWN_LIBCALL;
644   uint64_t MemSize = VT.getScalarSizeInBits() / 8;
645 
646 #define LCALLS(A, B)                                                           \
647   { A##B##_RELAX, A##B##_ACQ, A##B##_REL, A##B##_ACQ_REL }
648 #define LCALL5(A)                                                              \
649   LCALLS(A, 1), LCALLS(A, 2), LCALLS(A, 4), LCALLS(A, 8), LCALLS(A, 16)
650   switch (Opc) {
651   case ISD::ATOMIC_CMP_SWAP: {
652     const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_CAS)};
653     return getOutlineAtomicHelper(LC, Order, MemSize);
654   }
655   case ISD::ATOMIC_SWAP: {
656     const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_SWP)};
657     return getOutlineAtomicHelper(LC, Order, MemSize);
658   }
659   case ISD::ATOMIC_LOAD_ADD: {
660     const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDADD)};
661     return getOutlineAtomicHelper(LC, Order, MemSize);
662   }
663   case ISD::ATOMIC_LOAD_OR: {
664     const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDSET)};
665     return getOutlineAtomicHelper(LC, Order, MemSize);
666   }
667   case ISD::ATOMIC_LOAD_CLR: {
668     const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDCLR)};
669     return getOutlineAtomicHelper(LC, Order, MemSize);
670   }
671   case ISD::ATOMIC_LOAD_XOR: {
672     const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDEOR)};
673     return getOutlineAtomicHelper(LC, Order, MemSize);
674   }
675   default:
676     return UNKNOWN_LIBCALL;
677   }
678 #undef LCALLS
679 #undef LCALL5
680 }
681 
682 RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
683 #define OP_TO_LIBCALL(Name, Enum)                                              \
684   case Name:                                                                   \
685     switch (VT.SimpleTy) {                                                     \
686     default:                                                                   \
687       return UNKNOWN_LIBCALL;                                                  \
688     case MVT::i8:                                                              \
689       return Enum##_1;                                                         \
690     case MVT::i16:                                                             \
691       return Enum##_2;                                                         \
692     case MVT::i32:                                                             \
693       return Enum##_4;                                                         \
694     case MVT::i64:                                                             \
695       return Enum##_8;                                                         \
696     case MVT::i128:                                                            \
697       return Enum##_16;                                                        \
698     }
699 
700   switch (Opc) {
701     OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
702     OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
703     OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
704     OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
705     OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
706     OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
707     OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
708     OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
709     OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
710     OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
711     OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
712     OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
713   }
714 
715 #undef OP_TO_LIBCALL
716 
717   return UNKNOWN_LIBCALL;
718 }
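
// Example: getSYNC(ISD::ATOMIC_LOAD_ADD, MVT::i32) yields
// SYNC_FETCH_AND_ADD_4, the enum for the __sync_fetch_and_add_4 runtime
// helper; unsupported value types fall through to UNKNOWN_LIBCALL.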
719 
720 RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
721   switch (ElementSize) {
722   case 1:
723     return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
724   case 2:
725     return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
726   case 4:
727     return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
728   case 8:
729     return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
730   case 16:
731     return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
732   default:
733     return UNKNOWN_LIBCALL;
734   }
735 }
736 
737 RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
738   switch (ElementSize) {
739   case 1:
740     return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
741   case 2:
742     return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
743   case 4:
744     return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
745   case 8:
746     return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
747   case 16:
748     return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
749   default:
750     return UNKNOWN_LIBCALL;
751   }
752 }
753 
754 RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
755   switch (ElementSize) {
756   case 1:
757     return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
758   case 2:
759     return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
760   case 4:
761     return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
762   case 8:
763     return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
764   case 16:
765     return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
766   default:
767     return UNKNOWN_LIBCALL;
768   }
769 }
770 
771 /// InitCmpLibcallCCs - Set default comparison libcall CC.
772 static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
773   std::fill(CCs, CCs + RTLIB::UNKNOWN_LIBCALL, ISD::SETCC_INVALID);
774   CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
775   CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
776   CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
777   CCs[RTLIB::OEQ_PPCF128] = ISD::SETEQ;
778   CCs[RTLIB::UNE_F32] = ISD::SETNE;
779   CCs[RTLIB::UNE_F64] = ISD::SETNE;
780   CCs[RTLIB::UNE_F128] = ISD::SETNE;
781   CCs[RTLIB::UNE_PPCF128] = ISD::SETNE;
782   CCs[RTLIB::OGE_F32] = ISD::SETGE;
783   CCs[RTLIB::OGE_F64] = ISD::SETGE;
784   CCs[RTLIB::OGE_F128] = ISD::SETGE;
785   CCs[RTLIB::OGE_PPCF128] = ISD::SETGE;
786   CCs[RTLIB::OLT_F32] = ISD::SETLT;
787   CCs[RTLIB::OLT_F64] = ISD::SETLT;
788   CCs[RTLIB::OLT_F128] = ISD::SETLT;
789   CCs[RTLIB::OLT_PPCF128] = ISD::SETLT;
790   CCs[RTLIB::OLE_F32] = ISD::SETLE;
791   CCs[RTLIB::OLE_F64] = ISD::SETLE;
792   CCs[RTLIB::OLE_F128] = ISD::SETLE;
793   CCs[RTLIB::OLE_PPCF128] = ISD::SETLE;
794   CCs[RTLIB::OGT_F32] = ISD::SETGT;
795   CCs[RTLIB::OGT_F64] = ISD::SETGT;
796   CCs[RTLIB::OGT_F128] = ISD::SETGT;
797   CCs[RTLIB::OGT_PPCF128] = ISD::SETGT;
798   CCs[RTLIB::UO_F32] = ISD::SETNE;
799   CCs[RTLIB::UO_F64] = ISD::SETNE;
800   CCs[RTLIB::UO_F128] = ISD::SETNE;
801   CCs[RTLIB::UO_PPCF128] = ISD::SETNE;
802 }
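
// The condition codes registered above describe how a comparison libcall's
// integer result is interpreted: e.g. after calling the OEQ_F128 routine, the
// ordered-equal outcome is recovered by comparing the returned int against
// zero with SETEQ.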
803 
804 /// NOTE: The TargetMachine owns TLOF.
805 TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
806   initActions();
807 
808   // Perform these initializations only once.
809   MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
810       MaxLoadsPerMemcmp = 8;
811   MaxGluedStoresPerMemcpy = 0;
812   MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
813       MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
814   HasMultipleConditionRegisters = false;
815   HasExtractBitsInsn = false;
816   JumpIsExpensive = JumpIsExpensiveOverride;
817   PredictableSelectIsExpensive = false;
818   EnableExtLdPromotion = false;
819   StackPointerRegisterToSaveRestore = 0;
820   BooleanContents = UndefinedBooleanContent;
821   BooleanFloatContents = UndefinedBooleanContent;
822   BooleanVectorContents = UndefinedBooleanContent;
823   SchedPreferenceInfo = Sched::ILP;
824   GatherAllAliasesMaxDepth = 18;
825   IsStrictFPEnabled = DisableStrictNodeMutation;
826   MaxBytesForAlignment = 0;
827   MaxAtomicSizeInBitsSupported = 0;
828 
829   // Assume that even with libcalls, no target supports wider than 128-bit
830   // division.
831   MaxDivRemBitWidthSupported = 128;
832 
833   MaxLargeFPConvertBitWidthSupported = llvm::IntegerType::MAX_INT_BITS;
834 
835   MinCmpXchgSizeInBits = 0;
836   SupportsUnalignedAtomics = false;
837 
838   std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames), nullptr);
839 
840   InitLibcalls(TM.getTargetTriple());
841   InitCmpLibcallCCs(CmpLibcallCCs);
842 }
843 
844 void TargetLoweringBase::initActions() {
845   // All operations default to being supported.
846   memset(OpActions, 0, sizeof(OpActions));
847   memset(LoadExtActions, 0, sizeof(LoadExtActions));
848   memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
849   memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
850   memset(CondCodeActions, 0, sizeof(CondCodeActions));
851   std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
852   std::fill(std::begin(TargetDAGCombineArray),
853             std::end(TargetDAGCombineArray), 0);
854 
855   // Let extending atomic loads be unsupported by default.
856   for (MVT ValVT : MVT::all_valuetypes())
857     for (MVT MemVT : MVT::all_valuetypes())
858       setAtomicLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD}, ValVT, MemVT,
859                              Expand);
860 
861   // We're somewhat special casing MVT::i2 and MVT::i4. Ideally we want to
862   // remove this and targets should individually set these types if not legal.
863   for (ISD::NodeType NT : enum_seq(ISD::DELETED_NODE, ISD::BUILTIN_OP_END,
864                                    force_iteration_on_noniterable_enum)) {
865     for (MVT VT : {MVT::i2, MVT::i4})
866       OpActions[(unsigned)VT.SimpleTy][NT] = Expand;
867   }
868   for (MVT AVT : MVT::all_valuetypes()) {
869     for (MVT VT : {MVT::i2, MVT::i4, MVT::v128i2, MVT::v64i4}) {
870       setTruncStoreAction(AVT, VT, Expand);
871       setLoadExtAction(ISD::EXTLOAD, AVT, VT, Expand);
872       setLoadExtAction(ISD::ZEXTLOAD, AVT, VT, Expand);
873     }
874   }
875   for (unsigned IM = (unsigned)ISD::PRE_INC;
876        IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
877     for (MVT VT : {MVT::i2, MVT::i4}) {
878       setIndexedLoadAction(IM, VT, Expand);
879       setIndexedStoreAction(IM, VT, Expand);
880       setIndexedMaskedLoadAction(IM, VT, Expand);
881       setIndexedMaskedStoreAction(IM, VT, Expand);
882     }
883   }
884 
885   for (MVT VT : MVT::fp_valuetypes()) {
886     MVT IntVT = MVT::getIntegerVT(VT.getFixedSizeInBits());
887     if (IntVT.isValid()) {
888       setOperationAction(ISD::ATOMIC_SWAP, VT, Promote);
889       AddPromotedToType(ISD::ATOMIC_SWAP, VT, IntVT);
890     }
891   }
892 
893   // Set default actions for various operations.
894   for (MVT VT : MVT::all_valuetypes()) {
895     // Default all indexed load / store to expand.
896     for (unsigned IM = (unsigned)ISD::PRE_INC;
897          IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
898       setIndexedLoadAction(IM, VT, Expand);
899       setIndexedStoreAction(IM, VT, Expand);
900       setIndexedMaskedLoadAction(IM, VT, Expand);
901       setIndexedMaskedStoreAction(IM, VT, Expand);
902     }
903 
904     // Most backends expect to see the node which just returns the value loaded.
905     setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);
906 
907     // These operations default to expand.
908     setOperationAction({ISD::FGETSIGN,       ISD::CONCAT_VECTORS,
909                         ISD::FMINNUM,        ISD::FMAXNUM,
910                         ISD::FMINNUM_IEEE,   ISD::FMAXNUM_IEEE,
911                         ISD::FMINIMUM,       ISD::FMAXIMUM,
912                         ISD::FMINIMUMNUM,    ISD::FMAXIMUMNUM,
913                         ISD::FMAD,           ISD::SMIN,
914                         ISD::SMAX,           ISD::UMIN,
915                         ISD::UMAX,           ISD::ABS,
916                         ISD::FSHL,           ISD::FSHR,
917                         ISD::SADDSAT,        ISD::UADDSAT,
918                         ISD::SSUBSAT,        ISD::USUBSAT,
919                         ISD::SSHLSAT,        ISD::USHLSAT,
920                         ISD::SMULFIX,        ISD::SMULFIXSAT,
921                         ISD::UMULFIX,        ISD::UMULFIXSAT,
922                         ISD::SDIVFIX,        ISD::SDIVFIXSAT,
923                         ISD::UDIVFIX,        ISD::UDIVFIXSAT,
924                         ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT,
925                         ISD::IS_FPCLASS},
926                        VT, Expand);
927 
928     // Overflow operations default to expand
929     setOperationAction({ISD::SADDO, ISD::SSUBO, ISD::UADDO, ISD::USUBO,
930                         ISD::SMULO, ISD::UMULO},
931                        VT, Expand);
932 
933     // Carry-using overflow operations default to expand.
934     setOperationAction({ISD::UADDO_CARRY, ISD::USUBO_CARRY, ISD::SETCCCARRY,
935                         ISD::SADDO_CARRY, ISD::SSUBO_CARRY},
936                        VT, Expand);
937 
938     // ADDC/ADDE/SUBC/SUBE default to expand.
939     setOperationAction({ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}, VT,
940                        Expand);
941 
942     // [US]CMP default to expand
943     setOperationAction({ISD::UCMP, ISD::SCMP}, VT, Expand);
944 
945     // Halving adds
946     setOperationAction(
947         {ISD::AVGFLOORS, ISD::AVGFLOORU, ISD::AVGCEILS, ISD::AVGCEILU}, VT,
948         Expand);
949 
950     // Absolute difference
951     setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Expand);
952 
953     // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
954     setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
955                        Expand);
956 
957     setOperationAction({ISD::BITREVERSE, ISD::PARITY}, VT, Expand);
958 
959     // These library functions default to expand.
960     setOperationAction({ISD::FROUND, ISD::FPOWI, ISD::FLDEXP, ISD::FFREXP}, VT,
961                        Expand);
962 
963     // These operations default to expand for vector types.
964     if (VT.isVector())
965       setOperationAction(
966           {ISD::FCOPYSIGN, ISD::SIGN_EXTEND_INREG, ISD::ANY_EXTEND_VECTOR_INREG,
967            ISD::SIGN_EXTEND_VECTOR_INREG, ISD::ZERO_EXTEND_VECTOR_INREG,
968            ISD::SPLAT_VECTOR, ISD::LRINT, ISD::LLRINT, ISD::FTAN},
969           VT, Expand);
970 
971       // Constrained floating-point operations default to expand.
972 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
973     setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
974 #include "llvm/IR/ConstrainedOps.def"
975 
976     // For most targets @llvm.get.dynamic.area.offset just returns 0.
977     setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);
978 
979     // Vector reduction default to expand.
980     setOperationAction(
981         {ISD::VECREDUCE_FADD, ISD::VECREDUCE_FMUL, ISD::VECREDUCE_ADD,
982          ISD::VECREDUCE_MUL, ISD::VECREDUCE_AND, ISD::VECREDUCE_OR,
983          ISD::VECREDUCE_XOR, ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
984          ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN, ISD::VECREDUCE_FMAX,
985          ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAXIMUM, ISD::VECREDUCE_FMINIMUM,
986          ISD::VECREDUCE_SEQ_FADD, ISD::VECREDUCE_SEQ_FMUL},
987         VT, Expand);
988 
989     // Named vector shuffles default to expand.
990     setOperationAction(ISD::VECTOR_SPLICE, VT, Expand);
991 
992     // VP operations default to expand.
993 #define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...)                                   \
994     setOperationAction(ISD::SDOPC, VT, Expand);
995 #include "llvm/IR/VPIntrinsics.def"
996 
997     // FP environment operations default to expand.
998     setOperationAction(ISD::GET_FPENV, VT, Expand);
999     setOperationAction(ISD::SET_FPENV, VT, Expand);
1000     setOperationAction(ISD::RESET_FPENV, VT, Expand);
1001   }
1002 
1003   // Most targets ignore the @llvm.prefetch intrinsic.
1004   setOperationAction(ISD::PREFETCH, MVT::Other, Expand);
1005 
1006   // Most targets also ignore the @llvm.readcyclecounter intrinsic.
1007   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);
1008 
1009   // Most targets also ignore the @llvm.readsteadycounter intrinsic.
1010   setOperationAction(ISD::READSTEADYCOUNTER, MVT::i64, Expand);
1011 
1012   // ConstantFP nodes default to expand.  Targets can either change this to
1013   // Legal, in which case all fp constants are legal, or use isFPImmLegal()
1014   // to optimize expansions for certain constants.
1015   setOperationAction(ISD::ConstantFP,
1016                      {MVT::bf16, MVT::f16, MVT::f32, MVT::f64, MVT::f80, MVT::f128},
1017                      Expand);
1018 
1019   // These library functions default to expand.
1020   setOperationAction({ISD::FCBRT, ISD::FLOG, ISD::FLOG2, ISD::FLOG10, ISD::FEXP,
1021                       ISD::FEXP2, ISD::FEXP10, ISD::FFLOOR, ISD::FNEARBYINT,
1022                       ISD::FCEIL, ISD::FRINT, ISD::FTRUNC, ISD::LROUND,
1023                       ISD::LLROUND, ISD::LRINT, ISD::LLRINT, ISD::FROUNDEVEN,
1024                       ISD::FTAN},
1025                      {MVT::f32, MVT::f64, MVT::f128}, Expand);
1026 
1027   setOperationAction(ISD::FTAN, MVT::f16, Promote);
1028   // Default ISD::TRAP to expand (which turns it into abort).
1029   setOperationAction(ISD::TRAP, MVT::Other, Expand);
1030 
1031   // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
1032   // here is to inform the DAG Legalizer to replace DEBUGTRAP with TRAP.
1033   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
1034 
1035   setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);
1036 
1037   setOperationAction(ISD::GET_FPENV_MEM, MVT::Other, Expand);
1038   setOperationAction(ISD::SET_FPENV_MEM, MVT::Other, Expand);
1039 
1040   for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
1041     setOperationAction(ISD::GET_FPMODE, VT, Expand);
1042     setOperationAction(ISD::SET_FPMODE, VT, Expand);
1043   }
1044   setOperationAction(ISD::RESET_FPMODE, MVT::Other, Expand);
1045 
1046   // This one by default will call __clear_cache unless the target
1047   // wants something different.
1048   setOperationAction(ISD::CLEAR_CACHE, MVT::Other, LibCall);
1049 }
1050 
1051 MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
1052                                                EVT) const {
1053   return MVT::getIntegerVT(DL.getPointerSizeInBits(0));
1054 }
1055 
1056 EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
1057                                          bool LegalTypes) const {
1058   assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
1059   if (LHSTy.isVector())
1060     return LHSTy;
1061   MVT ShiftVT =
1062       LegalTypes ? getScalarShiftAmountTy(DL, LHSTy) : getPointerTy(DL);
1063   // If any possible shift value won't fit in the preferred type, just use
1064   // something safe. Assume it will be legalized when the shift is expanded.
1065   if (ShiftVT.getSizeInBits() < Log2_32_Ceil(LHSTy.getSizeInBits()))
1066     ShiftVT = MVT::i32;
1067   assert(ShiftVT.getSizeInBits() >= Log2_32_Ceil(LHSTy.getSizeInBits()) &&
1068          "ShiftVT is still too small!");
1069   return ShiftVT;
1070 }
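
// Worked example: shifting an i128 value on a target whose preferred scalar
// shift type is i8 keeps i8, since Log2_32_Ceil(128) == 7 fits in 8 bits; for
// an extended i512 type, 9 bits are needed, so the fallback above picks
// MVT::i32.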
1071 
1072 bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
1073   assert(isTypeLegal(VT));
1074   switch (Op) {
1075   default:
1076     return false;
1077   case ISD::SDIV:
1078   case ISD::UDIV:
1079   case ISD::SREM:
1080   case ISD::UREM:
1081     return true;
1082   }
1083 }
1084 
1085 bool TargetLoweringBase::isFreeAddrSpaceCast(unsigned SrcAS,
1086                                              unsigned DestAS) const {
1087   return TM.isNoopAddrSpaceCast(SrcAS, DestAS);
1088 }
1089 
1090 unsigned TargetLoweringBase::getBitWidthForCttzElements(
1091     Type *RetTy, ElementCount EC, bool ZeroIsPoison,
1092     const ConstantRange *VScaleRange) const {
1093   // Find the smallest "sensible" element type to use for the expansion.
1094   ConstantRange CR(APInt(64, EC.getKnownMinValue()));
1095   if (EC.isScalable())
1096     CR = CR.umul_sat(*VScaleRange);
1097 
1098   if (ZeroIsPoison)
1099     CR = CR.subtract(APInt(64, 1));
1100 
1101   unsigned EltWidth = RetTy->getScalarSizeInBits();
1102   EltWidth = std::min(EltWidth, (unsigned)CR.getActiveBits());
1103   EltWidth = std::max(llvm::bit_ceil(EltWidth), (unsigned)8);
1104 
1105   return EltWidth;
1106 }
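
// Worked example (assumed values): for a <vscale x 4 x i32> result with
// vscale in [1, 16] and ZeroIsPoison set, the element count saturates to at
// most 4 * 16 = 64; subtracting 1 leaves a maximum of 63, which has 6 active
// bits, so the clamping above yields an 8-bit element width.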
1107 
1108 void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
1109   // If the command-line option was specified, ignore this request.
1110   if (!JumpIsExpensiveOverride.getNumOccurrences())
1111     JumpIsExpensive = isExpensive;
1112 }
1113 
1114 TargetLoweringBase::LegalizeKind
1115 TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
1116   // If this is a simple type, use the ComputeRegisterProp mechanism.
1117   if (VT.isSimple()) {
1118     MVT SVT = VT.getSimpleVT();
1119     assert((unsigned)SVT.SimpleTy < std::size(TransformToType));
1120     MVT NVT = TransformToType[SVT.SimpleTy];
1121     LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);
1122 
1123     assert((LA == TypeLegal || LA == TypeSoftenFloat ||
1124             LA == TypeSoftPromoteHalf ||
1125             (NVT.isVector() ||
1126              ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)) &&
1127            "Promote may not follow Expand or Promote");
1128 
1129     if (LA == TypeSplitVector)
1130       return LegalizeKind(LA, EVT(SVT).getHalfNumVectorElementsVT(Context));
1131     if (LA == TypeScalarizeVector)
1132       return LegalizeKind(LA, SVT.getVectorElementType());
1133     return LegalizeKind(LA, NVT);
1134   }
1135 
1136   // Handle Extended Scalar Types.
1137   if (!VT.isVector()) {
1138     assert(VT.isInteger() && "Float types must be simple");
1139     unsigned BitSize = VT.getSizeInBits();
1140     // First promote to a power-of-two size, then expand if necessary.
1141     if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
1142       EVT NVT = VT.getRoundIntegerType(Context);
1143       assert(NVT != VT && "Unable to round integer VT");
1144       LegalizeKind NextStep = getTypeConversion(Context, NVT);
1145       // Avoid multi-step promotion.
1146       if (NextStep.first == TypePromoteInteger)
1147         return NextStep;
1148       // Return rounded integer type.
1149       return LegalizeKind(TypePromoteInteger, NVT);
1150     }
1151 
1152     return LegalizeKind(TypeExpandInteger,
1153                         EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
1154   }
1155 
1156   // Handle vector types.
1157   ElementCount NumElts = VT.getVectorElementCount();
1158   EVT EltVT = VT.getVectorElementType();
1159 
1160   // Vectors with only one element are always scalarized.
1161   if (NumElts.isScalar())
1162     return LegalizeKind(TypeScalarizeVector, EltVT);
1163 
1164   // Try to widen vector elements until the element type is a power of two and
1165   // promote it to a legal type later on, for example:
1166   // <3 x i8> -> <4 x i8> -> <4 x i32>
1167   if (EltVT.isInteger()) {
1168     // Vectors with a number of elements that is not a power of two are always
1169     // widened, for example <3 x i8> -> <4 x i8>.
1170     if (!VT.isPow2VectorType()) {
1171       NumElts = NumElts.coefficientNextPowerOf2();
1172       EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
1173       return LegalizeKind(TypeWidenVector, NVT);
1174     }
1175 
1176     // Examine the element type.
1177     LegalizeKind LK = getTypeConversion(Context, EltVT);
1178 
1179     // If type is to be expanded, split the vector.
1180     //  <4 x i140> -> <2 x i140>
1181     if (LK.first == TypeExpandInteger) {
1182       if (VT.getVectorElementCount().isScalable())
1183         return LegalizeKind(TypeScalarizeScalableVector, EltVT);
1184       return LegalizeKind(TypeSplitVector,
1185                           VT.getHalfNumVectorElementsVT(Context));
1186     }
1187 
1188     // Promote the integer element types until a legal vector type is found
1189     // or until the element integer type is too big. If a legal type was not
1190     // found, fallback to the usual mechanism of widening/splitting the
1191     // vector.
1192     EVT OldEltVT = EltVT;
1193     while (true) {
1194       // Increase the bitwidth of the element to the next pow-of-two
1195       // (which is greater than 8 bits).
1196       EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
1197                   .getRoundIntegerType(Context);
1198 
1199       // Stop trying when getting a non-simple element type.
1200       // Note that vector elements may be greater than legal vector element
1201       // types. Example: X86 XMM registers hold 64-bit elements on 32-bit
1202       // systems.
1203       if (!EltVT.isSimple())
1204         break;
1205 
1206       // Build a new vector type and check if it is legal.
1207       MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
1208       // Found a legal promoted vector type.
1209       if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
1210         return LegalizeKind(TypePromoteInteger,
1211                             EVT::getVectorVT(Context, EltVT, NumElts));
1212     }
1213 
1214     // Reset the type to the unexpanded type if we did not find a legal vector
1215     // type with a promoted vector element type.
1216     EltVT = OldEltVT;
1217   }
1218 
1219   // Try to widen the vector until a legal type is found.
1220   // If there is no wider legal type, split the vector.
1221   while (true) {
1222     // Round up to the next power of 2.
1223     NumElts = NumElts.coefficientNextPowerOf2();
1224 
1225     // If there is no simple vector type with this many elements then there
1226     // cannot be a larger legal vector type.  Note that this assumes that
1227     // there are no skipped intermediate vector types in the simple types.
1228     if (!EltVT.isSimple())
1229       break;
1230     MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
1231     if (LargerVector == MVT())
1232       break;
1233 
1234     // If this type is legal then widen the vector.
1235     if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
1236       return LegalizeKind(TypeWidenVector, LargerVector);
1237   }
1238 
1239   // Widen odd vectors to next power of two.
1240   if (!VT.isPow2VectorType()) {
1241     EVT NVT = VT.getPow2VectorType(Context);
1242     return LegalizeKind(TypeWidenVector, NVT);
1243   }
1244 
1245   if (VT.getVectorElementCount() == ElementCount::getScalable(1))
1246     return LegalizeKind(TypeScalarizeScalableVector, EltVT);
1247 
1248   // Vectors with illegal element types are expanded.
1249   EVT NVT = EVT::getVectorVT(Context, EltVT,
1250                              VT.getVectorElementCount().divideCoefficientBy(2));
1251   return LegalizeKind(TypeSplitVector, NVT);
1252 }
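
// Example walk-through (hypothetical target where only <4 x i32> is legal):
// <3 x i8> is first widened to <4 x i8> (TypeWidenVector); querying the
// result again then promotes the element type to give <4 x i32>
// (TypePromoteInteger), matching the <3 x i8> -> <4 x i8> -> <4 x i32>
// comment above.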
1253 
1254 static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
1255                                           unsigned &NumIntermediates,
1256                                           MVT &RegisterVT,
1257                                           TargetLoweringBase *TLI) {
1258   // Figure out the right, legal destination reg to copy into.
1259   ElementCount EC = VT.getVectorElementCount();
1260   MVT EltTy = VT.getVectorElementType();
1261 
1262   unsigned NumVectorRegs = 1;
1263 
1264   // Scalable vectors cannot be scalarized, so splitting or widening is
1265   // required.
1266   if (VT.isScalableVector() && !isPowerOf2_32(EC.getKnownMinValue()))
1267     llvm_unreachable(
1268         "Splitting or widening of non-power-of-2 MVTs is not implemented.");
1269 
1270   // FIXME: We don't support non-power-of-2-sized vectors for now.
1271   // Ideally we could break down into LHS/RHS like LegalizeDAG does.
1272   if (!isPowerOf2_32(EC.getKnownMinValue())) {
1273     // Split EC to unit size (scalable property is preserved).
1274     NumVectorRegs = EC.getKnownMinValue();
1275     EC = ElementCount::getFixed(1);
1276   }
1277 
1278   // Divide the input until we get to a supported size. This will
1279   // always end up with an EC that represents a scalar or a scalable
1280   // scalar.
1281   while (EC.getKnownMinValue() > 1 &&
1282          !TLI->isTypeLegal(MVT::getVectorVT(EltTy, EC))) {
1283     EC = EC.divideCoefficientBy(2);
1284     NumVectorRegs <<= 1;
1285   }
1286 
1287   NumIntermediates = NumVectorRegs;
1288 
1289   MVT NewVT = MVT::getVectorVT(EltTy, EC);
1290   if (!TLI->isTypeLegal(NewVT))
1291     NewVT = EltTy;
1292   IntermediateVT = NewVT;
1293 
1294   unsigned LaneSizeInBits = NewVT.getScalarSizeInBits();
1295 
1296   // Convert sizes such as i33 to i64.
1297   LaneSizeInBits = llvm::bit_ceil(LaneSizeInBits);
1298 
1299   MVT DestVT = TLI->getRegisterType(NewVT);
1300   RegisterVT = DestVT;
1301   if (EVT(DestVT).bitsLT(NewVT))    // Value is expanded, e.g. i64 -> i16.
1302     return NumVectorRegs * (LaneSizeInBits / DestVT.getScalarSizeInBits());
1303 
1304   // Otherwise, promotion or legal types use the same number of registers as
1305   // the vector decimated to the appropriate level.
1306   return NumVectorRegs;
1307 }
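
// Illustrative breakdown: for MVT::v16i8 on a target whose widest legal i8
// vector is v8i8, the halving loop runs once, producing IntermediateVT ==
// RegisterVT == v8i8, NumIntermediates == 2, and a return value of 2
// registers.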
1308 
1309 /// isLegalRC - Return true if the value types that can be represented by the
1310 /// specified register class are all legal.
1311 bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
1312                                    const TargetRegisterClass &RC) const {
1313   for (const auto *I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
1314     if (isTypeLegal(*I))
1315       return true;
1316   return false;
1317 }
1318 
1319 /// Replace/modify any TargetFrameIndex operands with a target-dependent
1320 /// sequence of memory operands that is recognized by PrologEpilogInserter.
1321 MachineBasicBlock *
1322 TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
1323                                    MachineBasicBlock *MBB) const {
1324   MachineInstr *MI = &InitialMI;
1325   MachineFunction &MF = *MI->getMF();
1326   MachineFrameInfo &MFI = MF.getFrameInfo();
1327 
1328   // We're handling multiple types of operands here:
1329   // PATCHPOINT MetaArgs - live-in, read only, direct
1330   // STATEPOINT Deopt Spill - live-through, read only, indirect
1331   // STATEPOINT Deopt Alloca - live-through, read only, direct
1332   // (We're currently conservative and mark the deopt slots read/write in
1333   // practice.)
1334   // STATEPOINT GC Spill - live-through, read/write, indirect
1335   // STATEPOINT GC Alloca - live-through, read/write, direct
1336   // The live-in vs live-through is handled already (the live through ones are
1337   // all stack slots), but we need to handle the different type of stackmap
1338   // operands and memory effects here.
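
  // For example (illustrative values), a statepoint deopt spill of an 8-byte
  // slot at frame index 2 is encoded below as the operand sequence
  // IndirectMemRefOp, 8, FI#2, 0, while a direct alloca reference becomes
  // DirectMemRefOp, FI#2, 0.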
1339 
1340   if (llvm::none_of(MI->operands(),
1341                     [](MachineOperand &Operand) { return Operand.isFI(); }))
1342     return MBB;
1343 
1344   MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());
1345 
1346   // Inherit previous memory operands.
1347   MIB.cloneMemRefs(*MI);
1348 
1349   for (unsigned i = 0; i < MI->getNumOperands(); ++i) {
1350     MachineOperand &MO = MI->getOperand(i);
1351     if (!MO.isFI()) {
1352       // Index of the Def operand this Use is tied to.
1353       // Since Defs come before Uses, if a Use is tied, then the index of
1354       // its Def must be smaller than the index of that Use.
1355       // Also, Defs preserve their position in the new MI.
1356       unsigned TiedTo = i;
1357       if (MO.isReg() && MO.isTied())
1358         TiedTo = MI->findTiedOperandIdx(i);
1359       MIB.add(MO);
1360       if (TiedTo < i)
1361         MIB->tieOperands(TiedTo, MIB->getNumOperands() - 1);
1362       continue;
1363     }
1364 
1365     // foldMemoryOperand builds a new MI after replacing a single FI operand
1366     // with the canonical set of five x86 addressing-mode operands.
1367     int FI = MO.getIndex();
1368 
1369     // Add frame index operands recognized by stackmaps.cpp
1370     if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
1371       // indirect-mem-ref tag, size, #FI, offset.
1372       // Used for spills inserted by StatepointLowering.  This codepath is not
1373       // used for patchpoints/stackmaps at all; for those, spilling is done only
1374       // via the foldMemoryOperand callback.
1375       assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
1376       MIB.addImm(StackMaps::IndirectMemRefOp);
1377       MIB.addImm(MFI.getObjectSize(FI));
1378       MIB.add(MO);
1379       MIB.addImm(0);
1380     } else {
1381       // direct-mem-ref tag, #FI, offset.
1382       // Used by patchpoint, and direct alloca arguments to statepoints
1383       MIB.addImm(StackMaps::DirectMemRefOp);
1384       MIB.add(MO);
1385       MIB.addImm(0);
1386     }
1387 
1388     assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");
1389 
1390     // Add a new memory operand for this FI.
1391     assert(MFI.getObjectOffset(FI) != -1);
1392 
1393     // Note: STATEPOINT MMOs are added during SelectionDAG.  STACKMAP and
1394     // PATCHPOINT should be updated to do the same. (TODO)
1395     if (MI->getOpcode() != TargetOpcode::STATEPOINT) {
1396       auto Flags = MachineMemOperand::MOLoad;
1397       MachineMemOperand *MMO = MF.getMachineMemOperand(
1398           MachinePointerInfo::getFixedStack(MF, FI), Flags,
1399           MF.getDataLayout().getPointerSize(), MFI.getObjectAlign(FI));
1400       MIB->addMemOperand(MF, MMO);
1401     }
1402   }
1403   MBB->insert(MachineBasicBlock::iterator(MI), MIB);
1404   MI->eraseFromParent();
1405   return MBB;
1406 }
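
// Illustrative sketch (not from the original source): after the rewrite
// above, a STATEPOINT operand referencing a statepoint spill slot, say
//   ... %fi.0 ...
// is replaced by the four-operand indirect form recognized by StackMaps:
//   ... IndirectMemRefOp, <size of %fi.0>, %fi.0, 0 ...
// while a patchpoint or direct-alloca frame index becomes the three-operand
// direct form:
//   ... DirectMemRefOp, %fi.0, 0 ...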
1407 
1408 /// findRepresentativeClass - Return the largest legal super-reg register class
1409 /// of the register class for the specified type and its associated "cost".
1410 // This function is in TargetLowering because it uses RegClassForVT, which
1411 // would need to be moved to TargetRegisterInfo and would necessitate moving
1412 // isTypeLegal over as well - a massive change that would still leave
1413 // TargetLowering needing a TargetRegisterInfo class member in order to use it.
1414 std::pair<const TargetRegisterClass *, uint8_t>
1415 TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
1416                                             MVT VT) const {
1417   const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
1418   if (!RC)
1419     return std::make_pair(RC, 0);
1420 
1421   // Compute the set of all super-register classes.
1422   BitVector SuperRegRC(TRI->getNumRegClasses());
1423   for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
1424     SuperRegRC.setBitsInMask(RCI.getMask());
1425 
1426   // Find the first legal register class with the largest spill size.
1427   const TargetRegisterClass *BestRC = RC;
1428   for (unsigned i : SuperRegRC.set_bits()) {
1429     const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
1430     // We want the largest possible spill size.
1431     if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
1432       continue;
1433     if (!isLegalRC(*TRI, *SuperRC))
1434       continue;
1435     BestRC = SuperRC;
1436   }
1437   return std::make_pair(BestRC, 1);
1438 }
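
// Example usage (a sketch; the class names assume an x86-like target and are
// illustrative only):
//   auto [RC, Cost] = findRepresentativeClass(TRI, MVT::i8);
//   // On i386, RC would typically be GR32 - the largest legal super-reg
//   // class reachable from the i8 register class - with Cost == 1.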
1439 
1440 /// computeRegisterProperties - Once all of the register classes are added,
1441 /// this allows us to compute derived properties we expose.
1442 void TargetLoweringBase::computeRegisterProperties(
1443     const TargetRegisterInfo *TRI) {
1444   // Everything defaults to needing one register.
1445   for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
1446     NumRegistersForVT[i] = 1;
1447     RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
1448   }
1449   // ...except isVoid, which doesn't need any registers.
1450   NumRegistersForVT[MVT::isVoid] = 0;
1451 
1452   // Find the largest integer register class.
1453   unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
1454   for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
1455     assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
1456 
1457   // Every integer value type larger than this largest register takes twice as
1458   // many registers to represent as the previous ValueType.
1459   for (unsigned ExpandedReg = LargestIntReg + 1;
1460        ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
1461     NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
1462     RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
1463     TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
1464     ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
1465                                    TypeExpandInteger);
1466   }
1467 
1468   // Inspect all of the ValueType's smaller than the largest integer
1469   // register to see which ones need promotion.
1470   unsigned LegalIntReg = LargestIntReg;
1471   for (unsigned IntReg = LargestIntReg - 1;
1472        IntReg >= (unsigned)MVT::i1; --IntReg) {
1473     MVT IVT = (MVT::SimpleValueType)IntReg;
1474     if (isTypeLegal(IVT)) {
1475       LegalIntReg = IntReg;
1476     } else {
1477       RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
1478         (MVT::SimpleValueType)LegalIntReg;
1479       ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
1480     }
1481   }
1482 
1483   // ppcf128 type is really two f64's.
1484   if (!isTypeLegal(MVT::ppcf128)) {
1485     if (isTypeLegal(MVT::f64)) {
1486       NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
1487       RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
1488       TransformToType[MVT::ppcf128] = MVT::f64;
1489       ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
1490     } else {
1491       NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
1492       RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
1493       TransformToType[MVT::ppcf128] = MVT::i128;
1494       ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
1495     }
1496   }
1497 
1498   // Decide how to handle f128. If the target does not have native f128 support,
1499   // expand it to i128 and we will be generating soft float library calls.
1500   if (!isTypeLegal(MVT::f128)) {
1501     NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
1502     RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
1503     TransformToType[MVT::f128] = MVT::i128;
1504     ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
1505   }
1506 
1507   // Decide how to handle f80. If the target does not have native f80 support,
1508   // expand it to i96 and we will be generating soft float library calls.
1509   if (!isTypeLegal(MVT::f80)) {
1510     NumRegistersForVT[MVT::f80] = 3*NumRegistersForVT[MVT::i32];
1511     RegisterTypeForVT[MVT::f80] = RegisterTypeForVT[MVT::i32];
1512     TransformToType[MVT::f80] = MVT::i32;
1513     ValueTypeActions.setTypeAction(MVT::f80, TypeSoftenFloat);
1514   }
1515 
1516   // Decide how to handle f64. If the target does not have native f64 support,
1517   // expand it to i64 and we will be generating soft float library calls.
1518   if (!isTypeLegal(MVT::f64)) {
1519     NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
1520     RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
1521     TransformToType[MVT::f64] = MVT::i64;
1522     ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
1523   }
1524 
1525   // Decide how to handle f32. If the target does not have native f32 support,
1526   // expand it to i32 and we will be generating soft float library calls.
1527   if (!isTypeLegal(MVT::f32)) {
1528     NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
1529     RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
1530     TransformToType[MVT::f32] = MVT::i32;
1531     ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
1532   }
1533 
1534   // Decide how to handle f16. If the target does not have native f16 support,
1535   // promote it to f32, because there are no f16 library calls (except for
1536   // conversions).
1537   if (!isTypeLegal(MVT::f16)) {
1538     // Allow targets to control how we legalize half.
1539     bool SoftPromoteHalfType = softPromoteHalfType();
1540     bool UseFPRegsForHalfType = !SoftPromoteHalfType || useFPRegsForHalfType();
1541 
1542     if (!UseFPRegsForHalfType) {
1543       NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
1544       RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
1545     } else {
1546       NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
1547       RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
1548     }
1549     TransformToType[MVT::f16] = MVT::f32;
1550     if (SoftPromoteHalfType) {
1551       ValueTypeActions.setTypeAction(MVT::f16, TypeSoftPromoteHalf);
1552     } else {
1553       ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
1554     }
1555   }
1556 
1557   // Decide how to handle bf16. If the target does not have native bf16 support,
1558   // promote it to f32, because there are no bf16 library calls (except for
1559   // converting from f32 to bf16).
1560   if (!isTypeLegal(MVT::bf16)) {
1561     NumRegistersForVT[MVT::bf16] = NumRegistersForVT[MVT::f32];
1562     RegisterTypeForVT[MVT::bf16] = RegisterTypeForVT[MVT::f32];
1563     TransformToType[MVT::bf16] = MVT::f32;
1564     ValueTypeActions.setTypeAction(MVT::bf16, TypeSoftPromoteHalf);
1565   }
1566 
1567   // Loop over all of the vector value types to see which need transformations.
1568   for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
1569        i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
1570     MVT VT = (MVT::SimpleValueType) i;
1571     if (isTypeLegal(VT))
1572       continue;
1573 
1574     MVT EltVT = VT.getVectorElementType();
1575     ElementCount EC = VT.getVectorElementCount();
1576     bool IsLegalWiderType = false;
1577     bool IsScalable = VT.isScalableVector();
1578     LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
1579     switch (PreferredAction) {
1580     case TypePromoteInteger: {
1581       MVT::SimpleValueType EndVT = IsScalable ?
1582                                    MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE :
1583                                    MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE;
1584       // Try to promote the elements of integer vectors. If no legal
1585       // promotion was found, fall through to the widen-vector method.
1586       for (unsigned nVT = i + 1;
1587            (MVT::SimpleValueType)nVT <= EndVT; ++nVT) {
1588         MVT SVT = (MVT::SimpleValueType) nVT;
1589         // Promote vectors of integers to vectors with the same number
1590         // of elements, with a wider element type.
1591         if (SVT.getScalarSizeInBits() > EltVT.getFixedSizeInBits() &&
1592             SVT.getVectorElementCount() == EC && isTypeLegal(SVT)) {
1593           TransformToType[i] = SVT;
1594           RegisterTypeForVT[i] = SVT;
1595           NumRegistersForVT[i] = 1;
1596           ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
1597           IsLegalWiderType = true;
1598           break;
1599         }
1600       }
1601       if (IsLegalWiderType)
1602         break;
1603       [[fallthrough]];
1604     }
1605 
1606     case TypeWidenVector:
1607       if (isPowerOf2_32(EC.getKnownMinValue())) {
1608         // Try to widen the vector.
1609         for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
1610           MVT SVT = (MVT::SimpleValueType) nVT;
1611           if (SVT.getVectorElementType() == EltVT &&
1612               SVT.isScalableVector() == IsScalable &&
1613               SVT.getVectorElementCount().getKnownMinValue() >
1614                   EC.getKnownMinValue() &&
1615               isTypeLegal(SVT)) {
1616             TransformToType[i] = SVT;
1617             RegisterTypeForVT[i] = SVT;
1618             NumRegistersForVT[i] = 1;
1619             ValueTypeActions.setTypeAction(VT, TypeWidenVector);
1620             IsLegalWiderType = true;
1621             break;
1622           }
1623         }
1624         if (IsLegalWiderType)
1625           break;
1626       } else {
1627         // Only widen to the next power of 2 to keep consistency with EVT.
1628         MVT NVT = VT.getPow2VectorType();
1629         if (isTypeLegal(NVT)) {
1630           TransformToType[i] = NVT;
1631           ValueTypeActions.setTypeAction(VT, TypeWidenVector);
1632           RegisterTypeForVT[i] = NVT;
1633           NumRegistersForVT[i] = 1;
1634           break;
1635         }
1636       }
1637       [[fallthrough]];
1638 
1639     case TypeSplitVector:
1640     case TypeScalarizeVector: {
1641       MVT IntermediateVT;
1642       MVT RegisterVT;
1643       unsigned NumIntermediates;
1644       unsigned NumRegisters = getVectorTypeBreakdownMVT(VT, IntermediateVT,
1645           NumIntermediates, RegisterVT, this);
1646       NumRegistersForVT[i] = NumRegisters;
1647       assert(NumRegistersForVT[i] == NumRegisters &&
1648              "NumRegistersForVT size cannot represent NumRegisters!");
1649       RegisterTypeForVT[i] = RegisterVT;
1650 
1651       MVT NVT = VT.getPow2VectorType();
1652       if (NVT == VT) {
1653         // Type is already a power of 2.  The default action is to split.
1654         TransformToType[i] = MVT::Other;
1655         if (PreferredAction == TypeScalarizeVector)
1656           ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
1657         else if (PreferredAction == TypeSplitVector)
1658           ValueTypeActions.setTypeAction(VT, TypeSplitVector);
1659         else if (EC.getKnownMinValue() > 1)
1660           ValueTypeActions.setTypeAction(VT, TypeSplitVector);
1661         else
1662           ValueTypeActions.setTypeAction(VT, EC.isScalable()
1663                                                  ? TypeScalarizeScalableVector
1664                                                  : TypeScalarizeVector);
1665       } else {
1666         TransformToType[i] = NVT;
1667         ValueTypeActions.setTypeAction(VT, TypeWidenVector);
1668       }
1669       break;
1670     }
1671     default:
1672       llvm_unreachable("Unknown vector legalization action!");
1673     }
1674   }
1675 
1676   // Determine the 'representative' register class for each value type.
1677   // A representative register class is the largest (meaning one which is
1678   // not a sub-register class of any other class) legal register class for
1679   // a group of value types. For example, on i386 the representative class
1680   // for i8, i16, and i32 would be GR32, while on x86_64 it would be GR64.
1681   for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
1682     const TargetRegisterClass* RRC;
1683     uint8_t Cost;
1684     std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
1685     RepRegClassForVT[i] = RRC;
1686     RepRegClassCostForVT[i] = Cost;
1687   }
1688 }
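
// Worked example (illustrative; assumes a hypothetical target whose largest
// legal integer type is i32 and where i8/i16 have no register class):
//   - i64 and i128 are expanded: NumRegistersForVT[i64] == 2 and
//     NumRegistersForVT[i128] == 4, RegisterTypeForVT is i32 for both, and
//     TransformToType steps down one size at a time (i128 -> i64 -> i32).
//   - i8 and i16 are promoted: both map to i32 with TypePromoteInteger.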
1689 
1690 EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
1691                                            EVT VT) const {
1692   assert(!VT.isVector() && "No default SetCC type for vectors!");
1693   return getPointerTy(DL).SimpleTy;
1694 }
1695 
1696 MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
1697   return MVT::i32; // return the default value
1698 }
1699 
1700 /// getVectorTypeBreakdown - Vector types are broken down into some number of
1701 /// legal first class types.  For example, MVT::v8f32 maps to 2 MVT::v4f32
1702 /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
1703 /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
1704 ///
1705 /// This method returns the number of registers needed, and the VT for each
1706 /// register.  It also returns the VT and quantity of the intermediate values
1707 /// before they are promoted/expanded.
1708 unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context,
1709                                                     EVT VT, EVT &IntermediateVT,
1710                                                     unsigned &NumIntermediates,
1711                                                     MVT &RegisterVT) const {
1712   ElementCount EltCnt = VT.getVectorElementCount();
1713 
1714   // If there is a wider vector type with the same element type as this one,
1715   // or a promoted vector type that has the same number of elements which
1716   // are wider, then we should convert to that legal vector type.
1717   // This handles things like <2 x float> -> <4 x float> and
1718   // <4 x i1> -> <4 x i32>.
1719   LegalizeTypeAction TA = getTypeAction(Context, VT);
1720   if (!EltCnt.isScalar() &&
1721       (TA == TypeWidenVector || TA == TypePromoteInteger)) {
1722     EVT RegisterEVT = getTypeToTransformTo(Context, VT);
1723     if (isTypeLegal(RegisterEVT)) {
1724       IntermediateVT = RegisterEVT;
1725       RegisterVT = RegisterEVT.getSimpleVT();
1726       NumIntermediates = 1;
1727       return 1;
1728     }
1729   }
1730 
1731   // Figure out the right, legal destination reg to copy into.
1732   EVT EltTy = VT.getVectorElementType();
1733 
1734   unsigned NumVectorRegs = 1;
1735 
1736   // Scalable vectors cannot be scalarized, so handle the legalization of
1737   // these types as is done elsewhere in SelectionDAG.
1738   if (EltCnt.isScalable()) {
1739     LegalizeKind LK;
1740     EVT PartVT = VT;
1741     do {
1742       // Iterate until we've found a legal (part) type to hold VT.
1743       LK = getTypeConversion(Context, PartVT);
1744       PartVT = LK.second;
1745     } while (LK.first != TypeLegal);
1746 
1747     if (!PartVT.isVector()) {
1748       report_fatal_error(
1749           "Don't know how to legalize this scalable vector type");
1750     }
1751 
1752     NumIntermediates =
1753         divideCeil(VT.getVectorElementCount().getKnownMinValue(),
1754                    PartVT.getVectorElementCount().getKnownMinValue());
1755     IntermediateVT = PartVT;
1756     RegisterVT = getRegisterType(Context, IntermediateVT);
1757     return NumIntermediates;
1758   }
1759 
1760   // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally
1761   // we could break down into LHS/RHS like LegalizeDAG does.
1762   if (!isPowerOf2_32(EltCnt.getKnownMinValue())) {
1763     NumVectorRegs = EltCnt.getKnownMinValue();
1764     EltCnt = ElementCount::getFixed(1);
1765   }
1766 
1767   // Divide the input until we get to a supported size.  This will always
1768   // end with a scalar if the target doesn't support vectors.
1769   while (EltCnt.getKnownMinValue() > 1 &&
1770          !isTypeLegal(EVT::getVectorVT(Context, EltTy, EltCnt))) {
1771     EltCnt = EltCnt.divideCoefficientBy(2);
1772     NumVectorRegs <<= 1;
1773   }
1774 
1775   NumIntermediates = NumVectorRegs;
1776 
1777   EVT NewVT = EVT::getVectorVT(Context, EltTy, EltCnt);
1778   if (!isTypeLegal(NewVT))
1779     NewVT = EltTy;
1780   IntermediateVT = NewVT;
1781 
1782   MVT DestVT = getRegisterType(Context, NewVT);
1783   RegisterVT = DestVT;
1784 
1785   if (EVT(DestVT).bitsLT(NewVT)) {  // Value is expanded, e.g. i64 -> i16.
1786     TypeSize NewVTSize = NewVT.getSizeInBits();
1787     // Convert sizes such as i33 to i64.
1788     if (!llvm::has_single_bit<uint32_t>(NewVTSize.getKnownMinValue()))
1789       NewVTSize = NewVTSize.coefficientNextPowerOf2();
1790     return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
1791   }
1792 
1793   // Otherwise, promotion or legal types use the same number of registers as
1794   // the vector decimated to the appropriate level.
1795   return NumVectorRegs;
1796 }
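
// Example usage (a sketch; assumes a hypothetical target where v4f32 is the
// widest legal vector type):
//   EVT IntermediateVT;
//   MVT RegisterVT;
//   unsigned NumIntermediates;
//   unsigned NumRegs = TLI.getVectorTypeBreakdown(
//       Ctx, MVT::v8f32, IntermediateVT, NumIntermediates, RegisterVT);
//   // NumRegs == 2 and NumIntermediates == 2; IntermediateVT and RegisterVT
//   // are both v4f32, i.e. v8f32 is split into two legal v4f32 halves.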
1797 
1798 bool TargetLoweringBase::isSuitableForJumpTable(const SwitchInst *SI,
1799                                                 uint64_t NumCases,
1800                                                 uint64_t Range,
1801                                                 ProfileSummaryInfo *PSI,
1802                                                 BlockFrequencyInfo *BFI) const {
1803   // FIXME: This function checks the maximum table size and density, but the
1804   // minimum size is not checked. It would be nice if the minimum size check
1805   // were also combined within this function. Currently, the minimum size check
1806   // is performed in findJumpTable() in SelectionDAGBuilder and
1807   // getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
1808   const bool OptForSize =
1809       SI->getParent()->getParent()->hasOptSize() ||
1810       llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI);
1811   const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
1812   const unsigned MaxJumpTableSize = getMaximumJumpTableSize();
1813 
1814   // Check whether the number of cases is small enough and
1815   // the range is dense enough for a jump table.
1816   return (OptForSize || Range <= MaxJumpTableSize) &&
1817          (NumCases * 100 >= Range * MinDensity);
1818 }
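
// Worked example (illustrative numbers): with NumCases = 40, Range = 100 and
// MinDensity = 10 (percent), the density check is
//   40 * 100 >= 100 * 10   // 4000 >= 1000 -> dense enough
// so the switch qualifies, provided Range also fits within the maximum jump
// table size (or we are optimizing for size, which skips the size check).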
1819 
1820 MVT TargetLoweringBase::getPreferredSwitchConditionType(LLVMContext &Context,
1821                                                         EVT ConditionVT) const {
1822   return getRegisterType(Context, ConditionVT);
1823 }
1824 
1825 /// Get the EVTs and ArgFlags collections that represent the legalized return
1826 /// type of the given function.  This does not require a DAG or a return value,
1827 /// and is suitable for use before any DAGs for the function are constructed.
1828 /// TODO: Move this out of TargetLowering.cpp.
1829 void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
1830                          AttributeList attr,
1831                          SmallVectorImpl<ISD::OutputArg> &Outs,
1832                          const TargetLowering &TLI, const DataLayout &DL) {
1833   SmallVector<EVT, 4> ValueVTs;
1834   ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
1835   unsigned NumValues = ValueVTs.size();
1836   if (NumValues == 0) return;
1837 
1838   for (unsigned j = 0, f = NumValues; j != f; ++j) {
1839     EVT VT = ValueVTs[j];
1840     ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1841 
1842     if (attr.hasRetAttr(Attribute::SExt))
1843       ExtendKind = ISD::SIGN_EXTEND;
1844     else if (attr.hasRetAttr(Attribute::ZExt))
1845       ExtendKind = ISD::ZERO_EXTEND;
1846 
1847     if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
1848       VT = TLI.getTypeForExtReturn(ReturnType->getContext(), VT, ExtendKind);
1849 
1850     unsigned NumParts =
1851         TLI.getNumRegistersForCallingConv(ReturnType->getContext(), CC, VT);
1852     MVT PartVT =
1853         TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), CC, VT);
1854 
1855     // 'inreg' on function refers to return value
1856     ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1857     if (attr.hasRetAttr(Attribute::InReg))
1858       Flags.setInReg();
1859 
1860     // Propagate extension type if any
1861     if (attr.hasRetAttr(Attribute::SExt))
1862       Flags.setSExt();
1863     else if (attr.hasRetAttr(Attribute::ZExt))
1864       Flags.setZExt();
1865 
1866     for (unsigned i = 0; i < NumParts; ++i) {
1867       ISD::ArgFlagsTy OutFlags = Flags;
1868       if (NumParts > 1 && i == 0)
1869         OutFlags.setSplit();
1870       else if (i == NumParts - 1 && i != 0)
1871         OutFlags.setSplitEnd();
1872 
1873       Outs.push_back(
1874           ISD::OutputArg(OutFlags, PartVT, VT, /*isfixed=*/true, 0, 0));
1875     }
1876   }
1877 }
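
// Illustrative example (hypothetical 32-bit target): a function returning
// 'zeroext i64' is legalized here to NumParts == 2 parts of PartVT == i32,
// yielding two ISD::OutputArg entries with the ZExt flag set; the first part
// is additionally marked 'split' and the last one 'split end'.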
1878 
1879 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1880 /// function arguments in the caller parameter area.  This is the actual
1881 /// alignment, not its logarithm.
1882 uint64_t TargetLoweringBase::getByValTypeAlignment(Type *Ty,
1883                                                    const DataLayout &DL) const {
1884   return DL.getABITypeAlign(Ty).value();
1885 }
1886 
1887 bool TargetLoweringBase::allowsMemoryAccessForAlignment(
1888     LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
1889     Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const {
1890   // Check if the specified alignment is sufficient based on the data layout.
1891   // TODO: While using the data layout works in practice, a better solution
1892   // would be to implement this check directly (make this a virtual function).
1893   // For example, the ABI alignment may change based on software platform while
1894   // this function should only be affected by hardware implementation.
1895   Type *Ty = VT.getTypeForEVT(Context);
1896   if (VT.isZeroSized() || Alignment >= DL.getABITypeAlign(Ty)) {
1897     // Assume that an access that meets the ABI-specified alignment is fast.
1898     if (Fast != nullptr)
1899       *Fast = 1;
1900     return true;
1901   }
1902 
1903   // This is a misaligned access.
1904   return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast);
1905 }
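
// Example (a sketch): on a typical target where the ABI alignment of i32 is
// 4 bytes, a 4-byte-aligned i32 access passes the check above and is
// reported fast, while a 1-byte-aligned i32 access falls through to the
// target's allowsMisalignedMemoryAccesses() hook.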
1906 
1907 bool TargetLoweringBase::allowsMemoryAccessForAlignment(
1908     LLVMContext &Context, const DataLayout &DL, EVT VT,
1909     const MachineMemOperand &MMO, unsigned *Fast) const {
1910   return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(),
1911                                         MMO.getAlign(), MMO.getFlags(), Fast);
1912 }
1913 
1914 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
1915                                             const DataLayout &DL, EVT VT,
1916                                             unsigned AddrSpace, Align Alignment,
1917                                             MachineMemOperand::Flags Flags,
1918                                             unsigned *Fast) const {
1919   return allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace, Alignment,
1920                                         Flags, Fast);
1921 }
1922 
1923 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
1924                                             const DataLayout &DL, EVT VT,
1925                                             const MachineMemOperand &MMO,
1926                                             unsigned *Fast) const {
1927   return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
1928                             MMO.getFlags(), Fast);
1929 }
1930 
1931 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
1932                                             const DataLayout &DL, LLT Ty,
1933                                             const MachineMemOperand &MMO,
1934                                             unsigned *Fast) const {
1935   EVT VT = getApproximateEVTForLLT(Ty, DL, Context);
1936   return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
1937                             MMO.getFlags(), Fast);
1938 }
1939 
1940 //===----------------------------------------------------------------------===//
1941 //  TargetTransformInfo Helpers
1942 //===----------------------------------------------------------------------===//
1943 
1944 int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
1945   enum InstructionOpcodes {
1946 #define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
1947 #define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
1948 #include "llvm/IR/Instruction.def"
1949   };
1950   switch (static_cast<InstructionOpcodes>(Opcode)) {
1951   case Ret:            return 0;
1952   case Br:             return 0;
1953   case Switch:         return 0;
1954   case IndirectBr:     return 0;
1955   case Invoke:         return 0;
1956   case CallBr:         return 0;
1957   case Resume:         return 0;
1958   case Unreachable:    return 0;
1959   case CleanupRet:     return 0;
1960   case CatchRet:       return 0;
1961   case CatchPad:       return 0;
1962   case CatchSwitch:    return 0;
1963   case CleanupPad:     return 0;
1964   case FNeg:           return ISD::FNEG;
1965   case Add:            return ISD::ADD;
1966   case FAdd:           return ISD::FADD;
1967   case Sub:            return ISD::SUB;
1968   case FSub:           return ISD::FSUB;
1969   case Mul:            return ISD::MUL;
1970   case FMul:           return ISD::FMUL;
1971   case UDiv:           return ISD::UDIV;
1972   case SDiv:           return ISD::SDIV;
1973   case FDiv:           return ISD::FDIV;
1974   case URem:           return ISD::UREM;
1975   case SRem:           return ISD::SREM;
1976   case FRem:           return ISD::FREM;
1977   case Shl:            return ISD::SHL;
1978   case LShr:           return ISD::SRL;
1979   case AShr:           return ISD::SRA;
1980   case And:            return ISD::AND;
1981   case Or:             return ISD::OR;
1982   case Xor:            return ISD::XOR;
1983   case Alloca:         return 0;
1984   case Load:           return ISD::LOAD;
1985   case Store:          return ISD::STORE;
1986   case GetElementPtr:  return 0;
1987   case Fence:          return 0;
1988   case AtomicCmpXchg:  return 0;
1989   case AtomicRMW:      return 0;
1990   case Trunc:          return ISD::TRUNCATE;
1991   case ZExt:           return ISD::ZERO_EXTEND;
1992   case SExt:           return ISD::SIGN_EXTEND;
1993   case FPToUI:         return ISD::FP_TO_UINT;
1994   case FPToSI:         return ISD::FP_TO_SINT;
1995   case UIToFP:         return ISD::UINT_TO_FP;
1996   case SIToFP:         return ISD::SINT_TO_FP;
1997   case FPTrunc:        return ISD::FP_ROUND;
1998   case FPExt:          return ISD::FP_EXTEND;
1999   case PtrToInt:       return ISD::BITCAST;
2000   case IntToPtr:       return ISD::BITCAST;
2001   case BitCast:        return ISD::BITCAST;
2002   case AddrSpaceCast:  return ISD::ADDRSPACECAST;
2003   case ICmp:           return ISD::SETCC;
2004   case FCmp:           return ISD::SETCC;
2005   case PHI:            return 0;
2006   case Call:           return 0;
2007   case Select:         return ISD::SELECT;
2008   case UserOp1:        return 0;
2009   case UserOp2:        return 0;
2010   case VAArg:          return 0;
2011   case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
2012   case InsertElement:  return ISD::INSERT_VECTOR_ELT;
2013   case ShuffleVector:  return ISD::VECTOR_SHUFFLE;
2014   case ExtractValue:   return ISD::MERGE_VALUES;
2015   case InsertValue:    return ISD::MERGE_VALUES;
2016   case LandingPad:     return 0;
2017   case Freeze:         return ISD::FREEZE;
2018   }
2019 
2020   llvm_unreachable("Unknown instruction type encountered!");
2021 }
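
// Example usage (sketch; TLI stands for any TargetLoweringBase instance). A
// return value of 0 means the IR opcode has no one-to-one ISD equivalent:
//   assert(TLI.InstructionOpcodeToISD(Instruction::Add) == ISD::ADD);
//   assert(TLI.InstructionOpcodeToISD(Instruction::PHI) == 0);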
2022 
2023 Value *
2024 TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
2025                                                        bool UseTLS) const {
2026   // compiler-rt provides a variable with a magic name.  Targets that do not
2027   // link with compiler-rt may also provide such a variable.
2028   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
2029   const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
2030   auto UnsafeStackPtr =
2031       dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));
2032 
2033   Type *StackPtrTy = PointerType::getUnqual(M->getContext());
2034 
2035   if (!UnsafeStackPtr) {
2036     auto TLSModel = UseTLS ?
2037         GlobalValue::InitialExecTLSModel :
2038         GlobalValue::NotThreadLocal;
2039     // The global variable is not defined yet; define it ourselves.
2040     // We use the initial-exec TLS model because we do not support the
2041     // variable living anywhere other than in the main executable.
2042     UnsafeStackPtr = new GlobalVariable(
2043         *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
2044         UnsafeStackPtrVar, nullptr, TLSModel);
2045   } else {
2046     // The variable exists, check its type and attributes.
2047     if (UnsafeStackPtr->getValueType() != StackPtrTy)
2048       report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
2049     if (UseTLS != UnsafeStackPtr->isThreadLocal())
2050       report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
2051                          (UseTLS ? "" : "not ") + "be thread-local");
2052   }
2053   return UnsafeStackPtr;
2054 }
2055 
2056 Value *
2057 TargetLoweringBase::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
2058   if (!TM.getTargetTriple().isAndroid())
2059     return getDefaultSafeStackPointerLocation(IRB, true);
2060 
2061   // Android provides a libc function to retrieve the address of the current
2062   // thread's unsafe stack pointer.
2063   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
2064   auto *PtrTy = PointerType::getUnqual(M->getContext());
2065   FunctionCallee Fn =
2066       M->getOrInsertFunction("__safestack_pointer_address", PtrTy);
2067   return IRB.CreateCall(Fn);
2068 }
2069 
2070 //===----------------------------------------------------------------------===//
2071 //  Loop Strength Reduction hooks
2072 //===----------------------------------------------------------------------===//
2073 
2074 /// isLegalAddressingMode - Return true if the addressing mode represented
2075 /// by AM is legal for this target, for a load/store of the specified type.
2076 bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
2077                                                const AddrMode &AM, Type *Ty,
2078                                                unsigned AS, Instruction *I) const {
2079   // The default implementation supports a conservative, RISC-style r+r and
2080   // r+i addressing mode.
2081 
2082   // Scalable offsets not supported
2083   if (AM.ScalableOffset)
2084     return false;
2085 
2086   // Allow a sign-extended 16-bit immediate field.
2087   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
2088     return false;
2089 
2090   // No global is ever allowed as a base.
2091   if (AM.BaseGV)
2092     return false;
2093 
2094   // Only support r+r,
2095   switch (AM.Scale) {
2096   case 0:  // "r+i" or just "i", depending on HasBaseReg.
2097     break;
2098   case 1:
2099     if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
2100       return false;
2101     // Otherwise we have r+r or r+i.
2102     break;
2103   case 2:
2104     if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
2105       return false;
2106     // Allow 2*r as r+r.
2107     break;
2108   default: // Don't allow n * r
2109     return false;
2110   }
2111 
2112   return true;
2113 }
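
// Illustrative examples of what this default accepts (hypothetical AddrMode
// field values, shown as Base + Offset + Scale*Index):
//   reg + 42          -> legal   ("r+i": offset fits the 16-bit field)
//   reg + reg         -> legal   ("r+r": Scale == 1, no offset)
//   reg + reg + 42    -> illegal ("r+r+i" is not allowed)
//   2 * reg           -> legal   (accepted as "r+r")
//   4 * reg           -> illegal (general "n*r" is not allowed)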
2114 
2115 //===----------------------------------------------------------------------===//
2116 //  Stack Protector
2117 //===----------------------------------------------------------------------===//
2118 
2119 // For OpenBSD, return its special guard variable. Otherwise return nullptr,
2120 // so that SelectionDAG handles SSP.
2121 Value *TargetLoweringBase::getIRStackGuard(IRBuilderBase &IRB) const {
2122   if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
2123     Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
2124     PointerType *PtrTy = PointerType::getUnqual(M.getContext());
2125     Constant *C = M.getOrInsertGlobal("__guard_local", PtrTy);
2126     if (GlobalVariable *G = dyn_cast_or_null<GlobalVariable>(C))
2127       G->setVisibility(GlobalValue::HiddenVisibility);
2128     return C;
2129   }
2130   return nullptr;
2131 }
2132 
2133 // Currently only support "standard" __stack_chk_guard.
2134 // TODO: add LOAD_STACK_GUARD support.
2135 void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
2136   if (!M.getNamedValue("__stack_chk_guard")) {
2137     auto *GV = new GlobalVariable(M, PointerType::getUnqual(M.getContext()),
2138                                   false, GlobalVariable::ExternalLinkage,
2139                                   nullptr, "__stack_chk_guard");
2140 
2141     // FreeBSD has "__stack_chk_guard" defined externally in libc.so.
2142     if (M.getDirectAccessExternalData() &&
2143         !TM.getTargetTriple().isWindowsGNUEnvironment() &&
2144         !(TM.getTargetTriple().isPPC64() &&
2145           TM.getTargetTriple().isOSFreeBSD()) &&
2146         (!TM.getTargetTriple().isOSDarwin() ||
2147          TM.getRelocationModel() == Reloc::Static))
2148       GV->setDSOLocal(true);
2149   }
2150 }
2151 
2152 // Currently only support "standard" __stack_chk_guard.
2153 // TODO: add LOAD_STACK_GUARD support.
2154 Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
2155   return M.getNamedValue("__stack_chk_guard");
2156 }
2157 
2158 Function *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
2159   return nullptr;
2160 }
2161 
2162 unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
2163   return MinimumJumpTableEntries;
2164 }
2165 
2166 void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
2167   MinimumJumpTableEntries = Val;
2168 }
2169 
2170 unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
2171   return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
2172 }
2173 
2174 unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
2175   return MaximumJumpTableSize;
2176 }
2177 
2178 void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
2179   MaximumJumpTableSize = Val;
2180 }
2181 
2182 bool TargetLoweringBase::isJumpTableRelative() const {
2183   return getTargetMachine().isPositionIndependent();
2184 }
2185 
2186 Align TargetLoweringBase::getPrefLoopAlignment(MachineLoop *ML) const {
2187   if (TM.Options.LoopAlignment)
2188     return Align(TM.Options.LoopAlignment);
2189   return PrefLoopAlignment;
2190 }
2191 
2192 unsigned TargetLoweringBase::getMaxPermittedBytesForAlignment(
2193     MachineBasicBlock *MBB) const {
2194   return MaxBytesForAlignment;
2195 }
2196 
2197 //===----------------------------------------------------------------------===//
2198 //  Reciprocal Estimates
2199 //===----------------------------------------------------------------------===//
2200 
2201 /// Get the reciprocal estimate attribute string for a function that will
2202 /// override the target defaults.
2203 static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
2204   const Function &F = MF.getFunction();
2205   return F.getFnAttribute("reciprocal-estimates").getValueAsString();
2206 }
2207 
2208 /// Construct a string for the given reciprocal operation of the given type.
2209 /// This string should match the corresponding option to the front-end's
2210 /// "-mrecip" flag assuming those strings have been passed through in an
2211 /// attribute string. For example, "vec-divf" for a division of a vXf32.
2212 static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
2213   std::string Name = VT.isVector() ? "vec-" : "";
2214 
2215   Name += IsSqrt ? "sqrt" : "div";
2216 
2217   // TODO: Handle other float types?
2218   if (VT.getScalarType() == MVT::f64) {
2219     Name += "d";
2220   } else if (VT.getScalarType() == MVT::f16) {
2221     Name += "h";
2222   } else {
2223     assert(VT.getScalarType() == MVT::f32 &&
2224            "Unexpected FP type for reciprocal estimate");
2225     Name += "f";
2226   }
2227 
2228   return Name;
2229 }
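
// Example outputs (sketch): getReciprocalOpName(false, MVT::f32) produces
// "divf", getReciprocalOpName(true, MVT::v2f64) produces "vec-sqrtd", and
// getReciprocalOpName(true, MVT::f16) produces "sqrth".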
2230 
2231 /// Return the character position and value (a single numeric character) of a
2232 /// customized refinement operation in the input string if it exists. Return
2233 /// false if there is no customized refinement step count.
2234 static bool parseRefinementStep(StringRef In, size_t &Position,
2235                                 uint8_t &Value) {
2236   const char RefStepToken = ':';
2237   Position = In.find(RefStepToken);
2238   if (Position == StringRef::npos)
2239     return false;
2240 
2241   StringRef RefStepString = In.substr(Position + 1);
2242   // Allow exactly one numeric character for the additional refinement
2243   // step parameter.
2244   if (RefStepString.size() == 1) {
2245     char RefStepChar = RefStepString[0];
2246     if (isDigit(RefStepChar)) {
2247       Value = RefStepChar - '0';
2248       return true;
2249     }
2250   }
2251   report_fatal_error("Invalid refinement step for -recip.");
2252 }
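
// Example (sketch): for In == "vec-divd:2" this finds the ':' at
// Position == 8 and sets Value = 2; for In == "vec-divd" it returns false;
// a malformed count such as "vec-divd:99" is a fatal error.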
2253 
2254 /// For the input attribute string, return one of the ReciprocalEstimate enum
2255 /// status values (enabled, disabled, or not specified) for this operation on
2256 /// the specified data type.
2257 static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
2258   if (Override.empty())
2259     return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2260 
2261   SmallVector<StringRef, 4> OverrideVector;
2262   Override.split(OverrideVector, ',');
2263   unsigned NumArgs = OverrideVector.size();
2264 
2265   // Check if "all", "none", or "default" was specified.
2266   if (NumArgs == 1) {
2267     // Look for an optional setting of the number of refinement steps needed
2268     // for this type of reciprocal operation.
2269     size_t RefPos;
2270     uint8_t RefSteps;
2271     if (parseRefinementStep(Override, RefPos, RefSteps)) {
2272       // Split the string for further processing.
2273       Override = Override.substr(0, RefPos);
2274     }
2275 
2276     // All reciprocal types are enabled.
2277     if (Override == "all")
2278       return TargetLoweringBase::ReciprocalEstimate::Enabled;
2279 
2280     // All reciprocal types are disabled.
2281     if (Override == "none")
2282       return TargetLoweringBase::ReciprocalEstimate::Disabled;
2283 
2284     // Target defaults for enablement are used.
2285     if (Override == "default")
2286       return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2287   }
2288 
2289   // The attribute string may omit the size suffix ('f'/'d'/'h').
2290   std::string VTName = getReciprocalOpName(IsSqrt, VT);
2291   std::string VTNameNoSize = VTName;
2292   VTNameNoSize.pop_back();
2293   static const char DisabledPrefix = '!';
2294 
2295   for (StringRef RecipType : OverrideVector) {
2296     size_t RefPos;
2297     uint8_t RefSteps;
2298     if (parseRefinementStep(RecipType, RefPos, RefSteps))
2299       RecipType = RecipType.substr(0, RefPos);
2300 
2301     // Ignore the disablement token for string matching.
2302     bool IsDisabled = RecipType[0] == DisabledPrefix;
2303     if (IsDisabled)
2304       RecipType = RecipType.substr(1);
2305 
2306     if (RecipType == VTName || RecipType == VTNameNoSize)
2307       return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
2308                         : TargetLoweringBase::ReciprocalEstimate::Enabled;
2309   }
2310 
2311   return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2312 }
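
// Worked example (illustrative attribute string): with
// Override == "!sqrtf,vec-divd:2":
//   getOpEnabled(true,  f32,   Override)  // "sqrtf"    -> Disabled
//   getOpEnabled(false, v2f64, Override)  // "vec-divd" -> Enabled
//   getOpEnabled(true,  f64,   Override)  // "sqrtd"    -> Unspecified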
2313 
2314 /// For the input attribute string, return the customized refinement step count
2315 /// for this operation on the specified data type. If the step count does not
2316 /// exist, return the ReciprocalEstimate enum value for unspecified.
2317 static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
2318   if (Override.empty())
2319     return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2320 
2321   SmallVector<StringRef, 4> OverrideVector;
2322   Override.split(OverrideVector, ',');
2323   unsigned NumArgs = OverrideVector.size();
2324 
2325   // Check if "all", "default", or "none" was specified.
2326   if (NumArgs == 1) {
2327     // Look for an optional setting of the number of refinement steps needed
2328     // for this type of reciprocal operation.
2329     size_t RefPos;
2330     uint8_t RefSteps;
2331     if (!parseRefinementStep(Override, RefPos, RefSteps))
2332       return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2333 
2334     // Split the string for further processing.
2335     Override = Override.substr(0, RefPos);
2336     assert(Override != "none" &&
2337            "Disabled reciprocals, but specified refinement steps?");
2338 
2339     // If this is a general override, return the specified number of steps.
2340     if (Override == "all" || Override == "default")
2341       return RefSteps;
2342   }
2343 
2344   // The attribute string may omit the size suffix ('f'/'d'/'h').
2345   std::string VTName = getReciprocalOpName(IsSqrt, VT);
2346   std::string VTNameNoSize = VTName;
2347   VTNameNoSize.pop_back();
2348 
2349   for (StringRef RecipType : OverrideVector) {
2350     size_t RefPos;
2351     uint8_t RefSteps;
2352     if (!parseRefinementStep(RecipType, RefPos, RefSteps))
2353       continue;
2354 
2355     RecipType = RecipType.substr(0, RefPos);
2356     if (RecipType == VTName || RecipType == VTNameNoSize)
2357       return RefSteps;
2358   }
2359 
2360   return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2361 }
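
// Continuing the example above: with Override == "!sqrtf,vec-divd:2",
// getOpRefinementSteps(false, v2f64, Override) returns 2, while the sqrt
// queries return Unspecified because no step count was attached to them.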
2362 
2363 int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
2364                                                     MachineFunction &MF) const {
2365   return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
2366 }
2367 
2368 int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
2369                                                    MachineFunction &MF) const {
2370   return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
2371 }
2372 
2373 int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
2374                                                MachineFunction &MF) const {
2375   return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
2376 }
2377 
2378 int TargetLoweringBase::getDivRefinementSteps(EVT VT,
2379                                               MachineFunction &MF) const {
2380   return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
2381 }
2382 
2383 bool TargetLoweringBase::isLoadBitCastBeneficial(
2384     EVT LoadVT, EVT BitcastVT, const SelectionDAG &DAG,
2385     const MachineMemOperand &MMO) const {
2386   // Single-element vectors are scalarized, so we should generally avoid having
2387   // any memory operations on such types, as they would get scalarized too.
2388   if (LoadVT.isFixedLengthVector() && BitcastVT.isFixedLengthVector() &&
2389       BitcastVT.getVectorNumElements() == 1)
2390     return false;
2391 
2392   // Don't do this if we could do an indexed load on the original type, but
2393   // not on the new one.
2394   if (!LoadVT.isSimple() || !BitcastVT.isSimple())
2395     return true;
2396 
2397   MVT LoadMVT = LoadVT.getSimpleVT();
2398 
2399   // Don't bother doing this if it's just going to be promoted again later, as
2400   // doing so might interfere with other combines.
2401   if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
2402       getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
2403     return false;
2404 
2405   unsigned Fast = 0;
2406   return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), BitcastVT,
2407                             MMO, &Fast) &&
2408          Fast;
2409 }
2410 
2411 void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
2412   MF.getRegInfo().freezeReservedRegs();
2413 }
2414 
2415 MachineMemOperand::Flags TargetLoweringBase::getLoadMemOperandFlags(
2416     const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC,
2417     const TargetLibraryInfo *LibInfo) const {
2418   MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad;
2419   if (LI.isVolatile())
2420     Flags |= MachineMemOperand::MOVolatile;
2421 
2422   if (LI.hasMetadata(LLVMContext::MD_nontemporal))
2423     Flags |= MachineMemOperand::MONonTemporal;
2424 
2425   if (LI.hasMetadata(LLVMContext::MD_invariant_load))
2426     Flags |= MachineMemOperand::MOInvariant;
2427 
2428   if (isDereferenceableAndAlignedPointer(LI.getPointerOperand(), LI.getType(),
2429                                          LI.getAlign(), DL, &LI, AC,
2430                                          /*DT=*/nullptr, LibInfo))
2431     Flags |= MachineMemOperand::MODereferenceable;
2432 
2433   Flags |= getTargetMMOFlags(LI);
2434   return Flags;
2435 }
2436 
2437 MachineMemOperand::Flags
2438 TargetLoweringBase::getStoreMemOperandFlags(const StoreInst &SI,
2439                                             const DataLayout &DL) const {
2440   MachineMemOperand::Flags Flags = MachineMemOperand::MOStore;
2441 
2442   if (SI.isVolatile())
2443     Flags |= MachineMemOperand::MOVolatile;
2444 
2445   if (SI.hasMetadata(LLVMContext::MD_nontemporal))
2446     Flags |= MachineMemOperand::MONonTemporal;
2447 
2448   // FIXME: Not preserving dereferenceable
2449   Flags |= getTargetMMOFlags(SI);
2450   return Flags;
2451 }
2452 
2453 MachineMemOperand::Flags
2454 TargetLoweringBase::getAtomicMemOperandFlags(const Instruction &AI,
2455                                              const DataLayout &DL) const {
2456   auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
2457 
2458   if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&AI)) {
2459     if (RMW->isVolatile())
2460       Flags |= MachineMemOperand::MOVolatile;
2461   } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(&AI)) {
2462     if (CmpX->isVolatile())
2463       Flags |= MachineMemOperand::MOVolatile;
2464   } else
2465     llvm_unreachable("not an atomic instruction");
2466 
2467   // FIXME: Not preserving dereferenceable
2468   Flags |= getTargetMMOFlags(AI);
2469   return Flags;
2470 }
2471 
2472 Instruction *TargetLoweringBase::emitLeadingFence(IRBuilderBase &Builder,
2473                                                   Instruction *Inst,
2474                                                   AtomicOrdering Ord) const {
2475   if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
2476     return Builder.CreateFence(Ord);
2477   else
2478     return nullptr;
2479 }
2480 
2481 Instruction *TargetLoweringBase::emitTrailingFence(IRBuilderBase &Builder,
2482                                                    Instruction *Inst,
2483                                                    AtomicOrdering Ord) const {
2484   if (isAcquireOrStronger(Ord))
2485     return Builder.CreateFence(Ord);
2486   else
2487     return nullptr;
2488 }
2489 
2490 //===----------------------------------------------------------------------===//
2491 //  GlobalISel Hooks
2492 //===----------------------------------------------------------------------===//
2493 
2494 bool TargetLoweringBase::shouldLocalize(const MachineInstr &MI,
2495                                         const TargetTransformInfo *TTI) const {
2496   auto &MF = *MI.getMF();
2497   auto &MRI = MF.getRegInfo();
2498   // Assuming a spill and reload of a value has a cost of 1 instruction each,
2499   // this helper function computes the maximum number of uses we should consider
2500   // for remat. E.g. on arm64 global addresses take 2 insts to materialize. We
2501   // break even in terms of code size when the original MI has 2 users vs.
2502   // choosing to potentially spill. With any more than 2 users we have a net
2503   // code size increase. This doesn't take register pressure into account, though.
2504   auto maxUses = [](unsigned RematCost) {
2505     // A cost of 1 means remats are basically free.
2506     if (RematCost == 1)
2507       return std::numeric_limits<unsigned>::max();
2508     if (RematCost == 2)
2509       return 2U;
2510 
2511     // Remat is too expensive, only sink if there's one user.
2512     if (RematCost > 2)
2513       return 1U;
2514     llvm_unreachable("Unexpected remat cost");
2515   };
2516 
2517   switch (MI.getOpcode()) {
2518   default:
2519     return false;
2520   // Constant-like instructions should be close to their users.
2521   // We don't want long live-ranges for them.
2522   case TargetOpcode::G_CONSTANT:
2523   case TargetOpcode::G_FCONSTANT:
2524   case TargetOpcode::G_FRAME_INDEX:
2525   case TargetOpcode::G_INTTOPTR:
2526     return true;
2527   case TargetOpcode::G_GLOBAL_VALUE: {
2528     unsigned RematCost = TTI->getGISelRematGlobalCost();
2529     Register Reg = MI.getOperand(0).getReg();
2530     unsigned MaxUses = maxUses(RematCost);
2531     if (MaxUses == UINT_MAX)
2532       return true; // Remats are "free" so always localize.
2533     return MRI.hasAtMostUserInstrs(Reg, MaxUses);
2534   }
2535   }
2536 }
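
// Example (sketch): if TTI->getGISelRematGlobalCost() is 2, a
// G_GLOBAL_VALUE is localized only while it has at most 2 user
// instructions; with a remat cost of 1 it is always localized, and with any
// higher cost only single-use values are.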
2537