//===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLoweringBase class.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

static cl::opt<bool> JumpIsExpensiveOverride(
    "jump-is-expensive", cl::init(false),
    cl::desc("Do not create extra branches to split comparison logic."),
    cl::Hidden);

static cl::opt<unsigned> MinimumJumpTableEntries
  ("min-jump-table-entries", cl::init(4), cl::Hidden,
   cl::desc("Set minimum number of entries to use a jump table."));

static cl::opt<unsigned> MaximumJumpTableSize
  ("max-jump-table-size", cl::init(UINT_MAX), cl::Hidden,
   cl::desc("Set maximum size of jump tables."));

/// Minimum jump table density for normal functions.
static cl::opt<unsigned>
    JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
                     cl::desc("Minimum density for building a jump table in "
                              "a normal function"));

/// Minimum jump table density for -Os or -Oz functions.
static cl::opt<unsigned> OptsizeJumpTableDensity(
    "optsize-jump-table-density", cl::init(40), cl::Hidden,
    cl::desc("Minimum density for building a jump table in "
             "an optsize function"));

// FIXME: This option exists only to verify, during development, that strict
// fp operations are processed correctly, by preventing strict fp operations
// from being mutated into normal fp operations. Once the backend supports
// strict float operations, this option will be meaningless.
static cl::opt<bool> DisableStrictNodeMutation("disable-strictnode-mutation",
       cl::desc("Don't mutate strict-float nodes into normal fp nodes"),
       cl::init(false), cl::Hidden);

static bool darwinHasSinCos(const Triple &TT) {
  assert(TT.isOSDarwin() && "should be called with darwin triple");
  // Don't bother with 32 bit x86.
  if (TT.getArch() == Triple::x86)
    return false;
  // macOS < 10.9 has no sincos_stret, and it is only available on 64-bit.
  if (TT.isMacOSX())
    return !TT.isMacOSXVersionLT(10, 9) && TT.isArch64Bit();
  // iOS < 7.0 has no sincos_stret.
  if (TT.isiOS())
    return !TT.isOSVersionLT(7, 0);
  // Any other darwin such as WatchOS/TvOS is new enough.
  return true;
}

void TargetLoweringBase::InitLibcalls(const Triple &TT) {
#define HANDLE_LIBCALL(code, name) \
  setLibcallName(RTLIB::code, name);
#include "llvm/IR/RuntimeLibcalls.def"
#undef HANDLE_LIBCALL
  // Initialize calling conventions to their default.
  for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
    setLibcallCallingConv((RTLIB::Libcall)LC, CallingConv::C);

  // Use the f128 variants of math functions on x86_64.
  if (TT.getArch() == Triple::ArchType::x86_64 && TT.isGNUEnvironment()) {
    setLibcallName(RTLIB::REM_F128, "fmodf128");
    setLibcallName(RTLIB::FMA_F128, "fmaf128");
    setLibcallName(RTLIB::SQRT_F128, "sqrtf128");
    setLibcallName(RTLIB::CBRT_F128, "cbrtf128");
    setLibcallName(RTLIB::LOG_F128, "logf128");
    setLibcallName(RTLIB::LOG_FINITE_F128, "__logf128_finite");
    setLibcallName(RTLIB::LOG2_F128, "log2f128");
    setLibcallName(RTLIB::LOG2_FINITE_F128, "__log2f128_finite");
    setLibcallName(RTLIB::LOG10_F128, "log10f128");
    setLibcallName(RTLIB::LOG10_FINITE_F128, "__log10f128_finite");
    setLibcallName(RTLIB::EXP_F128, "expf128");
    setLibcallName(RTLIB::EXP_FINITE_F128, "__expf128_finite");
    setLibcallName(RTLIB::EXP2_F128, "exp2f128");
    setLibcallName(RTLIB::EXP2_FINITE_F128, "__exp2f128_finite");
    setLibcallName(RTLIB::EXP10_F128, "exp10f128");
    setLibcallName(RTLIB::SIN_F128, "sinf128");
    setLibcallName(RTLIB::COS_F128, "cosf128");
    setLibcallName(RTLIB::TAN_F128, "tanf128");
    setLibcallName(RTLIB::SINCOS_F128, "sincosf128");
    setLibcallName(RTLIB::POW_F128, "powf128");
    setLibcallName(RTLIB::POW_FINITE_F128, "__powf128_finite");
    setLibcallName(RTLIB::CEIL_F128, "ceilf128");
    setLibcallName(RTLIB::TRUNC_F128, "truncf128");
    setLibcallName(RTLIB::RINT_F128, "rintf128");
    setLibcallName(RTLIB::NEARBYINT_F128, "nearbyintf128");
    setLibcallName(RTLIB::ROUND_F128, "roundf128");
    setLibcallName(RTLIB::ROUNDEVEN_F128, "roundevenf128");
    setLibcallName(RTLIB::FLOOR_F128, "floorf128");
    setLibcallName(RTLIB::COPYSIGN_F128, "copysignf128");
    setLibcallName(RTLIB::FMIN_F128, "fminf128");
    setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
    setLibcallName(RTLIB::LROUND_F128, "lroundf128");
    setLibcallName(RTLIB::LLROUND_F128, "llroundf128");
    setLibcallName(RTLIB::LRINT_F128, "lrintf128");
    setLibcallName(RTLIB::LLRINT_F128, "llrintf128");
    setLibcallName(RTLIB::LDEXP_F128, "ldexpf128");
    setLibcallName(RTLIB::FREXP_F128, "frexpf128");
  }

  // For IEEE quad-precision libcall names, PPC uses "kf" instead of "tf".
  if (TT.isPPC()) {
    setLibcallName(RTLIB::ADD_F128, "__addkf3");
    setLibcallName(RTLIB::SUB_F128, "__subkf3");
    setLibcallName(RTLIB::MUL_F128, "__mulkf3");
    setLibcallName(RTLIB::DIV_F128, "__divkf3");
    setLibcallName(RTLIB::POWI_F128, "__powikf2");
    setLibcallName(RTLIB::FPEXT_F32_F128, "__extendsfkf2");
    setLibcallName(RTLIB::FPEXT_F64_F128, "__extenddfkf2");
    setLibcallName(RTLIB::FPROUND_F128_F32, "__trunckfsf2");
    setLibcallName(RTLIB::FPROUND_F128_F64, "__trunckfdf2");
    setLibcallName(RTLIB::FPTOSINT_F128_I32, "__fixkfsi");
    setLibcallName(RTLIB::FPTOSINT_F128_I64, "__fixkfdi");
    setLibcallName(RTLIB::FPTOSINT_F128_I128, "__fixkfti");
    setLibcallName(RTLIB::FPTOUINT_F128_I32, "__fixunskfsi");
    setLibcallName(RTLIB::FPTOUINT_F128_I64, "__fixunskfdi");
    setLibcallName(RTLIB::FPTOUINT_F128_I128, "__fixunskfti");
    setLibcallName(RTLIB::SINTTOFP_I32_F128, "__floatsikf");
    setLibcallName(RTLIB::SINTTOFP_I64_F128, "__floatdikf");
    setLibcallName(RTLIB::SINTTOFP_I128_F128, "__floattikf");
    setLibcallName(RTLIB::UINTTOFP_I32_F128, "__floatunsikf");
    setLibcallName(RTLIB::UINTTOFP_I64_F128, "__floatundikf");
    setLibcallName(RTLIB::UINTTOFP_I128_F128, "__floatuntikf");
    setLibcallName(RTLIB::OEQ_F128, "__eqkf2");
    setLibcallName(RTLIB::UNE_F128, "__nekf2");
    setLibcallName(RTLIB::OGE_F128, "__gekf2");
    setLibcallName(RTLIB::OLT_F128, "__ltkf2");
    setLibcallName(RTLIB::OLE_F128, "__lekf2");
    setLibcallName(RTLIB::OGT_F128, "__gtkf2");
    setLibcallName(RTLIB::UO_F128, "__unordkf2");
  }

  // A few names are different on particular architectures or environments.
  if (TT.isOSDarwin()) {
    // For f16/f32 conversions, Darwin uses the standard naming scheme,
    // instead of the gnueabi-style __gnu_*_ieee.
    // FIXME: What about other targets?
    setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

    // Some darwins have an optimized __bzero/bzero function.
    switch (TT.getArch()) {
    case Triple::x86:
    case Triple::x86_64:
      if (TT.isMacOSX() && !TT.isMacOSXVersionLT(10, 6))
        setLibcallName(RTLIB::BZERO, "__bzero");
      break;
    case Triple::aarch64:
    case Triple::aarch64_32:
      setLibcallName(RTLIB::BZERO, "bzero");
      break;
    default:
      break;
    }

    if (darwinHasSinCos(TT)) {
      setLibcallName(RTLIB::SINCOS_STRET_F32, "__sincosf_stret");
      setLibcallName(RTLIB::SINCOS_STRET_F64, "__sincos_stret");
      if (TT.isWatchABI()) {
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F32,
                              CallingConv::ARM_AAPCS_VFP);
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F64,
                              CallingConv::ARM_AAPCS_VFP);
      }
    }

    switch (TT.getOS()) {
    case Triple::MacOSX:
      if (TT.isMacOSXVersionLT(10, 9)) {
        setLibcallName(RTLIB::EXP10_F32, nullptr);
        setLibcallName(RTLIB::EXP10_F64, nullptr);
      } else {
        setLibcallName(RTLIB::EXP10_F32, "__exp10f");
        setLibcallName(RTLIB::EXP10_F64, "__exp10");
      }
      break;
    case Triple::IOS:
    case Triple::TvOS:
    case Triple::WatchOS:
    case Triple::XROS:
      if (!TT.isWatchOS() &&
          (TT.isOSVersionLT(7, 0) || (TT.isOSVersionLT(9, 0) && TT.isX86()))) {
        setLibcallName(RTLIB::EXP10_F32, nullptr);
        setLibcallName(RTLIB::EXP10_F64, nullptr);
      } else {
        setLibcallName(RTLIB::EXP10_F32, "__exp10f");
        setLibcallName(RTLIB::EXP10_F64, "__exp10");
      }
      break;
    default:
      break;
    }
  } else {
    setLibcallName(RTLIB::FPEXT_F16_F32, "__gnu_h2f_ieee");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__gnu_f2h_ieee");
  }

  if (TT.isGNUEnvironment() || TT.isOSFuchsia() ||
      (TT.isAndroid() && !TT.isAndroidVersionLT(9))) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
    setLibcallName(RTLIB::SINCOS_F80, "sincosl");
    setLibcallName(RTLIB::SINCOS_F128, "sincosl");
    setLibcallName(RTLIB::SINCOS_PPCF128, "sincosl");
  }

  if (TT.isPS()) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
  }

  if (TT.isOSOpenBSD()) {
    setLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL, nullptr);
  }

  if (TT.isOSWindows() && !TT.isOSCygMing()) {
    setLibcallName(RTLIB::LDEXP_F32, nullptr);
    setLibcallName(RTLIB::LDEXP_F80, nullptr);
    setLibcallName(RTLIB::LDEXP_F128, nullptr);
    setLibcallName(RTLIB::LDEXP_PPCF128, nullptr);

    setLibcallName(RTLIB::FREXP_F32, nullptr);
    setLibcallName(RTLIB::FREXP_F80, nullptr);
    setLibcallName(RTLIB::FREXP_F128, nullptr);
    setLibcallName(RTLIB::FREXP_PPCF128, nullptr);
  }
}

/// GetFPLibCall - Helper to return the right libcall for the given floating
/// point type, or UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPLibCall(EVT VT,
                                   RTLIB::Libcall Call_F32,
                                   RTLIB::Libcall Call_F64,
                                   RTLIB::Libcall Call_F80,
                                   RTLIB::Libcall Call_F128,
                                   RTLIB::Libcall Call_PPCF128) {
  return
    VT == MVT::f32 ? Call_F32 :
    VT == MVT::f64 ? Call_F64 :
    VT == MVT::f80 ? Call_F80 :
    VT == MVT::f128 ? Call_F128 :
    VT == MVT::ppcf128 ? Call_PPCF128 :
    RTLIB::UNKNOWN_LIBCALL;
}

/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
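/// For example, getFPEXT(MVT::f32, MVT::f64) returns FPEXT_F32_F64, while
/// getFPEXT(MVT::f64, MVT::f16) returns UNKNOWN_LIBCALL.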
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::f32)
      return FPEXT_F16_F32;
    if (RetVT == MVT::f64)
      return FPEXT_F16_F64;
    if (RetVT == MVT::f80)
      return FPEXT_F16_F80;
    if (RetVT == MVT::f128)
      return FPEXT_F16_F128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
    if (RetVT == MVT::f128)
      return FPEXT_F32_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F32_PPCF128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::f128)
      return FPEXT_F64_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F64_PPCF128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::f128)
      return FPEXT_F80_F128;
  } else if (OpVT == MVT::bf16) {
    if (RetVT == MVT::f32)
      return FPEXT_BF16_F32;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
  if (RetVT == MVT::f16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_F16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_F16;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F16;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F16;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F16;
  } else if (RetVT == MVT::bf16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_BF16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_BF16;
  } else if (RetVT == MVT::f32) {
    if (OpVT == MVT::f64)
      return FPROUND_F64_F32;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F32;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F32;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F32;
  } else if (RetVT == MVT::f64) {
    if (OpVT == MVT::f80)
      return FPROUND_F80_F64;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F64;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F64;
  } else if (RetVT == MVT::f80) {
    if (OpVT == MVT::f128)
      return FPROUND_F128_F80;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
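/// For example, getFPTOSINT(MVT::f64, MVT::i32) returns FPTOSINT_F64_I32.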
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
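/// For example, getSINTTOFP(MVT::i64, MVT::f32) returns SINTTOFP_I64_F32.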
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

RTLIB::Libcall RTLIB::getPOWI(EVT RetVT) {
  return getFPLibCall(RetVT, POWI_F32, POWI_F64, POWI_F80, POWI_F128,
                      POWI_PPCF128);
}

RTLIB::Libcall RTLIB::getLDEXP(EVT RetVT) {
  return getFPLibCall(RetVT, LDEXP_F32, LDEXP_F64, LDEXP_F80, LDEXP_F128,
                      LDEXP_PPCF128);
}

RTLIB::Libcall RTLIB::getFREXP(EVT RetVT) {
  return getFPLibCall(RetVT, FREXP_F32, FREXP_F64, FREXP_F80, FREXP_F128,
                      FREXP_PPCF128);
}

RTLIB::Libcall RTLIB::getOutlineAtomicHelper(const Libcall (&LC)[5][4],
                                             AtomicOrdering Order,
                                             uint64_t MemSize) {
  unsigned ModeN, ModelN;
  switch (MemSize) {
  case 1:
    ModeN = 0;
    break;
  case 2:
    ModeN = 1;
    break;
  case 4:
    ModeN = 2;
    break;
  case 8:
    ModeN = 3;
    break;
  case 16:
    ModeN = 4;
    break;
  default:
    return RTLIB::UNKNOWN_LIBCALL;
  }

  switch (Order) {
  case AtomicOrdering::Monotonic:
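    // ModelN selects the {_RELAX, _ACQ, _REL, _ACQ_REL} column of the libcall
    // tables built with the LCALLS macro below; Monotonic maps to the relaxed
    // variant.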
    ModelN = 0;
    break;
  case AtomicOrdering::Acquire:
    ModelN = 1;
    break;
  case AtomicOrdering::Release:
    ModelN = 2;
    break;
  case AtomicOrdering::AcquireRelease:
  case AtomicOrdering::SequentiallyConsistent:
    ModelN = 3;
    break;
  default:
    return UNKNOWN_LIBCALL;
  }

  return LC[ModeN][ModelN];
}

RTLIB::Libcall RTLIB::getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order,
                                        MVT VT) {
  if (!VT.isScalarInteger())
    return UNKNOWN_LIBCALL;
  uint64_t MemSize = VT.getScalarSizeInBits() / 8;

#define LCALLS(A, B)                                                           \
  { A##B##_RELAX, A##B##_ACQ, A##B##_REL, A##B##_ACQ_REL }
#define LCALL5(A)                                                              \
  LCALLS(A, 1), LCALLS(A, 2), LCALLS(A, 4), LCALLS(A, 8), LCALLS(A, 16)
  switch (Opc) {
  case ISD::ATOMIC_CMP_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_CAS)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_SWP)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_ADD: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDADD)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_OR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDSET)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_CLR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDCLR)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_XOR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDEOR)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  default:
    return UNKNOWN_LIBCALL;
  }
#undef LCALLS
#undef LCALL5
}

RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
#define OP_TO_LIBCALL(Name, Enum)                                              \
  case Name:                                                                   \
    switch (VT.SimpleTy) {                                                     \
    default:                                                                   \
      return UNKNOWN_LIBCALL;                                                  \
    case MVT::i8:                                                              \
      return Enum##_1;                                                         \
    case MVT::i16:                                                             \
      return Enum##_2;                                                         \
    case MVT::i32:                                                             \
      return Enum##_4;                                                         \
    case MVT::i64:                                                             \
      return Enum##_8;                                                         \
    case MVT::i128:                                                            \
      return Enum##_16;                                                        \
    }

  switch (Opc) {
    OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
    OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
  }

#undef OP_TO_LIBCALL

  return UNKNOWN_LIBCALL;
}

RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall
RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

/// InitCmpLibcallCCs - Set default comparison libcall CC.
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  std::fill(CCs, CCs + RTLIB::UNKNOWN_LIBCALL, ISD::SETCC_INVALID);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
  CCs[RTLIB::OEQ_PPCF128] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::UNE_F128] = ISD::SETNE;
  CCs[RTLIB::UNE_PPCF128] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OGE_F128] = ISD::SETGE;
  CCs[RTLIB::OGE_PPCF128] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLT_F128] = ISD::SETLT;
  CCs[RTLIB::OLT_PPCF128] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OLE_F128] = ISD::SETLE;
  CCs[RTLIB::OLE_PPCF128] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::OGT_F128] = ISD::SETGT;
  CCs[RTLIB::OGT_PPCF128] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::UO_F128] = ISD::SETNE;
  CCs[RTLIB::UO_PPCF128] = ISD::SETNE;
}

/// NOTE: The TargetMachine owns TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
  initActions();

  // Perform these initializations only once.
  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
      MaxLoadsPerMemcmp = 8;
  MaxGluedStoresPerMemcpy = 0;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
      MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
  HasMultipleConditionRegisters = false;
  HasExtractBitsInsn = false;
  JumpIsExpensive = JumpIsExpensiveOverride;
  PredictableSelectIsExpensive = false;
  EnableExtLdPromotion = false;
  StackPointerRegisterToSaveRestore = 0;
  BooleanContents = UndefinedBooleanContent;
  BooleanFloatContents = UndefinedBooleanContent;
  BooleanVectorContents = UndefinedBooleanContent;
  SchedPreferenceInfo = Sched::ILP;
  GatherAllAliasesMaxDepth = 18;
  IsStrictFPEnabled = DisableStrictNodeMutation;
  MaxBytesForAlignment = 0;
  MaxAtomicSizeInBitsSupported = 0;

  // Assume that even with libcalls, no target supports wider than 128 bit
  // division.
  MaxDivRemBitWidthSupported = 128;

  MaxLargeFPConvertBitWidthSupported = llvm::IntegerType::MAX_INT_BITS;

  MinCmpXchgSizeInBits = 0;
  SupportsUnalignedAtomics = false;

  std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames),
            nullptr);

  InitLibcalls(TM.getTargetTriple());
  InitCmpLibcallCCs(CmpLibcallCCs);
}

void TargetLoweringBase::initActions() {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));
  std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
  std::fill(std::begin(TargetDAGCombineArray),
            std::end(TargetDAGCombineArray), 0);

  // Let extending atomic loads be unsupported by default.
  for (MVT ValVT : MVT::all_valuetypes())
    for (MVT MemVT : MVT::all_valuetypes())
      setAtomicLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD}, ValVT, MemVT,
                             Expand);

  // We're somewhat special-casing MVT::i2 and MVT::i4. Ideally we want to
  // remove this and targets should individually set these types if not legal.
  for (ISD::NodeType NT : enum_seq(ISD::DELETED_NODE, ISD::BUILTIN_OP_END,
                                   force_iteration_on_noniterable_enum)) {
    for (MVT VT : {MVT::i2, MVT::i4})
      OpActions[(unsigned)VT.SimpleTy][NT] = Expand;
  }
  for (MVT AVT : MVT::all_valuetypes()) {
    for (MVT VT : {MVT::i2, MVT::i4, MVT::v128i2, MVT::v64i4}) {
      setTruncStoreAction(AVT, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, AVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, AVT, VT, Expand);
    }
  }
  for (unsigned IM = (unsigned)ISD::PRE_INC;
       IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
    for (MVT VT : {MVT::i2, MVT::i4}) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
      setIndexedMaskedLoadAction(IM, VT, Expand);
      setIndexedMaskedStoreAction(IM, VT, Expand);
    }
  }

  for (MVT VT : MVT::fp_valuetypes()) {
    MVT IntVT = MVT::getIntegerVT(VT.getFixedSizeInBits());
    if (IntVT.isValid()) {
      setOperationAction(ISD::ATOMIC_SWAP, VT, Promote);
      AddPromotedToType(ISD::ATOMIC_SWAP, VT, IntVT);
    }
  }

  // Set default actions for various operations.
  for (MVT VT : MVT::all_valuetypes()) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
      setIndexedMaskedLoadAction(IM, VT, Expand);
      setIndexedMaskedStoreAction(IM, VT, Expand);
    }

    // Most backends expect to see the node which just returns the value
    // loaded.
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);

    // These operations default to expand.
    setOperationAction({ISD::FGETSIGN, ISD::CONCAT_VECTORS,
                        ISD::FMINNUM, ISD::FMAXNUM,
                        ISD::FMINNUM_IEEE, ISD::FMAXNUM_IEEE,
                        ISD::FMINIMUM, ISD::FMAXIMUM,
                        ISD::FMAD, ISD::SMIN,
                        ISD::SMAX, ISD::UMIN,
                        ISD::UMAX, ISD::ABS,
                        ISD::FSHL, ISD::FSHR,
                        ISD::SADDSAT, ISD::UADDSAT,
                        ISD::SSUBSAT, ISD::USUBSAT,
                        ISD::SSHLSAT, ISD::USHLSAT,
                        ISD::SMULFIX, ISD::SMULFIXSAT,
                        ISD::UMULFIX, ISD::UMULFIXSAT,
                        ISD::SDIVFIX, ISD::SDIVFIXSAT,
                        ISD::UDIVFIX, ISD::UDIVFIXSAT,
                        ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT,
                        ISD::IS_FPCLASS},
                       VT, Expand);

    // Overflow operations default to expand.
    setOperationAction({ISD::SADDO, ISD::SSUBO, ISD::UADDO, ISD::USUBO,
                        ISD::SMULO, ISD::UMULO},
                       VT, Expand);

    // Carry-using overflow operations default to expand.
    setOperationAction({ISD::UADDO_CARRY, ISD::USUBO_CARRY, ISD::SETCCCARRY,
                        ISD::SADDO_CARRY, ISD::SSUBO_CARRY},
                       VT, Expand);

    // ADDC/ADDE/SUBC/SUBE default to expand.
    setOperationAction({ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}, VT,
                       Expand);

    // [US]CMP default to expand.
    setOperationAction({ISD::UCMP, ISD::SCMP}, VT, Expand);

    // Halving adds.
    setOperationAction(
        {ISD::AVGFLOORS, ISD::AVGFLOORU, ISD::AVGCEILS, ISD::AVGCEILU}, VT,
        Expand);

    // Absolute difference.
    setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Expand);

    // These default to Expand so they will be expanded to CTLZ/CTTZ by
    // default.
    setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
                       Expand);

    setOperationAction({ISD::BITREVERSE, ISD::PARITY}, VT, Expand);

    // These library functions default to expand.
    setOperationAction({ISD::FROUND, ISD::FPOWI, ISD::FLDEXP, ISD::FFREXP}, VT,
                       Expand);

    // These operations default to expand for vector types.
    if (VT.isVector())
      setOperationAction(
          {ISD::FCOPYSIGN, ISD::SIGN_EXTEND_INREG,
           ISD::ANY_EXTEND_VECTOR_INREG, ISD::SIGN_EXTEND_VECTOR_INREG,
           ISD::ZERO_EXTEND_VECTOR_INREG, ISD::SPLAT_VECTOR, ISD::LRINT,
           ISD::LLRINT, ISD::FTAN},
          VT, Expand);

    // Constrained floating-point operations default to expand.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
    setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
#include "llvm/IR/ConstrainedOps.def"

    // For most targets @llvm.get.dynamic.area.offset just returns 0.
    setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);

    // Vector reduction operations default to expand.
    setOperationAction(
        {ISD::VECREDUCE_FADD, ISD::VECREDUCE_FMUL, ISD::VECREDUCE_ADD,
         ISD::VECREDUCE_MUL, ISD::VECREDUCE_AND, ISD::VECREDUCE_OR,
         ISD::VECREDUCE_XOR, ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
         ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN, ISD::VECREDUCE_FMAX,
         ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAXIMUM, ISD::VECREDUCE_FMINIMUM,
         ISD::VECREDUCE_SEQ_FADD, ISD::VECREDUCE_SEQ_FMUL},
        VT, Expand);

    // Named vector shuffles default to expand.
    setOperationAction(ISD::VECTOR_SPLICE, VT, Expand);

    // VP operations default to expand.
#define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...)                                   \
    setOperationAction(ISD::SDOPC, VT, Expand);
#include "llvm/IR/VPIntrinsics.def"

    // FP environment operations default to expand.
    setOperationAction(ISD::GET_FPENV, VT, Expand);
    setOperationAction(ISD::SET_FPENV, VT, Expand);
    setOperationAction(ISD::RESET_FPENV, VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);

  // Most targets also ignore the @llvm.readsteadycounter intrinsic.
  setOperationAction(ISD::READSTEADYCOUNTER, MVT::i64, Expand);

  // ConstantFP nodes default to expand. Targets can either change this to
  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
  // to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP,
                     {MVT::bf16, MVT::f16, MVT::f32, MVT::f64, MVT::f80,
                      MVT::f128},
                     Expand);

  // These library functions default to expand.
  setOperationAction({ISD::FCBRT, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
                      ISD::FEXP, ISD::FEXP2, ISD::FEXP10, ISD::FFLOOR,
                      ISD::FNEARBYINT, ISD::FCEIL, ISD::FRINT, ISD::FTRUNC,
                      ISD::LROUND, ISD::LLROUND, ISD::LRINT, ISD::LLRINT,
                      ISD::FROUNDEVEN, ISD::FTAN},
                     {MVT::f32, MVT::f64, MVT::f128}, Expand);

  setOperationAction(ISD::FTAN, MVT::f16, Promote);

  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
  // here is to inform the DAG Legalizer to replace DEBUGTRAP with TRAP.
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);

  setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);

  setOperationAction(ISD::GET_FPENV_MEM, MVT::Other, Expand);
  setOperationAction(ISD::SET_FPENV_MEM, MVT::Other, Expand);

  for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
    setOperationAction(ISD::GET_FPMODE, VT, Expand);
    setOperationAction(ISD::SET_FPMODE, VT, Expand);
  }
  setOperationAction(ISD::RESET_FPMODE, MVT::Other, Expand);

  // This one by default will call __clear_cache unless the target
  // wants something different.
  setOperationAction(ISD::CLEAR_CACHE, MVT::Other, LibCall);
}

MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
                                               EVT) const {
  return MVT::getIntegerVT(DL.getPointerSizeInBits(0));
}

EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy,
                                         const DataLayout &DL) const {
  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
  if (LHSTy.isVector())
    return LHSTy;
  MVT ShiftVT = getScalarShiftAmountTy(DL, LHSTy);
  // If any possible shift value won't fit in the preferred type, just use
  // something safe. Assume it will be legalized when the shift is expanded.
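  // (For example, legalizing an i128 shift requires Log2_32_Ceil(128) == 7
  // bits of shift amount.)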
  if (ShiftVT.getSizeInBits() < Log2_32_Ceil(LHSTy.getSizeInBits()))
    ShiftVT = MVT::i32;
  assert(ShiftVT.getSizeInBits() >= Log2_32_Ceil(LHSTy.getSizeInBits()) &&
         "ShiftVT is still too small!");
  return ShiftVT;
}

bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
  assert(isTypeLegal(VT));
  switch (Op) {
  default:
    return false;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    return true;
  }
}

bool TargetLoweringBase::isFreeAddrSpaceCast(unsigned SrcAS,
                                             unsigned DestAS) const {
  return TM.isNoopAddrSpaceCast(SrcAS, DestAS);
}

unsigned TargetLoweringBase::getBitWidthForCttzElements(
    Type *RetTy, ElementCount EC, bool ZeroIsPoison,
    const ConstantRange *VScaleRange) const {
  // Find the smallest "sensible" element type to use for the expansion.
  ConstantRange CR(APInt(64, EC.getKnownMinValue()));
  if (EC.isScalable())
    CR = CR.umul_sat(*VScaleRange);

  if (ZeroIsPoison)
    CR = CR.subtract(APInt(64, 1));

  unsigned EltWidth = RetTy->getScalarSizeInBits();
  EltWidth = std::min(EltWidth, (unsigned)CR.getActiveBits());
  EltWidth = std::max(llvm::bit_ceil(EltWidth), (unsigned)8);

  return EltWidth;
}

void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
  // If the command-line option was specified, ignore this request.
  if (!JumpIsExpensiveOverride.getNumOccurrences())
    JumpIsExpensive = isExpensive;
}

TargetLoweringBase::LegalizeKind
TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
  // If this is a simple type, use the ComputeRegisterProp mechanism.
  if (VT.isSimple()) {
    MVT SVT = VT.getSimpleVT();
    assert((unsigned)SVT.SimpleTy < std::size(TransformToType));
    MVT NVT = TransformToType[SVT.SimpleTy];
    LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);

    assert((LA == TypeLegal || LA == TypeSoftenFloat ||
            LA == TypeSoftPromoteHalf ||
            (NVT.isVector() ||
             ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)) &&
           "Promote may not follow Expand or Promote");

    if (LA == TypeSplitVector)
      return LegalizeKind(LA, EVT(SVT).getHalfNumVectorElementsVT(Context));
    if (LA == TypeScalarizeVector)
      return LegalizeKind(LA, SVT.getVectorElementType());
    return LegalizeKind(LA, NVT);
  }

  // Handle Extended Scalar Types.
  if (!VT.isVector()) {
    assert(VT.isInteger() && "Float types must be simple");
    unsigned BitSize = VT.getSizeInBits();
    // First promote to a power-of-two size, then expand if necessary.
    if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
      EVT NVT = VT.getRoundIntegerType(Context);
      assert(NVT != VT && "Unable to round integer VT");
      LegalizeKind NextStep = getTypeConversion(Context, NVT);
      // Avoid multi-step promotion.
      if (NextStep.first == TypePromoteInteger)
        return NextStep;
      // Return rounded integer type.
      return LegalizeKind(TypePromoteInteger, NVT);
    }

    return LegalizeKind(TypeExpandInteger,
                        EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
  }

  // Handle vector types.
  ElementCount NumElts = VT.getVectorElementCount();
  EVT EltVT = VT.getVectorElementType();

  // Vectors with only one element are always scalarized.
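  // For example, <1 x i32> becomes a plain i32.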
  if (NumElts.isScalar())
    return LegalizeKind(TypeScalarizeVector, EltVT);

  // Try to widen vector elements until the element type is a power of two and
  // promote it to a legal type later on, for example:
  // <3 x i8> -> <4 x i8> -> <4 x i32>
  if (EltVT.isInteger()) {
    // Vectors with a number of elements that is not a power of two are always
    // widened, for example <3 x i8> -> <4 x i8>.
    if (!VT.isPow2VectorType()) {
      NumElts = NumElts.coefficientNextPowerOf2();
      EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Examine the element type.
    LegalizeKind LK = getTypeConversion(Context, EltVT);

    // If type is to be expanded, split the vector.
    // <4 x i140> -> <2 x i140>
    if (LK.first == TypeExpandInteger) {
      if (VT.getVectorElementCount().isScalable())
        return LegalizeKind(TypeScalarizeScalableVector, EltVT);
      return LegalizeKind(TypeSplitVector,
                          VT.getHalfNumVectorElementsVT(Context));
    }

    // Promote the integer element types until a legal vector type is found
    // or until the element integer type is too big. If a legal type was not
    // found, fall back to the usual mechanism of widening/splitting the
    // vector.
    EVT OldEltVT = EltVT;
    while (true) {
      // Increase the bitwidth of the element to the next pow-of-two
      // (which is greater than 8 bits).
      EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
                  .getRoundIntegerType(Context);

      // Stop trying when getting a non-simple element type.
      // Note that vector elements may be greater than legal vector element
      // types. Example: X86 XMM registers hold 64-bit elements on 32-bit
      // systems.
      if (!EltVT.isSimple())
        break;

      // Build a new vector type and check if it is legal.
      MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      // Found a legal promoted vector type.
      if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
        return LegalizeKind(TypePromoteInteger,
                            EVT::getVectorVT(Context, EltVT, NumElts));
    }

    // Reset the type to the unexpanded type if we did not find a legal vector
    // type with a promoted vector element type.
    EltVT = OldEltVT;
  }

  // Try to widen the vector until a legal type is found.
  // If there is no wider legal type, split the vector.
  while (true) {
    // Round up to the next power of 2.
    NumElts = NumElts.coefficientNextPowerOf2();

    // If there is no simple vector type with this many elements then there
    // cannot be a larger legal vector type. Note that this assumes that
    // there are no skipped intermediate vector types in the simple types.
    if (!EltVT.isSimple())
      break;
    MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
    if (LargerVector == MVT())
      break;

    // If this type is legal then widen the vector.
    if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
      return LegalizeKind(TypeWidenVector, LargerVector);
  }

  // Widen odd vectors to next power of two.
  if (!VT.isPow2VectorType()) {
    EVT NVT = VT.getPow2VectorType(Context);
    return LegalizeKind(TypeWidenVector, NVT);
  }

  if (VT.getVectorElementCount() == ElementCount::getScalable(1))
    return LegalizeKind(TypeScalarizeScalableVector, EltVT);

  // Vectors with illegal element types are expanded.
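  // Splitting halves the element count; type legalization re-queries this
  // function on the halves until they become legal or scalar.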
  EVT NVT = EVT::getVectorVT(Context, EltVT,
                             VT.getVectorElementCount().divideCoefficientBy(2));
  return LegalizeKind(TypeSplitVector, NVT);
}

static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT,
                                          TargetLoweringBase *TLI) {
  // Figure out the right, legal destination reg to copy into.
  ElementCount EC = VT.getVectorElementCount();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // Scalable vectors cannot be scalarized, so splitting or widening is
  // required.
  if (VT.isScalableVector() && !isPowerOf2_32(EC.getKnownMinValue()))
    llvm_unreachable(
        "Splitting or widening of non-power-of-2 MVTs is not implemented.");

  // FIXME: We don't support non-power-of-2-sized vectors for now.
  // Ideally we could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EC.getKnownMinValue())) {
    // Split EC to unit size (scalable property is preserved).
    NumVectorRegs = EC.getKnownMinValue();
    EC = ElementCount::getFixed(1);
  }

  // Divide the input until we get to a supported size. This will
  // always end up with an EC that represents a scalar or a scalable
  // scalar.
  while (EC.getKnownMinValue() > 1 &&
         !TLI->isTypeLegal(MVT::getVectorVT(EltTy, EC))) {
    EC = EC.divideCoefficientBy(2);
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, EC);
  if (!TLI->isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  unsigned LaneSizeInBits = NewVT.getScalarSizeInBits();

  // Convert sizes such as i33 to i64.
  LaneSizeInBits = llvm::bit_ceil(LaneSizeInBits);

  MVT DestVT = TLI->getRegisterType(NewVT);
  RegisterVT = DestVT;
  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs * (LaneSizeInBits / DestVT.getScalarSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

/// isLegalRC - Return true if any of the value types that can be represented
/// by the specified register class is legal.
bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
                                   const TargetRegisterClass &RC) const {
  for (const auto *I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
    if (isTypeLegal(*I))
      return true;
  return false;
}

/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineBasicBlock *
TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
                                   MachineBasicBlock *MBB) const {
  MachineInstr *MI = &InitialMI;
  MachineFunction &MF = *MI->getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // We're handling multiple types of operands here:
  // PATCHPOINT MetaArgs - live-in, read only, direct
  // STATEPOINT Deopt Spill - live-through, read only, indirect
  // STATEPOINT Deopt Alloca - live-through, read only, direct
  // (We're currently conservative and mark the deopt slots read/write in
  // practice.)
  // STATEPOINT GC Spill - live-through, read/write, indirect
  // STATEPOINT GC Alloca - live-through, read/write, direct
  // The live-in vs live-through is handled already (the live-through ones are
  // all stack slots), but we need to handle the different types of stackmap
  // operands and memory effects here.

  if (llvm::none_of(MI->operands(),
                    [](MachineOperand &Operand) { return Operand.isFI(); }))
    return MBB;

  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());

  // Inherit previous memory operands.
  MIB.cloneMemRefs(*MI);

  for (unsigned i = 0; i < MI->getNumOperands(); ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isFI()) {
      // Index of the Def operand this Use is tied to.
      // Since Defs come before Uses, if this Use is tied, then the
      // index of the Def must be smaller than the index of that Use.
      // Also, Defs preserve their position in the new MI.
      unsigned TiedTo = i;
      if (MO.isReg() && MO.isTied())
        TiedTo = MI->findTiedOperandIdx(i);
      MIB.add(MO);
      if (TiedTo < i)
        MIB->tieOperands(TiedTo, MIB->getNumOperands() - 1);
      continue;
    }

    // foldMemoryOperand builds a new MI after replacing a single FI operand
    // with the canonical set of five x86 addressing-mode operands.
    int FI = MO.getIndex();

    // Add frame index operands recognized by stackmaps.cpp
    if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
      // indirect-mem-ref tag, size, #FI, offset.
      // Used for spills inserted by StatepointLowering. This codepath is not
      // used for patchpoints/stackmaps at all; for those, spilling is done
      // via the foldMemoryOperand callback only.
      assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(MFI.getObjectSize(FI));
      MIB.add(MO);
      MIB.addImm(0);
    } else {
      // direct-mem-ref tag, #FI, offset.
      // Used by patchpoint, and direct alloca arguments to statepoints.
      MIB.addImm(StackMaps::DirectMemRefOp);
      MIB.add(MO);
      MIB.addImm(0);
    }

    assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");

    // Add a new memory operand for this FI.
    assert(MFI.getObjectOffset(FI) != -1);

    // Note: STATEPOINT MMOs are added during SelectionDAG. STACKMAP and
    // PATCHPOINT should be updated to do the same. (TODO)
    if (MI->getOpcode() != TargetOpcode::STATEPOINT) {
      auto Flags = MachineMemOperand::MOLoad;
      MachineMemOperand *MMO = MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, FI), Flags,
          MF.getDataLayout().getPointerSize(), MFI.getObjectAlign(FI));
      MIB->addMemOperand(MF, MMO);
    }
  }
  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  MI->eraseFromParent();
  return MBB;
}

/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
// This function is in TargetLowering because it uses RegClassForVT which would
// need to be moved to TargetRegisterInfo and would necessitate moving
// isTypeLegal over as well - a massive change that would just require
// TargetLowering having a TargetRegisterInfo class member that it would use.
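// For example, on x86-64 the representative class for i8, i16, and i32 is
// GR64, while on i386 it is GR32 (see the example in
// computeRegisterProperties below).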
std::pair<const TargetRegisterClass *, uint8_t>
TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                            MVT VT) const {
  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
  if (!RC)
    return std::make_pair(RC, 0);

  // Compute the set of all super-register classes.
  BitVector SuperRegRC(TRI->getNumRegClasses());
  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
    SuperRegRC.setBitsInMask(RCI.getMask());

  // Find the first legal register class with the largest spill size.
  const TargetRegisterClass *BestRC = RC;
  for (unsigned i : SuperRegRC.set_bits()) {
    const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
    // We want the largest possible spill size.
    if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
      continue;
    if (!isLegalRC(*TRI, *SuperRC))
      continue;
    BestRC = SuperRC;
  }
  return std::make_pair(BestRC, 1);
}

/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLoweringBase::computeRegisterProperties(
    const TargetRegisterInfo *TRI) {
  // Everything defaults to needing one register.
  for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
    NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  }
  // ...except isVoid, which doesn't need any registers.
  NumRegistersForVT[MVT::isVoid] = 0;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
  for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  for (unsigned ExpandedReg = LargestIntReg + 1;
       ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
    NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
    ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
                                   TypeExpandInteger);
  }

  // Inspect all of the ValueTypes smaller than the largest integer
  // register to see which ones need promotion.
  unsigned LegalIntReg = LargestIntReg;
  for (unsigned IntReg = LargestIntReg - 1;
       IntReg >= (unsigned)MVT::i1; --IntReg) {
    MVT IVT = (MVT::SimpleValueType)IntReg;
    if (isTypeLegal(IVT)) {
      LegalIntReg = IntReg;
    } else {
      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
          (MVT::SimpleValueType)LegalIntReg;
      ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
    }
  }

  // ppcf128 type is really two f64's.
  if (!isTypeLegal(MVT::ppcf128)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
      TransformToType[MVT::ppcf128] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
    } else {
      NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
      RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
      TransformToType[MVT::ppcf128] = MVT::i128;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
    }
  }

  // Decide how to handle f128. If the target does not have native f128
  // support, expand it to i128 and we will be generating soft float library
  // calls.
  if (!isTypeLegal(MVT::f128)) {
    NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
    RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
    TransformToType[MVT::f128] = MVT::i128;
    ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
  }

  // Decide how to handle f80. If the target does not have native f80
  // support, expand it to i96 and we will be generating soft float library
  // calls.
  if (!isTypeLegal(MVT::f80)) {
    NumRegistersForVT[MVT::f80] = 3*NumRegistersForVT[MVT::i32];
    RegisterTypeForVT[MVT::f80] = RegisterTypeForVT[MVT::i32];
    TransformToType[MVT::f80] = MVT::i32;
    ValueTypeActions.setTypeAction(MVT::f80, TypeSoftenFloat);
  }

  // Decide how to handle f64. If the target does not have native f64
  // support, expand it to i64 and we will be generating soft float library
  // calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
  }

  // Decide how to handle f32. If the target does not have native f32
  // support, expand it to i32 and we will be generating soft float library
  // calls.
  if (!isTypeLegal(MVT::f32)) {
    NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
    RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
    TransformToType[MVT::f32] = MVT::i32;
    ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
  }

  // Decide how to handle f16. If the target does not have native f16 support,
  // promote it to f32, because there are no f16 library calls (except for
  // conversions).
  if (!isTypeLegal(MVT::f16)) {
    // Allow targets to control how we legalize half.
    bool SoftPromoteHalfType = softPromoteHalfType();
    bool UseFPRegsForHalfType = !SoftPromoteHalfType || useFPRegsForHalfType();

    if (!UseFPRegsForHalfType) {
      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
    } else {
      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
    }
    TransformToType[MVT::f16] = MVT::f32;
    if (SoftPromoteHalfType) {
      ValueTypeActions.setTypeAction(MVT::f16, TypeSoftPromoteHalf);
    } else {
      ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
    }
  }

  // Decide how to handle bf16. If the target does not have native bf16
  // support, promote it to f32, because there are no bf16 library calls
  // (except for converting from f32 to bf16).
1558 if (!isTypeLegal(MVT::bf16)) { 1559 NumRegistersForVT[MVT::bf16] = NumRegistersForVT[MVT::f32]; 1560 RegisterTypeForVT[MVT::bf16] = RegisterTypeForVT[MVT::f32]; 1561 TransformToType[MVT::bf16] = MVT::f32; 1562 ValueTypeActions.setTypeAction(MVT::bf16, TypeSoftPromoteHalf); 1563 } 1564 1565 // Loop over all of the vector value types to see which need transformations. 1566 for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE; 1567 i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) { 1568 MVT VT = (MVT::SimpleValueType) i; 1569 if (isTypeLegal(VT)) 1570 continue; 1571 1572 MVT EltVT = VT.getVectorElementType(); 1573 ElementCount EC = VT.getVectorElementCount(); 1574 bool IsLegalWiderType = false; 1575 bool IsScalable = VT.isScalableVector(); 1576 LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT); 1577 switch (PreferredAction) { 1578 case TypePromoteInteger: { 1579 MVT::SimpleValueType EndVT = IsScalable ? 1580 MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE : 1581 MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE; 1582 // Try to promote the elements of integer vectors. If no legal 1583 // promotion was found, fall through to the widen-vector method. 1584 for (unsigned nVT = i + 1; 1585 (MVT::SimpleValueType)nVT <= EndVT; ++nVT) { 1586 MVT SVT = (MVT::SimpleValueType) nVT; 1587 // Promote vectors of integers to vectors with the same number 1588 // of elements, with a wider element type. 1589 if (SVT.getScalarSizeInBits() > EltVT.getFixedSizeInBits() && 1590 SVT.getVectorElementCount() == EC && isTypeLegal(SVT)) { 1591 TransformToType[i] = SVT; 1592 RegisterTypeForVT[i] = SVT; 1593 NumRegistersForVT[i] = 1; 1594 ValueTypeActions.setTypeAction(VT, TypePromoteInteger); 1595 IsLegalWiderType = true; 1596 break; 1597 } 1598 } 1599 if (IsLegalWiderType) 1600 break; 1601 [[fallthrough]]; 1602 } 1603 1604 case TypeWidenVector: 1605 if (isPowerOf2_32(EC.getKnownMinValue())) { 1606 // Try to widen the vector. 1607 for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) { 1608 MVT SVT = (MVT::SimpleValueType) nVT; 1609 if (SVT.getVectorElementType() == EltVT && 1610 SVT.isScalableVector() == IsScalable && 1611 SVT.getVectorElementCount().getKnownMinValue() > 1612 EC.getKnownMinValue() && 1613 isTypeLegal(SVT)) { 1614 TransformToType[i] = SVT; 1615 RegisterTypeForVT[i] = SVT; 1616 NumRegistersForVT[i] = 1; 1617 ValueTypeActions.setTypeAction(VT, TypeWidenVector); 1618 IsLegalWiderType = true; 1619 break; 1620 } 1621 } 1622 if (IsLegalWiderType) 1623 break; 1624 } else { 1625 // Only widen to the next power of 2 to keep consistency with EVT. 1626 MVT NVT = VT.getPow2VectorType(); 1627 if (isTypeLegal(NVT)) { 1628 TransformToType[i] = NVT; 1629 ValueTypeActions.setTypeAction(VT, TypeWidenVector); 1630 RegisterTypeForVT[i] = NVT; 1631 NumRegistersForVT[i] = 1; 1632 break; 1633 } 1634 } 1635 [[fallthrough]]; 1636 1637 case TypeSplitVector: 1638 case TypeScalarizeVector: { 1639 MVT IntermediateVT; 1640 MVT RegisterVT; 1641 unsigned NumIntermediates; 1642 unsigned NumRegisters = getVectorTypeBreakdownMVT(VT, IntermediateVT, 1643 NumIntermediates, RegisterVT, this); 1644 NumRegistersForVT[i] = NumRegisters; 1645 assert(NumRegistersForVT[i] == NumRegisters && 1646 "NumRegistersForVT size cannot represent NumRegisters!"); 1647 RegisterTypeForVT[i] = RegisterVT; 1648 1649 MVT NVT = VT.getPow2VectorType(); 1650 if (NVT == VT) { 1651 // Type is already a power of 2. The default action is to split. 
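// MVT::Other acts as a sentinel here: there is no single type to transform
// to, because the value will be split or scalarized instead.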
1652 TransformToType[i] = MVT::Other; 1653 if (PreferredAction == TypeScalarizeVector) 1654 ValueTypeActions.setTypeAction(VT, TypeScalarizeVector); 1655 else if (PreferredAction == TypeSplitVector) 1656 ValueTypeActions.setTypeAction(VT, TypeSplitVector); 1657 else if (EC.getKnownMinValue() > 1) 1658 ValueTypeActions.setTypeAction(VT, TypeSplitVector); 1659 else 1660 ValueTypeActions.setTypeAction(VT, EC.isScalable() 1661 ? TypeScalarizeScalableVector 1662 : TypeScalarizeVector); 1663 } else { 1664 TransformToType[i] = NVT; 1665 ValueTypeActions.setTypeAction(VT, TypeWidenVector); 1666 } 1667 break; 1668 } 1669 default: 1670 llvm_unreachable("Unknown vector legalization action!"); 1671 } 1672 } 1673 1674 // Determine the 'representative' register class for each value type. 1675 // A representative register class is the largest (meaning one which is 1676 // not a sub-register class) legal register class for 1677 // a group of value types. For example, on i386, the representative class for 1678 // i8, i16, and i32 would be GR32; on x86_64 it is GR64. 1679 for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) { 1680 const TargetRegisterClass* RRC; 1681 uint8_t Cost; 1682 std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i); 1683 RepRegClassForVT[i] = RRC; 1684 RepRegClassCostForVT[i] = Cost; 1685 } 1686 } 1687 1688 EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &, 1689 EVT VT) const { 1690 assert(!VT.isVector() && "No default SetCC type for vectors!"); 1691 return getPointerTy(DL).SimpleTy; 1692 } 1693 1694 MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const { 1695 return MVT::i32; // return the default value 1696 } 1697 1698 /// getVectorTypeBreakdown - Vector types are broken down into some number of 1699 /// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32 1700 /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack. 1701 /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86. 1702 /// 1703 /// This method returns the number of registers needed, and the VT for each 1704 /// register. It also returns the VT and quantity of the intermediate values 1705 /// before they are promoted/expanded. 1706 unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, 1707 EVT VT, EVT &IntermediateVT, 1708 unsigned &NumIntermediates, 1709 MVT &RegisterVT) const { 1710 ElementCount EltCnt = VT.getVectorElementCount(); 1711 1712 // If there is a wider vector type with the same element type as this one, 1713 // or a promoted vector type that has the same number of elements which 1714 // are wider, then we should convert to that legal vector type. 1715 // This handles things like <2 x float> -> <4 x float> and 1716 // <4 x i1> -> <4 x i32>. 1717 LegalizeTypeAction TA = getTypeAction(Context, VT); 1718 if (!EltCnt.isScalar() && 1719 (TA == TypeWidenVector || TA == TypePromoteInteger)) { 1720 EVT RegisterEVT = getTypeToTransformTo(Context, VT); 1721 if (isTypeLegal(RegisterEVT)) { 1722 IntermediateVT = RegisterEVT; 1723 RegisterVT = RegisterEVT.getSimpleVT(); 1724 NumIntermediates = 1; 1725 return 1; 1726 } 1727 } 1728 1729 // Figure out the right, legal destination reg to copy into. 1730 EVT EltTy = VT.getVectorElementType(); 1731 1732 unsigned NumVectorRegs = 1; 1733 1734 // Scalable vectors cannot be scalarized, so handle the legalization of these 1735 // types as is done elsewhere in SelectionDAG.
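// For example, on a target where <vscale x 4 x i32> is the widest legal
// scalable type, a <vscale x 16 x i32> value would be delivered as 4 such
// parts.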
1736 if (EltCnt.isScalable()) { 1737 LegalizeKind LK; 1738 EVT PartVT = VT; 1739 do { 1740 // Iterate until we've found a legal (part) type to hold VT. 1741 LK = getTypeConversion(Context, PartVT); 1742 PartVT = LK.second; 1743 } while (LK.first != TypeLegal); 1744 1745 if (!PartVT.isVector()) { 1746 report_fatal_error( 1747 "Don't know how to legalize this scalable vector type"); 1748 } 1749 1750 NumIntermediates = 1751 divideCeil(VT.getVectorElementCount().getKnownMinValue(), 1752 PartVT.getVectorElementCount().getKnownMinValue()); 1753 IntermediateVT = PartVT; 1754 RegisterVT = getRegisterType(Context, IntermediateVT); 1755 return NumIntermediates; 1756 } 1757 1758 // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally 1759 // we could break down into LHS/RHS like LegalizeDAG does. 1760 if (!isPowerOf2_32(EltCnt.getKnownMinValue())) { 1761 NumVectorRegs = EltCnt.getKnownMinValue(); 1762 EltCnt = ElementCount::getFixed(1); 1763 } 1764 1765 // Divide the input until we get to a supported size. This will always 1766 // end with a scalar if the target doesn't support vectors. 1767 while (EltCnt.getKnownMinValue() > 1 && 1768 !isTypeLegal(EVT::getVectorVT(Context, EltTy, EltCnt))) { 1769 EltCnt = EltCnt.divideCoefficientBy(2); 1770 NumVectorRegs <<= 1; 1771 } 1772 1773 NumIntermediates = NumVectorRegs; 1774 1775 EVT NewVT = EVT::getVectorVT(Context, EltTy, EltCnt); 1776 if (!isTypeLegal(NewVT)) 1777 NewVT = EltTy; 1778 IntermediateVT = NewVT; 1779 1780 MVT DestVT = getRegisterType(Context, NewVT); 1781 RegisterVT = DestVT; 1782 1783 if (EVT(DestVT).bitsLT(NewVT)) { // Value is expanded, e.g. i64 -> i16. 1784 TypeSize NewVTSize = NewVT.getSizeInBits(); 1785 // Convert sizes such as i33 to i64. 1786 if (!llvm::has_single_bit<uint32_t>(NewVTSize.getKnownMinValue())) 1787 NewVTSize = NewVTSize.coefficientNextPowerOf2(); 1788 return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits()); 1789 } 1790 1791 // Otherwise, promotion or legal types use the same number of registers as 1792 // the vector decimated to the appropriate level. 1793 return NumVectorRegs; 1794 } 1795 1796 bool TargetLoweringBase::isSuitableForJumpTable(const SwitchInst *SI, 1797 uint64_t NumCases, 1798 uint64_t Range, 1799 ProfileSummaryInfo *PSI, 1800 BlockFrequencyInfo *BFI) const { 1801 // FIXME: This function checks the maximum table size and density, but the 1802 // minimum size is not checked. It would be nice if the minimum size check 1803 // were also combined into this function. Currently, the minimum size check is 1804 // performed in findJumpTable() in SelectionDAGBuilder and 1805 // getEstimatedNumberOfCaseClusters() in BasicTTIImpl. 1806 const bool OptForSize = 1807 SI->getParent()->getParent()->hasOptSize() || 1808 llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI); 1809 const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize); 1810 const unsigned MaxJumpTableSize = getMaximumJumpTableSize(); 1811 1812 // Check whether the number of cases is small enough and 1813 // the range is dense enough for a jump table. 1814 return (OptForSize || Range <= MaxJumpTableSize) && 1815 (NumCases * 100 >= Range * MinDensity); 1816 } 1817 1818 MVT TargetLoweringBase::getPreferredSwitchConditionType(LLVMContext &Context, 1819 EVT ConditionVT) const { 1820 return getRegisterType(Context, ConditionVT); 1821 } 1822 1823 /// Get the EVTs and ArgFlags collections that represent the legalized return 1824 /// type of the given function.
This does not require a DAG or a return value, 1825 /// and is suitable for use before any DAGs for the function are constructed. 1826 /// TODO: Move this out of TargetLowering.cpp. 1827 void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType, 1828 AttributeList attr, 1829 SmallVectorImpl<ISD::OutputArg> &Outs, 1830 const TargetLowering &TLI, const DataLayout &DL) { 1831 SmallVector<EVT, 4> ValueVTs; 1832 ComputeValueVTs(TLI, DL, ReturnType, ValueVTs); 1833 unsigned NumValues = ValueVTs.size(); 1834 if (NumValues == 0) return; 1835 1836 for (unsigned j = 0, f = NumValues; j != f; ++j) { 1837 EVT VT = ValueVTs[j]; 1838 ISD::NodeType ExtendKind = ISD::ANY_EXTEND; 1839 1840 if (attr.hasRetAttr(Attribute::SExt)) 1841 ExtendKind = ISD::SIGN_EXTEND; 1842 else if (attr.hasRetAttr(Attribute::ZExt)) 1843 ExtendKind = ISD::ZERO_EXTEND; 1844 1845 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) 1846 VT = TLI.getTypeForExtReturn(ReturnType->getContext(), VT, ExtendKind); 1847 1848 unsigned NumParts = 1849 TLI.getNumRegistersForCallingConv(ReturnType->getContext(), CC, VT); 1850 MVT PartVT = 1851 TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), CC, VT); 1852 1853 // 'inreg' on function refers to return value 1854 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy(); 1855 if (attr.hasRetAttr(Attribute::InReg)) 1856 Flags.setInReg(); 1857 1858 // Propagate extension type if any 1859 if (attr.hasRetAttr(Attribute::SExt)) 1860 Flags.setSExt(); 1861 else if (attr.hasRetAttr(Attribute::ZExt)) 1862 Flags.setZExt(); 1863 1864 for (unsigned i = 0; i < NumParts; ++i) { 1865 ISD::ArgFlagsTy OutFlags = Flags; 1866 if (NumParts > 1 && i == 0) 1867 OutFlags.setSplit(); 1868 else if (i == NumParts - 1 && i != 0) 1869 OutFlags.setSplitEnd(); 1870 1871 Outs.push_back( 1872 ISD::OutputArg(OutFlags, PartVT, VT, /*isfixed=*/true, 0, 0)); 1873 } 1874 } 1875 } 1876 1877 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate 1878 /// function arguments in the caller parameter area. This is the actual 1879 /// alignment, not its logarithm. 1880 uint64_t TargetLoweringBase::getByValTypeAlignment(Type *Ty, 1881 const DataLayout &DL) const { 1882 return DL.getABITypeAlign(Ty).value(); 1883 } 1884 1885 bool TargetLoweringBase::allowsMemoryAccessForAlignment( 1886 LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace, 1887 Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const { 1888 // Check if the specified alignment is sufficient based on the data layout. 1889 // TODO: While using the data layout works in practice, a better solution 1890 // would be to implement this check directly (make this a virtual function). 1891 // For example, the ABI alignment may change based on software platform while 1892 // this function should only be affected by hardware implementation. 1893 Type *Ty = VT.getTypeForEVT(Context); 1894 if (VT.isZeroSized() || Alignment >= DL.getABITypeAlign(Ty)) { 1895 // Assume that an access that meets the ABI-specified alignment is fast. 1896 if (Fast != nullptr) 1897 *Fast = 1; 1898 return true; 1899 } 1900 1901 // This is a misaligned access. 
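// Defer to the target hook, which decides whether the misaligned access is
// permitted at all and, if so, whether it is fast.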
1902 return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast); 1903 } 1904 1905 bool TargetLoweringBase::allowsMemoryAccessForAlignment( 1906 LLVMContext &Context, const DataLayout &DL, EVT VT, 1907 const MachineMemOperand &MMO, unsigned *Fast) const { 1908 return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(), 1909 MMO.getAlign(), MMO.getFlags(), Fast); 1910 } 1911 1912 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context, 1913 const DataLayout &DL, EVT VT, 1914 unsigned AddrSpace, Align Alignment, 1915 MachineMemOperand::Flags Flags, 1916 unsigned *Fast) const { 1917 return allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace, Alignment, 1918 Flags, Fast); 1919 } 1920 1921 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context, 1922 const DataLayout &DL, EVT VT, 1923 const MachineMemOperand &MMO, 1924 unsigned *Fast) const { 1925 return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(), 1926 MMO.getFlags(), Fast); 1927 } 1928 1929 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context, 1930 const DataLayout &DL, LLT Ty, 1931 const MachineMemOperand &MMO, 1932 unsigned *Fast) const { 1933 EVT VT = getApproximateEVTForLLT(Ty, DL, Context); 1934 return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(), 1935 MMO.getFlags(), Fast); 1936 } 1937 1938 //===----------------------------------------------------------------------===// 1939 // TargetTransformInfo Helpers 1940 //===----------------------------------------------------------------------===// 1941 1942 int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const { 1943 enum InstructionOpcodes { 1944 #define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM, 1945 #define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM 1946 #include "llvm/IR/Instruction.def" 1947 }; 1948 switch (static_cast<InstructionOpcodes>(Opcode)) { 1949 case Ret: return 0; 1950 case Br: return 0; 1951 case Switch: return 0; 1952 case IndirectBr: return 0; 1953 case Invoke: return 0; 1954 case CallBr: return 0; 1955 case Resume: return 0; 1956 case Unreachable: return 0; 1957 case CleanupRet: return 0; 1958 case CatchRet: return 0; 1959 case CatchPad: return 0; 1960 case CatchSwitch: return 0; 1961 case CleanupPad: return 0; 1962 case FNeg: return ISD::FNEG; 1963 case Add: return ISD::ADD; 1964 case FAdd: return ISD::FADD; 1965 case Sub: return ISD::SUB; 1966 case FSub: return ISD::FSUB; 1967 case Mul: return ISD::MUL; 1968 case FMul: return ISD::FMUL; 1969 case UDiv: return ISD::UDIV; 1970 case SDiv: return ISD::SDIV; 1971 case FDiv: return ISD::FDIV; 1972 case URem: return ISD::UREM; 1973 case SRem: return ISD::SREM; 1974 case FRem: return ISD::FREM; 1975 case Shl: return ISD::SHL; 1976 case LShr: return ISD::SRL; 1977 case AShr: return ISD::SRA; 1978 case And: return ISD::AND; 1979 case Or: return ISD::OR; 1980 case Xor: return ISD::XOR; 1981 case Alloca: return 0; 1982 case Load: return ISD::LOAD; 1983 case Store: return ISD::STORE; 1984 case GetElementPtr: return 0; 1985 case Fence: return 0; 1986 case AtomicCmpXchg: return 0; 1987 case AtomicRMW: return 0; 1988 case Trunc: return ISD::TRUNCATE; 1989 case ZExt: return ISD::ZERO_EXTEND; 1990 case SExt: return ISD::SIGN_EXTEND; 1991 case FPToUI: return ISD::FP_TO_UINT; 1992 case FPToSI: return ISD::FP_TO_SINT; 1993 case UIToFP: return ISD::UINT_TO_FP; 1994 case SIToFP: return ISD::SINT_TO_FP; 1995 case FPTrunc: return ISD::FP_ROUND; 1996 case FPExt: return ISD::FP_EXTEND; 1997 case PtrToInt: 
return ISD::BITCAST; 1998 case IntToPtr: return ISD::BITCAST; 1999 case BitCast: return ISD::BITCAST; 2000 case AddrSpaceCast: return ISD::ADDRSPACECAST; 2001 case ICmp: return ISD::SETCC; 2002 case FCmp: return ISD::SETCC; 2003 case PHI: return 0; 2004 case Call: return 0; 2005 case Select: return ISD::SELECT; 2006 case UserOp1: return 0; 2007 case UserOp2: return 0; 2008 case VAArg: return 0; 2009 case ExtractElement: return ISD::EXTRACT_VECTOR_ELT; 2010 case InsertElement: return ISD::INSERT_VECTOR_ELT; 2011 case ShuffleVector: return ISD::VECTOR_SHUFFLE; 2012 case ExtractValue: return ISD::MERGE_VALUES; 2013 case InsertValue: return ISD::MERGE_VALUES; 2014 case LandingPad: return 0; 2015 case Freeze: return ISD::FREEZE; 2016 } 2017 2018 llvm_unreachable("Unknown instruction type encountered!"); 2019 } 2020 2021 Value * 2022 TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilderBase &IRB, 2023 bool UseTLS) const { 2024 // compiler-rt provides a variable with a magic name. Targets that do not 2025 // link with compiler-rt may also provide such a variable. 2026 Module *M = IRB.GetInsertBlock()->getParent()->getParent(); 2027 const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr"; 2028 auto UnsafeStackPtr = 2029 dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar)); 2030 2031 Type *StackPtrTy = PointerType::getUnqual(M->getContext()); 2032 2033 if (!UnsafeStackPtr) { 2034 auto TLSModel = UseTLS ? 2035 GlobalValue::InitialExecTLSModel : 2036 GlobalValue::NotThreadLocal; 2037 // The global variable is not defined yet; define it ourselves. 2038 // We use the initial-exec TLS model because we do not support the 2039 // variable living anywhere other than in the main executable. 2040 UnsafeStackPtr = new GlobalVariable( 2041 *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr, 2042 UnsafeStackPtrVar, nullptr, TLSModel); 2043 } else { 2044 // The variable exists; check its type and attributes. 2045 if (UnsafeStackPtr->getValueType() != StackPtrTy) 2046 report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type"); 2047 if (UseTLS != UnsafeStackPtr->isThreadLocal()) 2048 report_fatal_error(Twine(UnsafeStackPtrVar) + " must " + 2049 (UseTLS ? "" : "not ") + "be thread-local"); 2050 } 2051 return UnsafeStackPtr; 2052 } 2053 2054 Value * 2055 TargetLoweringBase::getSafeStackPointerLocation(IRBuilderBase &IRB) const { 2056 if (!TM.getTargetTriple().isAndroid()) 2057 return getDefaultSafeStackPointerLocation(IRB, true); 2058 2059 // Android provides a libc function to retrieve the address of the current 2060 // thread's unsafe stack pointer. 2061 Module *M = IRB.GetInsertBlock()->getParent()->getParent(); 2062 auto *PtrTy = PointerType::getUnqual(M->getContext()); 2063 FunctionCallee Fn = 2064 M->getOrInsertFunction("__safestack_pointer_address", PtrTy); 2065 return IRB.CreateCall(Fn); 2066 } 2067 2068 //===----------------------------------------------------------------------===// 2069 // Loop Strength Reduction hooks 2070 //===----------------------------------------------------------------------===// 2071 2072 /// isLegalAddressingMode - Return true if the addressing mode represented 2073 /// by AM is legal for this target, for a load/store of the specified type. 2074 bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL, 2075 const AddrMode &AM, Type *Ty, 2076 unsigned AS, Instruction *I) const { 2077 // The default implementation accepts only a conservative, RISC-like set of 2078 // addressing modes: r+r and r+i.
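// Concretely, the checks below accept r, r+i (small immediate), r+r, and
// 2*r (treated as r+r), and reject global bases, scalable offsets, r+r+i,
// and any scale other than 0, 1, or 2.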
2079 2080 // Scalable offsets are not supported. 2081 if (AM.ScalableOffset) 2082 return false; 2083 2084 // Allows a sign-extended 16-bit immediate field. 2085 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1) 2086 return false; 2087 2088 // No global is ever allowed as a base. 2089 if (AM.BaseGV) 2090 return false; 2091 2092 // Only support r+r. 2093 switch (AM.Scale) { 2094 case 0: // "r+i" or just "i", depending on HasBaseReg. 2095 break; 2096 case 1: 2097 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed. 2098 return false; 2099 // Otherwise we have r+r or r+i. 2100 break; 2101 case 2: 2102 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed. 2103 return false; 2104 // Allow 2*r as r+r. 2105 break; 2106 default: // Don't allow n * r 2107 return false; 2108 } 2109 2110 return true; 2111 } 2112 2113 //===----------------------------------------------------------------------===// 2114 // Stack Protector 2115 //===----------------------------------------------------------------------===// 2116 2117 // For OpenBSD return its special guard variable. Otherwise return nullptr, 2118 // so that SelectionDAG handles SSP. 2119 Value *TargetLoweringBase::getIRStackGuard(IRBuilderBase &IRB) const { 2120 if (getTargetMachine().getTargetTriple().isOSOpenBSD()) { 2121 Module &M = *IRB.GetInsertBlock()->getParent()->getParent(); 2122 PointerType *PtrTy = PointerType::getUnqual(M.getContext()); 2123 Constant *C = M.getOrInsertGlobal("__guard_local", PtrTy); 2124 if (GlobalVariable *G = dyn_cast_or_null<GlobalVariable>(C)) 2125 G->setVisibility(GlobalValue::HiddenVisibility); 2126 return C; 2127 } 2128 return nullptr; 2129 } 2130 2131 // Currently only support "standard" __stack_chk_guard. 2132 // TODO: add LOAD_STACK_GUARD support. 2133 void TargetLoweringBase::insertSSPDeclarations(Module &M) const { 2134 if (!M.getNamedValue("__stack_chk_guard")) { 2135 auto *GV = new GlobalVariable(M, PointerType::getUnqual(M.getContext()), 2136 false, GlobalVariable::ExternalLinkage, 2137 nullptr, "__stack_chk_guard"); 2138 2139 // FreeBSD has "__stack_chk_guard" defined externally in libc.so. 2140 if (M.getDirectAccessExternalData() && 2141 !TM.getTargetTriple().isWindowsGNUEnvironment() && 2142 !(TM.getTargetTriple().isPPC64() && 2143 TM.getTargetTriple().isOSFreeBSD()) && 2144 (!TM.getTargetTriple().isOSDarwin() || 2145 TM.getRelocationModel() == Reloc::Static)) 2146 GV->setDSOLocal(true); 2147 } 2148 } 2149 2150 // Currently only support "standard" __stack_chk_guard. 2151 // TODO: add LOAD_STACK_GUARD support. 2152 Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const { 2153 return M.getNamedValue("__stack_chk_guard"); 2154 } 2155 2156 Function *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const { 2157 return nullptr; 2158 } 2159 2160 unsigned TargetLoweringBase::getMinimumJumpTableEntries() const { 2161 return MinimumJumpTableEntries; 2162 } 2163 2164 void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) { 2165 MinimumJumpTableEntries = Val; 2166 } 2167 2168 unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const { 2169 return OptForSize ?
OptsizeJumpTableDensity : JumpTableDensity; 2170 } 2171 2172 unsigned TargetLoweringBase::getMaximumJumpTableSize() const { 2173 return MaximumJumpTableSize; 2174 } 2175 2176 void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) { 2177 MaximumJumpTableSize = Val; 2178 } 2179 2180 bool TargetLoweringBase::isJumpTableRelative() const { 2181 return getTargetMachine().isPositionIndependent(); 2182 } 2183 2184 Align TargetLoweringBase::getPrefLoopAlignment(MachineLoop *ML) const { 2185 if (TM.Options.LoopAlignment) 2186 return Align(TM.Options.LoopAlignment); 2187 return PrefLoopAlignment; 2188 } 2189 2190 unsigned TargetLoweringBase::getMaxPermittedBytesForAlignment( 2191 MachineBasicBlock *MBB) const { 2192 return MaxBytesForAlignment; 2193 } 2194 2195 //===----------------------------------------------------------------------===// 2196 // Reciprocal Estimates 2197 //===----------------------------------------------------------------------===// 2198 2199 /// Get the reciprocal estimate attribute string for a function that will 2200 /// override the target defaults. 2201 static StringRef getRecipEstimateForFunc(MachineFunction &MF) { 2202 const Function &F = MF.getFunction(); 2203 return F.getFnAttribute("reciprocal-estimates").getValueAsString(); 2204 } 2205 2206 /// Construct a string for the given reciprocal operation of the given type. 2207 /// This string should match the corresponding option to the front-end's 2208 /// "-mrecip" flag assuming those strings have been passed through in an 2209 /// attribute string. For example, "vec-divf" for a division of a vXf32. 2210 static std::string getReciprocalOpName(bool IsSqrt, EVT VT) { 2211 std::string Name = VT.isVector() ? "vec-" : ""; 2212 2213 Name += IsSqrt ? "sqrt" : "div"; 2214 2215 // TODO: Handle other float types? 2216 if (VT.getScalarType() == MVT::f64) { 2217 Name += "d"; 2218 } else if (VT.getScalarType() == MVT::f16) { 2219 Name += "h"; 2220 } else { 2221 assert(VT.getScalarType() == MVT::f32 && 2222 "Unexpected FP type for reciprocal estimate"); 2223 Name += "f"; 2224 } 2225 2226 return Name; 2227 } 2228 2229 /// If the input string contains a customized refinement step count, return 2230 /// true and set the character position and value (a single numeric character). 2231 /// Return false if there is no customized refinement step count. 2232 static bool parseRefinementStep(StringRef In, size_t &Position, 2233 uint8_t &Value) { 2234 const char RefStepToken = ':'; 2235 Position = In.find(RefStepToken); 2236 if (Position == StringRef::npos) 2237 return false; 2238 2239 StringRef RefStepString = In.substr(Position + 1); 2240 // Allow exactly one numeric character for the additional refinement 2241 // step parameter. 2242 if (RefStepString.size() == 1) { 2243 char RefStepChar = RefStepString[0]; 2244 if (isDigit(RefStepChar)) { 2245 Value = RefStepChar - '0'; 2246 return true; 2247 } 2248 } 2249 report_fatal_error("Invalid refinement step for -recip."); 2250 } 2251 2252 /// For the input attribute string, return one of the ReciprocalEstimate enum 2253 /// status values (enabled, disabled, or not specified) for this operation on 2254 /// the specified data type. 2255 static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) { 2256 if (Override.empty()) 2257 return TargetLoweringBase::ReciprocalEstimate::Unspecified; 2258 2259 SmallVector<StringRef, 4> OverrideVector; 2260 Override.split(OverrideVector, ','); 2261 unsigned NumArgs = OverrideVector.size(); 2262 2263 // Check if "all", "none", or "default" was specified.
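// For example, a "reciprocal-estimates" attribute string of just "all",
// "none", or "default" applies to every operation, optionally with a
// single-digit refinement-step suffix such as "all:2".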
2264 if (NumArgs == 1) { 2265 // Look for an optional setting of the number of refinement steps needed 2266 // for this type of reciprocal operation. 2267 size_t RefPos; 2268 uint8_t RefSteps; 2269 if (parseRefinementStep(Override, RefPos, RefSteps)) { 2270 // Split the string for further processing. 2271 Override = Override.substr(0, RefPos); 2272 } 2273 2274 // All reciprocal types are enabled. 2275 if (Override == "all") 2276 return TargetLoweringBase::ReciprocalEstimate::Enabled; 2277 2278 // All reciprocal types are disabled. 2279 if (Override == "none") 2280 return TargetLoweringBase::ReciprocalEstimate::Disabled; 2281 2282 // Target defaults for enablement are used. 2283 if (Override == "default") 2284 return TargetLoweringBase::ReciprocalEstimate::Unspecified; 2285 } 2286 2287 // The attribute string may omit the size suffix ('f'/'d'). 2288 std::string VTName = getReciprocalOpName(IsSqrt, VT); 2289 std::string VTNameNoSize = VTName; 2290 VTNameNoSize.pop_back(); 2291 static const char DisabledPrefix = '!'; 2292 2293 for (StringRef RecipType : OverrideVector) { 2294 size_t RefPos; 2295 uint8_t RefSteps; 2296 if (parseRefinementStep(RecipType, RefPos, RefSteps)) 2297 RecipType = RecipType.substr(0, RefPos); 2298 2299 // Ignore the disablement token for string matching. 2300 bool IsDisabled = RecipType[0] == DisabledPrefix; 2301 if (IsDisabled) 2302 RecipType = RecipType.substr(1); 2303 2304 if (RecipType == VTName || RecipType == VTNameNoSize) 2305 return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled 2306 : TargetLoweringBase::ReciprocalEstimate::Enabled; 2307 } 2308 2309 return TargetLoweringBase::ReciprocalEstimate::Unspecified; 2310 } 2311 2312 /// For the input attribute string, return the customized refinement step count 2313 /// for this operation on the specified data type. If the step count does not 2314 /// exist, return the ReciprocalEstimate enum value for unspecified. 2315 static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) { 2316 if (Override.empty()) 2317 return TargetLoweringBase::ReciprocalEstimate::Unspecified; 2318 2319 SmallVector<StringRef, 4> OverrideVector; 2320 Override.split(OverrideVector, ','); 2321 unsigned NumArgs = OverrideVector.size(); 2322 2323 // Check if "all", "default", or "none" was specified. 2324 if (NumArgs == 1) { 2325 // Look for an optional setting of the number of refinement steps needed 2326 // for this type of reciprocal operation. 2327 size_t RefPos; 2328 uint8_t RefSteps; 2329 if (!parseRefinementStep(Override, RefPos, RefSteps)) 2330 return TargetLoweringBase::ReciprocalEstimate::Unspecified; 2331 2332 // Split the string for further processing. 2333 Override = Override.substr(0, RefPos); 2334 assert(Override != "none" && 2335 "Disabled reciprocals, but specified refinement steps?"); 2336 2337 // If this is a general override, return the specified number of steps. 2338 if (Override == "all" || Override == "default") 2339 return RefSteps; 2340 } 2341 2342 // The attribute string may omit the size suffix ('f'/'d').
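// e.g. "vec-sqrt:2" supplies two refinement steps for both "vec-sqrtf" and
// "vec-sqrtd".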
2343 std::string VTName = getReciprocalOpName(IsSqrt, VT); 2344 std::string VTNameNoSize = VTName; 2345 VTNameNoSize.pop_back(); 2346 2347 for (StringRef RecipType : OverrideVector) { 2348 size_t RefPos; 2349 uint8_t RefSteps; 2350 if (!parseRefinementStep(RecipType, RefPos, RefSteps)) 2351 continue; 2352 2353 RecipType = RecipType.substr(0, RefPos); 2354 if (RecipType == VTName || RecipType == VTNameNoSize) 2355 return RefSteps; 2356 } 2357 2358 return TargetLoweringBase::ReciprocalEstimate::Unspecified; 2359 } 2360 2361 int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT, 2362 MachineFunction &MF) const { 2363 return getOpEnabled(true, VT, getRecipEstimateForFunc(MF)); 2364 } 2365 2366 int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT, 2367 MachineFunction &MF) const { 2368 return getOpEnabled(false, VT, getRecipEstimateForFunc(MF)); 2369 } 2370 2371 int TargetLoweringBase::getSqrtRefinementSteps(EVT VT, 2372 MachineFunction &MF) const { 2373 return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF)); 2374 } 2375 2376 int TargetLoweringBase::getDivRefinementSteps(EVT VT, 2377 MachineFunction &MF) const { 2378 return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF)); 2379 } 2380 2381 bool TargetLoweringBase::isLoadBitCastBeneficial( 2382 EVT LoadVT, EVT BitcastVT, const SelectionDAG &DAG, 2383 const MachineMemOperand &MMO) const { 2384 // Single-element vectors are scalarized, so we should generally avoid having 2385 // any memory operations on such types, as they would get scalarized too. 2386 if (LoadVT.isFixedLengthVector() && BitcastVT.isFixedLengthVector() && 2387 BitcastVT.getVectorNumElements() == 1) 2388 return false; 2389 2390 // Don't do this if we could use an indexed load on the original type but not 2391 // on the new one. 2392 if (!LoadVT.isSimple() || !BitcastVT.isSimple()) 2393 return true; 2394 2395 MVT LoadMVT = LoadVT.getSimpleVT(); 2396 2397 // Don't bother doing this if it's just going to be promoted again later, as 2398 // doing so might interfere with other combines.
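// That is, if loads of LoadMVT are already promoted to BitcastVT, prefer the
// existing promotion over forcing the bitcast here.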
2399 if (getOperationAction(ISD::LOAD, LoadMVT) == Promote && 2400 getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT()) 2401 return false; 2402 2403 unsigned Fast = 0; 2404 return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), BitcastVT, 2405 MMO, &Fast) && 2406 Fast; 2407 } 2408 2409 void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const { 2410 MF.getRegInfo().freezeReservedRegs(); 2411 } 2412 2413 MachineMemOperand::Flags TargetLoweringBase::getLoadMemOperandFlags( 2414 const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC, 2415 const TargetLibraryInfo *LibInfo) const { 2416 MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad; 2417 if (LI.isVolatile()) 2418 Flags |= MachineMemOperand::MOVolatile; 2419 2420 if (LI.hasMetadata(LLVMContext::MD_nontemporal)) 2421 Flags |= MachineMemOperand::MONonTemporal; 2422 2423 if (LI.hasMetadata(LLVMContext::MD_invariant_load)) 2424 Flags |= MachineMemOperand::MOInvariant; 2425 2426 if (isDereferenceableAndAlignedPointer(LI.getPointerOperand(), LI.getType(), 2427 LI.getAlign(), DL, &LI, AC, 2428 /*DT=*/nullptr, LibInfo)) 2429 Flags |= MachineMemOperand::MODereferenceable; 2430 2431 Flags |= getTargetMMOFlags(LI); 2432 return Flags; 2433 } 2434 2435 MachineMemOperand::Flags 2436 TargetLoweringBase::getStoreMemOperandFlags(const StoreInst &SI, 2437 const DataLayout &DL) const { 2438 MachineMemOperand::Flags Flags = MachineMemOperand::MOStore; 2439 2440 if (SI.isVolatile()) 2441 Flags |= MachineMemOperand::MOVolatile; 2442 2443 if (SI.hasMetadata(LLVMContext::MD_nontemporal)) 2444 Flags |= MachineMemOperand::MONonTemporal; 2445 2446 // FIXME: Not preserving dereferenceable 2447 Flags |= getTargetMMOFlags(SI); 2448 return Flags; 2449 } 2450 2451 MachineMemOperand::Flags 2452 TargetLoweringBase::getAtomicMemOperandFlags(const Instruction &AI, 2453 const DataLayout &DL) const { 2454 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; 2455 2456 if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&AI)) { 2457 if (RMW->isVolatile()) 2458 Flags |= MachineMemOperand::MOVolatile; 2459 } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(&AI)) { 2460 if (CmpX->isVolatile()) 2461 Flags |= MachineMemOperand::MOVolatile; 2462 } else 2463 llvm_unreachable("not an atomic instruction"); 2464 2465 // FIXME: Not preserving dereferenceable 2466 Flags |= getTargetMMOFlags(AI); 2467 return Flags; 2468 } 2469 2470 Instruction *TargetLoweringBase::emitLeadingFence(IRBuilderBase &Builder, 2471 Instruction *Inst, 2472 AtomicOrdering Ord) const { 2473 if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore()) 2474 return Builder.CreateFence(Ord); 2475 else 2476 return nullptr; 2477 } 2478 2479 Instruction *TargetLoweringBase::emitTrailingFence(IRBuilderBase &Builder, 2480 Instruction *Inst, 2481 AtomicOrdering Ord) const { 2482 if (isAcquireOrStronger(Ord)) 2483 return Builder.CreateFence(Ord); 2484 else 2485 return nullptr; 2486 } 2487 2488 //===----------------------------------------------------------------------===// 2489 // GlobalISel Hooks 2490 //===----------------------------------------------------------------------===// 2491 2492 bool TargetLoweringBase::shouldLocalize(const MachineInstr &MI, 2493 const TargetTransformInfo *TTI) const { 2494 auto &MF = *MI.getMF(); 2495 auto &MRI = MF.getRegInfo(); 2496 // Assuming a spill and reload of a value has a cost of 1 instruction each, 2497 // this helper function computes the maximum number of uses we should consider 2498 // for remat. E.g. 
on arm64 global addresses take 2 insts to materialize. We 2499 // break even in terms of code size when the original MI has 2 users vs 2500 // choosing to potentially spill. With any more than 2 users we have a net code 2501 // size increase. This doesn't take into account register pressure though. 2502 auto maxUses = [](unsigned RematCost) { 2503 // A cost of 1 means remats are basically free. 2504 if (RematCost == 1) 2505 return std::numeric_limits<unsigned>::max(); 2506 if (RematCost == 2) 2507 return 2U; 2508 2509 // Remat is too expensive; only sink if there's one user. 2510 if (RematCost > 2) 2511 return 1U; 2512 llvm_unreachable("Unexpected remat cost"); 2513 }; 2514 2515 switch (MI.getOpcode()) { 2516 default: 2517 return false; 2518 // Constant-like instructions should be close to their users. 2519 // We don't want long live-ranges for them. 2520 case TargetOpcode::G_CONSTANT: 2521 case TargetOpcode::G_FCONSTANT: 2522 case TargetOpcode::G_FRAME_INDEX: 2523 case TargetOpcode::G_INTTOPTR: 2524 return true; 2525 case TargetOpcode::G_GLOBAL_VALUE: { 2526 unsigned RematCost = TTI->getGISelRematGlobalCost(); 2527 Register Reg = MI.getOperand(0).getReg(); 2528 unsigned MaxUses = maxUses(RematCost); 2529 if (MaxUses == UINT_MAX) 2530 return true; // Remats are "free" so always localize. 2531 return MRI.hasAtMostUserInstrs(Reg, MaxUses); 2532 } 2533 } 2534 } 2535