//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the WebAssemblyTargetLowering class.
///
//===----------------------------------------------------------------------===//

#include "WebAssemblyISelLowering.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "Utils/WebAssemblyTypeUtilities.h"
#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
#include "WebAssemblyTargetMachine.h"
#include "WebAssemblyUtilities.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

#define DEBUG_TYPE "wasm-lower"

WebAssemblyTargetLowering::WebAssemblyTargetLowering(
    const TargetMachine &TM, const WebAssemblySubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;

  // Booleans always contain 0 or 1.
  setBooleanContents(ZeroOrOneBooleanContent);
  // Except in SIMD vectors.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  // We don't know the microarchitecture here, so just reduce register
  // pressure.
  setSchedulingPreference(Sched::RegPressure);
  // Tell ISel that we have a stack pointer.
  setStackPointerRegisterToSaveRestore(
      Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
  // Set up the register classes.
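  // (WebAssembly has no physical register file; these classes describe the
  // value types that virtual registers, and eventually wasm locals, can hold.)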
  addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
  addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
  addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
  addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
  if (Subtarget->hasSIMD128()) {
    addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
  }
  if (Subtarget->hasHalfPrecision()) {
    addRegisterClass(MVT::v8f16, &WebAssembly::V128RegClass);
  }
  if (Subtarget->hasReferenceTypes()) {
    addRegisterClass(MVT::externref, &WebAssembly::EXTERNREFRegClass);
    addRegisterClass(MVT::funcref, &WebAssembly::FUNCREFRegClass);
    if (Subtarget->hasExceptionHandling()) {
      addRegisterClass(MVT::exnref, &WebAssembly::EXNREFRegClass);
    }
  }
  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget->getRegisterInfo());

  // Transform loads and stores to pointers in address space 1 to loads and
  // stores to WebAssembly global variables, outside linear memory.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) {
    setOperationAction(ISD::LOAD, T, Custom);
    setOperationAction(ISD::STORE, T, Custom);
  }
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64}) {
      setOperationAction(ISD::LOAD, T, Custom);
      setOperationAction(ISD::STORE, T, Custom);
    }
  }
  if (Subtarget->hasReferenceTypes()) {
    // We need custom load and store lowering for externref, funcref, and
    // Other. The MVT::Other here represents tables of reference types.
    for (auto T : {MVT::externref, MVT::funcref, MVT::Other}) {
      setOperationAction(ISD::LOAD, T, Custom);
      setOperationAction(ISD::STORE, T, Custom);
    }
  }

  setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVTPtr, Custom);
  setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
  setOperationAction(ISD::JumpTable, MVTPtr, Custom);
  setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
  setOperationAction(ISD::BRIND, MVT::Other, Custom);
  setOperationAction(ISD::CLEAR_CACHE, MVT::Other, Custom);

  // Take the default expansion for va_arg, va_copy, and va_end. There is no
  // default action for va_start, so we do that custom.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
    // Don't expand the floating-point types to constant pools.
    setOperationAction(ISD::ConstantFP, T, Legal);
    // Expand floating-point comparisons.
    for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
                    ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
      setCondCodeAction(CC, T, Expand);
    // Expand floating-point library function operators.
    for (auto Op :
         {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
      setOperationAction(Op, T, Expand);
    // Note supported floating-point library function operators that otherwise
    // default to expand.
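    // (These map directly to wasm's f32.ceil, f32.floor, f32.trunc, and
    // f32.nearest, plus their f64 and SIMD counterparts.)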
    for (auto Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT,
                    ISD::FRINT, ISD::FROUNDEVEN})
      setOperationAction(Op, T, Legal);
    // Support minimum and maximum, which otherwise default to expand.
    setOperationAction(ISD::FMINIMUM, T, Legal);
    setOperationAction(ISD::FMAXIMUM, T, Legal);
    // WebAssembly currently has no builtin f16 support.
    setOperationAction(ISD::FP16_TO_FP, T, Expand);
    setOperationAction(ISD::FP_TO_FP16, T, Expand);
    setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
    setTruncStoreAction(T, MVT::f16, Expand);
  }

  if (Subtarget->hasHalfPrecision()) {
    setOperationAction(ISD::FMINIMUM, MVT::v8f16, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v8f16, Legal);
  }

  // Expand unavailable integer operations.
  for (auto Op :
       {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
        ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
        ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
    for (auto T : {MVT::i32, MVT::i64})
      setOperationAction(Op, T, Expand);
    if (Subtarget->hasSIMD128())
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Expand);
  }

  if (Subtarget->hasNontrappingFPToInt())
    for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
      for (auto T : {MVT::i32, MVT::i64})
        setOperationAction(Op, T, Custom);

  // SIMD-specific configuration
  if (Subtarget->hasSIMD128()) {
    // Combine vector mask reductions into alltrue/anytrue
    setTargetDAGCombine(ISD::SETCC);

    // Convert vector to integer bitcasts to bitmask
    setTargetDAGCombine(ISD::BITCAST);

    // Hoist bitcasts out of shuffles
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);

    // Combine extends of extract_subvectors into widening ops
    setTargetDAGCombine({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND});

    // Combine int_to_fp or fp_extend of extract_vectors and vice versa into
    // conversion ops
    setTargetDAGCombine({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_EXTEND,
                         ISD::EXTRACT_SUBVECTOR});

    // Combine fp_to_{s,u}int_sat or fp_round of concat_vectors or vice versa
    // into conversion ops
    setTargetDAGCombine({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT,
                         ISD::FP_ROUND, ISD::CONCAT_VECTORS});

    setTargetDAGCombine(ISD::TRUNCATE);

    // Support saturating add for i8x16 and i16x8
    for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
      for (auto T : {MVT::v16i8, MVT::v8i16})
        setOperationAction(Op, T, Legal);

    // Support integer abs
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
      setOperationAction(ISD::ABS, T, Legal);

    // Custom lower BUILD_VECTORs to minimize number of replace_lanes
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::BUILD_VECTOR, T, Custom);

    // We have custom shuffle lowering to expose the shuffle mask
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);

    // Support splatting
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::SPLAT_VECTOR, T, Legal);
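    // (Each lane type has a dedicated splat instruction, e.g. i32x4.splat.)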

    // Custom lowering since wasm shifts must have a scalar shift amount
    for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Custom);

    // Custom lower lane accesses to expand out variable indices
    for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32,
                     MVT::v2i64, MVT::v2f64})
        setOperationAction(Op, T, Custom);

    // There is no i8x16.mul instruction
    setOperationAction(ISD::MUL, MVT::v16i8, Expand);

    // There is no vector conditional select instruction
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::SELECT_CC, T, Expand);

    // Expand integer operations supported for scalars but not SIMD
    for (auto Op :
         {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Expand);

    // But we do have integer min and max operations
    for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
        setOperationAction(Op, T, Legal);

    // And we have popcnt for i8x16. It can be used to expand ctlz/cttz.
    setOperationAction(ISD::CTPOP, MVT::v16i8, Legal);
    setOperationAction(ISD::CTLZ, MVT::v16i8, Expand);
    setOperationAction(ISD::CTTZ, MVT::v16i8, Expand);

    // Custom lower bit counting operations for other types to scalarize them.
    for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP})
      for (auto T : {MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Custom);

    // Expand float operations supported for scalars but not SIMD
    for (auto Op : {ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
                    ISD::FEXP, ISD::FEXP2})
      for (auto T : {MVT::v4f32, MVT::v2f64})
        setOperationAction(Op, T, Expand);

    // Unsigned comparison operations are unavailable for i64x2 vectors.
    for (auto CC : {ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE})
      setCondCodeAction(CC, MVT::v2i64, Custom);

    // 64x2 conversions are not in the spec
    for (auto Op :
         {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT})
      for (auto T : {MVT::v2i64, MVT::v2f64})
        setOperationAction(Op, T, Expand);

    // But saturating fp_to_int conversions are
    for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
      setOperationAction(Op, MVT::v4i32, Custom);

    // Support vector extending
    for (auto T : MVT::integer_fixedlen_vector_valuetypes()) {
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, T, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, T, Custom);
    }
  }

  // As a special case, these operators use the type to mean the type to
  // sign-extend from.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget->hasSignExt()) {
    // Sign extends are legal only when extending a vector extract
    auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
    for (auto T : {MVT::i8, MVT::i16, MVT::i32})
      setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
  }
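  // (Without sign-ext there are no i32.extend8_s / i32.extend16_s style
  // instructions; the Custom action still lets the sign-extending SIMD
  // extract_lane_s forms be selected.)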
  for (auto T : MVT::integer_fixedlen_vector_valuetypes())
    setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);

  // Dynamic stack allocation: use the default expansion.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);

  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
  setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
  setOperationAction(ISD::CopyToReg, MVT::Other, Custom);

  // Expand these forms; we pattern-match the forms that we can handle in isel.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
    for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
      setOperationAction(Op, T, Expand);

  // We have custom switch handling.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // WebAssembly doesn't have:
  //  - Floating-point extending loads.
  //  - Floating-point truncating stores.
  //  - i1 extending loads.
  //  - truncating SIMD stores and most extending loads
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  for (auto T : MVT::integer_valuetypes())
    for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
      setLoadExtAction(Ext, T, MVT::i1, Promote);
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
                   MVT::v2f64}) {
      for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
        if (MVT(T) != MemT) {
          setTruncStoreAction(T, MemT, Expand);
          for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
            setLoadExtAction(Ext, T, MemT, Expand);
        }
      }
    }
    // But some vector extending loads are legal
    for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
      setLoadExtAction(Ext, MVT::v8i16, MVT::v8i8, Legal);
      setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
      setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
    }
    setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Legal);
  }

  // Don't do anything clever with build_pairs
  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // Trap lowers to wasm unreachable
  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // Exception handling intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  setMaxAtomicSizeInBitsSupported(64);

  // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
  // consistent with the f64 and f128 names.
  setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
  setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

  // Define the emscripten name for return address helper.
  // TODO: when implementing other Wasm backends, make this generic or only do
  // this on emscripten depending on what they end up doing.
  setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");

  // Always convert switches to br_tables unless there is only one case, which
  // is equivalent to a simple branch. This reduces code size for wasm, and we
  // defer possible jump table optimizations to the VM.
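  // (A minimum of 2 entries admits every multi-case switch into the jump
  // table path.)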
  setMinimumJumpTableEntries(2);
}

MVT WebAssemblyTargetLowering::getPointerTy(const DataLayout &DL,
                                            uint32_t AS) const {
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_EXTERNREF)
    return MVT::externref;
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF)
    return MVT::funcref;
  return TargetLowering::getPointerTy(DL, AS);
}

MVT WebAssemblyTargetLowering::getPointerMemTy(const DataLayout &DL,
                                               uint32_t AS) const {
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_EXTERNREF)
    return MVT::externref;
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF)
    return MVT::funcref;
  return TargetLowering::getPointerMemTy(DL, AS);
}

TargetLowering::AtomicExpansionKind
WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // We have wasm instructions for these
  switch (AI->getOperation()) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::Xchg:
    return AtomicExpansionKind::None;
  default:
    break;
  }
  return AtomicExpansionKind::CmpXChg;
}

bool WebAssemblyTargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
  // Implementation copied from X86TargetLowering.
  unsigned Opc = VecOp.getOpcode();

  // Assume target opcodes can't be scalarized.
  // TODO - do we have any exceptions?
  if (Opc >= ISD::BUILTIN_OP_END)
    return false;

  // If the vector op is not supported, try to convert to scalar.
  EVT VecVT = VecOp.getValueType();
  if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
    return true;

  // If the vector op is supported, but the scalar op is not, the transform may
  // not be worthwhile.
  EVT ScalarVT = VecVT.getScalarType();
  return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
}

FastISel *WebAssemblyTargetLowering::createFastISel(
    FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
  return WebAssembly::createFastISel(FuncInfo, LibInfo);
}

MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
                                                      EVT VT) const {
  unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
  if (BitWidth > 1 && BitWidth < 8)
    BitWidth = 8;

  if (BitWidth > 64) {
    // The shift will be lowered to a libcall, and compiler-rt libcalls expect
    // the count to be an i32.
    BitWidth = 32;
    assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
           "32-bit shift counts ought to be enough for anyone");
  }

  MVT Result = MVT::getIntegerVT(BitWidth);
  assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
         "Unable to represent scalar shift amount type");
  return Result;
}

// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
// undefined result on invalid/overflow, to the WebAssembly opcode, which
// traps on invalid/overflow.
static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
                                       MachineBasicBlock *BB,
                                       const TargetInstrInfo &TII,
                                       bool IsUnsigned, bool Int64,
                                       bool Float64, unsigned LoweredOpcode) {
  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

  Register OutReg = MI.getOperand(0).getReg();
  Register InReg = MI.getOperand(1).getReg();

  unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
  unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
  unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
  unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
  unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
  unsigned Eqz = WebAssembly::EQZ_I32;
  unsigned And = WebAssembly::AND_I32;
  int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
  int64_t Substitute = IsUnsigned ? 0 : Limit;
  double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
  auto &Context = BB->getParent()->getFunction().getContext();
  Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);

  const BasicBlock *LLVMBB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);

  MachineFunction::iterator It = ++BB->getIterator();
  F->insert(It, FalseMBB);
  F->insert(It, TrueMBB);
  F->insert(It, DoneMBB);

  // Transfer the remainder of BB and its successor edges to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(TrueMBB);
  BB->addSuccessor(FalseMBB);
  TrueMBB->addSuccessor(DoneMBB);
  FalseMBB->addSuccessor(DoneMBB);

  unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
  Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
  TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));

  MI.eraseFromParent();
  // For signed numbers, we can do a single comparison to determine whether
  // fabs(x) is within range.
  if (IsUnsigned) {
    Tmp0 = InReg;
  } else {
    BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
  }
  BuildMI(BB, DL, TII.get(FConst), Tmp1)
      .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
  BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);

  // For unsigned numbers, we have to do a separate comparison with zero.
  if (IsUnsigned) {
    Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
    Register SecondCmpReg =
        MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    BuildMI(BB, DL, TII.get(FConst), Tmp1)
        .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
    BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
    BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
    CmpReg = AndReg;
  }

  BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);

  // Create the CFG diamond to select between doing the conversion or using
  // the substitute value.
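  // The generated diamond, roughly:
  //
  //   BB:       CmpReg = in-range check (fabs(x) < CmpVal, plus x >= 0.0 for
  //             the unsigned case); br_if (CmpReg == 0), TrueMBB
  //   FalseMBB: FalseReg = trunc(x)        ; in range, safe to convert
  //             br DoneMBB
  //   TrueMBB:  TrueReg = Substitute       ; out of range or NaN
  //   DoneMBB:  OutReg = phi [FalseReg, FalseMBB], [TrueReg, TrueMBB]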
  BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
  BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
  BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
  BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
  BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
      .addReg(FalseReg)
      .addMBB(FalseMBB)
      .addReg(TrueReg)
      .addMBB(TrueMBB);

  return DoneMBB;
}

static MachineBasicBlock *
LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB,
                 const WebAssemblySubtarget *Subtarget,
                 const TargetInstrInfo &TII) {
  MachineInstr &CallParams = *CallResults.getPrevNode();
  assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS);
  assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||
         CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS);

  bool IsIndirect =
      CallParams.getOperand(0).isReg() || CallParams.getOperand(0).isFI();
  bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;

  bool IsFuncrefCall = false;
  if (IsIndirect && CallParams.getOperand(0).isReg()) {
    Register Reg = CallParams.getOperand(0).getReg();
    const MachineFunction *MF = BB->getParent();
    const MachineRegisterInfo &MRI = MF->getRegInfo();
    const TargetRegisterClass *TRC = MRI.getRegClass(Reg);
    IsFuncrefCall = (TRC == &WebAssembly::FUNCREFRegClass);
    assert(!IsFuncrefCall || Subtarget->hasReferenceTypes());
  }

  unsigned CallOp;
  if (IsIndirect && IsRetCall) {
    CallOp = WebAssembly::RET_CALL_INDIRECT;
  } else if (IsIndirect) {
    CallOp = WebAssembly::CALL_INDIRECT;
  } else if (IsRetCall) {
    CallOp = WebAssembly::RET_CALL;
  } else {
    CallOp = WebAssembly::CALL;
  }

  MachineFunction &MF = *BB->getParent();
  const MCInstrDesc &MCID = TII.get(CallOp);
  MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));

  // Move the function pointer to the end of the arguments for indirect calls
  if (IsIndirect) {
    auto FnPtr = CallParams.getOperand(0);
    CallParams.removeOperand(0);

    // For funcrefs, call_indirect is done through __funcref_call_table and the
    // funcref is always installed in slot 0 of the table, so instead of adding
    // the function pointer at the end of the params list, a zero (the index
    // into __funcref_call_table) is added.
    if (IsFuncrefCall) {
      Register RegZero =
          MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
      MachineInstrBuilder MIBC0 =
          BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);

      BB->insert(CallResults.getIterator(), MIBC0);
      MachineInstrBuilder(MF, CallParams).addReg(RegZero);
    } else
      CallParams.addOperand(FnPtr);
  }

  for (auto Def : CallResults.defs())
    MIB.add(Def);

  if (IsIndirect) {
    // Placeholder for the type index.
    MIB.addImm(0);
    // The table into which this call_indirect indexes.
    MCSymbolWasm *Table = IsFuncrefCall
                              ? WebAssembly::getOrCreateFuncrefCallTableSymbol(
                                    MF.getContext(), Subtarget)
                              : WebAssembly::getOrCreateFunctionTableSymbol(
                                    MF.getContext(), Subtarget);
    if (Subtarget->hasReferenceTypes()) {
      MIB.addSym(Table);
    } else {
      // For the MVP there is at most one table whose number is 0, but we can't
      // write a table symbol or issue relocations. Instead we just ensure the
      // table is live and write a zero.
      Table->setNoStrip();
      MIB.addImm(0);
    }
  }

  for (auto Use : CallParams.uses())
    MIB.add(Use);

  BB->insert(CallResults.getIterator(), MIB);
  CallParams.eraseFromParent();
  CallResults.eraseFromParent();

  // If this is a funcref call, to avoid hidden GC roots, we need to clear the
  // table slot with ref.null upon call_indirect return.
  //
  // This generates the following code, which comes right after a call_indirect
  // of a funcref:
  //
  //   i32.const 0
  //   ref.null func
  //   table.set __funcref_call_table
  if (IsIndirect && IsFuncrefCall) {
    MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
        MF.getContext(), Subtarget);
    Register RegZero =
        MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
    MachineInstr *Const0 =
        BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);
    BB->insertAfter(MIB.getInstr()->getIterator(), Const0);

    Register RegFuncref =
        MF.getRegInfo().createVirtualRegister(&WebAssembly::FUNCREFRegClass);
    MachineInstr *RefNull =
        BuildMI(MF, DL, TII.get(WebAssembly::REF_NULL_FUNCREF), RegFuncref);
    BB->insertAfter(Const0->getIterator(), RefNull);

    MachineInstr *TableSet =
        BuildMI(MF, DL, TII.get(WebAssembly::TABLE_SET_FUNCREF))
            .addSym(Table)
            .addReg(RegZero)
            .addReg(RegFuncref);
    BB->insertAfter(RefNull->getIterator(), TableSet);
  }

  return BB;
}

MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case WebAssembly::FP_TO_SINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, false, false,
                        WebAssembly::I32_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, false, false,
                        WebAssembly::I32_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, true, false,
                        WebAssembly::I64_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, true, false,
                        WebAssembly::I64_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, false, true,
                        WebAssembly::I32_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, false, true,
                        WebAssembly::I32_TRUNC_U_F64);
  case WebAssembly::FP_TO_SINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, true, true,
                        WebAssembly::I64_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, true, true,
                        WebAssembly::I64_TRUNC_U_F64);
  case WebAssembly::CALL_RESULTS:
  case WebAssembly::RET_CALL_RESULTS:
    return LowerCallResults(MI, DL, BB, Subtarget, TII);
  }
}

const char *
WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
  case WebAssemblyISD::FIRST_NUMBER:
  case WebAssemblyISD::FIRST_MEM_OPCODE:
    break;
#define HANDLE_NODETYPE(NODE)                                                  \
  case WebAssemblyISD::NODE:                                                   \
    return "WebAssemblyISD::" #NODE;
#define HANDLE_MEM_NODETYPE(NODE) HANDLE_NODETYPE(NODE)
#include "WebAssemblyISD.def"
#undef HANDLE_MEM_NODETYPE
#undef HANDLE_NODETYPE
  }
  return nullptr;
}

std::pair<unsigned, const TargetRegisterClass *>
WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // WebAssembly register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      assert(VT != MVT::iPTR && "Pointer MVT not expected here");
      if (Subtarget->hasSIMD128() && VT.isVector()) {
        if (VT.getSizeInBits() == 128)
          return std::make_pair(0U, &WebAssembly::V128RegClass);
      }
      if (VT.isInteger() && !VT.isVector()) {
        if (VT.getSizeInBits() <= 32)
          return std::make_pair(0U, &WebAssembly::I32RegClass);
        if (VT.getSizeInBits() <= 64)
          return std::make_pair(0U, &WebAssembly::I64RegClass);
      }
      if (VT.isFloatingPoint() && !VT.isVector()) {
        switch (VT.getSizeInBits()) {
        case 32:
          return std::make_pair(0U, &WebAssembly::F32RegClass);
        case 64:
          return std::make_pair(0U, &WebAssembly::F64RegClass);
        default:
          break;
        }
      }
      break;
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
  // Assume ctz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
  // Assume clz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                      const AddrMode &AM,
                                                      Type *Ty, unsigned AS,
                                                      Instruction *I) const {
  // WebAssembly offsets are added as unsigned without wrapping. The
  // isLegalAddressingMode hook gives us no way to determine if wrapping could
  // be happening, so we approximate this by accepting only non-negative
  // offsets.
  if (AM.BaseOffs < 0)
    return false;

  // WebAssembly has no scale register operands.
  if (AM.Scale != 0)
    return false;

  // Everything else is legal.
  return true;
}

bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
    EVT /*VT*/, unsigned /*AddrSpace*/, Align /*Align*/,
    MachineMemOperand::Flags /*Flags*/, unsigned *Fast) const {
  // WebAssembly supports unaligned accesses, though it should be declared
  // with the p2align attribute on loads and stores which do so, and there
  // may be a performance impact. We tell LLVM they're "fast" because
  // for the kinds of things that LLVM uses this for (merging adjacent stores
  // of constants, etc.), WebAssembly implementations will either want the
  // unaligned access or they'll split anyway.
  if (Fast)
    *Fast = 1;
  return true;
}

bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
                                              AttributeList Attr) const {
  // The current thinking is that wasm engines will perform this optimization,
  // so we can save on code size.
  return true;
}

bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
  EVT ExtT = ExtVal.getValueType();
  EVT MemT = cast<LoadSDNode>(ExtVal->getOperand(0))->getValueType(0);
  return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
         (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
         (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
}

bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {
  // Wasm doesn't support function addresses with offsets
  const GlobalValue *GV = GA->getGlobal();
  return isa<Function>(GV) ? false : TargetLowering::isOffsetFoldingLegal(GA);
}

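// Sink the insertelement/shufflevector pair that splats a shift amount into
// the block of the shift itself, so isel can fold the splat and use wasm's
// scalar shift amount directly. For example:
//   %ins = insertelement <4 x i32> poison, i32 %amt, i32 0
//   %splat = shufflevector <4 x i32> %ins, <4 x i32> poison, zeroinitializer
//   %shl = shl <4 x i32> %v, %splat   ; selects to i32x4.shl with %amt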
bool WebAssemblyTargetLowering::shouldSinkOperands(
    Instruction *I, SmallVectorImpl<Use *> &Ops) const {
  using namespace llvm::PatternMatch;

  if (!I->getType()->isVectorTy() || !I->isShift())
    return false;

  Value *V = I->getOperand(1);
  // We don't need to sink a constant splat.
  if (isa<Constant>(V))
    return false;

  if (match(V, m_Shuffle(m_InsertElt(m_Value(), m_Value(), m_ZeroInt()),
                         m_Value(), m_ZeroMask()))) {
    // Sink insert
    Ops.push_back(&cast<Instruction>(V)->getOperandUse(0));
    // Sink shuffle
    Ops.push_back(&I->getOperandUse(1));
    return true;
  }

  return false;
}

EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
                                                  LLVMContext &C,
                                                  EVT VT) const {
  if (VT.isVector())
    return VT.changeVectorElementTypeToInteger();

  // So far, all branch instructions in Wasm take an I32 condition.
  // The default TargetLowering::getSetCCResultType returns the pointer size,
  // which would be useful to reduce instruction counts when testing
  // against 64-bit pointers/values if at some point Wasm supports that.
  return EVT::getIntegerVT(C, 32);
}

bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                                   const CallInst &I,
                                                   MachineFunction &MF,
                                                   unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::wasm_memory_atomic_notify:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    // atomic.notify instruction does not really load the memory specified with
    // this argument, but MachineMemOperand should either be load or store, so
    // we set this to a load.
    // FIXME Volatile isn't really correct, but currently all LLVM atomic
    // instructions are treated as volatiles in the backend, so we should be
    // consistent. The same applies for wasm_atomic_wait intrinsics too.
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_memory_atomic_wait32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_memory_atomic_wait64:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(8);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_loadf16_f32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::f16;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(2);
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_storef16_f32:
    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = MVT::f16;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.align = Align(2);
    Info.flags = MachineMemOperand::MOStore;
    return true;
  default:
    return false;
  }
}

void WebAssemblyTargetLowering::computeKnownBitsForTargetNode(
    const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
    const SelectionDAG &DAG, unsigned Depth) const {
  switch (Op.getOpcode()) {
  default:
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = Op.getConstantOperandVal(0);
    switch (IntNo) {
    default:
      break;
    case Intrinsic::wasm_bitmask: {
      unsigned BitWidth = Known.getBitWidth();
      EVT VT = Op.getOperand(1).getSimpleValueType();
      unsigned PossibleBits = VT.getVectorNumElements();
      APInt ZeroMask = APInt::getHighBitsSet(BitWidth, BitWidth - PossibleBits);
      Known.Zero |= ZeroMask;
      break;
    }
    }
  }
  }
}

TargetLoweringBase::LegalizeTypeAction
WebAssemblyTargetLowering::getPreferredVectorAction(MVT VT) const {
  if (VT.isFixedLengthVector()) {
    MVT EltVT = VT.getVectorElementType();
    // We have legal vector types with these lane types, so widening the
    // vector would let us use some of the lanes directly without having to
    // extend or truncate values.
    if (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
        EltVT == MVT::i64 || EltVT == MVT::f32 || EltVT == MVT::f64)
      return TypeWidenVector;
  }

  return TargetLoweringBase::getPreferredVectorAction(VT);
}

bool WebAssemblyTargetLowering::shouldSimplifyDemandedVectorElts(
    SDValue Op, const TargetLoweringOpt &TLO) const {
  // ISel process runs DAGCombiner after legalization; this step is called
  // SelectionDAG optimization phase. This post-legalization combining process
  // runs DAGCombiner on each node, and if there was a change to be made,
  // re-runs legalization again on it and its user nodes to make sure
  // everything is in a legalized state.
  //
  // The legalization calls lowering routines, and we do our custom lowering
  // for build_vectors (LowerBUILD_VECTOR), which converts undef vector
  // elements into zeros. But there is a set of routines in DAGCombiner that
  // turns unused (= not demanded) nodes into undef, among which
  // SimplifyDemandedVectorElts turns unused vector elements into undefs. But
  // this routine does not work with our custom LowerBUILD_VECTOR, which turns
  // undefs into zeros. This combination can result in an infinite loop, in
  // which undefs are converted to zeros in legalization and back to undefs in
  // combining.
  //
  // So after DAG is legalized, we prevent SimplifyDemandedVectorElts from
  // running for build_vectors.
  if (Op.getOpcode() == ISD::BUILD_VECTOR && TLO.LegalOps && TLO.LegalTys)
    return false;
  return true;
}

//===----------------------------------------------------------------------===//
// WebAssembly Lowering private implementation.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
}

// Test whether the given calling convention is supported.
static bool callingConvSupported(CallingConv::ID CallConv) {
  // We currently support the language-independent target-independent
  // conventions. We don't yet have a way to annotate calls with properties
  // like "cold", and we don't have any call-clobbered registers, so these are
  // mostly all handled the same.
  return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
         CallConv == CallingConv::Cold ||
         CallConv == CallingConv::PreserveMost ||
         CallConv == CallingConv::PreserveAll ||
         CallConv == CallingConv::CXX_FAST_TLS ||
         CallConv == CallingConv::WASM_EmscriptenInvoke ||
         CallConv == CallingConv::Swift;
}

SDValue
WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  MachineFunction &MF = DAG.getMachineFunction();
  auto Layout = MF.getDataLayout();

  CallingConv::ID CallConv = CLI.CallConv;
  if (!callingConvSupported(CallConv))
    fail(DL, DAG,
         "WebAssembly doesn't support language-specific or target-specific "
         "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  if (CLI.IsTailCall) {
    auto NoTail = [&](const char *Msg) {
      if (CLI.CB && CLI.CB->isMustTailCall())
        fail(DL, DAG, Msg);
      CLI.IsTailCall = false;
    };

    if (!Subtarget->hasTailCall())
      NoTail("WebAssembly 'tail-call' feature not enabled");

    // Varargs calls cannot be tail calls because the buffer is on the stack
    if (CLI.IsVarArg)
      NoTail("WebAssembly does not support varargs tail calls");

    // Do not tail call unless caller and callee return types match
    const Function &F = MF.getFunction();
    const TargetMachine &TM = getTargetMachine();
    Type *RetTy = F.getReturnType();
    SmallVector<MVT, 4> CallerRetTys;
    SmallVector<MVT, 4> CalleeRetTys;
    computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
    computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
    bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
                      std::equal(CallerRetTys.begin(), CallerRetTys.end(),
                                 CalleeRetTys.begin());
    if (!TypesMatch)
      NoTail("WebAssembly tail call requires caller and callee return types "
             "to match");

    // If pointers to local stack values are passed, we cannot tail call
    if (CLI.CB) {
      for (auto &Arg : CLI.CB->args()) {
        Value *Val = Arg.get();
        // Trace the value back through pointer operations
        while (true) {
          Value *Src = Val->stripPointerCastsAndAliases();
          if (auto *GEP = dyn_cast<GetElementPtrInst>(Src))
            Src = GEP->getPointerOperand();
          if (Val == Src)
            break;
          Val = Src;
        }
        if (isa<AllocaInst>(Val)) {
          NoTail(
              "WebAssembly does not support tail calling with stack arguments");
          break;
        }
      }
    }
  }

  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;

  // The generic code may have added an sret argument. If we're lowering an
  // invoke function, the ABI requires that the function pointer be the first
  // argument, so we may have to swap the arguments.
  if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
      Outs[0].Flags.isSRet()) {
    std::swap(Outs[0], Outs[1]);
    std::swap(OutVals[0], OutVals[1]);
  }

  bool HasSwiftSelfArg = false;
  bool HasSwiftErrorArg = false;
  unsigned NumFixedArgs = 0;
  for (unsigned I = 0; I < Outs.size(); ++I) {
    const ISD::OutputArg &Out = Outs[I];
    SDValue &OutVal = OutVals[I];
    HasSwiftSelfArg |= Out.Flags.isSwiftSelf();
    HasSwiftErrorArg |= Out.Flags.isSwiftError();
    if (Out.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
      auto &MFI = MF.getFrameInfo();
      int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
                                     Out.Flags.getNonZeroByValAlign(),
                                     /*isSS=*/false);
      SDValue SizeNode =
          DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
      SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      Chain = DAG.getMemcpy(
          Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getNonZeroByValAlign(),
          /*isVolatile*/ false, /*AlwaysInline=*/false,
          /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
      OutVal = FINode;
    }
    // Count the number of fixed args *after* legalization.
    NumFixedArgs += Out.IsFixed;
  }

  bool IsVarArg = CLI.IsVarArg;
  auto PtrVT = getPointerTy(Layout);

  // For swiftcc, emit additional swiftself and swifterror arguments if there
  // aren't any. These additional arguments are also added for the callee
  // signature; they are necessary to match caller and callee signatures for
  // indirect calls.
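  // (call_indirect validates the callee's declared signature at runtime and
  // traps on a mismatch, so caller and callee types must agree exactly.)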
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftSelf();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
    if (!HasSwiftErrorArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftError();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    // Outgoing non-fixed arguments are placed in a buffer. First
    // compute their offsets and the total amount of buffer space needed.
    for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
      const ISD::OutputArg &Out = Outs[I];
      SDValue &Arg = OutVals[I];
      EVT VT = Arg.getValueType();
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
      Align Alignment =
          std::max(Out.Flags.getNonZeroOrigAlign(), Layout.getABITypeAlign(Ty));
      unsigned Offset =
          CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                        Offset, VT.getSimpleVT(),
                                        CCValAssign::Full));
    }
  }

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  SDValue FINode;
  if (IsVarArg && NumBytes) {
    // For non-fixed arguments, next emit stores to store the argument values
    // to the stack buffer at the offsets computed above.
    int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
                                                 Layout.getStackAlignment(),
                                                 /*isSS=*/false);
    unsigned ValNo = 0;
    SmallVector<SDValue, 8> Chains;
    for (SDValue Arg : drop_begin(OutVals, NumFixedArgs)) {
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
                                DAG.getConstant(Offset, DL, PtrVT));
      Chains.push_back(
          DAG.getStore(Chain, DL, Arg, Add,
                       MachinePointerInfo::getFixedStack(MF, FI, Offset)));
    }
    if (!Chains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else if (IsVarArg) {
    FINode = DAG.getIntPtrConstant(0, DL);
  }

  if (Callee->getOpcode() == ISD::GlobalAddress) {
    // If the callee is a GlobalAddress node (quite common, every direct call
    // is), turn it into a TargetGlobalAddress node so that LowerGlobalAddress
    // doesn't add MO_GOT, which is not needed for direct calls.
    GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Callee);
    Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                        getPointerTy(DAG.getDataLayout()),
                                        GA->getOffset());
    Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
                         getPointerTy(DAG.getDataLayout()), Callee);
  }

  // Compute the operands for the CALLn node.
  SmallVector<SDValue, 16> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
  // isn't reliable.
  Ops.append(OutVals.begin(),
             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
  // Add a pointer to the vararg buffer.
  if (IsVarArg)
    Ops.push_back(FINode);

  SmallVector<EVT, 8> InTys;
  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers.
    InTys.push_back(In.VT);
  }

  // Lastly, if this is a call to a funcref, we need to add a table.set
  // instruction to the chain and transform the call.
  if (CLI.CB && WebAssembly::isWebAssemblyFuncrefType(
                    CLI.CB->getCalledOperand()->getType())) {
    // In the absence of the function references proposal, where a funcref call
    // would be lowered to call_ref, we use reference types to generate a
    // table.set that installs the funcref in a special table used solely for
    // this purpose, followed by a call_indirect. Here we just generate the
    // table.set, and return its SDValue so that LowerCall can finalize the
    // lowering by generating the call_indirect.
    SDValue Chain = Ops[0];

    MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
        MF.getContext(), Subtarget);
    SDValue Sym = DAG.getMCSymbol(Table, PtrVT);
    SDValue TableSlot = DAG.getConstant(0, DL, MVT::i32);
    SDValue TableSetOps[] = {Chain, Sym, TableSlot, Callee};
    SDValue TableSet = DAG.getMemIntrinsicNode(
        WebAssemblyISD::TABLE_SET, DL, DAG.getVTList(MVT::Other), TableSetOps,
        MVT::funcref,
        // Machine Mem Operand args
        MachinePointerInfo(
            WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF),
        CLI.CB->getCalledOperand()->getPointerAlignment(DAG.getDataLayout()),
        MachineMemOperand::MOStore);

    Ops[0] = TableSet; // The new chain is the TableSet itself
  }

  if (CLI.IsTailCall) {
    // ret_calls do not return values to the current frame
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
  }

  InTys.push_back(MVT::Other);
  SDVTList InTyList = DAG.getVTList(InTys);
  SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, InTyList, Ops);

  for (size_t I = 0; I < Ins.size(); ++I)
    InVals.push_back(Res.getValue(I));

  // Return the chain
  return Res.getValue(Ins.size());
}

bool WebAssemblyTargetLowering::CanLowerReturn(
    CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext & /*Context*/) const {
  // WebAssembly can only handle returning tuples with multivalue enabled
  return WebAssembly::canLowerReturn(Outs.size(), Subtarget);
}

SDValue WebAssemblyTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
    SelectionDAG &DAG) const {
  assert(WebAssembly::canLowerReturn(Outs.size(), Subtarget) &&
         "MVP WebAssembly can only return up to one value");
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  SmallVector<SDValue, 4> RetOps(1, Chain);
  RetOps.append(OutVals.begin(), OutVals.end());
  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);

  // Record the number and types of the return values.
  for (const ISD::OutputArg &Out : Outs) {
    assert(!Out.Flags.isByVal() && "byval is not valid for return values");
    assert(!Out.Flags.isNest() && "nest is not valid for return values");
    assert(Out.IsFixed && "non-fixed return value is not valid");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
  }

  return Chain;
}

SDValue WebAssemblyTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  MachineFunction &MF = DAG.getMachineFunction();
  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();

  // Set up the incoming ARGUMENTS value, which serves to represent the
  // liveness of the incoming values before they're represented by virtual
  // registers.
  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);

  bool HasSwiftErrorArg = false;
  bool HasSwiftSelfArg = false;
  for (const ISD::InputArg &In : Ins) {
    HasSwiftSelfArg |= In.Flags.isSwiftSelf();
    HasSwiftErrorArg |= In.Flags.isSwiftError();
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (In.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers.
    InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
                                           DAG.getTargetConstant(InVals.size(),
                                                                 DL, MVT::i32))
                             : DAG.getUNDEF(In.VT));

    // Record the number and types of arguments.
    MFI->addParam(In.VT);
  }

  // For swiftcc, emit additional swiftself and swifterror arguments if there
  // aren't any. These additional arguments are also added for the callee
  // signature; they are necessary to match caller and callee signatures for
  // indirect calls.
  auto PtrVT = getPointerTy(MF.getDataLayout());
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      MFI->addParam(PtrVT);
    }
    if (!HasSwiftErrorArg) {
      MFI->addParam(PtrVT);
    }
  }
  // Varargs are copied into a buffer allocated by the caller, and a pointer to
  // the buffer is passed as an argument.
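  // (This mirrors the buffer LowerCall creates above: fixed arguments travel
  // as ordinary wasm parameters, while everything variadic lives in memory.)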
1391 if (IsVarArg) { 1392 MVT PtrVT = getPointerTy(MF.getDataLayout()); 1393 Register VarargVreg = 1394 MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT)); 1395 MFI->setVarargBufferVreg(VarargVreg); 1396 Chain = DAG.getCopyToReg( 1397 Chain, DL, VarargVreg, 1398 DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT, 1399 DAG.getTargetConstant(Ins.size(), DL, MVT::i32))); 1400 MFI->addParam(PtrVT); 1401 } 1402 1403 // Record the number and types of arguments and results. 1404 SmallVector<MVT, 4> Params; 1405 SmallVector<MVT, 4> Results; 1406 computeSignatureVTs(MF.getFunction().getFunctionType(), &MF.getFunction(), 1407 MF.getFunction(), DAG.getTarget(), Params, Results); 1408 for (MVT VT : Results) 1409 MFI->addResult(VT); 1410 // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify 1411 // the param logic here with ComputeSignatureVTs 1412 assert(MFI->getParams().size() == Params.size() && 1413 std::equal(MFI->getParams().begin(), MFI->getParams().end(), 1414 Params.begin())); 1415 1416 return Chain; 1417 } 1418 1419 void WebAssemblyTargetLowering::ReplaceNodeResults( 1420 SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const { 1421 switch (N->getOpcode()) { 1422 case ISD::SIGN_EXTEND_INREG: 1423 // Do not add any results, signifying that N should not be custom lowered 1424 // after all. This happens because simd128 turns on custom lowering for 1425 // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an 1426 // illegal type. 1427 break; 1428 case ISD::SIGN_EXTEND_VECTOR_INREG: 1429 case ISD::ZERO_EXTEND_VECTOR_INREG: 1430 // Do not add any results, signifying that N should not be custom lowered. 1431 // EXTEND_VECTOR_INREG is implemented for some vectors, but not all. 1432 break; 1433 default: 1434 llvm_unreachable( 1435 "ReplaceNodeResults not implemented for this op for WebAssembly!"); 1436 } 1437 } 1438 1439 //===----------------------------------------------------------------------===// 1440 // Custom lowering hooks. 
//===----------------------------------------------------------------------===//

SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("unimplemented operation lowering");
    return SDValue();
  case ISD::FrameIndex:
    return LowerFrameIndex(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:
    return LowerGlobalTLSAddress(Op, DAG);
  case ISD::ExternalSymbol:
    return LowerExternalSymbol(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::BR_JT:
    return LowerBR_JT(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::BlockAddress:
  case ISD::BRIND:
    fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
    return SDValue();
  case ISD::RETURNADDR:
    return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::CopyToReg:
    return LowerCopyToReg(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
  case ISD::INSERT_VECTOR_ELT:
    return LowerAccessVectorElement(Op, DAG);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
    return LowerIntrinsic(Op, DAG);
  case ISD::SIGN_EXTEND_INREG:
    return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::ZERO_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    return LowerEXTEND_VECTOR_INREG(Op, DAG);
  case ISD::BUILD_VECTOR:
    return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SETCC:
    return LowerSETCC(Op, DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return LowerShift(Op, DAG);
  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
    return LowerFP_TO_INT_SAT(Op, DAG);
  case ISD::LOAD:
    return LowerLoad(Op, DAG);
  case ISD::STORE:
    return LowerStore(Op, DAG);
  case ISD::CTPOP:
  case ISD::CTLZ:
  case ISD::CTTZ:
    return DAG.UnrollVectorOp(Op.getNode());
  case ISD::CLEAR_CACHE:
    report_fatal_error("llvm.clear_cache is not supported on wasm");
  }
}

static bool IsWebAssemblyGlobal(SDValue Op) {
  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
    return WebAssembly::isWasmVarAddressSpace(GA->getAddressSpace());

  return false;
}

static std::optional<unsigned> IsWebAssemblyLocal(SDValue Op,
                                                  SelectionDAG &DAG) {
  const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op);
  if (!FI)
    return std::nullopt;

  auto &MF = DAG.getMachineFunction();
  return WebAssemblyFrameLowering::getLocalForStackObject(MF, FI->getIndex());
}

SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
  const SDValue &Value = SN->getValue();
  const SDValue &Base = SN->getBasePtr();
  const SDValue &Offset = SN->getOffset();

  if (IsWebAssemblyGlobal(Base)) {
    if (!Offset->isUndef())
      report_fatal_error("unexpected offset when storing to webassembly global",
                         false);

    SDVTList Tys = DAG.getVTList(MVT::Other);
    SDValue Ops[] = {SN->getChain(), Value, Base};
    return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_SET, DL, Tys, Ops,
                                   SN->getMemoryVT(), SN->getMemOperand());
  }

  if (std::optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
    if (!Offset->isUndef())
      report_fatal_error("unexpected offset when storing to webassembly local",
                         false);

    SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
    SDVTList Tys = DAG.getVTList(MVT::Other); // The chain.
    SDValue Ops[] = {SN->getChain(), Idx, Value};
    return DAG.getNode(WebAssemblyISD::LOCAL_SET, DL, Tys, Ops);
  }

  if (WebAssembly::isWasmVarAddressSpace(SN->getAddressSpace()))
    report_fatal_error(
        "Encountered an unlowerable store to the wasm_var address space",
        false);

  return Op;
}

SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
  const SDValue &Base = LN->getBasePtr();
  const SDValue &Offset = LN->getOffset();

  if (IsWebAssemblyGlobal(Base)) {
    if (!Offset->isUndef())
      report_fatal_error(
          "unexpected offset when loading from webassembly global", false);

    SDVTList Tys = DAG.getVTList(LN->getValueType(0), MVT::Other);
    SDValue Ops[] = {LN->getChain(), Base};
    return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_GET, DL, Tys, Ops,
                                   LN->getMemoryVT(), LN->getMemOperand());
  }

  if (std::optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
    if (!Offset->isUndef())
      report_fatal_error(
          "unexpected offset when loading from webassembly local", false);

    SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
    EVT LocalVT = LN->getValueType(0);
    SDValue LocalGet = DAG.getNode(WebAssemblyISD::LOCAL_GET, DL, LocalVT,
                                   {LN->getChain(), Idx});
    SDValue Result = DAG.getMergeValues({LocalGet, LN->getChain()}, DL);
    assert(Result->getNumValues() == 2 && "Loads must carry a chain!");
    return Result;
  }

  if (WebAssembly::isWasmVarAddressSpace(LN->getAddressSpace()))
    report_fatal_error(
        "Encountered an unlowerable load from the wasm_var address space",
        false);

  return Op;
}

SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(2);
  if (isa<FrameIndexSDNode>(Src.getNode())) {
    // CopyToReg nodes don't support FrameIndex operands. Other targets select
    // the FI to some LEA-like instruction, but since we don't have that, we
    // need to insert some kind of instruction that can take an FI operand and
    // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
    // local.copy between Op and its FI operand.
    SDValue Chain = Op.getOperand(0);
    SDLoc DL(Op);
    Register Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
    EVT VT = Src.getValueType();
    SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
                                                   : WebAssembly::COPY_I64,
                                    DL, VT, Src),
                 0);
    return Op.getNode()->getNumValues() == 1
               ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
               : DAG.getCopyToReg(Chain, DL, Reg, Copy,
                                  Op.getNumOperands() == 4
                                      ? Op.getOperand(3)
                                      : SDValue());
  }
  return SDValue();
}

SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
                                                   SelectionDAG &DAG) const {
  int FI = cast<FrameIndexSDNode>(Op)->getIndex();
  return DAG.getTargetFrameIndex(FI, Op.getValueType());
}

SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);

  if (!Subtarget->getTargetTriple().isOSEmscripten()) {
    fail(DL, DAG,
         "Non-Emscripten WebAssembly hasn't implemented "
         "__builtin_return_address");
    return SDValue();
  }

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  unsigned Depth = Op.getConstantOperandVal(0);
  MakeLibCallOptions CallOptions;
  return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
                     {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
      .first;
}

SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // Non-zero depths are not supported by WebAssembly currently. Use the
  // legalizer's default expansion, which is to return 0 (what this function is
  // documented to do).
  if (Op.getConstantOperandVal(0) > 0)
    return SDValue();

  DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
  EVT VT = Op.getValueType();
  Register FP =
      Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
}

SDValue
WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *GA = cast<GlobalAddressSDNode>(Op);

  MachineFunction &MF = DAG.getMachineFunction();
  if (!MF.getSubtarget<WebAssemblySubtarget>().hasBulkMemory())
    report_fatal_error("cannot use thread-local storage without bulk memory",
                       false);

  const GlobalValue *GV = GA->getGlobal();

  // Currently only Emscripten supports dynamic linking with threads.
  // Therefore, on other targets, if we have thread-local storage, only the
  // local-exec model is possible.
  auto model = Subtarget->getTargetTriple().isOSEmscripten()
                   ? GV->getThreadLocalMode()
                   : GlobalValue::LocalExecTLSModel;

  // Unsupported TLS modes
  assert(model != GlobalValue::NotThreadLocal);
  assert(model != GlobalValue::InitialExecTLSModel);

  if (model == GlobalValue::LocalExecTLSModel ||
      model == GlobalValue::LocalDynamicTLSModel ||
      (model == GlobalValue::GeneralDynamicTLSModel &&
       getTargetMachine().shouldAssumeDSOLocal(GV))) {
    // For DSO-local TLS variables we use an offset from __tls_base.

    MVT PtrVT = getPointerTy(DAG.getDataLayout());
    auto GlobalGet = PtrVT == MVT::i64
                         ? WebAssembly::GLOBAL_GET_I64
                         : WebAssembly::GLOBAL_GET_I32;
    const char *BaseName = MF.createExternalSymbolName("__tls_base");

    SDValue BaseAddr(
        DAG.getMachineNode(GlobalGet, DL, PtrVT,
                           DAG.getTargetExternalSymbol(BaseName, PtrVT)),
        0);

    SDValue TLSOffset = DAG.getTargetGlobalAddress(
        GV, DL, PtrVT, GA->getOffset(), WebAssemblyII::MO_TLS_BASE_REL);
    SDValue SymOffset =
        DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, TLSOffset);

    return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymOffset);
  }

  assert(model == GlobalValue::GeneralDynamicTLSModel);

  EVT VT = Op.getValueType();
  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                     DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
                                                GA->getOffset(),
                                                WebAssemblyII::MO_GOT_TLS));
}

SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
                                                      SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *GA = cast<GlobalAddressSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(GA->getTargetFlags() == 0 &&
         "Unexpected target flags on generic GlobalAddressSDNode");
  if (!WebAssembly::isValidAddressSpace(GA->getAddressSpace()))
    fail(DL, DAG, "Invalid address space for WebAssembly target");

  unsigned OperandFlags = 0;
  const GlobalValue *GV = GA->getGlobal();
  // Since WebAssembly tables cannot yet be shared across modules, we don't
  // need special treatment for tables in PIC mode.
  if (isPositionIndependent() &&
      !WebAssembly::isWebAssemblyTableType(GV->getValueType())) {
    if (getTargetMachine().shouldAssumeDSOLocal(GV)) {
      MachineFunction &MF = DAG.getMachineFunction();
      MVT PtrVT = getPointerTy(MF.getDataLayout());
      const char *BaseName;
      if (GV->getValueType()->isFunctionTy()) {
        BaseName = MF.createExternalSymbolName("__table_base");
        OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
      } else {
        BaseName = MF.createExternalSymbolName("__memory_base");
        OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
      }
      SDValue BaseAddr =
          DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
                      DAG.getTargetExternalSymbol(BaseName, PtrVT));

      SDValue SymAddr = DAG.getNode(
          WebAssemblyISD::WrapperREL, DL, VT,
          DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
                                     OperandFlags));

      return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
    }
    OperandFlags = WebAssemblyII::MO_GOT;
  }

  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                     DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
                                                GA->getOffset(), OperandFlags));
}

SDValue
WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *ES = cast<ExternalSymbolSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(ES->getTargetFlags() == 0 &&
         "Unexpected target flags on generic ExternalSymbolSDNode");
  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                     DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
}

SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // There's no need for a Wrapper node because we always incorporate a jump
  // table operand into a BR_TABLE instruction, rather than ever
  // materializing it in a register.
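  // Illustrative sketch (not literal output): a dense switch over cases 0..2
  // ultimately becomes a single instruction along the lines of
  //   br_table $case0 $case1 $case2 $default
  // whose operands are branch targets, so the table index is consumed
  // directly and the jump table address never lives in a register.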
  const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
                                JT->getTargetFlags());
}

SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
  SDValue Index = Op.getOperand(2);
  assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");

  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Index);

  MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
  const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;

  // Add an operand for each case.
  for (auto *MBB : MBBs)
    Ops.push_back(DAG.getBasicBlock(MBB));

  // Add the first MBB as a dummy default target for now. This will be replaced
  // with the proper default target (and the preceding range check eliminated)
  // if possible by WebAssemblyFixBrTableDefaults.
  Ops.push_back(DAG.getBasicBlock(*MBBs.begin()));
  return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
}

SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());

  auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
                                    MFI->getVarargBufferVreg(), PtrVT);
  return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
                                                  SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned IntNo;
  switch (Op.getOpcode()) {
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN:
    IntNo = Op.getConstantOperandVal(1);
    break;
  case ISD::INTRINSIC_WO_CHAIN:
    IntNo = Op.getConstantOperandVal(0);
    break;
  default:
    llvm_unreachable("Invalid intrinsic");
  }
  SDLoc DL(Op);

  switch (IntNo) {
  default:
    return SDValue(); // Don't custom lower most intrinsics.
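  // The position-independent case below computes the LSDA address with the
  // usual wasm PIC pattern, sketched here for reference (illustrative
  // assembly, not literal output):
  //   global.get __memory_base   ;; runtime start of this module's data
  //   i32.const  sym@MBREL       ;; link-time offset of sym from that base
  //   i32.add                    ;; absolute address of sym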

  case Intrinsic::wasm_lsda: {
    auto PtrVT = getPointerTy(MF.getDataLayout());
    const char *SymName = MF.createExternalSymbolName(
        "GCC_except_table" + std::to_string(MF.getFunctionNumber()));
    if (isPositionIndependent()) {
      SDValue Node = DAG.getTargetExternalSymbol(
          SymName, PtrVT, WebAssemblyII::MO_MEMORY_BASE_REL);
      const char *BaseName = MF.createExternalSymbolName("__memory_base");
      SDValue BaseAddr =
          DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
                      DAG.getTargetExternalSymbol(BaseName, PtrVT));
      SDValue SymAddr =
          DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, Node);
      return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymAddr);
    }
    SDValue Node = DAG.getTargetExternalSymbol(SymName, PtrVT);
    return DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, Node);
  }

  case Intrinsic::wasm_shuffle: {
    // Drop in-chain and replace undefs, but otherwise pass through unchanged.
    SDValue Ops[18];
    size_t OpIdx = 0;
    Ops[OpIdx++] = Op.getOperand(1);
    Ops[OpIdx++] = Op.getOperand(2);
    while (OpIdx < 18) {
      const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
      if (MaskIdx.isUndef() || MaskIdx.getNode()->getAsZExtVal() >= 32) {
        bool isTarget = MaskIdx.getNode()->getOpcode() == ISD::TargetConstant;
        Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32, isTarget);
      } else {
        Ops[OpIdx++] = MaskIdx;
      }
    }
    return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
  }
  }
}

SDValue
WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // If sign extension operations are disabled, allow sext_inreg only if the
  // operand is a vector extract of an i8 or i16 lane. SIMD does not depend on
  // sign extension operations, but allowing sext_inreg in this context lets
  // us have simple patterns to select extract_lane_s instructions. Expanding
  // sext_inreg everywhere would be simpler in this file, but would
  // necessitate large and brittle patterns to undo the expansion and select
  // extract_lane_s instructions.
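  // For example (illustrative): (sext_inreg (extract_vector_elt $v, 0), i8)
  // should select to a single i8x16.extract_lane_s rather than an extract
  // followed by an explicit shift pair.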
  assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
  if (Op.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();

  const SDValue &Extract = Op.getOperand(0);
  MVT VecT = Extract.getOperand(0).getSimpleValueType();
  if (VecT.getVectorElementType().getSizeInBits() > 32)
    return SDValue();
  MVT ExtractedLaneT =
      cast<VTSDNode>(Op.getOperand(1).getNode())->getVT().getSimpleVT();
  MVT ExtractedVecT =
      MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
  if (ExtractedVecT == VecT)
    return Op;

  // Bitcast vector to appropriate type to ensure ISel pattern coverage
  const SDNode *Index = Extract.getOperand(1).getNode();
  if (!isa<ConstantSDNode>(Index))
    return SDValue();
  unsigned IndexVal = Index->getAsZExtVal();
  unsigned Scale =
      ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
  assert(Scale > 1);
  SDValue NewIndex =
      DAG.getConstant(IndexVal * Scale, DL, Index->getValueType(0));
  SDValue NewExtract = DAG.getNode(
      ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
      DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
  return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract,
                     Op.getOperand(1));
}

SDValue
WebAssemblyTargetLowering::LowerEXTEND_VECTOR_INREG(SDValue Op,
                                                    SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Src = Op.getOperand(0);
  EVT SrcVT = Src.getValueType();

  if (SrcVT.getVectorElementType() == MVT::i1 ||
      SrcVT.getVectorElementType() == MVT::i64)
    return SDValue();

  assert(VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits() == 0 &&
         "Unexpected extension factor.");
  unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();

  if (Scale != 2 && Scale != 4 && Scale != 8)
    return SDValue();

  unsigned Ext;
  switch (Op.getOpcode()) {
  case ISD::ZERO_EXTEND_VECTOR_INREG:
    Ext = WebAssemblyISD::EXTEND_LOW_U;
    break;
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    Ext = WebAssemblyISD::EXTEND_LOW_S;
    break;
  }

  SDValue Ret = Src;
  while (Scale != 1) {
    Ret = DAG.getNode(Ext, DL,
                      Ret.getValueType()
                          .widenIntegerVectorElementType(*DAG.getContext())
                          .getHalfNumVectorElementsVT(*DAG.getContext()),
                      Ret);
    Scale /= 2;
  }
  assert(Ret.getValueType() == VT);
  return Ret;
}

static SDValue LowerConvertLow(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  if (Op.getValueType() != MVT::v2f64)
    return SDValue();

  auto GetConvertedLane = [](SDValue Op, unsigned &Opcode, SDValue &SrcVec,
                             unsigned &Index) -> bool {
    switch (Op.getOpcode()) {
    case ISD::SINT_TO_FP:
      Opcode = WebAssemblyISD::CONVERT_LOW_S;
      break;
    case ISD::UINT_TO_FP:
      Opcode = WebAssemblyISD::CONVERT_LOW_U;
      break;
    case ISD::FP_EXTEND:
      Opcode = WebAssemblyISD::PROMOTE_LOW;
      break;
    default:
      return false;
    }

    auto ExtractVector = Op.getOperand(0);
    if (ExtractVector.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return false;

    if (!isa<ConstantSDNode>(ExtractVector.getOperand(1).getNode()))
      return false;

    SrcVec = ExtractVector.getOperand(0);
    Index = ExtractVector.getConstantOperandVal(1);
    return true;
  };

  unsigned LHSOpcode, RHSOpcode, LHSIndex,
      RHSIndex;
  SDValue LHSSrcVec, RHSSrcVec;
  if (!GetConvertedLane(Op.getOperand(0), LHSOpcode, LHSSrcVec, LHSIndex) ||
      !GetConvertedLane(Op.getOperand(1), RHSOpcode, RHSSrcVec, RHSIndex))
    return SDValue();

  if (LHSOpcode != RHSOpcode)
    return SDValue();

  MVT ExpectedSrcVT;
  switch (LHSOpcode) {
  case WebAssemblyISD::CONVERT_LOW_S:
  case WebAssemblyISD::CONVERT_LOW_U:
    ExpectedSrcVT = MVT::v4i32;
    break;
  case WebAssemblyISD::PROMOTE_LOW:
    ExpectedSrcVT = MVT::v4f32;
    break;
  }
  if (LHSSrcVec.getValueType() != ExpectedSrcVT)
    return SDValue();

  auto Src = LHSSrcVec;
  if (LHSIndex != 0 || RHSIndex != 1 || LHSSrcVec != RHSSrcVec) {
    // Shuffle the source vector so that the converted lanes are the low lanes.
    Src = DAG.getVectorShuffle(
        ExpectedSrcVT, DL, LHSSrcVec, RHSSrcVec,
        {static_cast<int>(LHSIndex), static_cast<int>(RHSIndex) + 4, -1, -1});
  }
  return DAG.getNode(LHSOpcode, DL, MVT::v2f64, Src);
}

SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  if (auto ConvertLow = LowerConvertLow(Op, DAG))
    return ConvertLow;

  SDLoc DL(Op);
  const EVT VecT = Op.getValueType();
  const EVT LaneT = Op.getOperand(0).getValueType();
  const size_t Lanes = Op.getNumOperands();
  bool CanSwizzle = VecT == MVT::v16i8;

  // BUILD_VECTORs are lowered to the instruction that initializes the highest
  // possible number of lanes at once followed by a sequence of replace_lane
  // instructions to individually initialize any remaining lanes.

  // TODO: Tune this. For example, lanewise swizzling is very expensive, so
  // swizzled lanes should be given greater weight.

  // TODO: Investigate looping rather than always extracting/replacing specific
  // lanes to fill gaps.

  auto IsConstant = [](const SDValue &V) {
    return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
  };

  // Returns the source vector and index vector pair if they exist. Checks for:
  //   (extract_vector_elt
  //     $src,
  //     (sign_extend_inreg (extract_vector_elt $indices, $i))
  //   )
  auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
    auto Bail = std::make_pair(SDValue(), SDValue());
    if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return Bail;
    const SDValue &SwizzleSrc = Lane->getOperand(0);
    const SDValue &IndexExt = Lane->getOperand(1);
    if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG)
      return Bail;
    const SDValue &Index = IndexExt->getOperand(0);
    if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return Bail;
    const SDValue &SwizzleIndices = Index->getOperand(0);
    if (SwizzleSrc.getValueType() != MVT::v16i8 ||
        SwizzleIndices.getValueType() != MVT::v16i8 ||
        Index->getOperand(1)->getOpcode() != ISD::Constant ||
        Index->getConstantOperandVal(1) != I)
      return Bail;
    return std::make_pair(SwizzleSrc, SwizzleIndices);
  };

  // If the lane is extracted from another vector at a constant index, return
  // that vector. The source vector must not have more lanes than the dest
  // because the shufflevector indices are in terms of the destination lanes
  // and would not be able to address the smaller individual source lanes.
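  // For example (illustrative): a v4i32 lane (extract_vector_elt $src, 2)
  // with $src : v4i32 yields $src here, but extracting from a v8i16 source
  // would be rejected because v4i32 shuffle indices cannot address its
  // narrower lanes individually.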
  auto GetShuffleSrc = [&](const SDValue &Lane) {
    if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();
    if (!isa<ConstantSDNode>(Lane->getOperand(1).getNode()))
      return SDValue();
    if (Lane->getOperand(0).getValueType().getVectorNumElements() >
        VecT.getVectorNumElements())
      return SDValue();
    return Lane->getOperand(0);
  };

  using ValueEntry = std::pair<SDValue, size_t>;
  SmallVector<ValueEntry, 16> SplatValueCounts;

  using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;
  SmallVector<SwizzleEntry, 16> SwizzleCounts;

  using ShuffleEntry = std::pair<SDValue, size_t>;
  SmallVector<ShuffleEntry, 16> ShuffleCounts;

  auto AddCount = [](auto &Counts, const auto &Val) {
    auto CountIt =
        llvm::find_if(Counts, [&Val](auto E) { return E.first == Val; });
    if (CountIt == Counts.end()) {
      Counts.emplace_back(Val, 1);
    } else {
      CountIt->second++;
    }
  };

  auto GetMostCommon = [](auto &Counts) {
    auto CommonIt =
        std::max_element(Counts.begin(), Counts.end(), llvm::less_second());
    assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector");
    return *CommonIt;
  };

  size_t NumConstantLanes = 0;

  // Count eligible lanes for each type of vector creation op
  for (size_t I = 0; I < Lanes; ++I) {
    const SDValue &Lane = Op->getOperand(I);
    if (Lane.isUndef())
      continue;

    AddCount(SplatValueCounts, Lane);

    if (IsConstant(Lane))
      NumConstantLanes++;
    if (auto ShuffleSrc = GetShuffleSrc(Lane))
      AddCount(ShuffleCounts, ShuffleSrc);
    if (CanSwizzle) {
      auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
      if (SwizzleSrcs.first)
        AddCount(SwizzleCounts, SwizzleSrcs);
    }
  }

  SDValue SplatValue;
  size_t NumSplatLanes;
  std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);

  SDValue SwizzleSrc;
  SDValue SwizzleIndices;
  size_t NumSwizzleLanes = 0;
  if (SwizzleCounts.size())
    std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
                          NumSwizzleLanes) = GetMostCommon(SwizzleCounts);

  // Shuffles can draw from up to two vectors, so find the two most common
  // sources.
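  // E.g. (illustrative) if the lanes are drawn from $a, $a, $b, and $c, the
  // two most common sources $a and $b feed the shuffle, and the lone $c lane
  // is patched in afterwards with a replace_lane.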
  SDValue ShuffleSrc1, ShuffleSrc2;
  size_t NumShuffleLanes = 0;
  if (ShuffleCounts.size()) {
    std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts);
    llvm::erase_if(ShuffleCounts,
                   [&](const auto &Pair) { return Pair.first == ShuffleSrc1; });
  }
  if (ShuffleCounts.size()) {
    size_t AdditionalShuffleLanes;
    std::tie(ShuffleSrc2, AdditionalShuffleLanes) =
        GetMostCommon(ShuffleCounts);
    NumShuffleLanes += AdditionalShuffleLanes;
  }

  // Predicate returning true if the lane is properly initialized by the
  // original instruction
  std::function<bool(size_t, const SDValue &)> IsLaneConstructed;
  SDValue Result;
  // Prefer swizzles over shuffles over vector consts over splats
  if (NumSwizzleLanes >= NumShuffleLanes &&
      NumSwizzleLanes >= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) {
    Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc,
                         SwizzleIndices);
    auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
    IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
      return Swizzled == GetSwizzleSrcs(I, Lane);
    };
  } else if (NumShuffleLanes >= NumConstantLanes &&
             NumShuffleLanes >= NumSplatLanes) {
    size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits() / 8;
    size_t DestLaneCount = VecT.getVectorNumElements();
    size_t Scale1 = 1;
    size_t Scale2 = 1;
    SDValue Src1 = ShuffleSrc1;
    SDValue Src2 = ShuffleSrc2 ? ShuffleSrc2 : DAG.getUNDEF(VecT);
    if (Src1.getValueType() != VecT) {
      size_t LaneSize =
          Src1.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
      assert(LaneSize > DestLaneSize);
      Scale1 = LaneSize / DestLaneSize;
      Src1 = DAG.getBitcast(VecT, Src1);
    }
    if (Src2.getValueType() != VecT) {
      size_t LaneSize =
          Src2.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
      assert(LaneSize > DestLaneSize);
      Scale2 = LaneSize / DestLaneSize;
      Src2 = DAG.getBitcast(VecT, Src2);
    }

    int Mask[16];
    assert(DestLaneCount <= 16);
    for (size_t I = 0; I < DestLaneCount; ++I) {
      const SDValue &Lane = Op->getOperand(I);
      SDValue Src = GetShuffleSrc(Lane);
      if (Src == ShuffleSrc1) {
        Mask[I] = Lane->getConstantOperandVal(1) * Scale1;
      } else if (Src && Src == ShuffleSrc2) {
        Mask[I] = DestLaneCount + Lane->getConstantOperandVal(1) * Scale2;
      } else {
        Mask[I] = -1;
      }
    }
    ArrayRef<int> MaskRef(Mask, DestLaneCount);
    Result = DAG.getVectorShuffle(VecT, DL, Src1, Src2, MaskRef);
    IsLaneConstructed = [&](size_t, const SDValue &Lane) {
      auto Src = GetShuffleSrc(Lane);
      return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2);
    };
  } else if (NumConstantLanes >= NumSplatLanes) {
    SmallVector<SDValue, 16> ConstLanes;
    for (const SDValue &Lane : Op->op_values()) {
      if (IsConstant(Lane)) {
        // Values may need to be fixed so that they will sign extend to be
        // within the expected range during ISel. Check whether the value is in
        // bounds based on the lane bit width and if it is out of bounds, lop
        // off the extra bits and subtract 2^n to reflect giving the high bit
        // value -2^(n-1) rather than +2^(n-1). Skip the i64 case because it
        // cannot possibly be out of range.
        auto *Const = dyn_cast<ConstantSDNode>(Lane.getNode());
        int64_t Val = Const
                          ? Const->getSExtValue()
                          : 0;
        uint64_t LaneBits = 128 / Lanes;
        assert((LaneBits == 64 || Val >= -(1ll << (LaneBits - 1))) &&
               "Unexpected out of bounds negative value");
        if (Const && LaneBits != 64 && Val > (1ll << (LaneBits - 1)) - 1) {
          uint64_t Mask = (1ll << LaneBits) - 1;
          auto NewVal = (((uint64_t)Val & Mask) - (1ll << LaneBits)) & Mask;
          ConstLanes.push_back(DAG.getConstant(NewVal, SDLoc(Lane), LaneT));
        } else {
          ConstLanes.push_back(Lane);
        }
      } else if (LaneT.isFloatingPoint()) {
        ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
      } else {
        ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
      }
    }
    Result = DAG.getBuildVector(VecT, DL, ConstLanes);
    IsLaneConstructed = [&IsConstant](size_t _, const SDValue &Lane) {
      return IsConstant(Lane);
    };
  } else {
    // Use a splat (which might be selected as a load splat)
    Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
    IsLaneConstructed = [&SplatValue](size_t _, const SDValue &Lane) {
      return Lane == SplatValue;
    };
  }

  assert(Result);
  assert(IsLaneConstructed);

  // Add replace_lane instructions for any unhandled values
  for (size_t I = 0; I < Lanes; ++I) {
    const SDValue &Lane = Op->getOperand(I);
    if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
      Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
                           DAG.getConstant(I, DL, MVT::i32));
  }

  return Result;
}

SDValue
WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
  MVT VecType = Op.getOperand(0).getSimpleValueType();
  assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
  size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;

  // Space for two vector args and sixteen mask indices
  SDValue Ops[18];
  size_t OpIdx = 0;
  Ops[OpIdx++] = Op.getOperand(0);
  Ops[OpIdx++] = Op.getOperand(1);

  // Expand mask indices to byte indices and materialize them as operands
  for (int M : Mask) {
    for (size_t J = 0; J < LaneBytes; ++J) {
      // Lower undefs (represented by -1 in mask) to {0..J}, which use a
      // whole lane of vector input, to allow further reduction at VM. E.g.
      // match an 8x16 byte shuffle to an equivalent cheaper 32x4 shuffle.
      uint64_t ByteIndex = M == -1 ? J : (uint64_t)M * LaneBytes + J;
      Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
    }
  }

  return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
}

SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // The legalizer does not know how to expand the unsupported comparison modes
  // of i64x2 vectors, so we manually unroll them here.
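  // E.g. (illustrative): (setcc (v2i64 $x), (v2i64 $y), setlt) becomes a
  // build_vector of two scalar select_cc nodes, each yielding all-ones or
  // all-zeros for its lane.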
  assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64);
  SmallVector<SDValue, 2> LHS, RHS;
  DAG.ExtractVectorElements(Op->getOperand(0), LHS);
  DAG.ExtractVectorElements(Op->getOperand(1), RHS);
  const SDValue &CC = Op->getOperand(2);
  auto MakeLane = [&](unsigned I) {
    return DAG.getNode(ISD::SELECT_CC, DL, MVT::i64, LHS[I], RHS[I],
                       DAG.getConstant(uint64_t(-1), DL, MVT::i64),
                       DAG.getConstant(uint64_t(0), DL, MVT::i64), CC);
  };
  return DAG.getBuildVector(Op->getValueType(0), DL,
                            {MakeLane(0), MakeLane(1)});
}

SDValue
WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
                                                    SelectionDAG &DAG) const {
  // Allow constant lane indices, expand variable lane indices
  SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
  if (isa<ConstantSDNode>(IdxNode)) {
    // Ensure the index type is i32 to match the tablegen patterns
    uint64_t Idx = IdxNode->getAsZExtVal();
    SmallVector<SDValue, 3> Ops(Op.getNode()->ops());
    Ops[Op.getNumOperands() - 1] =
        DAG.getConstant(Idx, SDLoc(IdxNode), MVT::i32);
    return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), Ops);
  }
  // Perform default expansion
  return SDValue();
}

static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
  EVT LaneT = Op.getSimpleValueType().getVectorElementType();
  // 32-bit and 64-bit unrolled shifts will have proper semantics
  if (LaneT.bitsGE(MVT::i32))
    return DAG.UnrollVectorOp(Op.getNode());
  // Otherwise mask the shift value to get proper semantics from 32-bit shift
  SDLoc DL(Op);
  size_t NumLanes = Op.getSimpleValueType().getVectorNumElements();
  SDValue Mask = DAG.getConstant(LaneT.getSizeInBits() - 1, DL, MVT::i32);
  unsigned ShiftOpcode = Op.getOpcode();
  SmallVector<SDValue, 16> ShiftedElements;
  DAG.ExtractVectorElements(Op.getOperand(0), ShiftedElements, 0, 0, MVT::i32);
  SmallVector<SDValue, 16> ShiftElements;
  DAG.ExtractVectorElements(Op.getOperand(1), ShiftElements, 0, 0, MVT::i32);
  SmallVector<SDValue, 16> UnrolledOps;
  for (size_t i = 0; i < NumLanes; ++i) {
    SDValue MaskedShiftValue =
        DAG.getNode(ISD::AND, DL, MVT::i32, ShiftElements[i], Mask);
    SDValue ShiftedValue = ShiftedElements[i];
    if (ShiftOpcode == ISD::SRA)
      ShiftedValue = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32,
                                 ShiftedValue, DAG.getValueType(LaneT));
    UnrolledOps.push_back(
        DAG.getNode(ShiftOpcode, DL, MVT::i32, ShiftedValue, MaskedShiftValue));
  }
  return DAG.getBuildVector(Op.getValueType(), DL, UnrolledOps);
}

SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);

  // Only manually lower vector shifts
  assert(Op.getSimpleValueType().isVector());

  uint64_t LaneBits = Op.getValueType().getScalarSizeInBits();
  auto ShiftVal = Op.getOperand(1);

  // Try to skip bitmask operation since it is implied inside shift instruction
  auto SkipImpliedMask = [](SDValue MaskOp, uint64_t MaskBits) {
    if (MaskOp.getOpcode() != ISD::AND)
      return MaskOp;
    SDValue LHS = MaskOp.getOperand(0);
    SDValue RHS = MaskOp.getOperand(1);
    if (MaskOp.getValueType().isVector()) {
      APInt MaskVal;
      if (!ISD::isConstantSplatVector(RHS.getNode(), MaskVal))
        std::swap(LHS, RHS);

      if (ISD::isConstantSplatVector(RHS.getNode(), MaskVal) &&
          MaskVal ==
              MaskBits)
        MaskOp = LHS;
    } else {
      if (!isa<ConstantSDNode>(RHS.getNode()))
        std::swap(LHS, RHS);

      auto ConstantRHS = dyn_cast<ConstantSDNode>(RHS.getNode());
      if (ConstantRHS && ConstantRHS->getAPIntValue() == MaskBits)
        MaskOp = LHS;
    }

    return MaskOp;
  };

  // Skip vector and operation
  ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
  ShiftVal = DAG.getSplatValue(ShiftVal);
  if (!ShiftVal)
    return unrollVectorShift(Op, DAG);

  // Skip scalar and operation
  ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
  // Use anyext because none of the high bits can affect the shift
  ShiftVal = DAG.getAnyExtOrTrunc(ShiftVal, DL, MVT::i32);

  unsigned Opcode;
  switch (Op.getOpcode()) {
  case ISD::SHL:
    Opcode = WebAssemblyISD::VEC_SHL;
    break;
  case ISD::SRA:
    Opcode = WebAssemblyISD::VEC_SHR_S;
    break;
  case ISD::SRL:
    Opcode = WebAssemblyISD::VEC_SHR_U;
    break;
  default:
    llvm_unreachable("unexpected opcode");
  }

  return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal);
}

SDValue WebAssemblyTargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
                                                      SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT ResT = Op.getValueType();
  EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

  if ((ResT == MVT::i32 || ResT == MVT::i64) &&
      (SatVT == MVT::i32 || SatVT == MVT::i64))
    return Op;

  if (ResT == MVT::v4i32 && SatVT == MVT::i32)
    return Op;

  return SDValue();
}

//===----------------------------------------------------------------------===//
// Custom DAG combine hooks
//===----------------------------------------------------------------------===//
static SDValue
performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;
  auto Shuffle = cast<ShuffleVectorSDNode>(N);

  // Hoist vector bitcasts that don't change the number of lanes out of unary
  // shuffles, where they are less likely to get in the way of other combines.
  // (shuffle (vNxT1 (bitcast (vNxT0 x))), undef, mask) ->
  // (vNxT1 (bitcast (vNxT0 (shuffle x, undef, mask))))
  SDValue Bitcast = N->getOperand(0);
  if (Bitcast.getOpcode() != ISD::BITCAST)
    return SDValue();
  if (!N->getOperand(1).isUndef())
    return SDValue();
  SDValue CastOp = Bitcast.getOperand(0);
  EVT SrcType = CastOp.getValueType();
  EVT DstType = Bitcast.getValueType();
  if (!SrcType.is128BitVector() ||
      SrcType.getVectorNumElements() != DstType.getVectorNumElements())
    return SDValue();
  SDValue NewShuffle = DAG.getVectorShuffle(
      SrcType, SDLoc(N), CastOp, DAG.getUNDEF(SrcType), Shuffle->getMask());
  return DAG.getBitcast(DstType, NewShuffle);
}

/// Convert ({u,s}itofp vec) --> ({u,s}itofp ({s,z}ext vec)) so it doesn't get
/// split up into scalar instructions during legalization, and the vector
/// extending instructions are selected in performVectorExtendCombine below.
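/// For example (illustrative): (v4f32 (sint_to_fp (v4i8 $x))) becomes
/// (v4f32 (sint_to_fp (v4i32 (sign_extend $x)))), so both the extend and the
/// conversion can select to SIMD instructions instead of being scalarized.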
static SDValue
performVectorExtendToFPCombine(SDNode *N,
                               TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;
  assert(N->getOpcode() == ISD::UINT_TO_FP ||
         N->getOpcode() == ISD::SINT_TO_FP);

  EVT InVT = N->getOperand(0)->getValueType(0);
  EVT ResVT = N->getValueType(0);
  MVT ExtVT;
  if (ResVT == MVT::v4f32 && (InVT == MVT::v4i16 || InVT == MVT::v4i8))
    ExtVT = MVT::v4i32;
  else if (ResVT == MVT::v2f64 && (InVT == MVT::v2i16 || InVT == MVT::v2i8))
    ExtVT = MVT::v2i32;
  else
    return SDValue();

  unsigned Op =
      N->getOpcode() == ISD::UINT_TO_FP ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
  SDValue Conv = DAG.getNode(Op, SDLoc(N), ExtVT, N->getOperand(0));
  return DAG.getNode(N->getOpcode(), SDLoc(N), ResVT, Conv);
}

static SDValue
performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;
  assert(N->getOpcode() == ISD::SIGN_EXTEND ||
         N->getOpcode() == ISD::ZERO_EXTEND);

  // Combine ({s,z}ext (extract_subvector src, i)) into a widening operation if
  // possible before the extract_subvector can be expanded.
  auto Extract = N->getOperand(0);
  if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return SDValue();
  auto Source = Extract.getOperand(0);
  auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
  if (IndexNode == nullptr)
    return SDValue();
  auto Index = IndexNode->getZExtValue();

  // Only v8i8, v4i16, and v2i32 extracts can be widened, and only if the
  // extracted subvector is the low or high half of its source.
  EVT ResVT = N->getValueType(0);
  if (ResVT == MVT::v8i16) {
    if (Extract.getValueType() != MVT::v8i8 ||
        Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
      return SDValue();
  } else if (ResVT == MVT::v4i32) {
    if (Extract.getValueType() != MVT::v4i16 ||
        Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
      return SDValue();
  } else if (ResVT == MVT::v2i64) {
    if (Extract.getValueType() != MVT::v2i32 ||
        Source.getValueType() != MVT::v4i32 || (Index != 0 && Index != 2))
      return SDValue();
  } else {
    return SDValue();
  }

  bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND;
  bool IsLow = Index == 0;

  unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
                                : WebAssemblyISD::EXTEND_HIGH_S)
                       : (IsLow ? WebAssemblyISD::EXTEND_LOW_U
                                : WebAssemblyISD::EXTEND_HIGH_U);

  return DAG.getNode(Op, SDLoc(N), ResVT, Source);
}

static SDValue
performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;

  auto GetWasmConversionOp = [](unsigned Op) {
    switch (Op) {
    case ISD::FP_TO_SINT_SAT:
      return WebAssemblyISD::TRUNC_SAT_ZERO_S;
    case ISD::FP_TO_UINT_SAT:
      return WebAssemblyISD::TRUNC_SAT_ZERO_U;
    case ISD::FP_ROUND:
      return WebAssemblyISD::DEMOTE_ZERO;
    }
    llvm_unreachable("unexpected op");
  };

  auto IsZeroSplat = [](SDValue SplatVal) {
    auto *Splat = dyn_cast<BuildVectorSDNode>(SplatVal.getNode());
    APInt SplatValue, SplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;
    // Endianness doesn't matter in this context because we are looking for
    // an all-zero value.
    return Splat &&
           Splat->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
                                  HasAnyUndefs) &&
           SplatValue == 0;
  };

  if (N->getOpcode() == ISD::CONCAT_VECTORS) {
    // Combine this:
    //
    //   (concat_vectors (v2i32 (fp_to_{s,u}int_sat $x, 32)),
    //                   (v2i32 (splat 0)))
    //
    // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
    //
    // Or this:
    //
    //   (concat_vectors (v2f32 (fp_round (v2f64 $x))), (v2f32 (splat 0)))
    //
    // into (f32x4.demote_zero_f64x2 $x).
    EVT ResVT;
    EVT ExpectedConversionType;
    auto Conversion = N->getOperand(0);
    auto ConversionOp = Conversion.getOpcode();
    switch (ConversionOp) {
    case ISD::FP_TO_SINT_SAT:
    case ISD::FP_TO_UINT_SAT:
      ResVT = MVT::v4i32;
      ExpectedConversionType = MVT::v2i32;
      break;
    case ISD::FP_ROUND:
      ResVT = MVT::v4f32;
      ExpectedConversionType = MVT::v2f32;
      break;
    default:
      return SDValue();
    }

    if (N->getValueType(0) != ResVT)
      return SDValue();

    if (Conversion.getValueType() != ExpectedConversionType)
      return SDValue();

    auto Source = Conversion.getOperand(0);
    if (Source.getValueType() != MVT::v2f64)
      return SDValue();

    if (!IsZeroSplat(N->getOperand(1)) ||
        N->getOperand(1).getValueType() != ExpectedConversionType)
      return SDValue();

    unsigned Op = GetWasmConversionOp(ConversionOp);
    return DAG.getNode(Op, SDLoc(N), ResVT, Source);
  }

  // Combine this:
  //
  //   (fp_to_{s,u}int_sat (concat_vectors $x, (v2f64 (splat 0))), 32)
  //
  // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
  //
  // Or this:
  //
  //   (v4f32 (fp_round (concat_vectors $x, (v2f64 (splat 0)))))
  //
  // into (f32x4.demote_zero_f64x2 $x).
  EVT ResVT;
  auto ConversionOp = N->getOpcode();
  switch (ConversionOp) {
  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
    ResVT = MVT::v4i32;
    break;
  case ISD::FP_ROUND:
    ResVT = MVT::v4f32;
    break;
  default:
    llvm_unreachable("unexpected op");
  }

  if (N->getValueType(0) != ResVT)
    return SDValue();

  auto Concat = N->getOperand(0);
  if (Concat.getValueType() != MVT::v4f64)
    return SDValue();

  auto Source = Concat.getOperand(0);
  if (Source.getValueType() != MVT::v2f64)
    return SDValue();

  if (!IsZeroSplat(Concat.getOperand(1)) ||
      Concat.getOperand(1).getValueType() != MVT::v2f64)
    return SDValue();

  unsigned Op = GetWasmConversionOp(ConversionOp);
  return DAG.getNode(Op, SDLoc(N), ResVT, Source);
}

// Helper to extract VectorWidth bits from Vec, starting from IdxVal.
static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
                                const SDLoc &DL, unsigned VectorWidth) {
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits() / VectorWidth;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements() / Factor);

  // Extract the relevant VectorWidth bits. Generate an EXTRACT_SUBVECTOR
  unsigned ElemsPerChunk = VectorWidth / ElVT.getSizeInBits();
  assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");

  // This is the index of the first element of the VectorWidth-bit chunk
  // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
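  // E.g. (worked example) with ElemsPerChunk == 4, indices 0..3 all round
  // down to 0 and 4..7 round down to 4, since IdxVal &= ~3 clears the low
  // two bits.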
  IdxVal &= ~(ElemsPerChunk - 1);

  // If the input is a buildvector just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getBuildVector(ResultVT, DL,
                              Vec->ops().slice(IdxVal, ElemsPerChunk));

  SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, DL);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResultVT, Vec, VecIdx);
}

// Helper to recursively truncate vector elements in half with NARROW_U. DstVT
// is the expected destination value type after recursion. In is the initial
// input. Note that the input should have enough leading zero bits to prevent
// NARROW_U from saturating results.
static SDValue truncateVectorWithNARROW(EVT DstVT, SDValue In, const SDLoc &DL,
                                        SelectionDAG &DAG) {
  EVT SrcVT = In.getValueType();

  // No truncation required, we might get here due to recursive calls.
  if (SrcVT == DstVT)
    return In;

  unsigned SrcSizeInBits = SrcVT.getSizeInBits();
  unsigned NumElems = SrcVT.getVectorNumElements();
  if (!isPowerOf2_32(NumElems))
    return SDValue();
  assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
  assert(SrcSizeInBits > DstVT.getSizeInBits() && "Illegal truncation");

  LLVMContext &Ctx = *DAG.getContext();
  EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);

  // Narrow to the largest type possible:
  // vXi64/vXi32 -> i16x8.narrow_i32x4_u and vXi16 -> i8x16.narrow_i16x8_u.
  EVT InVT = MVT::i16, OutVT = MVT::i8;
  if (SrcVT.getScalarSizeInBits() > 16) {
    InVT = MVT::i32;
    OutVT = MVT::i16;
  }
  unsigned SubSizeInBits = SrcSizeInBits / 2;
  InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
  OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());

  // Split lower/upper subvectors.
  SDValue Lo = extractSubVector(In, 0, DAG, DL, SubSizeInBits);
  SDValue Hi = extractSubVector(In, NumElems / 2, DAG, DL, SubSizeInBits);

  // 256bit -> 128bit truncate - Narrow lower/upper 128-bit subvectors.
  if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
    Lo = DAG.getBitcast(InVT, Lo);
    Hi = DAG.getBitcast(InVT, Hi);
    SDValue Res = DAG.getNode(WebAssemblyISD::NARROW_U, DL, OutVT, Lo, Hi);
    return DAG.getBitcast(DstVT, Res);
  }

  // Recursively narrow lower/upper subvectors, concat result and narrow again.
  EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems / 2);
  Lo = truncateVectorWithNARROW(PackedVT, Lo, DL, DAG);
  Hi = truncateVectorWithNARROW(PackedVT, Hi, DL, DAG);

  PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
  SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
  return truncateVectorWithNARROW(DstVT, Res, DL, DAG);
}

static SDValue performTruncateCombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;

  SDValue In = N->getOperand(0);
  EVT InVT = In.getValueType();
  if (!InVT.isSimple())
    return SDValue();

  EVT OutVT = N->getValueType(0);
  if (!OutVT.isVector())
    return SDValue();

  EVT OutSVT = OutVT.getVectorElementType();
  EVT InSVT = InVT.getVectorElementType();
  // Currently only cover truncate to v16i8 or v8i16.
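  // E.g. (illustrative) a v8i32 -> v8i16 truncate masks each lane to 16 bits
  // and narrows the two v4i32 halves with a single i16x8.narrow_i32x4_u; a
  // v16i32 -> v16i8 truncate recurses, narrowing each v8i32 half to v8i16,
  // concatenating, and narrowing once more to v16i8.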
  if (!((InSVT == MVT::i16 || InSVT == MVT::i32 || InSVT == MVT::i64) &&
        (OutSVT == MVT::i8 || OutSVT == MVT::i16) && OutVT.is128BitVector()))
    return SDValue();

  SDLoc DL(N);
  APInt Mask = APInt::getLowBitsSet(InVT.getScalarSizeInBits(),
                                    OutVT.getScalarSizeInBits());
  In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(Mask, DL, InVT));
  return truncateVectorWithNARROW(OutVT, In, DL, DAG);
}

static SDValue performBitcastCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;
  SDLoc DL(N);
  SDValue Src = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT SrcVT = Src.getValueType();

  // bitcast <N x i1> to iN
  // ==> bitmask
  if (DCI.isBeforeLegalize() && VT.isScalarInteger() &&
      SrcVT.isFixedLengthVector() && SrcVT.getScalarType() == MVT::i1) {
    unsigned NumElts = SrcVT.getVectorNumElements();
    if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
      return SDValue();
    EVT Width = MVT::getIntegerVT(128 / NumElts);
    return DAG.getZExtOrTrunc(
        DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
                    {DAG.getConstant(Intrinsic::wasm_bitmask, DL, MVT::i32),
                     DAG.getSExtOrTrunc(N->getOperand(0), DL,
                                        SrcVT.changeVectorElementType(Width))}),
        DL, VT);
  }

  return SDValue();
}

static SDValue performSETCCCombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  // setcc (iN (bitcast (vNi1 X))), 0, ne
  // ==> any_true (vNi1 X)
  // setcc (iN (bitcast (vNi1 X))), 0, eq
  // ==> xor (any_true (vNi1 X)), -1
  // setcc (iN (bitcast (vNi1 X))), -1, eq
  // ==> all_true (vNi1 X)
  // setcc (iN (bitcast (vNi1 X))), -1, ne
  // ==> xor (all_true (vNi1 X)), -1
  if (DCI.isBeforeLegalize() && VT.isScalarInteger() &&
      (Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
      (isNullConstant(RHS) || isAllOnesConstant(RHS)) &&
      LHS->getOpcode() == ISD::BITCAST) {
    EVT FromVT = LHS->getOperand(0).getValueType();
    if (FromVT.isFixedLengthVector() &&
        FromVT.getVectorElementType() == MVT::i1) {
      int Intrin = isNullConstant(RHS)
                       ? Intrinsic::wasm_anytrue
                       : Intrinsic::wasm_alltrue;
      unsigned NumElts = FromVT.getVectorNumElements();
      if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
        return SDValue();
      EVT Width = MVT::getIntegerVT(128 / NumElts);
      SDValue Ret = DAG.getZExtOrTrunc(
          DAG.getNode(
              ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
              {DAG.getConstant(Intrin, DL, MVT::i32),
               DAG.getSExtOrTrunc(LHS->getOperand(0), DL,
                                  FromVT.changeVectorElementType(Width))}),
          DL, MVT::i1);
      if ((isNullConstant(RHS) && (Cond == ISD::SETEQ)) ||
          (isAllOnesConstant(RHS) && (Cond == ISD::SETNE))) {
        Ret = DAG.getNOT(DL, Ret, MVT::i1);
      }
      return DAG.getZExtOrTrunc(Ret, DL, VT);
    }
  }

  return SDValue();
}

SDValue
WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default:
    return SDValue();
  case ISD::BITCAST:
    return performBitcastCombine(N, DCI);
  case ISD::SETCC:
    return performSETCCCombine(N, DCI);
  case ISD::VECTOR_SHUFFLE:
    return performVECTOR_SHUFFLECombine(N, DCI);
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
    return performVectorExtendCombine(N, DCI);
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:
    return performVectorExtendToFPCombine(N, DCI);
  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
  case ISD::FP_ROUND:
  case ISD::CONCAT_VECTORS:
    return performVectorTruncZeroCombine(N, DCI);
  case ISD::TRUNCATE:
    return performTruncateCombine(N, DCI);
  }
}