//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the WebAssemblyTargetLowering class.
///
//===----------------------------------------------------------------------===//

#include "WebAssemblyISelLowering.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "Utils/WebAssemblyTypeUtilities.h"
#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
#include "WebAssemblyTargetMachine.h"
#include "WebAssemblyUtilities.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

#define DEBUG_TYPE "wasm-lower"

WebAssemblyTargetLowering::WebAssemblyTargetLowering(
    const TargetMachine &TM, const WebAssemblySubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;

  // Booleans always contain 0 or 1.
  setBooleanContents(ZeroOrOneBooleanContent);
  // Except in SIMD vectors.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  // We don't know the microarchitecture here, so just reduce register pressure.
  setSchedulingPreference(Sched::RegPressure);
  // Tell ISel that we have a stack pointer.
  setStackPointerRegisterToSaveRestore(
      Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
  // Set up the register classes.
  addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
  addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
  addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
  addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
  if (Subtarget->hasSIMD128()) {
    addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
  }
  if (Subtarget->hasFP16()) {
    addRegisterClass(MVT::v8f16, &WebAssembly::V128RegClass);
  }
  if (Subtarget->hasReferenceTypes()) {
    addRegisterClass(MVT::externref, &WebAssembly::EXTERNREFRegClass);
    addRegisterClass(MVT::funcref, &WebAssembly::FUNCREFRegClass);
    if (Subtarget->hasExceptionHandling()) {
      addRegisterClass(MVT::exnref, &WebAssembly::EXNREFRegClass);
    }
  }
  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget->getRegisterInfo());

  // Transform loads and stores to pointers in address space 1 to loads and
  // stores to WebAssembly global variables, outside linear memory.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) {
    setOperationAction(ISD::LOAD, T, Custom);
    setOperationAction(ISD::STORE, T, Custom);
  }
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64}) {
      setOperationAction(ISD::LOAD, T, Custom);
      setOperationAction(ISD::STORE, T, Custom);
    }
  }
  if (Subtarget->hasFP16()) {
    setOperationAction(ISD::LOAD, MVT::v8f16, Custom);
    setOperationAction(ISD::STORE, MVT::v8f16, Custom);
  }
  if (Subtarget->hasReferenceTypes()) {
    // We need custom load and store lowering for externref, funcref, and
    // Other. The MVT::Other here represents tables of reference types.
    for (auto T : {MVT::externref, MVT::funcref, MVT::Other}) {
      setOperationAction(ISD::LOAD, T, Custom);
      setOperationAction(ISD::STORE, T, Custom);
    }
  }

  setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVTPtr, Custom);
  setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
  setOperationAction(ISD::JumpTable, MVTPtr, Custom);
  setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
  setOperationAction(ISD::BRIND, MVT::Other, Custom);
  setOperationAction(ISD::CLEAR_CACHE, MVT::Other, Custom);

  // Take the default expansion for va_arg, va_copy, and va_end. There is no
  // default action for va_start, so we do that custom.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
    // Don't expand the floating-point types to constant pools.
    setOperationAction(ISD::ConstantFP, T, Legal);
    // Expand floating-point comparisons.
    for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
                    ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
      setCondCodeAction(CC, T, Expand);
    // Expand floating-point library function operators.
    for (auto Op :
         {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
      setOperationAction(Op, T, Expand);
    // Note supported floating-point library function operators that otherwise
    // default to expand.
    for (auto Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT,
                    ISD::FRINT, ISD::FROUNDEVEN})
      setOperationAction(Op, T, Legal);
    // Support minimum and maximum, which otherwise default to expand.
    setOperationAction(ISD::FMINIMUM, T, Legal);
    setOperationAction(ISD::FMAXIMUM, T, Legal);
    // WebAssembly currently has no builtin f16 support.
    setOperationAction(ISD::FP16_TO_FP, T, Expand);
    setOperationAction(ISD::FP_TO_FP16, T, Expand);
    setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
    setTruncStoreAction(T, MVT::f16, Expand);
  }

  if (Subtarget->hasFP16()) {
    setOperationAction(ISD::FMINIMUM, MVT::v8f16, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v8f16, Legal);
  }

  // Expand unavailable integer operations.
  for (auto Op :
       {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
        ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
        ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
    for (auto T : {MVT::i32, MVT::i64})
      setOperationAction(Op, T, Expand);
    if (Subtarget->hasSIMD128())
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Expand);
  }

  if (Subtarget->hasWideArithmetic()) {
    setOperationAction(ISD::ADD, MVT::i128, Custom);
    setOperationAction(ISD::SUB, MVT::i128, Custom);
    setOperationAction(ISD::SMUL_LOHI, MVT::i64, Custom);
    setOperationAction(ISD::UMUL_LOHI, MVT::i64, Custom);
  }

  if (Subtarget->hasNontrappingFPToInt())
    for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
      for (auto T : {MVT::i32, MVT::i64})
        setOperationAction(Op, T, Custom);

  // SIMD-specific configuration
  if (Subtarget->hasSIMD128()) {
    // Combine vector mask reductions into alltrue/anytrue
    setTargetDAGCombine(ISD::SETCC);

    // Convert vector to integer bitcasts to bitmask
    setTargetDAGCombine(ISD::BITCAST);

    // Hoist bitcasts out of shuffles
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);

    // Combine extends of extract_subvectors into widening ops
    setTargetDAGCombine({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND});

    // Combine int_to_fp or fp_extend of extract_vectors and vice versa into
    // conversion ops
    setTargetDAGCombine({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_EXTEND,
                         ISD::EXTRACT_SUBVECTOR});

    // Combine fp_to_{s,u}int_sat or fp_round of concat_vectors or vice versa
    // into conversion ops
    setTargetDAGCombine({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT,
                         ISD::FP_ROUND, ISD::CONCAT_VECTORS});

    setTargetDAGCombine(ISD::TRUNCATE);

    // Support saturating add/sub for i8x16 and i16x8
    for (auto Op : {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT})
      for (auto T : {MVT::v16i8, MVT::v8i16})
        setOperationAction(Op, T, Legal);

    // Support integer abs
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
      setOperationAction(ISD::ABS, T, Legal);

    // Custom lower BUILD_VECTORs to minimize number of replace_lanes
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::BUILD_VECTOR, T, Custom);

    if (Subtarget->hasFP16())
      setOperationAction(ISD::BUILD_VECTOR, MVT::f16, Custom);

    // We have custom shuffle lowering to expose the shuffle mask
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);

    // Support splatting
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::SPLAT_VECTOR, T, Legal);

    // Custom lowering since wasm shifts must have a scalar shift amount
    for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Custom);

    // Custom lower lane accesses to expand out variable indices
    for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32,
                     MVT::v2i64, MVT::v2f64})
        setOperationAction(Op, T, Custom);

    // There is no i8x16.mul instruction
    setOperationAction(ISD::MUL, MVT::v16i8, Expand);
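    // (Expand here means the legalizer unrolls the multiply: lanes are
    // extracted, multiplied as scalar i32s, and rebuilt with build_vector;
    // a rough sketch of the default expansion, not a single wasm op.)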

    // There is no vector conditional select instruction
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::SELECT_CC, T, Expand);

    // Expand integer operations supported for scalars but not SIMD
    for (auto Op :
         {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Expand);

    // But we do have integer min and max operations
    for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
        setOperationAction(Op, T, Legal);

    // And we have popcnt for i8x16. It can be used to expand ctlz/cttz.
    setOperationAction(ISD::CTPOP, MVT::v16i8, Legal);
    setOperationAction(ISD::CTLZ, MVT::v16i8, Expand);
    setOperationAction(ISD::CTTZ, MVT::v16i8, Expand);

    // Custom lower bit counting operations for other types to scalarize them.
    for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP})
      for (auto T : {MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Custom);

    // Expand float operations supported for scalars but not SIMD
    for (auto Op : {ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
                    ISD::FEXP, ISD::FEXP2})
      for (auto T : {MVT::v4f32, MVT::v2f64})
        setOperationAction(Op, T, Expand);

    // Unsigned comparison operations are unavailable for i64x2 vectors.
    for (auto CC : {ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE})
      setCondCodeAction(CC, MVT::v2i64, Custom);

    // 64x2 conversions are not in the spec
    for (auto Op :
         {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT})
      for (auto T : {MVT::v2i64, MVT::v2f64})
        setOperationAction(Op, T, Expand);

    // But saturating fp_to_int conversions are
    for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}) {
      setOperationAction(Op, MVT::v4i32, Custom);
      if (Subtarget->hasFP16()) {
        setOperationAction(Op, MVT::v8i16, Custom);
      }
    }

    // Support vector extending
    for (auto T : MVT::integer_fixedlen_vector_valuetypes()) {
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, T, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, T, Custom);
    }
  }

  // As a special case, these operators use the type to mean the type to
  // sign-extend from.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget->hasSignExt()) {
    // Sign extends are legal only when extending a vector extract
    auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
    for (auto T : {MVT::i8, MVT::i16, MVT::i32})
      setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
  }
  for (auto T : MVT::integer_fixedlen_vector_valuetypes())
    setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);

  // Dynamic stack allocation: use the default expansion.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);

  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
  setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
  setOperationAction(ISD::CopyToReg, MVT::Other, Custom);

  // Expand these forms; we pattern-match the forms that we can handle in isel.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
    for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
      setOperationAction(Op, T, Expand);

  // We have custom switch handling.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // WebAssembly doesn't have:
  // - Floating-point extending loads.
  // - Floating-point truncating stores.
  // - i1 extending loads.
  // - truncating SIMD stores and most extending loads
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  for (auto T : MVT::integer_valuetypes())
    for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
      setLoadExtAction(Ext, T, MVT::i1, Promote);
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
                   MVT::v2f64}) {
      for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
        if (MVT(T) != MemT) {
          setTruncStoreAction(T, MemT, Expand);
          for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
            setLoadExtAction(Ext, T, MemT, Expand);
        }
      }
    }
    // But some vector extending loads are legal
    for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
      setLoadExtAction(Ext, MVT::v8i16, MVT::v8i8, Legal);
      setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
      setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
    }
    setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Legal);
  }

  // Don't do anything clever with build_pairs
  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // Trap lowers to wasm unreachable
  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // Exception handling intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  setMaxAtomicSizeInBitsSupported(64);

  // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
  // consistent with the f64 and f128 names.
  setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
  setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

  // Define the emscripten name for return address helper.
  // TODO: when implementing other Wasm backends, make this generic or only do
  // this on emscripten depending on what they end up doing.
  setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");

  // Always convert switches to br_tables unless there is only one case, which
  // is equivalent to a simple branch. This reduces code size for wasm, and we
  // defer possible jump table optimizations to the VM.
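  //
  // For example (illustrative wat), a dense two-case switch then becomes a
  // single br_table instead of a compare-and-branch chain:
  //
  //   br_table $case0 $case1 $default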
  setMinimumJumpTableEntries(2);
}

MVT WebAssemblyTargetLowering::getPointerTy(const DataLayout &DL,
                                            uint32_t AS) const {
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_EXTERNREF)
    return MVT::externref;
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF)
    return MVT::funcref;
  return TargetLowering::getPointerTy(DL, AS);
}

MVT WebAssemblyTargetLowering::getPointerMemTy(const DataLayout &DL,
                                               uint32_t AS) const {
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_EXTERNREF)
    return MVT::externref;
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF)
    return MVT::funcref;
  return TargetLowering::getPointerMemTy(DL, AS);
}

TargetLowering::AtomicExpansionKind
WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // We have wasm instructions for these
  switch (AI->getOperation()) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::Xchg:
    return AtomicExpansionKind::None;
  default:
    break;
  }
  return AtomicExpansionKind::CmpXChg;
}

bool WebAssemblyTargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
  // Implementation copied from X86TargetLowering.
  unsigned Opc = VecOp.getOpcode();

  // Assume target opcodes can't be scalarized.
  // TODO - do we have any exceptions?
  if (Opc >= ISD::BUILTIN_OP_END || !isBinOp(Opc))
    return false;

  // If the vector op is not supported, try to convert to scalar.
  EVT VecVT = VecOp.getValueType();
  if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
    return true;

  // If the vector op is supported, but the scalar op is not, the transform may
  // not be worthwhile.
  EVT ScalarVT = VecVT.getScalarType();
  return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
}

FastISel *WebAssemblyTargetLowering::createFastISel(
    FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
  return WebAssembly::createFastISel(FuncInfo, LibInfo);
}

MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
                                                      EVT VT) const {
  unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
  if (BitWidth > 1 && BitWidth < 8)
    BitWidth = 8;

  if (BitWidth > 64) {
    // The shift will be lowered to a libcall, and compiler-rt libcalls expect
    // the count to be an i32.
    BitWidth = 32;
    assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
           "32-bit shift counts ought to be enough for anyone");
  }

  MVT Result = MVT::getIntegerVT(BitWidth);
  assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
         "Unable to represent scalar shift amount type");
  return Result;
}

// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
// undefined result on invalid/overflow, to the WebAssembly opcode, which
// traps on invalid/overflow.
static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
                                       MachineBasicBlock *BB,
                                       const TargetInstrInfo &TII,
                                       bool IsUnsigned, bool Int64,
                                       bool Float64, unsigned LoweredOpcode) {
  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

  Register OutReg = MI.getOperand(0).getReg();
  Register InReg = MI.getOperand(1).getReg();

  unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
  unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
  unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
  unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
  unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
  unsigned Eqz = WebAssembly::EQZ_I32;
  unsigned And = WebAssembly::AND_I32;
  int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
  int64_t Substitute = IsUnsigned ? 0 : Limit;
  double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
  auto &Context = BB->getParent()->getFunction().getContext();
  Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);

  const BasicBlock *LLVMBB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);

  MachineFunction::iterator It = ++BB->getIterator();
  F->insert(It, FalseMBB);
  F->insert(It, TrueMBB);
  F->insert(It, DoneMBB);

  // Transfer the remainder of BB and its successor edges to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(TrueMBB);
  BB->addSuccessor(FalseMBB);
  TrueMBB->addSuccessor(DoneMBB);
  FalseMBB->addSuccessor(DoneMBB);

  unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
  Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
  TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));

  MI.eraseFromParent();
  // For signed numbers, we can do a single comparison to determine whether
  // fabs(x) is within range.
  if (IsUnsigned) {
    Tmp0 = InReg;
  } else {
    BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
  }
  BuildMI(BB, DL, TII.get(FConst), Tmp1)
      .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
  BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);

  // For unsigned numbers, we have to do a separate comparison with zero.
  if (IsUnsigned) {
    Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
    Register SecondCmpReg =
        MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    BuildMI(BB, DL, TII.get(FConst), Tmp1)
        .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
    BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
    BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
    CmpReg = AndReg;
  }

  BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);

  // Create the CFG diamond to select between doing the conversion or using
  // the substitute value.
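  //
  // The diamond looks roughly like this (illustrative sketch):
  //
  //   BB:       ...range check...; br_if EqzReg, TrueMBB
  //   FalseMBB: FalseReg = <LoweredOpcode> InReg; br DoneMBB
  //   TrueMBB:  TrueReg = iNN.const Substitute
  //   DoneMBB:  OutReg = phi [FalseReg, FalseMBB], [TrueReg, TrueMBB]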
  BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
  BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
  BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
  BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
  BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
      .addReg(FalseReg)
      .addMBB(FalseMBB)
      .addReg(TrueReg)
      .addMBB(TrueMBB);

  return DoneMBB;
}

// Lower a `MEMCPY` instruction into a CFG triangle around a `MEMORY_COPY`
// instruction to handle the zero-length case.
static MachineBasicBlock *LowerMemcpy(MachineInstr &MI, DebugLoc DL,
                                      MachineBasicBlock *BB,
                                      const TargetInstrInfo &TII, bool Int64) {
  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

  MachineOperand DstMem = MI.getOperand(0);
  MachineOperand SrcMem = MI.getOperand(1);
  MachineOperand Dst = MI.getOperand(2);
  MachineOperand Src = MI.getOperand(3);
  MachineOperand Len = MI.getOperand(4);

  // We're going to add an extra use to `Len` to test if it's zero; that
  // use shouldn't be a kill, even if the original use is.
  MachineOperand NoKillLen = Len;
  NoKillLen.setIsKill(false);

  // Decide on which `MachineInstr` opcode we're going to use.
  unsigned Eqz = Int64 ? WebAssembly::EQZ_I64 : WebAssembly::EQZ_I32;
  unsigned MemoryCopy =
      Int64 ? WebAssembly::MEMORY_COPY_A64 : WebAssembly::MEMORY_COPY_A32;

  // Create two new basic blocks; one for the new `memory.copy` that we can
  // branch over, and one for the rest of the instructions after the original
  // `memory.copy`.
  const BasicBlock *LLVMBB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);

  MachineFunction::iterator It = ++BB->getIterator();
  F->insert(It, TrueMBB);
  F->insert(It, DoneMBB);

  // Transfer the remainder of BB and its successor edges to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Connect the CFG edges.
  BB->addSuccessor(TrueMBB);
  BB->addSuccessor(DoneMBB);
  TrueMBB->addSuccessor(DoneMBB);

  // Create a virtual register for the `Eqz` result.
  unsigned EqzReg;
  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);

  // Erase the original `memory.copy`.
  MI.eraseFromParent();

  // Test if `Len` is zero.
  BuildMI(BB, DL, TII.get(Eqz), EqzReg).add(NoKillLen);

  // Insert a new `memory.copy`.
  BuildMI(TrueMBB, DL, TII.get(MemoryCopy))
      .add(DstMem)
      .add(SrcMem)
      .add(Dst)
      .add(Src)
      .add(Len);

  // Create the CFG triangle.
  BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(DoneMBB).addReg(EqzReg);
  BuildMI(TrueMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);

  return DoneMBB;
}

// Lower a `MEMSET` instruction into a CFG triangle around a `MEMORY_FILL`
// instruction to handle the zero-length case.
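// (The guard is needed because a zero-length LLVM memset must be a no-op for
// any pointer value, while a zero-length `memory.fill` still bounds-checks
// its destination and can trap; this rationale is our reading of the
// bulk-memory semantics, and the same applies to `memory.copy` above.)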
static MachineBasicBlock *LowerMemset(MachineInstr &MI, DebugLoc DL,
                                      MachineBasicBlock *BB,
                                      const TargetInstrInfo &TII, bool Int64) {
  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

  MachineOperand Mem = MI.getOperand(0);
  MachineOperand Dst = MI.getOperand(1);
  MachineOperand Val = MI.getOperand(2);
  MachineOperand Len = MI.getOperand(3);

  // We're going to add an extra use to `Len` to test if it's zero; that
  // use shouldn't be a kill, even if the original use is.
  MachineOperand NoKillLen = Len;
  NoKillLen.setIsKill(false);

  // Decide on which `MachineInstr` opcode we're going to use.
  unsigned Eqz = Int64 ? WebAssembly::EQZ_I64 : WebAssembly::EQZ_I32;
  unsigned MemoryFill =
      Int64 ? WebAssembly::MEMORY_FILL_A64 : WebAssembly::MEMORY_FILL_A32;

  // Create two new basic blocks; one for the new `memory.fill` that we can
  // branch over, and one for the rest of the instructions after the original
  // `memory.fill`.
  const BasicBlock *LLVMBB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);

  MachineFunction::iterator It = ++BB->getIterator();
  F->insert(It, TrueMBB);
  F->insert(It, DoneMBB);

  // Transfer the remainder of BB and its successor edges to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Connect the CFG edges.
  BB->addSuccessor(TrueMBB);
  BB->addSuccessor(DoneMBB);
  TrueMBB->addSuccessor(DoneMBB);

  // Create a virtual register for the `Eqz` result.
  unsigned EqzReg;
  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);

  // Erase the original `memory.fill`.
  MI.eraseFromParent();

  // Test if `Len` is zero.
  BuildMI(BB, DL, TII.get(Eqz), EqzReg).add(NoKillLen);

  // Insert a new `memory.fill`.
  BuildMI(TrueMBB, DL, TII.get(MemoryFill)).add(Mem).add(Dst).add(Val).add(Len);

  // Create the CFG triangle.
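  //
  // The triangle looks roughly like this (illustrative sketch):
  //
  //   BB:      EqzReg = eqz Len; br_if EqzReg, DoneMBB; else fall through
  //   TrueMBB: memory.fill ...; br DoneMBB
  //   DoneMBB: rest of the original block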
  BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(DoneMBB).addReg(EqzReg);
  BuildMI(TrueMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);

  return DoneMBB;
}

static MachineBasicBlock *
LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB,
                 const WebAssemblySubtarget *Subtarget,
                 const TargetInstrInfo &TII) {
  MachineInstr &CallParams = *CallResults.getPrevNode();
  assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS);
  assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||
         CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS);

  bool IsIndirect =
      CallParams.getOperand(0).isReg() || CallParams.getOperand(0).isFI();
  bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;

  bool IsFuncrefCall = false;
  if (IsIndirect && CallParams.getOperand(0).isReg()) {
    Register Reg = CallParams.getOperand(0).getReg();
    const MachineFunction *MF = BB->getParent();
    const MachineRegisterInfo &MRI = MF->getRegInfo();
    const TargetRegisterClass *TRC = MRI.getRegClass(Reg);
    IsFuncrefCall = (TRC == &WebAssembly::FUNCREFRegClass);
    assert(!IsFuncrefCall || Subtarget->hasReferenceTypes());
  }

  unsigned CallOp;
  if (IsIndirect && IsRetCall) {
    CallOp = WebAssembly::RET_CALL_INDIRECT;
  } else if (IsIndirect) {
    CallOp = WebAssembly::CALL_INDIRECT;
  } else if (IsRetCall) {
    CallOp = WebAssembly::RET_CALL;
  } else {
    CallOp = WebAssembly::CALL;
  }

  MachineFunction &MF = *BB->getParent();
  const MCInstrDesc &MCID = TII.get(CallOp);
  MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));

  // Move the function pointer to the end of the arguments for indirect calls.
  if (IsIndirect) {
    auto FnPtr = CallParams.getOperand(0);
    CallParams.removeOperand(0);

    // For funcrefs, call_indirect is done through __funcref_call_table and the
    // funcref is always installed in slot 0 of the table. Therefore, instead
    // of adding the function pointer at the end of the params list, we add a
    // zero (the index into __funcref_call_table).
    if (IsFuncrefCall) {
      Register RegZero =
          MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
      MachineInstrBuilder MIBC0 =
          BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);

      BB->insert(CallResults.getIterator(), MIBC0);
      MachineInstrBuilder(MF, CallParams).addReg(RegZero);
    } else
      CallParams.addOperand(FnPtr);
  }

  for (auto Def : CallResults.defs())
    MIB.add(Def);

  if (IsIndirect) {
    // Placeholder for the type index.
    MIB.addImm(0);
    // The table into which this call_indirect indexes.
    MCSymbolWasm *Table = IsFuncrefCall
                              ? WebAssembly::getOrCreateFuncrefCallTableSymbol(
                                    MF.getContext(), Subtarget)
                              : WebAssembly::getOrCreateFunctionTableSymbol(
                                    MF.getContext(), Subtarget);
    if (Subtarget->hasCallIndirectOverlong()) {
      MIB.addSym(Table);
    } else {
      // For the MVP there is at most one table whose number is 0, but we can't
      // write a table symbol or issue relocations. Instead we just ensure the
      // table is live and write a zero.
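      // (In that case the encoded call ends up as `call_indirect <typeidx> 0`,
      // with the table index written as a literal zero rather than a relocated
      // table symbol; illustrative of the emitted form.)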
      Table->setNoStrip();
      MIB.addImm(0);
    }
  }

  for (auto Use : CallParams.uses())
    MIB.add(Use);

  BB->insert(CallResults.getIterator(), MIB);
  CallParams.eraseFromParent();
  CallResults.eraseFromParent();

  // If this is a funcref call, to avoid hidden GC roots, we need to clear the
  // table slot with ref.null upon call_indirect return.
  //
  // This generates the following code, which comes right after a call_indirect
  // of a funcref:
  //
  //   i32.const 0
  //   ref.null func
  //   table.set __funcref_call_table
  if (IsIndirect && IsFuncrefCall) {
    MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
        MF.getContext(), Subtarget);
    Register RegZero =
        MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
    MachineInstr *Const0 =
        BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);
    BB->insertAfter(MIB.getInstr()->getIterator(), Const0);

    Register RegFuncref =
        MF.getRegInfo().createVirtualRegister(&WebAssembly::FUNCREFRegClass);
    MachineInstr *RefNull =
        BuildMI(MF, DL, TII.get(WebAssembly::REF_NULL_FUNCREF), RegFuncref);
    BB->insertAfter(Const0->getIterator(), RefNull);

    MachineInstr *TableSet =
        BuildMI(MF, DL, TII.get(WebAssembly::TABLE_SET_FUNCREF))
            .addSym(Table)
            .addReg(RegZero)
            .addReg(RegFuncref);
    BB->insertAfter(RefNull->getIterator(), TableSet);
  }

  return BB;
}

MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case WebAssembly::FP_TO_SINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, false, false,
                        WebAssembly::I32_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, false, false,
                        WebAssembly::I32_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, true, false,
                        WebAssembly::I64_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, true, false,
                        WebAssembly::I64_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, false, true,
                        WebAssembly::I32_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, false, true,
                        WebAssembly::I32_TRUNC_U_F64);
  case WebAssembly::FP_TO_SINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, true, true,
                        WebAssembly::I64_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, true, true,
                        WebAssembly::I64_TRUNC_U_F64);
  case WebAssembly::MEMCPY_A32:
    return LowerMemcpy(MI, DL, BB, TII, false);
  case WebAssembly::MEMCPY_A64:
    return LowerMemcpy(MI, DL, BB, TII, true);
  case WebAssembly::MEMSET_A32:
    return LowerMemset(MI, DL, BB, TII, false);
  case WebAssembly::MEMSET_A64:
    return LowerMemset(MI, DL, BB, TII, true);
  case WebAssembly::CALL_RESULTS:
  case WebAssembly::RET_CALL_RESULTS:
    return LowerCallResults(MI, DL, BB, Subtarget, TII);
  }
}

const char *
WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
  case WebAssemblyISD::FIRST_NUMBER:
    break;
#define HANDLE_NODETYPE(NODE)                                                  \
  case WebAssemblyISD::NODE:                                                   \
    return "WebAssemblyISD::" #NODE;
#include "WebAssemblyISD.def"
#undef HANDLE_NODETYPE
  }
  return nullptr;
}

std::pair<unsigned, const TargetRegisterClass *>
WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // WebAssembly register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      assert(VT != MVT::iPTR && "Pointer MVT not expected here");
      if (Subtarget->hasSIMD128() && VT.isVector()) {
        if (VT.getSizeInBits() == 128)
          return std::make_pair(0U, &WebAssembly::V128RegClass);
      }
      if (VT.isInteger() && !VT.isVector()) {
        if (VT.getSizeInBits() <= 32)
          return std::make_pair(0U, &WebAssembly::I32RegClass);
        if (VT.getSizeInBits() <= 64)
          return std::make_pair(0U, &WebAssembly::I64RegClass);
      }
      if (VT.isFloatingPoint() && !VT.isVector()) {
        switch (VT.getSizeInBits()) {
        case 32:
          return std::make_pair(0U, &WebAssembly::F32RegClass);
        case 64:
          return std::make_pair(0U, &WebAssembly::F64RegClass);
        default:
          break;
        }
      }
      break;
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
  // Assume ctz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
  // Assume clz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                      const AddrMode &AM,
                                                      Type *Ty, unsigned AS,
                                                      Instruction *I) const {
  // WebAssembly offsets are added as unsigned without wrapping. The
  // isLegalAddressingMode gives us no way to determine if wrapping could be
  // happening, so we approximate this by accepting only non-negative offsets.
  if (AM.BaseOffs < 0)
    return false;

  // WebAssembly has no scale register operands.
  if (AM.Scale != 0)
    return false;

  // Everything else is legal.
  return true;
}

bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
    EVT /*VT*/, unsigned /*AddrSpace*/, Align /*Align*/,
    MachineMemOperand::Flags /*Flags*/, unsigned *Fast) const {
  // WebAssembly supports unaligned accesses, though it should be declared
  // with the p2align attribute on loads and stores which do so, and there
  // may be a performance impact. We tell LLVM they're "fast" because
  // for the kinds of things that LLVM uses this for (merging adjacent stores
  // of constants, etc.), WebAssembly implementations will either want the
  // unaligned access or they'll split anyway.
  if (Fast)
    *Fast = 1;
  return true;
}

bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
                                              AttributeList Attr) const {
  // The current thinking is that wasm engines will perform this optimization,
  // so we can save on code size.
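  // (Answering "cheap" here suppresses, e.g., DAGCombiner's magic-number
  // multiply expansion of division by a constant, leaving a single
  // i32.div_s/i32.div_u in the output; an illustrative consequence, not an
  // exhaustive one.)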
  return true;
}

bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
  EVT ExtT = ExtVal.getValueType();
  EVT MemT = cast<LoadSDNode>(ExtVal->getOperand(0))->getValueType(0);
  return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
         (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
         (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
}

bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {
  // Wasm doesn't support function addresses with offsets
  const GlobalValue *GV = GA->getGlobal();
  return isa<Function>(GV) ? false : TargetLowering::isOffsetFoldingLegal(GA);
}

EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
                                                  LLVMContext &C,
                                                  EVT VT) const {
  if (VT.isVector())
    return VT.changeVectorElementTypeToInteger();

  // So far, all branch instructions in Wasm take an I32 condition.
  // The default TargetLowering::getSetCCResultType returns the pointer size,
  // which would be useful to reduce instruction counts when testing
  // against 64-bit pointers/values if at some point Wasm supports that.
  return EVT::getIntegerVT(C, 32);
}

bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                                   const CallInst &I,
                                                   MachineFunction &MF,
                                                   unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::wasm_memory_atomic_notify:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    // The atomic.notify instruction does not really load the memory specified
    // with this argument, but a MachineMemOperand should either be a load or a
    // store, so we set this to a load.
    // FIXME Volatile isn't really correct, but currently all LLVM atomic
    // instructions are treated as volatiles in the backend, so we should be
    // consistent. The same applies for wasm_atomic_wait intrinsics too.
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_memory_atomic_wait32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_memory_atomic_wait64:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(8);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_loadf16_f32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::f16;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(2);
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_storef16_f32:
    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = MVT::f16;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.align = Align(2);
    Info.flags = MachineMemOperand::MOStore;
    return true;
  default:
    return false;
  }
}

void WebAssemblyTargetLowering::computeKnownBitsForTargetNode(
    const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
    const SelectionDAG &DAG, unsigned Depth) const {
  switch (Op.getOpcode()) {
  default:
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = Op.getConstantOperandVal(0);
    switch (IntNo) {
    default:
      break;
    case Intrinsic::wasm_bitmask: {
      unsigned BitWidth = Known.getBitWidth();
      EVT VT = Op.getOperand(1).getSimpleValueType();
      unsigned PossibleBits = VT.getVectorNumElements();
      APInt ZeroMask = APInt::getHighBitsSet(BitWidth, BitWidth - PossibleBits);
      Known.Zero |= ZeroMask;
      break;
    }
    }
  }
  }
}

TargetLoweringBase::LegalizeTypeAction
WebAssemblyTargetLowering::getPreferredVectorAction(MVT VT) const {
  if (VT.isFixedLengthVector()) {
    MVT EltVT = VT.getVectorElementType();
    // We have legal vector types with these lane types, so widening the
    // vector would let us use some of the lanes directly without having to
    // extend or truncate values.
    if (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
        EltVT == MVT::i64 || EltVT == MVT::f32 || EltVT == MVT::f64)
      return TypeWidenVector;
  }

  return TargetLoweringBase::getPreferredVectorAction(VT);
}

bool WebAssemblyTargetLowering::shouldSimplifyDemandedVectorElts(
    SDValue Op, const TargetLoweringOpt &TLO) const {
  // The ISel process runs DAGCombiner after legalization; this step is called
  // the SelectionDAG optimization phase. This post-legalization combining
  // process runs DAGCombiner on each node, and if there was a change to be
  // made, re-runs legalization on it and its user nodes to make sure
  // everything is in a legalized state.
  //
  // The legalization calls lowering routines, and we do our custom lowering
  // for build_vectors (LowerBUILD_VECTOR), which converts undef vector
  // elements into zeros. But there is a set of routines in DAGCombiner that
  // turn unused (= not demanded) nodes into undef, among which
  // SimplifyDemandedVectorElts turns unused vector elements into undefs.
  // But this routine does not work with our custom LowerBUILD_VECTOR, which
  // turns undefs into zeros. This combination can result in an infinite loop,
  // in which undefs are converted to zeros in legalization and back to undefs
  // in combining.
  //
  // So after the DAG is legalized, we prevent SimplifyDemandedVectorElts from
  // running for build_vectors.
  if (Op.getOpcode() == ISD::BUILD_VECTOR && TLO.LegalOps && TLO.LegalTys)
    return false;
  return true;
}

//===----------------------------------------------------------------------===//
// WebAssembly Lowering private implementation.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
}

// Test whether the given calling convention is supported.
static bool callingConvSupported(CallingConv::ID CallConv) {
  // We currently support the language-independent target-independent
  // conventions. We don't yet have a way to annotate calls with properties
  // like "cold", and we don't have any call-clobbered registers, so these are
  // mostly all handled the same.
  return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
         CallConv == CallingConv::Cold ||
         CallConv == CallingConv::PreserveMost ||
         CallConv == CallingConv::PreserveAll ||
         CallConv == CallingConv::CXX_FAST_TLS ||
         CallConv == CallingConv::WASM_EmscriptenInvoke ||
         CallConv == CallingConv::Swift;
}

SDValue
WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  MachineFunction &MF = DAG.getMachineFunction();
  auto Layout = MF.getDataLayout();

  CallingConv::ID CallConv = CLI.CallConv;
  if (!callingConvSupported(CallConv))
    fail(DL, DAG,
         "WebAssembly doesn't support language-specific or target-specific "
         "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  if (CLI.IsTailCall) {
    auto NoTail = [&](const char *Msg) {
      if (CLI.CB && CLI.CB->isMustTailCall())
        fail(DL, DAG, Msg);
      CLI.IsTailCall = false;
    };

    if (!Subtarget->hasTailCall())
      NoTail("WebAssembly 'tail-call' feature not enabled");

    // Varargs calls cannot be tail calls because the buffer is on the stack
    if (CLI.IsVarArg)
      NoTail("WebAssembly does not support varargs tail calls");

    // Do not tail call unless caller and callee return types match
    const Function &F = MF.getFunction();
    const TargetMachine &TM = getTargetMachine();
    Type *RetTy = F.getReturnType();
    SmallVector<MVT, 4> CallerRetTys;
    SmallVector<MVT, 4> CalleeRetTys;
    computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
    computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
    bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
                      std::equal(CallerRetTys.begin(), CallerRetTys.end(),
                                 CalleeRetTys.begin());
    if (!TypesMatch)
      NoTail("WebAssembly tail call requires caller and callee return types "
             "to match");

    // If pointers to local stack values are passed, we cannot tail call
    if (CLI.CB) {
      for (auto &Arg : CLI.CB->args()) {
        Value *Val = Arg.get();
        // Trace the value back through pointer operations
        while (true) {
          Value *Src = Val->stripPointerCastsAndAliases();
          if (auto *GEP = dyn_cast<GetElementPtrInst>(Src))
            Src = GEP->getPointerOperand();
          if (Val == Src)
            break;
          Val = Src;
        }
        if (isa<AllocaInst>(Val)) {
          NoTail(
              "WebAssembly does not support tail calling with stack arguments");
          break;
        }
      }
    }
  }

  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;

  // The generic code may have added an sret argument. If we're lowering an
  // invoke function, the ABI requires that the function pointer be the first
  // argument, so we may have to swap the arguments.
  if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
      Outs[0].Flags.isSRet()) {
    std::swap(Outs[0], Outs[1]);
    std::swap(OutVals[0], OutVals[1]);
  }

  bool HasSwiftSelfArg = false;
  bool HasSwiftErrorArg = false;
  unsigned NumFixedArgs = 0;
  for (unsigned I = 0; I < Outs.size(); ++I) {
    const ISD::OutputArg &Out = Outs[I];
    SDValue &OutVal = OutVals[I];
    HasSwiftSelfArg |= Out.Flags.isSwiftSelf();
    HasSwiftErrorArg |= Out.Flags.isSwiftError();
    if (Out.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
      auto &MFI = MF.getFrameInfo();
      int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
                                     Out.Flags.getNonZeroByValAlign(),
                                     /*isSS=*/false);
      SDValue SizeNode =
          DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
      SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      Chain = DAG.getMemcpy(Chain, DL, FINode, OutVal, SizeNode,
                            Out.Flags.getNonZeroByValAlign(),
                            /*isVolatile*/ false, /*AlwaysInline=*/false,
                            /*CI=*/nullptr, std::nullopt, MachinePointerInfo(),
                            MachinePointerInfo());
      OutVal = FINode;
    }
    // Count the number of fixed args *after* legalization.
    NumFixedArgs += Out.IsFixed;
  }

  bool IsVarArg = CLI.IsVarArg;
  auto PtrVT = getPointerTy(Layout);

  // For swiftcc, emit additional swiftself and swifterror arguments if they
  // aren't present. These additional arguments are also added for the callee
  // signature. They are necessary to match caller and callee signatures for
  // indirect calls.
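  //
  // For example (illustrative), a swiftcc function taking a single i32 is
  // called, and declared, as if it took (i32, ptr swiftself, ptr swifterror),
  // so direct and indirect call sites agree even when the extra arguments are
  // unused.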
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftSelf();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
    if (!HasSwiftErrorArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftError();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    // Outgoing non-fixed arguments are placed in a buffer. First
    // compute their offsets and the total amount of buffer space needed.
    for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
      const ISD::OutputArg &Out = Outs[I];
      SDValue &Arg = OutVals[I];
      EVT VT = Arg.getValueType();
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
      Align Alignment =
          std::max(Out.Flags.getNonZeroOrigAlign(), Layout.getABITypeAlign(Ty));
      unsigned Offset =
          CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                        Offset, VT.getSimpleVT(),
                                        CCValAssign::Full));
    }
  }

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  SDValue FINode;
  if (IsVarArg && NumBytes) {
    // For non-fixed arguments, next emit stores to store the argument values
    // to the stack buffer at the offsets computed above.
    MaybeAlign StackAlign = Layout.getStackAlignment();
    assert(StackAlign && "data layout string is missing stack alignment");
    int FI = MF.getFrameInfo().CreateStackObject(NumBytes, *StackAlign,
                                                 /*isSS=*/false);
    unsigned ValNo = 0;
    SmallVector<SDValue, 8> Chains;
    for (SDValue Arg : drop_begin(OutVals, NumFixedArgs)) {
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
                                DAG.getConstant(Offset, DL, PtrVT));
      Chains.push_back(
          DAG.getStore(Chain, DL, Arg, Add,
                       MachinePointerInfo::getFixedStack(MF, FI, Offset)));
    }
    if (!Chains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else if (IsVarArg) {
    FINode = DAG.getIntPtrConstant(0, DL);
  }

  if (Callee->getOpcode() == ISD::GlobalAddress) {
    // If the callee is a GlobalAddress node (quite common, since every direct
    // call is), turn it into a TargetGlobalAddress node so that
    // LowerGlobalAddress doesn't add MO_GOT, which is not needed for direct
    // calls.
    GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Callee);
    Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                        getPointerTy(DAG.getDataLayout()),
                                        GA->getOffset());
    Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
                         getPointerTy(DAG.getDataLayout()), Callee);
  }

  // Compute the operands for the CALLn node.
  SmallVector<SDValue, 16> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
  // isn't reliable.
  Ops.append(OutVals.begin(),
             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
  // Add a pointer to the vararg buffer.
  if (IsVarArg)
    Ops.push_back(FINode);

  SmallVector<EVT, 8> InTys;
  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers.
    InTys.push_back(In.VT);
  }

  // Lastly, if this is a call to a funcref, we need to add a table.set
  // instruction to the chain and transform the call.
  if (CLI.CB && WebAssembly::isWebAssemblyFuncrefType(
                    CLI.CB->getCalledOperand()->getType())) {
    // In the absence of the function references proposal, where a funcref call
    // would be lowered to call_ref, we use reference types to generate a
    // table.set that installs the funcref in a special table used solely for
    // this purpose, followed by a call_indirect. Here we just generate the
    // table.set, and return its SDValue so that LowerCall can finalize the
    // lowering by generating the call_indirect.
    SDValue Chain = Ops[0];

    MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
        MF.getContext(), Subtarget);
    SDValue Sym = DAG.getMCSymbol(Table, PtrVT);
    SDValue TableSlot = DAG.getConstant(0, DL, MVT::i32);
    SDValue TableSetOps[] = {Chain, Sym, TableSlot, Callee};
    SDValue TableSet = DAG.getMemIntrinsicNode(
        WebAssemblyISD::TABLE_SET, DL, DAG.getVTList(MVT::Other), TableSetOps,
        MVT::funcref,
        // Machine Mem Operand args
        MachinePointerInfo(
            WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF),
        CLI.CB->getCalledOperand()->getPointerAlignment(DAG.getDataLayout()),
        MachineMemOperand::MOStore);

    Ops[0] = TableSet; // The new chain is the TableSet itself
  }

  if (CLI.IsTailCall) {
    // ret_calls do not return values to the current frame
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
  }

  InTys.push_back(MVT::Other);
  SDVTList InTyList = DAG.getVTList(InTys);
  SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, InTyList, Ops);

  for (size_t I = 0; I < Ins.size(); ++I)
    InVals.push_back(Res.getValue(I));

  // Return the chain
  return Res.getValue(Ins.size());
}

bool WebAssemblyTargetLowering::CanLowerReturn(
    CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext & /*Context*/) const {
  // WebAssembly can only handle returning tuples with multivalue enabled
  return WebAssembly::canLowerReturn(Outs.size(), Subtarget);
}

SDValue WebAssemblyTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL, 1441 SelectionDAG &DAG) const { 1442 assert(WebAssembly::canLowerReturn(Outs.size(), Subtarget) && 1443 "MVP WebAssembly can only return up to one value"); 1444 if (!callingConvSupported(CallConv)) 1445 fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions"); 1446 1447 SmallVector<SDValue, 4> RetOps(1, Chain); 1448 RetOps.append(OutVals.begin(), OutVals.end()); 1449 Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps); 1450 1451 // Record the number and types of the return values. 1452 for (const ISD::OutputArg &Out : Outs) { 1453 assert(!Out.Flags.isByVal() && "byval is not valid for return values"); 1454 assert(!Out.Flags.isNest() && "nest is not valid for return values"); 1455 assert(Out.IsFixed && "non-fixed return value is not valid"); 1456 if (Out.Flags.isInAlloca()) 1457 fail(DL, DAG, "WebAssembly hasn't implemented inalloca results"); 1458 if (Out.Flags.isInConsecutiveRegs()) 1459 fail(DL, DAG, "WebAssembly hasn't implemented cons regs results"); 1460 if (Out.Flags.isInConsecutiveRegsLast()) 1461 fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results"); 1462 } 1463 1464 return Chain; 1465 } 1466 1467 SDValue WebAssemblyTargetLowering::LowerFormalArguments( 1468 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, 1469 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, 1470 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 1471 if (!callingConvSupported(CallConv)) 1472 fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions"); 1473 1474 MachineFunction &MF = DAG.getMachineFunction(); 1475 auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>(); 1476 1477 // Set up the incoming ARGUMENTS value, which serves to represent the liveness 1478 // of the incoming values before they're represented by virtual registers. 1479 MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS); 1480 1481 bool HasSwiftErrorArg = false; 1482 bool HasSwiftSelfArg = false; 1483 for (const ISD::InputArg &In : Ins) { 1484 HasSwiftSelfArg |= In.Flags.isSwiftSelf(); 1485 HasSwiftErrorArg |= In.Flags.isSwiftError(); 1486 if (In.Flags.isInAlloca()) 1487 fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments"); 1488 if (In.Flags.isNest()) 1489 fail(DL, DAG, "WebAssembly hasn't implemented nest arguments"); 1490 if (In.Flags.isInConsecutiveRegs()) 1491 fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments"); 1492 if (In.Flags.isInConsecutiveRegsLast()) 1493 fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments"); 1494 // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in 1495 // registers. 1496 InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT, 1497 DAG.getTargetConstant(InVals.size(), 1498 DL, MVT::i32)) 1499 : DAG.getUNDEF(In.VT)); 1500 1501 // Record the number and types of arguments. 1502 MFI->addParam(In.VT); 1503 } 1504 1505 // For swiftcc, emit additional swiftself and swifterror arguments if there 1506 // aren't any. These additional arguments are also added to the callee 1507 // signature; they are necessary to match the callee and caller signatures 1508 // for indirect calls.
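// Illustrative example (not from the original source): a swiftcc function
// declared as (i32) -> () is recorded with the padded signature
// (i32, ptr, ptr) -> (), so that callers and callees always agree on the
// swiftself/swifterror slots when the function is invoked via call_indirect.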
1509 auto PtrVT = getPointerTy(MF.getDataLayout()); 1510 if (CallConv == CallingConv::Swift) { 1511 if (!HasSwiftSelfArg) { 1512 MFI->addParam(PtrVT); 1513 } 1514 if (!HasSwiftErrorArg) { 1515 MFI->addParam(PtrVT); 1516 } 1517 } 1518 // Varargs are copied into a buffer allocated by the caller, and a pointer to 1519 // the buffer is passed as an argument. 1520 if (IsVarArg) { 1521 MVT PtrVT = getPointerTy(MF.getDataLayout()); 1522 Register VarargVreg = 1523 MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT)); 1524 MFI->setVarargBufferVreg(VarargVreg); 1525 Chain = DAG.getCopyToReg( 1526 Chain, DL, VarargVreg, 1527 DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT, 1528 DAG.getTargetConstant(Ins.size(), DL, MVT::i32))); 1529 MFI->addParam(PtrVT); 1530 } 1531 1532 // Record the number and types of arguments and results. 1533 SmallVector<MVT, 4> Params; 1534 SmallVector<MVT, 4> Results; 1535 computeSignatureVTs(MF.getFunction().getFunctionType(), &MF.getFunction(), 1536 MF.getFunction(), DAG.getTarget(), Params, Results); 1537 for (MVT VT : Results) 1538 MFI->addResult(VT); 1539 // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify 1540 // the param logic here with ComputeSignatureVTs 1541 assert(MFI->getParams().size() == Params.size() && 1542 std::equal(MFI->getParams().begin(), MFI->getParams().end(), 1543 Params.begin())); 1544 1545 return Chain; 1546 } 1547 1548 void WebAssemblyTargetLowering::ReplaceNodeResults( 1549 SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const { 1550 switch (N->getOpcode()) { 1551 case ISD::SIGN_EXTEND_INREG: 1552 // Do not add any results, signifying that N should not be custom lowered 1553 // after all. This happens because simd128 turns on custom lowering for 1554 // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an 1555 // illegal type. 1556 break; 1557 case ISD::SIGN_EXTEND_VECTOR_INREG: 1558 case ISD::ZERO_EXTEND_VECTOR_INREG: 1559 // Do not add any results, signifying that N should not be custom lowered. 1560 // EXTEND_VECTOR_INREG is implemented for some vectors, but not all. 1561 break; 1562 case ISD::ADD: 1563 case ISD::SUB: 1564 Results.push_back(Replace128Op(N, DAG)); 1565 break; 1566 default: 1567 llvm_unreachable( 1568 "ReplaceNodeResults not implemented for this op for WebAssembly!"); 1569 } 1570 } 1571 1572 //===----------------------------------------------------------------------===// 1573 // Custom lowering hooks. 
1574 //===----------------------------------------------------------------------===// 1575 1576 SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op, 1577 SelectionDAG &DAG) const { 1578 SDLoc DL(Op); 1579 switch (Op.getOpcode()) { 1580 default: 1581 llvm_unreachable("unimplemented operation lowering"); 1582 return SDValue(); 1583 case ISD::FrameIndex: 1584 return LowerFrameIndex(Op, DAG); 1585 case ISD::GlobalAddress: 1586 return LowerGlobalAddress(Op, DAG); 1587 case ISD::GlobalTLSAddress: 1588 return LowerGlobalTLSAddress(Op, DAG); 1589 case ISD::ExternalSymbol: 1590 return LowerExternalSymbol(Op, DAG); 1591 case ISD::JumpTable: 1592 return LowerJumpTable(Op, DAG); 1593 case ISD::BR_JT: 1594 return LowerBR_JT(Op, DAG); 1595 case ISD::VASTART: 1596 return LowerVASTART(Op, DAG); 1597 case ISD::BlockAddress: 1598 case ISD::BRIND: 1599 fail(DL, DAG, "WebAssembly hasn't implemented computed gotos"); 1600 return SDValue(); 1601 case ISD::RETURNADDR: 1602 return LowerRETURNADDR(Op, DAG); 1603 case ISD::FRAMEADDR: 1604 return LowerFRAMEADDR(Op, DAG); 1605 case ISD::CopyToReg: 1606 return LowerCopyToReg(Op, DAG); 1607 case ISD::EXTRACT_VECTOR_ELT: 1608 case ISD::INSERT_VECTOR_ELT: 1609 return LowerAccessVectorElement(Op, DAG); 1610 case ISD::INTRINSIC_VOID: 1611 case ISD::INTRINSIC_WO_CHAIN: 1612 case ISD::INTRINSIC_W_CHAIN: 1613 return LowerIntrinsic(Op, DAG); 1614 case ISD::SIGN_EXTEND_INREG: 1615 return LowerSIGN_EXTEND_INREG(Op, DAG); 1616 case ISD::ZERO_EXTEND_VECTOR_INREG: 1617 case ISD::SIGN_EXTEND_VECTOR_INREG: 1618 return LowerEXTEND_VECTOR_INREG(Op, DAG); 1619 case ISD::BUILD_VECTOR: 1620 return LowerBUILD_VECTOR(Op, DAG); 1621 case ISD::VECTOR_SHUFFLE: 1622 return LowerVECTOR_SHUFFLE(Op, DAG); 1623 case ISD::SETCC: 1624 return LowerSETCC(Op, DAG); 1625 case ISD::SHL: 1626 case ISD::SRA: 1627 case ISD::SRL: 1628 return LowerShift(Op, DAG); 1629 case ISD::FP_TO_SINT_SAT: 1630 case ISD::FP_TO_UINT_SAT: 1631 return LowerFP_TO_INT_SAT(Op, DAG); 1632 case ISD::LOAD: 1633 return LowerLoad(Op, DAG); 1634 case ISD::STORE: 1635 return LowerStore(Op, DAG); 1636 case ISD::CTPOP: 1637 case ISD::CTLZ: 1638 case ISD::CTTZ: 1639 return DAG.UnrollVectorOp(Op.getNode()); 1640 case ISD::CLEAR_CACHE: 1641 report_fatal_error("llvm.clear_cache is not supported on wasm"); 1642 case ISD::SMUL_LOHI: 1643 case ISD::UMUL_LOHI: 1644 return LowerMUL_LOHI(Op, DAG); 1645 } 1646 } 1647 1648 static bool IsWebAssemblyGlobal(SDValue Op) { 1649 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) 1650 return WebAssembly::isWasmVarAddressSpace(GA->getAddressSpace()); 1651 1652 return false; 1653 } 1654 1655 static std::optional<unsigned> IsWebAssemblyLocal(SDValue Op, 1656 SelectionDAG &DAG) { 1657 const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op); 1658 if (!FI) 1659 return std::nullopt; 1660 1661 auto &MF = DAG.getMachineFunction(); 1662 return WebAssemblyFrameLowering::getLocalForStackObject(MF, FI->getIndex()); 1663 } 1664 1665 SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op, 1666 SelectionDAG &DAG) const { 1667 SDLoc DL(Op); 1668 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode()); 1669 const SDValue &Value = SN->getValue(); 1670 const SDValue &Base = SN->getBasePtr(); 1671 const SDValue &Offset = SN->getOffset(); 1672 1673 if (IsWebAssemblyGlobal(Base)) { 1674 if (!Offset->isUndef()) 1675 report_fatal_error("unexpected offset when storing to webassembly global", 1676 false); 1677 1678 SDVTList Tys = DAG.getVTList(MVT::Other); 1679 SDValue Ops[] = {SN->getChain(), Value, 
Base}; 1680 return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_SET, DL, Tys, Ops, 1681 SN->getMemoryVT(), SN->getMemOperand()); 1682 } 1683 1684 if (std::optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) { 1685 if (!Offset->isUndef()) 1686 report_fatal_error("unexpected offset when storing to webassembly local", 1687 false); 1688 1689 SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32); 1690 SDVTList Tys = DAG.getVTList(MVT::Other); // The chain. 1691 SDValue Ops[] = {SN->getChain(), Idx, Value}; 1692 return DAG.getNode(WebAssemblyISD::LOCAL_SET, DL, Tys, Ops); 1693 } 1694 1695 if (WebAssembly::isWasmVarAddressSpace(SN->getAddressSpace())) 1696 report_fatal_error( 1697 "Encountered an unlowerable store to the wasm_var address space", 1698 false); 1699 1700 return Op; 1701 } 1702 1703 SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op, 1704 SelectionDAG &DAG) const { 1705 SDLoc DL(Op); 1706 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode()); 1707 const SDValue &Base = LN->getBasePtr(); 1708 const SDValue &Offset = LN->getOffset(); 1709 1710 if (IsWebAssemblyGlobal(Base)) { 1711 if (!Offset->isUndef()) 1712 report_fatal_error( 1713 "unexpected offset when loading from webassembly global", false); 1714 1715 SDVTList Tys = DAG.getVTList(LN->getValueType(0), MVT::Other); 1716 SDValue Ops[] = {LN->getChain(), Base}; 1717 return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_GET, DL, Tys, Ops, 1718 LN->getMemoryVT(), LN->getMemOperand()); 1719 } 1720 1721 if (std::optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) { 1722 if (!Offset->isUndef()) 1723 report_fatal_error( 1724 "unexpected offset when loading from webassembly local", false); 1725 1726 SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32); 1727 EVT LocalVT = LN->getValueType(0); 1728 SDValue LocalGet = DAG.getNode(WebAssemblyISD::LOCAL_GET, DL, LocalVT, 1729 {LN->getChain(), Idx}); 1730 SDValue Result = DAG.getMergeValues({LocalGet, LN->getChain()}, DL); 1731 assert(Result->getNumValues() == 2 && "Loads must carry a chain!"); 1732 return Result; 1733 } 1734 1735 if (WebAssembly::isWasmVarAddressSpace(LN->getAddressSpace())) 1736 report_fatal_error( 1737 "Encountered an unlowerable load from the wasm_var address space", 1738 false); 1739 1740 return Op; 1741 } 1742 1743 SDValue WebAssemblyTargetLowering::LowerMUL_LOHI(SDValue Op, 1744 SelectionDAG &DAG) const { 1745 assert(Subtarget->hasWideArithmetic()); 1746 assert(Op.getValueType() == MVT::i64); 1747 SDLoc DL(Op); 1748 unsigned Opcode; 1749 switch (Op.getOpcode()) { 1750 case ISD::UMUL_LOHI: 1751 Opcode = WebAssemblyISD::I64_MUL_WIDE_U; 1752 break; 1753 case ISD::SMUL_LOHI: 1754 Opcode = WebAssemblyISD::I64_MUL_WIDE_S; 1755 break; 1756 default: 1757 llvm_unreachable("unexpected opcode"); 1758 } 1759 SDValue LHS = Op.getOperand(0); 1760 SDValue RHS = Op.getOperand(1); 1761 SDValue Hi = 1762 DAG.getNode(Opcode, DL, DAG.getVTList(MVT::i64, MVT::i64), LHS, RHS); 1763 SDValue Lo(Hi.getNode(), 1); 1764 SDValue Ops[] = {Hi, Lo}; 1765 return DAG.getMergeValues(Ops, DL); 1766 } 1767 1768 SDValue WebAssemblyTargetLowering::Replace128Op(SDNode *N, 1769 SelectionDAG &DAG) const { 1770 assert(Subtarget->hasWideArithmetic()); 1771 assert(N->getValueType(0) == MVT::i128); 1772 SDLoc DL(N); 1773 unsigned Opcode; 1774 switch (N->getOpcode()) { 1775 case ISD::ADD: 1776 Opcode = WebAssemblyISD::I64_ADD128; 1777 break; 1778 case ISD::SUB: 1779 Opcode = WebAssemblyISD::I64_SUB128; 1780 break; 1781 default: 1782 llvm_unreachable("unexpected opcode"); 1783 } 1784 SDValue 
LHS = N->getOperand(0); 1785 SDValue RHS = N->getOperand(1); 1786 1787 SDValue C0 = DAG.getConstant(0, DL, MVT::i64); 1788 SDValue C1 = DAG.getConstant(1, DL, MVT::i64); 1789 SDValue LHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, LHS, C0); 1790 SDValue LHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, LHS, C1); 1791 SDValue RHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, RHS, C0); 1792 SDValue RHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, RHS, C1); 1793 SDValue Result_LO = DAG.getNode(Opcode, DL, DAG.getVTList(MVT::i64, MVT::i64), 1794 LHS_0, LHS_1, RHS_0, RHS_1); 1795 SDValue Result_HI(Result_LO.getNode(), 1); 1796 return DAG.getNode(ISD::BUILD_PAIR, DL, N->getVTList(), Result_LO, Result_HI); 1797 } 1798 1799 SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op, 1800 SelectionDAG &DAG) const { 1801 SDValue Src = Op.getOperand(2); 1802 if (isa<FrameIndexSDNode>(Src.getNode())) { 1803 // CopyToReg nodes don't support FrameIndex operands. Other targets select 1804 // the FI to some LEA-like instruction, but since we don't have that, we 1805 // need to insert some kind of instruction that can take an FI operand and 1806 // produce a value usable by CopyToReg (i.e. in a vreg). So insert a dummy 1807 // local.copy between Op and its FI operand. 1808 SDValue Chain = Op.getOperand(0); 1809 SDLoc DL(Op); 1810 Register Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg(); 1811 EVT VT = Src.getValueType(); 1812 SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32 1813 : WebAssembly::COPY_I64, 1814 DL, VT, Src), 1815 0); 1816 return Op.getNode()->getNumValues() == 1 1817 ? DAG.getCopyToReg(Chain, DL, Reg, Copy) 1818 : DAG.getCopyToReg(Chain, DL, Reg, Copy, 1819 Op.getNumOperands() == 4 ? Op.getOperand(3) 1820 : SDValue()); 1821 } 1822 return SDValue(); 1823 } 1824 1825 SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op, 1826 SelectionDAG &DAG) const { 1827 int FI = cast<FrameIndexSDNode>(Op)->getIndex(); 1828 return DAG.getTargetFrameIndex(FI, Op.getValueType()); 1829 } 1830 1831 SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op, 1832 SelectionDAG &DAG) const { 1833 SDLoc DL(Op); 1834 1835 if (!Subtarget->getTargetTriple().isOSEmscripten()) { 1836 fail(DL, DAG, 1837 "Non-Emscripten WebAssembly hasn't implemented " 1838 "__builtin_return_address"); 1839 return SDValue(); 1840 } 1841 1842 if (verifyReturnAddressArgumentIsConstant(Op, DAG)) 1843 return SDValue(); 1844 1845 unsigned Depth = Op.getConstantOperandVal(0); 1846 MakeLibCallOptions CallOptions; 1847 return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(), 1848 {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL) 1849 .first; 1850 } 1851 1852 SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op, 1853 SelectionDAG &DAG) const { 1854 // Non-zero depths are not supported by WebAssembly currently. Use the 1855 // legalizer's default expansion, which is to return 0 (what this function is 1856 // documented to do).
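// Illustrative example (not from the original source):
//   @llvm.frameaddress(i32 0) -> CopyFromReg from the frame register
//   @llvm.frameaddress(i32 1) -> SDValue(), so the legalizer expands it to 0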
1857 if (Op.getConstantOperandVal(0) > 0) 1858 return SDValue(); 1859 1860 DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true); 1861 EVT VT = Op.getValueType(); 1862 Register FP = 1863 Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction()); 1864 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT); 1865 } 1866 1867 SDValue 1868 WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op, 1869 SelectionDAG &DAG) const { 1870 SDLoc DL(Op); 1871 const auto *GA = cast<GlobalAddressSDNode>(Op); 1872 1873 MachineFunction &MF = DAG.getMachineFunction(); 1874 if (!MF.getSubtarget<WebAssemblySubtarget>().hasBulkMemory()) 1875 report_fatal_error("cannot use thread-local storage without bulk memory", 1876 false); 1877 1878 const GlobalValue *GV = GA->getGlobal(); 1879 1880 // Currently only Emscripten supports dynamic linking with threads. Therefore, 1881 // on other targets, if we have thread-local storage, only the local-exec 1882 // model is possible. 1883 auto model = Subtarget->getTargetTriple().isOSEmscripten() 1884 ? GV->getThreadLocalMode() 1885 : GlobalValue::LocalExecTLSModel; 1886 1887 // Unsupported TLS modes 1888 assert(model != GlobalValue::NotThreadLocal); 1889 assert(model != GlobalValue::InitialExecTLSModel); 1890 1891 if (model == GlobalValue::LocalExecTLSModel || 1892 model == GlobalValue::LocalDynamicTLSModel || 1893 (model == GlobalValue::GeneralDynamicTLSModel && 1894 getTargetMachine().shouldAssumeDSOLocal(GV))) { 1895 // For DSO-local TLS variables we use an offset from __tls_base 1896 1897 MVT PtrVT = getPointerTy(DAG.getDataLayout()); 1898 auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64 1899 : WebAssembly::GLOBAL_GET_I32; 1900 const char *BaseName = MF.createExternalSymbolName("__tls_base"); 1901 1902 SDValue BaseAddr( 1903 DAG.getMachineNode(GlobalGet, DL, PtrVT, 1904 DAG.getTargetExternalSymbol(BaseName, PtrVT)), 1905 0); 1906 1907 SDValue TLSOffset = DAG.getTargetGlobalAddress( 1908 GV, DL, PtrVT, GA->getOffset(), WebAssemblyII::MO_TLS_BASE_REL); 1909 SDValue SymOffset = 1910 DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, TLSOffset); 1911 1912 return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymOffset); 1913 } 1914 1915 assert(model == GlobalValue::GeneralDynamicTLSModel); 1916 1917 EVT VT = Op.getValueType(); 1918 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT, 1919 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, 1920 GA->getOffset(), 1921 WebAssemblyII::MO_GOT_TLS)); 1922 } 1923 1924 SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op, 1925 SelectionDAG &DAG) const { 1926 SDLoc DL(Op); 1927 const auto *GA = cast<GlobalAddressSDNode>(Op); 1928 EVT VT = Op.getValueType(); 1929 assert(GA->getTargetFlags() == 0 && 1930 "Unexpected target flags on generic GlobalAddressSDNode"); 1931 if (!WebAssembly::isValidAddressSpace(GA->getAddressSpace())) 1932 fail(DL, DAG, "Invalid address space for WebAssembly target"); 1933 1934 unsigned OperandFlags = 0; 1935 const GlobalValue *GV = GA->getGlobal(); 1936 // Since WebAssembly tables cannot yet be shared across modules, we don't 1937 // need special treatment for tables in PIC mode.
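// Illustrative sketch of the PIC addressing below (not from the original
// source): a DSO-local data symbol is addressed as __memory_base plus a
// @MBREL offset, a DSO-local function symbol as __table_base plus a @TBREL
// offset, and non-DSO-local symbols are loaded through their GOT entry
// (an imported wasm global) via MO_GOT.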
1938 if (isPositionIndependent() && 1939 !WebAssembly::isWebAssemblyTableType(GV->getValueType())) { 1940 if (getTargetMachine().shouldAssumeDSOLocal(GV)) { 1941 MachineFunction &MF = DAG.getMachineFunction(); 1942 MVT PtrVT = getPointerTy(MF.getDataLayout()); 1943 const char *BaseName; 1944 if (GV->getValueType()->isFunctionTy()) { 1945 BaseName = MF.createExternalSymbolName("__table_base"); 1946 OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL; 1947 } else { 1948 BaseName = MF.createExternalSymbolName("__memory_base"); 1949 OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL; 1950 } 1951 SDValue BaseAddr = 1952 DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, 1953 DAG.getTargetExternalSymbol(BaseName, PtrVT)); 1954 1955 SDValue SymAddr = DAG.getNode( 1956 WebAssemblyISD::WrapperREL, DL, VT, 1957 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(), 1958 OperandFlags)); 1959 1960 return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr); 1961 } 1962 OperandFlags = WebAssemblyII::MO_GOT; 1963 } 1964 1965 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT, 1966 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, 1967 GA->getOffset(), OperandFlags)); 1968 } 1969 1970 SDValue 1971 WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op, 1972 SelectionDAG &DAG) const { 1973 SDLoc DL(Op); 1974 const auto *ES = cast<ExternalSymbolSDNode>(Op); 1975 EVT VT = Op.getValueType(); 1976 assert(ES->getTargetFlags() == 0 && 1977 "Unexpected target flags on generic ExternalSymbolSDNode"); 1978 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT, 1979 DAG.getTargetExternalSymbol(ES->getSymbol(), VT)); 1980 } 1981 1982 SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op, 1983 SelectionDAG &DAG) const { 1984 // There's no need for a Wrapper node because we always incorporate a jump 1985 // table operand into a BR_TABLE instruction, rather than ever 1986 // materializing it in a register. 1987 const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 1988 return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(), 1989 JT->getTargetFlags()); 1990 } 1991 1992 SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op, 1993 SelectionDAG &DAG) const { 1994 SDLoc DL(Op); 1995 SDValue Chain = Op.getOperand(0); 1996 const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1)); 1997 SDValue Index = Op.getOperand(2); 1998 assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags"); 1999 2000 SmallVector<SDValue, 8> Ops; 2001 Ops.push_back(Chain); 2002 Ops.push_back(Index); 2003 2004 MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo(); 2005 const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs; 2006 2007 // Add an operand for each case. 2008 for (auto *MBB : MBBs) 2009 Ops.push_back(DAG.getBasicBlock(MBB)); 2010 2011 // Add the first MBB as a dummy default target for now. This will be replaced 2012 // with the proper default target (and the preceding range check eliminated) 2013 // if possible by WebAssemblyFixBrTableDefaults. 
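// For example (illustrative), a jump table with blocks %bb0..%bb2 produces
//   BR_TABLE chain, index, %bb0, %bb1, %bb2, %bb0
// where the trailing %bb0 is the dummy default operand described above.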
2014 Ops.push_back(DAG.getBasicBlock(*MBBs.begin())); 2015 return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops); 2016 } 2017 2018 SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op, 2019 SelectionDAG &DAG) const { 2020 SDLoc DL(Op); 2021 EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout()); 2022 2023 auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>(); 2024 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2025 2026 SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL, 2027 MFI->getVarargBufferVreg(), PtrVT); 2028 return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1), 2029 MachinePointerInfo(SV)); 2030 } 2031 2032 SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op, 2033 SelectionDAG &DAG) const { 2034 MachineFunction &MF = DAG.getMachineFunction(); 2035 unsigned IntNo; 2036 switch (Op.getOpcode()) { 2037 case ISD::INTRINSIC_VOID: 2038 case ISD::INTRINSIC_W_CHAIN: 2039 IntNo = Op.getConstantOperandVal(1); 2040 break; 2041 case ISD::INTRINSIC_WO_CHAIN: 2042 IntNo = Op.getConstantOperandVal(0); 2043 break; 2044 default: 2045 llvm_unreachable("Invalid intrinsic"); 2046 } 2047 SDLoc DL(Op); 2048 2049 switch (IntNo) { 2050 default: 2051 return SDValue(); // Don't custom lower most intrinsics. 2052 2053 case Intrinsic::wasm_lsda: { 2054 auto PtrVT = getPointerTy(MF.getDataLayout()); 2055 const char *SymName = MF.createExternalSymbolName( 2056 "GCC_except_table" + std::to_string(MF.getFunctionNumber())); 2057 if (isPositionIndependent()) { 2058 SDValue Node = DAG.getTargetExternalSymbol( 2059 SymName, PtrVT, WebAssemblyII::MO_MEMORY_BASE_REL); 2060 const char *BaseName = MF.createExternalSymbolName("__memory_base"); 2061 SDValue BaseAddr = 2062 DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, 2063 DAG.getTargetExternalSymbol(BaseName, PtrVT)); 2064 SDValue SymAddr = 2065 DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, Node); 2066 return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymAddr); 2067 } 2068 SDValue Node = DAG.getTargetExternalSymbol(SymName, PtrVT); 2069 return DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, Node); 2070 } 2071 2072 case Intrinsic::wasm_shuffle: { 2073 // Drop in-chain and replace undefs, but otherwise pass through unchanged 2074 SDValue Ops[18]; 2075 size_t OpIdx = 0; 2076 Ops[OpIdx++] = Op.getOperand(1); 2077 Ops[OpIdx++] = Op.getOperand(2); 2078 while (OpIdx < 18) { 2079 const SDValue &MaskIdx = Op.getOperand(OpIdx + 1); 2080 if (MaskIdx.isUndef() || MaskIdx.getNode()->getAsZExtVal() >= 32) { 2081 bool isTarget = MaskIdx.getNode()->getOpcode() == ISD::TargetConstant; 2082 Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32, isTarget); 2083 } else { 2084 Ops[OpIdx++] = MaskIdx; 2085 } 2086 } 2087 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops); 2088 } 2089 2090 case Intrinsic::thread_pointer: { 2091 MVT PtrVT = getPointerTy(DAG.getDataLayout()); 2092 auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64 2093 : WebAssembly::GLOBAL_GET_I32; 2094 const char *TlsBase = MF.createExternalSymbolName("__tls_base"); 2095 return SDValue( 2096 DAG.getMachineNode(GlobalGet, DL, PtrVT, 2097 DAG.getTargetExternalSymbol(TlsBase, PtrVT)), 2098 0); 2099 } 2100 } 2101 } 2102 2103 SDValue 2104 WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, 2105 SelectionDAG &DAG) const { 2106 SDLoc DL(Op); 2107 // If sign extension operations are disabled, allow sext_inreg only if operand 2108 // is a vector extract of an i8 or i16 lane. 
SIMD does not depend on sign 2109 // extension operations, but allowing sext_inreg in this context lets us have 2110 // simple patterns to select extract_lane_s instructions. Expanding sext_inreg 2111 // everywhere would be simpler in this file, but would necessitate large and 2112 // brittle patterns to undo the expansion and select extract_lane_s 2113 // instructions. 2114 assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128()); 2115 if (Op.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT) 2116 return SDValue(); 2117 2118 const SDValue &Extract = Op.getOperand(0); 2119 MVT VecT = Extract.getOperand(0).getSimpleValueType(); 2120 if (VecT.getVectorElementType().getSizeInBits() > 32) 2121 return SDValue(); 2122 MVT ExtractedLaneT = 2123 cast<VTSDNode>(Op.getOperand(1).getNode())->getVT().getSimpleVT(); 2124 MVT ExtractedVecT = 2125 MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits()); 2126 if (ExtractedVecT == VecT) 2127 return Op; 2128 2129 // Bitcast vector to appropriate type to ensure ISel pattern coverage 2130 const SDNode *Index = Extract.getOperand(1).getNode(); 2131 if (!isa<ConstantSDNode>(Index)) 2132 return SDValue(); 2133 unsigned IndexVal = Index->getAsZExtVal(); 2134 unsigned Scale = 2135 ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements(); 2136 assert(Scale > 1); 2137 SDValue NewIndex = 2138 DAG.getConstant(IndexVal * Scale, DL, Index->getValueType(0)); 2139 SDValue NewExtract = DAG.getNode( 2140 ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(), 2141 DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex); 2142 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract, 2143 Op.getOperand(1)); 2144 } 2145 2146 SDValue 2147 WebAssemblyTargetLowering::LowerEXTEND_VECTOR_INREG(SDValue Op, 2148 SelectionDAG &DAG) const { 2149 SDLoc DL(Op); 2150 EVT VT = Op.getValueType(); 2151 SDValue Src = Op.getOperand(0); 2152 EVT SrcVT = Src.getValueType(); 2153 2154 if (SrcVT.getVectorElementType() == MVT::i1 || 2155 SrcVT.getVectorElementType() == MVT::i64) 2156 return SDValue(); 2157 2158 assert(VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits() == 0 && 2159 "Unexpected extension factor."); 2160 unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits(); 2161 2162 if (Scale != 2 && Scale != 4 && Scale != 8) 2163 return SDValue(); 2164 2165 unsigned Ext; 2166 switch (Op.getOpcode()) { 2167 case ISD::ZERO_EXTEND_VECTOR_INREG: 2168 Ext = WebAssemblyISD::EXTEND_LOW_U; 2169 break; 2170 case ISD::SIGN_EXTEND_VECTOR_INREG: 2171 Ext = WebAssemblyISD::EXTEND_LOW_S; 2172 break; 2173 } 2174 2175 SDValue Ret = Src; 2176 while (Scale != 1) { 2177 Ret = DAG.getNode(Ext, DL, 2178 Ret.getValueType() 2179 .widenIntegerVectorElementType(*DAG.getContext()) 2180 .getHalfNumVectorElementsVT(*DAG.getContext()), 2181 Ret); 2182 Scale /= 2; 2183 } 2184 assert(Ret.getValueType() == VT); 2185 return Ret; 2186 } 2187 2188 static SDValue LowerConvertLow(SDValue Op, SelectionDAG &DAG) { 2189 SDLoc DL(Op); 2190 if (Op.getValueType() != MVT::v2f64) 2191 return SDValue(); 2192 2193 auto GetConvertedLane = [](SDValue Op, unsigned &Opcode, SDValue &SrcVec, 2194 unsigned &Index) -> bool { 2195 switch (Op.getOpcode()) { 2196 case ISD::SINT_TO_FP: 2197 Opcode = WebAssemblyISD::CONVERT_LOW_S; 2198 break; 2199 case ISD::UINT_TO_FP: 2200 Opcode = WebAssemblyISD::CONVERT_LOW_U; 2201 break; 2202 case ISD::FP_EXTEND: 2203 Opcode = WebAssemblyISD::PROMOTE_LOW; 2204 break; 2205 default: 2206 return false; 2207 } 2208 2209 auto ExtractVector = 
Op.getOperand(0); 2210 if (ExtractVector.getOpcode() != ISD::EXTRACT_VECTOR_ELT) 2211 return false; 2212 2213 if (!isa<ConstantSDNode>(ExtractVector.getOperand(1).getNode())) 2214 return false; 2215 2216 SrcVec = ExtractVector.getOperand(0); 2217 Index = ExtractVector.getConstantOperandVal(1); 2218 return true; 2219 }; 2220 2221 unsigned LHSOpcode, RHSOpcode, LHSIndex, RHSIndex; 2222 SDValue LHSSrcVec, RHSSrcVec; 2223 if (!GetConvertedLane(Op.getOperand(0), LHSOpcode, LHSSrcVec, LHSIndex) || 2224 !GetConvertedLane(Op.getOperand(1), RHSOpcode, RHSSrcVec, RHSIndex)) 2225 return SDValue(); 2226 2227 if (LHSOpcode != RHSOpcode) 2228 return SDValue(); 2229 2230 MVT ExpectedSrcVT; 2231 switch (LHSOpcode) { 2232 case WebAssemblyISD::CONVERT_LOW_S: 2233 case WebAssemblyISD::CONVERT_LOW_U: 2234 ExpectedSrcVT = MVT::v4i32; 2235 break; 2236 case WebAssemblyISD::PROMOTE_LOW: 2237 ExpectedSrcVT = MVT::v4f32; 2238 break; 2239 } 2240 if (LHSSrcVec.getValueType() != ExpectedSrcVT) 2241 return SDValue(); 2242 2243 auto Src = LHSSrcVec; 2244 if (LHSIndex != 0 || RHSIndex != 1 || LHSSrcVec != RHSSrcVec) { 2245 // Shuffle the source vector so that the converted lanes are the low lanes. 2246 Src = DAG.getVectorShuffle( 2247 ExpectedSrcVT, DL, LHSSrcVec, RHSSrcVec, 2248 {static_cast<int>(LHSIndex), static_cast<int>(RHSIndex) + 4, -1, -1}); 2249 } 2250 return DAG.getNode(LHSOpcode, DL, MVT::v2f64, Src); 2251 } 2252 2253 SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op, 2254 SelectionDAG &DAG) const { 2255 MVT VT = Op.getSimpleValueType(); 2256 if (VT == MVT::v8f16) { 2257 // BUILD_VECTOR can't handle FP16 operands since Wasm doesn't have a scalar 2258 // FP16 type, so cast them to I16s. 2259 MVT IVT = VT.changeVectorElementType(MVT::i16); 2260 SmallVector<SDValue, 8> NewOps; 2261 for (unsigned I = 0, E = Op.getNumOperands(); I < E; ++I) 2262 NewOps.push_back(DAG.getBitcast(MVT::i16, Op.getOperand(I))); 2263 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, SDLoc(), IVT, NewOps); 2264 return DAG.getBitcast(VT, Res); 2265 } 2266 2267 if (auto ConvertLow = LowerConvertLow(Op, DAG)) 2268 return ConvertLow; 2269 2270 SDLoc DL(Op); 2271 const EVT VecT = Op.getValueType(); 2272 const EVT LaneT = Op.getOperand(0).getValueType(); 2273 const size_t Lanes = Op.getNumOperands(); 2274 bool CanSwizzle = VecT == MVT::v16i8; 2275 2276 // BUILD_VECTORs are lowered to the instruction that initializes the highest 2277 // possible number of lanes at once followed by a sequence of replace_lane 2278 // instructions to individually initialize any remaining lanes. 2279 2280 // TODO: Tune this. For example, lanewise swizzling is very expensive, so 2281 // swizzled lanes should be given greater weight. 2282 2283 // TODO: Investigate looping rather than always extracting/replacing specific 2284 // lanes to fill gaps. 2285 2286 auto IsConstant = [](const SDValue &V) { 2287 return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP; 2288 }; 2289 2290 // Returns the source vector and index vector pair if they exist.
Checks for: 2291 // (extract_vector_elt 2292 // $src, 2293 // (sign_extend_inreg (extract_vector_elt $indices, $i)) 2294 // ) 2295 auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) { 2296 auto Bail = std::make_pair(SDValue(), SDValue()); 2297 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 2298 return Bail; 2299 const SDValue &SwizzleSrc = Lane->getOperand(0); 2300 const SDValue &IndexExt = Lane->getOperand(1); 2301 if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG) 2302 return Bail; 2303 const SDValue &Index = IndexExt->getOperand(0); 2304 if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 2305 return Bail; 2306 const SDValue &SwizzleIndices = Index->getOperand(0); 2307 if (SwizzleSrc.getValueType() != MVT::v16i8 || 2308 SwizzleIndices.getValueType() != MVT::v16i8 || 2309 Index->getOperand(1)->getOpcode() != ISD::Constant || 2310 Index->getConstantOperandVal(1) != I) 2311 return Bail; 2312 return std::make_pair(SwizzleSrc, SwizzleIndices); 2313 }; 2314 2315 // If the lane is extracted from another vector at a constant index, return 2316 // that vector. The source vector must not have more lanes than the dest 2317 // because the shufflevector indices are in terms of the destination lanes and 2318 // would not be able to address the smaller individual source lanes. 2319 auto GetShuffleSrc = [&](const SDValue &Lane) { 2320 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 2321 return SDValue(); 2322 if (!isa<ConstantSDNode>(Lane->getOperand(1).getNode())) 2323 return SDValue(); 2324 if (Lane->getOperand(0).getValueType().getVectorNumElements() > 2325 VecT.getVectorNumElements()) 2326 return SDValue(); 2327 return Lane->getOperand(0); 2328 }; 2329 2330 using ValueEntry = std::pair<SDValue, size_t>; 2331 SmallVector<ValueEntry, 16> SplatValueCounts; 2332 2333 using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>; 2334 SmallVector<SwizzleEntry, 16> SwizzleCounts; 2335 2336 using ShuffleEntry = std::pair<SDValue, size_t>; 2337 SmallVector<ShuffleEntry, 16> ShuffleCounts; 2338 2339 auto AddCount = [](auto &Counts, const auto &Val) { 2340 auto CountIt = 2341 llvm::find_if(Counts, [&Val](auto E) { return E.first == Val; }); 2342 if (CountIt == Counts.end()) { 2343 Counts.emplace_back(Val, 1); 2344 } else { 2345 CountIt->second++; 2346 } 2347 }; 2348 2349 auto GetMostCommon = [](auto &Counts) { 2350 auto CommonIt = 2351 std::max_element(Counts.begin(), Counts.end(), llvm::less_second()); 2352 assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector"); 2353 return *CommonIt; 2354 }; 2355 2356 size_t NumConstantLanes = 0; 2357 2358 // Count eligible lanes for each type of vector creation op 2359 for (size_t I = 0; I < Lanes; ++I) { 2360 const SDValue &Lane = Op->getOperand(I); 2361 if (Lane.isUndef()) 2362 continue; 2363 2364 AddCount(SplatValueCounts, Lane); 2365 2366 if (IsConstant(Lane)) 2367 NumConstantLanes++; 2368 if (auto ShuffleSrc = GetShuffleSrc(Lane)) 2369 AddCount(ShuffleCounts, ShuffleSrc); 2370 if (CanSwizzle) { 2371 auto SwizzleSrcs = GetSwizzleSrcs(I, Lane); 2372 if (SwizzleSrcs.first) 2373 AddCount(SwizzleCounts, SwizzleSrcs); 2374 } 2375 } 2376 2377 SDValue SplatValue; 2378 size_t NumSplatLanes; 2379 std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts); 2380 2381 SDValue SwizzleSrc; 2382 SDValue SwizzleIndices; 2383 size_t NumSwizzleLanes = 0; 2384 if (SwizzleCounts.size()) 2385 std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices), 2386 NumSwizzleLanes) = GetMostCommon(SwizzleCounts); 2387 2388 // Shuffles can draw from up 
to two vectors, so find the two most common 2389 // sources. 2390 SDValue ShuffleSrc1, ShuffleSrc2; 2391 size_t NumShuffleLanes = 0; 2392 if (ShuffleCounts.size()) { 2393 std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts); 2394 llvm::erase_if(ShuffleCounts, 2395 [&](const auto &Pair) { return Pair.first == ShuffleSrc1; }); 2396 } 2397 if (ShuffleCounts.size()) { 2398 size_t AdditionalShuffleLanes; 2399 std::tie(ShuffleSrc2, AdditionalShuffleLanes) = 2400 GetMostCommon(ShuffleCounts); 2401 NumShuffleLanes += AdditionalShuffleLanes; 2402 } 2403 2404 // Predicate returning true if the lane is properly initialized by the 2405 // original instruction 2406 std::function<bool(size_t, const SDValue &)> IsLaneConstructed; 2407 SDValue Result; 2408 // Prefer swizzles over shuffles over vector consts over splats 2409 if (NumSwizzleLanes >= NumShuffleLanes && 2410 NumSwizzleLanes >= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) { 2411 Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc, 2412 SwizzleIndices); 2413 auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices); 2414 IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) { 2415 return Swizzled == GetSwizzleSrcs(I, Lane); 2416 }; 2417 } else if (NumShuffleLanes >= NumConstantLanes && 2418 NumShuffleLanes >= NumSplatLanes) { 2419 size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits() / 8; 2420 size_t DestLaneCount = VecT.getVectorNumElements(); 2421 size_t Scale1 = 1; 2422 size_t Scale2 = 1; 2423 SDValue Src1 = ShuffleSrc1; 2424 SDValue Src2 = ShuffleSrc2 ? ShuffleSrc2 : DAG.getUNDEF(VecT); 2425 if (Src1.getValueType() != VecT) { 2426 size_t LaneSize = 2427 Src1.getValueType().getVectorElementType().getFixedSizeInBits() / 8; 2428 assert(LaneSize > DestLaneSize); 2429 Scale1 = LaneSize / DestLaneSize; 2430 Src1 = DAG.getBitcast(VecT, Src1); 2431 } 2432 if (Src2.getValueType() != VecT) { 2433 size_t LaneSize = 2434 Src2.getValueType().getVectorElementType().getFixedSizeInBits() / 8; 2435 assert(LaneSize > DestLaneSize); 2436 Scale2 = LaneSize / DestLaneSize; 2437 Src2 = DAG.getBitcast(VecT, Src2); 2438 } 2439 2440 int Mask[16]; 2441 assert(DestLaneCount <= 16); 2442 for (size_t I = 0; I < DestLaneCount; ++I) { 2443 const SDValue &Lane = Op->getOperand(I); 2444 SDValue Src = GetShuffleSrc(Lane); 2445 if (Src == ShuffleSrc1) { 2446 Mask[I] = Lane->getConstantOperandVal(1) * Scale1; 2447 } else if (Src && Src == ShuffleSrc2) { 2448 Mask[I] = DestLaneCount + Lane->getConstantOperandVal(1) * Scale2; 2449 } else { 2450 Mask[I] = -1; 2451 } 2452 } 2453 ArrayRef<int> MaskRef(Mask, DestLaneCount); 2454 Result = DAG.getVectorShuffle(VecT, DL, Src1, Src2, MaskRef); 2455 IsLaneConstructed = [&](size_t, const SDValue &Lane) { 2456 auto Src = GetShuffleSrc(Lane); 2457 return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2); 2458 }; 2459 } else if (NumConstantLanes >= NumSplatLanes) { 2460 SmallVector<SDValue, 16> ConstLanes; 2461 for (const SDValue &Lane : Op->op_values()) { 2462 if (IsConstant(Lane)) { 2463 // Values may need to be fixed so that they will sign extend to be 2464 // within the expected range during ISel. Check whether the value is in 2465 // bounds based on the lane bit width and if it is out of bounds, lop 2466 // off the extra bits and subtract 2^n to reflect giving the high bit 2467 // value -2^(n-1) rather than +2^(n-1). Skip the i64 case because it 2468 // cannot possibly be out of range. 
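// For example (illustrative), with 16 i8 lanes (LaneBits == 8) the legal
// signed range is [-128, 127]; a lane constant of 240 is out of bounds and
// is remapped to the equivalent value modulo 2^8 (the bit pattern of -16),
// so that it survives the sign-extending ISel patterns unchanged.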
2469 auto *Const = dyn_cast<ConstantSDNode>(Lane.getNode()); 2470 int64_t Val = Const ? Const->getSExtValue() : 0; 2471 uint64_t LaneBits = 128 / Lanes; 2472 assert((LaneBits == 64 || Val >= -(1ll << (LaneBits - 1))) && 2473 "Unexpected out of bounds negative value"); 2474 if (Const && LaneBits != 64 && Val > (1ll << (LaneBits - 1)) - 1) { 2475 uint64_t Mask = (1ll << LaneBits) - 1; 2476 auto NewVal = (((uint64_t)Val & Mask) - (1ll << LaneBits)) & Mask; 2477 ConstLanes.push_back(DAG.getConstant(NewVal, SDLoc(Lane), LaneT)); 2478 } else { 2479 ConstLanes.push_back(Lane); 2480 } 2481 } else if (LaneT.isFloatingPoint()) { 2482 ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT)); 2483 } else { 2484 ConstLanes.push_back(DAG.getConstant(0, DL, LaneT)); 2485 } 2486 } 2487 Result = DAG.getBuildVector(VecT, DL, ConstLanes); 2488 IsLaneConstructed = [&IsConstant](size_t _, const SDValue &Lane) { 2489 return IsConstant(Lane); 2490 }; 2491 } else { 2492 size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits(); 2493 if (NumSplatLanes == 1 && Op->getOperand(0) == SplatValue && 2494 (DestLaneSize == 32 || DestLaneSize == 64)) { 2495 // Could be selected to load_zero. 2496 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecT, SplatValue); 2497 } else { 2498 // Use a splat (which might be selected as a load splat) 2499 Result = DAG.getSplatBuildVector(VecT, DL, SplatValue); 2500 } 2501 IsLaneConstructed = [&SplatValue](size_t _, const SDValue &Lane) { 2502 return Lane == SplatValue; 2503 }; 2504 } 2505 2506 assert(Result); 2507 assert(IsLaneConstructed); 2508 2509 // Add replace_lane instructions for any unhandled values 2510 for (size_t I = 0; I < Lanes; ++I) { 2511 const SDValue &Lane = Op->getOperand(I); 2512 if (!Lane.isUndef() && !IsLaneConstructed(I, Lane)) 2513 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane, 2514 DAG.getConstant(I, DL, MVT::i32)); 2515 } 2516 2517 return Result; 2518 } 2519 2520 SDValue 2521 WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 2522 SelectionDAG &DAG) const { 2523 SDLoc DL(Op); 2524 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask(); 2525 MVT VecType = Op.getOperand(0).getSimpleValueType(); 2526 assert(VecType.is128BitVector() && "Unexpected shuffle vector type"); 2527 size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8; 2528 2529 // Space for two vector args and sixteen mask indices 2530 SDValue Ops[18]; 2531 size_t OpIdx = 0; 2532 Ops[OpIdx++] = Op.getOperand(0); 2533 Ops[OpIdx++] = Op.getOperand(1); 2534 2535 // Expand mask indices to byte indices and materialize them as operands 2536 for (int M : Mask) { 2537 for (size_t J = 0; J < LaneBytes; ++J) { 2538 // Lower undefs (represented by -1 in mask) to {0..J}, which use a 2539 // whole lane of vector input, to allow further reduction at VM. E.g. 2540 // match an 8x16 byte shuffle to an equivalent cheaper 32x4 shuffle. 2541 uint64_t ByteIndex = M == -1 ? J : (uint64_t)M * LaneBytes + J; 2542 Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32); 2543 } 2544 } 2545 2546 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops); 2547 } 2548 2549 SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op, 2550 SelectionDAG &DAG) const { 2551 SDLoc DL(Op); 2552 // The legalizer does not know how to expand the unsupported comparison modes 2553 // of i64x2 vectors, so we manually unroll them here. 
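// Illustrative sketch (not from the original source): (v2i64 (setcc $a, $b,
// setult)) becomes two scalar i64 select_cc nodes, each yielding all-ones or
// zero, recombined with a build_vector into the v2i64 mask result.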
2554 assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64); 2555 SmallVector<SDValue, 2> LHS, RHS; 2556 DAG.ExtractVectorElements(Op->getOperand(0), LHS); 2557 DAG.ExtractVectorElements(Op->getOperand(1), RHS); 2558 const SDValue &CC = Op->getOperand(2); 2559 auto MakeLane = [&](unsigned I) { 2560 return DAG.getNode(ISD::SELECT_CC, DL, MVT::i64, LHS[I], RHS[I], 2561 DAG.getConstant(uint64_t(-1), DL, MVT::i64), 2562 DAG.getConstant(uint64_t(0), DL, MVT::i64), CC); 2563 }; 2564 return DAG.getBuildVector(Op->getValueType(0), DL, 2565 {MakeLane(0), MakeLane(1)}); 2566 } 2567 2568 SDValue 2569 WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op, 2570 SelectionDAG &DAG) const { 2571 // Allow constant lane indices, expand variable lane indices 2572 SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode(); 2573 if (isa<ConstantSDNode>(IdxNode)) { 2574 // Ensure the index type is i32 to match the tablegen patterns 2575 uint64_t Idx = IdxNode->getAsZExtVal(); 2576 SmallVector<SDValue, 3> Ops(Op.getNode()->ops()); 2577 Ops[Op.getNumOperands() - 1] = 2578 DAG.getConstant(Idx, SDLoc(IdxNode), MVT::i32); 2579 return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), Ops); 2580 } 2581 // Perform default expansion 2582 return SDValue(); 2583 } 2584 2585 static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) { 2586 EVT LaneT = Op.getSimpleValueType().getVectorElementType(); 2587 // 32-bit and 64-bit unrolled shifts will have proper semantics 2588 if (LaneT.bitsGE(MVT::i32)) 2589 return DAG.UnrollVectorOp(Op.getNode()); 2590 // Otherwise mask the shift value to get proper semantics from 32-bit shift 2591 SDLoc DL(Op); 2592 size_t NumLanes = Op.getSimpleValueType().getVectorNumElements(); 2593 SDValue Mask = DAG.getConstant(LaneT.getSizeInBits() - 1, DL, MVT::i32); 2594 unsigned ShiftOpcode = Op.getOpcode(); 2595 SmallVector<SDValue, 16> ShiftedElements; 2596 DAG.ExtractVectorElements(Op.getOperand(0), ShiftedElements, 0, 0, MVT::i32); 2597 SmallVector<SDValue, 16> ShiftElements; 2598 DAG.ExtractVectorElements(Op.getOperand(1), ShiftElements, 0, 0, MVT::i32); 2599 SmallVector<SDValue, 16> UnrolledOps; 2600 for (size_t i = 0; i < NumLanes; ++i) { 2601 SDValue MaskedShiftValue = 2602 DAG.getNode(ISD::AND, DL, MVT::i32, ShiftElements[i], Mask); 2603 SDValue ShiftedValue = ShiftedElements[i]; 2604 if (ShiftOpcode == ISD::SRA) 2605 ShiftedValue = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, 2606 ShiftedValue, DAG.getValueType(LaneT)); 2607 UnrolledOps.push_back( 2608 DAG.getNode(ShiftOpcode, DL, MVT::i32, ShiftedValue, MaskedShiftValue)); 2609 } 2610 return DAG.getBuildVector(Op.getValueType(), DL, UnrolledOps); 2611 } 2612 2613 SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op, 2614 SelectionDAG &DAG) const { 2615 SDLoc DL(Op); 2616 2617 // Only manually lower vector shifts 2618 assert(Op.getSimpleValueType().isVector()); 2619 2620 uint64_t LaneBits = Op.getValueType().getScalarSizeInBits(); 2621 auto ShiftVal = Op.getOperand(1); 2622 2623 // Try to skip bitmask operation since it is implied inside shift instruction 2624 auto SkipImpliedMask = [](SDValue MaskOp, uint64_t MaskBits) { 2625 if (MaskOp.getOpcode() != ISD::AND) 2626 return MaskOp; 2627 SDValue LHS = MaskOp.getOperand(0); 2628 SDValue RHS = MaskOp.getOperand(1); 2629 if (MaskOp.getValueType().isVector()) { 2630 APInt MaskVal; 2631 if (!ISD::isConstantSplatVector(RHS.getNode(), MaskVal)) 2632 std::swap(LHS, RHS); 2633 2634 if (ISD::isConstantSplatVector(RHS.getNode(), MaskVal) && 2635 MaskVal == 
MaskBits) 2636 MaskOp = LHS; 2637 } else { 2638 if (!isa<ConstantSDNode>(RHS.getNode())) 2639 std::swap(LHS, RHS); 2640 2641 auto ConstantRHS = dyn_cast<ConstantSDNode>(RHS.getNode()); 2642 if (ConstantRHS && ConstantRHS->getAPIntValue() == MaskBits) 2643 MaskOp = LHS; 2644 } 2645 2646 return MaskOp; 2647 }; 2648 2649 // Skip vector and operation 2650 ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1); 2651 ShiftVal = DAG.getSplatValue(ShiftVal); 2652 if (!ShiftVal) 2653 return unrollVectorShift(Op, DAG); 2654 2655 // Skip scalar and operation 2656 ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1); 2657 // Use anyext because none of the high bits can affect the shift 2658 ShiftVal = DAG.getAnyExtOrTrunc(ShiftVal, DL, MVT::i32); 2659 2660 unsigned Opcode; 2661 switch (Op.getOpcode()) { 2662 case ISD::SHL: 2663 Opcode = WebAssemblyISD::VEC_SHL; 2664 break; 2665 case ISD::SRA: 2666 Opcode = WebAssemblyISD::VEC_SHR_S; 2667 break; 2668 case ISD::SRL: 2669 Opcode = WebAssemblyISD::VEC_SHR_U; 2670 break; 2671 default: 2672 llvm_unreachable("unexpected opcode"); 2673 } 2674 2675 return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal); 2676 } 2677 2678 SDValue WebAssemblyTargetLowering::LowerFP_TO_INT_SAT(SDValue Op, 2679 SelectionDAG &DAG) const { 2680 SDLoc DL(Op); 2681 EVT ResT = Op.getValueType(); 2682 EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2683 2684 if ((ResT == MVT::i32 || ResT == MVT::i64) && 2685 (SatVT == MVT::i32 || SatVT == MVT::i64)) 2686 return Op; 2687 2688 if (ResT == MVT::v4i32 && SatVT == MVT::i32) 2689 return Op; 2690 2691 if (ResT == MVT::v8i16 && SatVT == MVT::i16) 2692 return Op; 2693 2694 return SDValue(); 2695 } 2696 2697 //===----------------------------------------------------------------------===// 2698 // Custom DAG combine hooks 2699 //===----------------------------------------------------------------------===// 2700 static SDValue 2701 performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 2702 auto &DAG = DCI.DAG; 2703 auto Shuffle = cast<ShuffleVectorSDNode>(N); 2704 2705 // Hoist vector bitcasts that don't change the number of lanes out of unary 2706 // shuffles, where they are less likely to get in the way of other combines. 2707 // (shuffle (vNxT1 (bitcast (vNxT0 x))), undef, mask) -> 2708 // (vNxT1 (bitcast (vNxT0 (shuffle x, undef, mask)))) 2709 SDValue Bitcast = N->getOperand(0); 2710 if (Bitcast.getOpcode() != ISD::BITCAST) 2711 return SDValue(); 2712 if (!N->getOperand(1).isUndef()) 2713 return SDValue(); 2714 SDValue CastOp = Bitcast.getOperand(0); 2715 EVT SrcType = CastOp.getValueType(); 2716 EVT DstType = Bitcast.getValueType(); 2717 if (!SrcType.is128BitVector() || 2718 SrcType.getVectorNumElements() != DstType.getVectorNumElements()) 2719 return SDValue(); 2720 SDValue NewShuffle = DAG.getVectorShuffle( 2721 SrcType, SDLoc(N), CastOp, DAG.getUNDEF(SrcType), Shuffle->getMask()); 2722 return DAG.getBitcast(DstType, NewShuffle); 2723 } 2724 2725 /// Convert ({u,s}itofp vec) --> ({u,s}itofp ({s,z}ext vec)) so it doesn't get 2726 /// split up into scalar instructions during legalization, and the vector 2727 /// extending instructions are selected in performVectorExtendCombine below. 
2728 static SDValue 2729 performVectorExtendToFPCombine(SDNode *N, 2730 TargetLowering::DAGCombinerInfo &DCI) { 2731 auto &DAG = DCI.DAG; 2732 assert(N->getOpcode() == ISD::UINT_TO_FP || 2733 N->getOpcode() == ISD::SINT_TO_FP); 2734 2735 EVT InVT = N->getOperand(0)->getValueType(0); 2736 EVT ResVT = N->getValueType(0); 2737 MVT ExtVT; 2738 if (ResVT == MVT::v4f32 && (InVT == MVT::v4i16 || InVT == MVT::v4i8)) 2739 ExtVT = MVT::v4i32; 2740 else if (ResVT == MVT::v2f64 && (InVT == MVT::v2i16 || InVT == MVT::v2i8)) 2741 ExtVT = MVT::v2i32; 2742 else 2743 return SDValue(); 2744 2745 unsigned Op = 2746 N->getOpcode() == ISD::UINT_TO_FP ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND; 2747 SDValue Conv = DAG.getNode(Op, SDLoc(N), ExtVT, N->getOperand(0)); 2748 return DAG.getNode(N->getOpcode(), SDLoc(N), ResVT, Conv); 2749 } 2750 2751 static SDValue 2752 performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 2753 auto &DAG = DCI.DAG; 2754 assert(N->getOpcode() == ISD::SIGN_EXTEND || 2755 N->getOpcode() == ISD::ZERO_EXTEND); 2756 2757 // Combine ({s,z}ext (extract_subvector src, i)) into a widening operation if 2758 // possible before the extract_subvector can be expanded. 2759 auto Extract = N->getOperand(0); 2760 if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR) 2761 return SDValue(); 2762 auto Source = Extract.getOperand(0); 2763 auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1)); 2764 if (IndexNode == nullptr) 2765 return SDValue(); 2766 auto Index = IndexNode->getZExtValue(); 2767 2768 // Only v8i8, v4i16, and v2i32 extracts can be widened, and only if the 2769 // extracted subvector is the low or high half of its source. 2770 EVT ResVT = N->getValueType(0); 2771 if (ResVT == MVT::v8i16) { 2772 if (Extract.getValueType() != MVT::v8i8 || 2773 Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8)) 2774 return SDValue(); 2775 } else if (ResVT == MVT::v4i32) { 2776 if (Extract.getValueType() != MVT::v4i16 || 2777 Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4)) 2778 return SDValue(); 2779 } else if (ResVT == MVT::v2i64) { 2780 if (Extract.getValueType() != MVT::v2i32 || 2781 Source.getValueType() != MVT::v4i32 || (Index != 0 && Index != 2)) 2782 return SDValue(); 2783 } else { 2784 return SDValue(); 2785 } 2786 2787 bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND; 2788 bool IsLow = Index == 0; 2789 2790 unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S 2791 : WebAssemblyISD::EXTEND_HIGH_S) 2792 : (IsLow ? WebAssemblyISD::EXTEND_LOW_U 2793 : WebAssemblyISD::EXTEND_HIGH_U); 2794 2795 return DAG.getNode(Op, SDLoc(N), ResVT, Source); 2796 } 2797 2798 static SDValue 2799 performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 2800 auto &DAG = DCI.DAG; 2801 2802 auto GetWasmConversionOp = [](unsigned Op) { 2803 switch (Op) { 2804 case ISD::FP_TO_SINT_SAT: 2805 return WebAssemblyISD::TRUNC_SAT_ZERO_S; 2806 case ISD::FP_TO_UINT_SAT: 2807 return WebAssemblyISD::TRUNC_SAT_ZERO_U; 2808 case ISD::FP_ROUND: 2809 return WebAssemblyISD::DEMOTE_ZERO; 2810 } 2811 llvm_unreachable("unexpected op"); 2812 }; 2813 2814 auto IsZeroSplat = [](SDValue SplatVal) { 2815 auto *Splat = dyn_cast<BuildVectorSDNode>(SplatVal.getNode()); 2816 APInt SplatValue, SplatUndef; 2817 unsigned SplatBitSize; 2818 bool HasAnyUndefs; 2819 // Endianness doesn't matter in this context because we are looking for 2820 // an all-zero value. 
2821 return Splat && 2822 Splat->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, 2823 HasAnyUndefs) && 2824 SplatValue == 0; 2825 }; 2826 2827 if (N->getOpcode() == ISD::CONCAT_VECTORS) { 2828 // Combine this: 2829 // 2830 // (concat_vectors (v2i32 (fp_to_{s,u}int_sat $x, 32)), (v2i32 (splat 0))) 2831 // 2832 // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x). 2833 // 2834 // Or this: 2835 // 2836 // (concat_vectors (v2f32 (fp_round (v2f64 $x))), (v2f32 (splat 0))) 2837 // 2838 // into (f32x4.demote_zero_f64x2 $x). 2839 EVT ResVT; 2840 EVT ExpectedConversionType; 2841 auto Conversion = N->getOperand(0); 2842 auto ConversionOp = Conversion.getOpcode(); 2843 switch (ConversionOp) { 2844 case ISD::FP_TO_SINT_SAT: 2845 case ISD::FP_TO_UINT_SAT: 2846 ResVT = MVT::v4i32; 2847 ExpectedConversionType = MVT::v2i32; 2848 break; 2849 case ISD::FP_ROUND: 2850 ResVT = MVT::v4f32; 2851 ExpectedConversionType = MVT::v2f32; 2852 break; 2853 default: 2854 return SDValue(); 2855 } 2856 2857 if (N->getValueType(0) != ResVT) 2858 return SDValue(); 2859 2860 if (Conversion.getValueType() != ExpectedConversionType) 2861 return SDValue(); 2862 2863 auto Source = Conversion.getOperand(0); 2864 if (Source.getValueType() != MVT::v2f64) 2865 return SDValue(); 2866 2867 if (!IsZeroSplat(N->getOperand(1)) || 2868 N->getOperand(1).getValueType() != ExpectedConversionType) 2869 return SDValue(); 2870 2871 unsigned Op = GetWasmConversionOp(ConversionOp); 2872 return DAG.getNode(Op, SDLoc(N), ResVT, Source); 2873 } 2874 2875 // Combine this: 2876 // 2877 // (fp_to_{s,u}int_sat (concat_vectors $x, (v2f64 (splat 0))), 32) 2878 // 2879 // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x). 2880 // 2881 // Or this: 2882 // 2883 // (v4f32 (fp_round (concat_vectors $x, (v2f64 (splat 0))))) 2884 // 2885 // into (f32x4.demote_zero_f64x2 $x). 2886 EVT ResVT; 2887 auto ConversionOp = N->getOpcode(); 2888 switch (ConversionOp) { 2889 case ISD::FP_TO_SINT_SAT: 2890 case ISD::FP_TO_UINT_SAT: 2891 ResVT = MVT::v4i32; 2892 break; 2893 case ISD::FP_ROUND: 2894 ResVT = MVT::v4f32; 2895 break; 2896 default: 2897 llvm_unreachable("unexpected op"); 2898 } 2899 2900 if (N->getValueType(0) != ResVT) 2901 return SDValue(); 2902 2903 auto Concat = N->getOperand(0); 2904 if (Concat.getValueType() != MVT::v4f64) 2905 return SDValue(); 2906 2907 auto Source = Concat.getOperand(0); 2908 if (Source.getValueType() != MVT::v2f64) 2909 return SDValue(); 2910 2911 if (!IsZeroSplat(Concat.getOperand(1)) || 2912 Concat.getOperand(1).getValueType() != MVT::v2f64) 2913 return SDValue(); 2914 2915 unsigned Op = GetWasmConversionOp(ConversionOp); 2916 return DAG.getNode(Op, SDLoc(N), ResVT, Source); 2917 } 2918 2919 // Helper to extract VectorWidth bits from Vec, starting from IdxVal. 2920 static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG, 2921 const SDLoc &DL, unsigned VectorWidth) { 2922 EVT VT = Vec.getValueType(); 2923 EVT ElVT = VT.getVectorElementType(); 2924 unsigned Factor = VT.getSizeInBits() / VectorWidth; 2925 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT, 2926 VT.getVectorNumElements() / Factor); 2927 2928 // Extract the relevant VectorWidth bits. Generate an EXTRACT_SUBVECTOR 2929 unsigned ElemsPerChunk = VectorWidth / ElVT.getSizeInBits(); 2930 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2"); 2931 2932 // This is the index of the first element of the VectorWidth-bit chunk 2933 // we want. Since ElemsPerChunk is a power of 2 just need to clear bits. 
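// For example (illustrative), extracting 128 bits from a v8i32 starting at
// element 4 gives ElemsPerChunk == 4, so IdxVal stays aligned at 4 and the
// result is the upper v4i32 half of the input.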
2934 IdxVal &= ~(ElemsPerChunk - 1); 2935 2936 // If the input is a buildvector just emit a smaller one. 2937 if (Vec.getOpcode() == ISD::BUILD_VECTOR) 2938 return DAG.getBuildVector(ResultVT, DL, 2939 Vec->ops().slice(IdxVal, ElemsPerChunk)); 2940 2941 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, DL); 2942 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResultVT, Vec, VecIdx); 2943 } 2944 2945 // Helper to recursively truncate vector elements in half with NARROW_U. DstVT 2946 // is the expected destination value type after recursion. In is the initial 2947 // input. Note that the input should have enough leading zero bits to prevent 2948 // NARROW_U from saturating results. 2949 static SDValue truncateVectorWithNARROW(EVT DstVT, SDValue In, const SDLoc &DL, 2950 SelectionDAG &DAG) { 2951 EVT SrcVT = In.getValueType(); 2952 2953 // No truncation required, we might get here due to recursive calls. 2954 if (SrcVT == DstVT) 2955 return In; 2956 2957 unsigned SrcSizeInBits = SrcVT.getSizeInBits(); 2958 unsigned NumElems = SrcVT.getVectorNumElements(); 2959 if (!isPowerOf2_32(NumElems)) 2960 return SDValue(); 2961 assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation"); 2962 assert(SrcSizeInBits > DstVT.getSizeInBits() && "Illegal truncation"); 2963 2964 LLVMContext &Ctx = *DAG.getContext(); 2965 EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2); 2966 2967 // Narrow to the largest type possible: 2968 // vXi64/vXi32 -> i16x8.narrow_i32x4_u and vXi16 -> i8x16.narrow_i16x8_u. 2969 EVT InVT = MVT::i16, OutVT = MVT::i8; 2970 if (SrcVT.getScalarSizeInBits() > 16) { 2971 InVT = MVT::i32; 2972 OutVT = MVT::i16; 2973 } 2974 unsigned SubSizeInBits = SrcSizeInBits / 2; 2975 InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits()); 2976 OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits()); 2977 2978 // Split lower/upper subvectors. 2979 SDValue Lo = extractSubVector(In, 0, DAG, DL, SubSizeInBits); 2980 SDValue Hi = extractSubVector(In, NumElems / 2, DAG, DL, SubSizeInBits); 2981 2982 // 256bit -> 128bit truncate - Narrow lower/upper 128-bit subvectors. 2983 if (SrcVT.is256BitVector() && DstVT.is128BitVector()) { 2984 Lo = DAG.getBitcast(InVT, Lo); 2985 Hi = DAG.getBitcast(InVT, Hi); 2986 SDValue Res = DAG.getNode(WebAssemblyISD::NARROW_U, DL, OutVT, Lo, Hi); 2987 return DAG.getBitcast(DstVT, Res); 2988 } 2989 2990 // Recursively narrow lower/upper subvectors, concat result and narrow again. 2991 EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems / 2); 2992 Lo = truncateVectorWithNARROW(PackedVT, Lo, DL, DAG); 2993 Hi = truncateVectorWithNARROW(PackedVT, Hi, DL, DAG); 2994 2995 PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems); 2996 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi); 2997 return truncateVectorWithNARROW(DstVT, Res, DL, DAG); 2998 } 2999 3000 static SDValue performTruncateCombine(SDNode *N, 3001 TargetLowering::DAGCombinerInfo &DCI) { 3002 auto &DAG = DCI.DAG; 3003 3004 SDValue In = N->getOperand(0); 3005 EVT InVT = In.getValueType(); 3006 if (!InVT.isSimple()) 3007 return SDValue(); 3008 3009 EVT OutVT = N->getValueType(0); 3010 if (!OutVT.isVector()) 3011 return SDValue(); 3012 3013 EVT OutSVT = OutVT.getVectorElementType(); 3014 EVT InSVT = InVT.getVectorElementType(); 3015 // Currently only cover truncate to v16i8 or v8i16. 
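// Illustrative example (not from the original source): truncating v8i32 to
// v8i16 first masks each i32 lane to its low 16 bits, then narrows the two
// 128-bit halves with i16x8.narrow_i32x4_u; the masking guarantees NARROW_U
// never saturates.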
  if (!((InSVT == MVT::i16 || InSVT == MVT::i32 || InSVT == MVT::i64) &&
        (OutSVT == MVT::i8 || OutSVT == MVT::i16) && OutVT.is128BitVector()))
    return SDValue();

  SDLoc DL(N);
  APInt Mask = APInt::getLowBitsSet(InVT.getScalarSizeInBits(),
                                    OutVT.getScalarSizeInBits());
  In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(Mask, DL, InVT));
  return truncateVectorWithNARROW(OutVT, In, DL, DAG);
}

static SDValue performBitcastCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;
  SDLoc DL(N);
  SDValue Src = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT SrcVT = Src.getValueType();

  // bitcast <N x i1> to iN
  // ==> bitmask
  if (DCI.isBeforeLegalize() && VT.isScalarInteger() &&
      SrcVT.isFixedLengthVector() && SrcVT.getScalarType() == MVT::i1) {
    unsigned NumElts = SrcVT.getVectorNumElements();
    if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
      return SDValue();
    EVT Width = MVT::getIntegerVT(128 / NumElts);
    return DAG.getZExtOrTrunc(
        DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
                    {DAG.getConstant(Intrinsic::wasm_bitmask, DL, MVT::i32),
                     DAG.getSExtOrTrunc(N->getOperand(0), DL,
                                        SrcVT.changeVectorElementType(Width))}),
        DL, VT);
  }

  return SDValue();
}
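
// For example (an illustrative walk-through, not additional logic): for
// (i4 (bitcast (v4i1 $x))), NumElts == 4 gives Width == i32, so $x is
// sign-extended to v4i32 (true lanes become all ones), i32x4.bitmask packs
// the lane sign bits into an i32, and the result is truncated back to i4.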

static SDValue performSETCCCombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  // setcc (iN (bitcast (vNi1 X))), 0, ne
  // ==> any_true (vNi1 X)
  // setcc (iN (bitcast (vNi1 X))), 0, eq
  // ==> xor (any_true (vNi1 X)), -1
  // setcc (iN (bitcast (vNi1 X))), -1, eq
  // ==> all_true (vNi1 X)
  // setcc (iN (bitcast (vNi1 X))), -1, ne
  // ==> xor (all_true (vNi1 X)), -1
  if (DCI.isBeforeLegalize() && VT.isScalarInteger() &&
      (Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
      (isNullConstant(RHS) || isAllOnesConstant(RHS)) &&
      LHS->getOpcode() == ISD::BITCAST) {
    EVT FromVT = LHS->getOperand(0).getValueType();
    if (FromVT.isFixedLengthVector() &&
        FromVT.getVectorElementType() == MVT::i1) {
      int Intrin = isNullConstant(RHS) ? Intrinsic::wasm_anytrue
                                       : Intrinsic::wasm_alltrue;
      unsigned NumElts = FromVT.getVectorNumElements();
      if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
        return SDValue();
      EVT Width = MVT::getIntegerVT(128 / NumElts);
      SDValue Ret = DAG.getZExtOrTrunc(
          DAG.getNode(
              ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
              {DAG.getConstant(Intrin, DL, MVT::i32),
               DAG.getSExtOrTrunc(LHS->getOperand(0), DL,
                                  FromVT.changeVectorElementType(Width))}),
          DL, MVT::i1);
      if ((isNullConstant(RHS) && (Cond == ISD::SETEQ)) ||
          (isAllOnesConstant(RHS) && (Cond == ISD::SETNE))) {
        Ret = DAG.getNOT(DL, Ret, MVT::i1);
      }
      return DAG.getZExtOrTrunc(Ret, DL, VT);
    }
  }

  return SDValue();
}

SDValue
WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default:
    return SDValue();
  case ISD::BITCAST:
    return performBitcastCombine(N, DCI);
  case ISD::SETCC:
    return performSETCCCombine(N, DCI);
  case ISD::VECTOR_SHUFFLE:
    return performVECTOR_SHUFFLECombine(N, DCI);
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
    return performVectorExtendCombine(N, DCI);
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:
    return performVectorExtendToFPCombine(N, DCI);
  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
  case ISD::FP_ROUND:
  case ISD::CONCAT_VECTORS:
    return performVectorTruncZeroCombine(N, DCI);
  case ISD::TRUNCATE:
    return performTruncateCombine(N, DCI);
  }
}