//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in select().
  // It is basically a large switch/case delegating to all the other
  // select* methods.
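  // Returns false when the opcode is not handled here, which makes select()
  // report a selection failure.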
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *IntTy, const SPIRVType *BoolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool
  selectExtInst(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                CL::OpenCLExtInst CLInst, GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  const unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed anymore.
      auto *Def = MRI->getVRegDef(I.getOperand(1).getReg());
      if (isTypeFoldingSupported(Def->getOpcode())) {
        auto Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->replaceRegWith(I.getOperand(1).getReg(), I.getOperand(0).getReg());
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting the return reg+type and for removing the selected
  // instruction from its parent occurs here. Instruction-specific selection
  // happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(ResVReg, LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  assert(!isTypeFoldingSupported(I.getOpcode()) ||
         I.getOpcode() == TargetOpcode::G_CONSTANT);
  const unsigned Opcode = I.getOpcode();
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);
  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectExtInst(ResVReg, ResType, I, CL::log10);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::FMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::FMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating
    // global variables, initialized with constant expressions like GV + Const
    // (see test opencl/basic/progvar_prog_scope_init.ll).
    // TODO: extend the handler once we have other cases.
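    // The addition is kept as a constant expression: it is emitted below as
    //   %res = OpSpecConstantOp %ptrTy InBoundsPtrAccessChain %gv %zero %off
    // so the GV + Const form survives into the SPIR-V binary.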
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  default:
    return false;
  }
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      for (unsigned i = 1; i < NumOps; ++i)
        MIB.add(I.getOperand(i));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
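  // None of the requested extended instruction sets is available on this
  // subtarget, so selection fails.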
  return false;
}

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

static SPIRV::Scope::Scope getScope(SyncScope::ID Ord) {
  switch (Ord) {
  case SyncScope::SingleThread:
    return SPIRV::Scope::Invocation;
  case SyncScope::System:
    return SPIRV::Scope::Device;
  default:
    llvm_unreachable("Unsupported synchronization Scope ID.");
  }
}

static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}

bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset =
      I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset =
      I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ?
          1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConsIntArray(Val, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we already have such a GV, add an initializer, and use
    // buildGlobalVariable.
    Type *LLVMArrTy = ArrayType::get(
        IntegerType::get(GR.CurMF->getFunction().getContext(), 8), Num);
    GlobalVariable *GV =
        new GlobalVariable(LLVMArrTy, true, GlobalValue::InternalLinkage);
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}

bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator.
  // See test/atomicrmw.ll.
  // auto ScSem =
  //     getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Ptr)
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (I.getOpcode() != TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ?
            MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

// In SPIR-V, address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast user is single and is in OpConstantComposite or
  // OpVariable, we should select OpSpecConstantOp.
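  // (This typically arises from an addrspacecast constant expression feeding
  // a global initializer.)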
  auto UIs = MRI->use_instructions(ResVReg);
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
            .addUse(NewReg)
            .constrainAllUses(TII, TRI, RBI);
    return Result;
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between two eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        SrcPtrTy, I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }
  // TODO: Should this case just be disallowed completely?
  // We're casting between two other arbitrary address spaces, so we have to
  // bitcast.
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only the const case is supported for now.
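  // Verify that every non-def operand traces back to a G_CONSTANT or
  // G_FCONSTANT through its ASSIGN_TYPE wrapper.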
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Cmp0)
      .addUse(Cmp1)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();
  unsigned CmpOpc;

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred);
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred);
  else
    CmpOpc = getICmpOpcode(Pred);
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}

void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
                                            const MachineInstr &I,
                                            int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
}

void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
                                           const MachineInstr &I,
                                           int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}

Register
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
                                           const SPIRVType *ResType) const {
  Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
  const SPIRVType *SpvI32Ty =
      ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // Find a constant in DT or build a new one.
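  // The global registry deduplicates constants, so repeated requests for the
  // same 32-bit value reuse a single OpConstant.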
  auto ConstInt = ConstantInt::get(LLVMTy, Val);
  Register NewReg = GR.find(ConstInt, GR.CurMF);
  if (!NewReg.isValid()) {
    NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(ConstInt, GR.CurMF, NewReg);
    MachineInstr *MI;
    MachineBasicBlock &BB = *I.getParent();
    if (Val == 0) {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty));
    } else {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty))
               .addImm(APInt(32, Val).getZExtValue());
    }
    constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
  }
  return NewReg;
}

bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
  return selectCmp(ResVReg, ResType, CmpOp, I);
}

Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConsIntVector(0, I, ResType, TII);
  return GR.getOrCreateConstInt(0, I, ResType, TII);
}

Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
  APInt One =
      AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConsIntVector(One.getZExtValue(), I, ResType, TII);
  return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
}

bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I,
                                            bool IsSigned) const {
  // To extend a bool, we need to use OpSelect between constants.
  Register ZeroReg = buildZerosVal(ResType, I);
  Register OneReg = buildOnesVal(IsSigned, ResType, I);
  bool IsScalarBool =
      GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
  unsigned Opcode =
      IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .addUse(OneReg)
      .addUse(ZeroReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I, bool IsSigned,
                                          unsigned Opcode) const {
  Register SrcReg = I.getOperand(1).getReg();
  // We can convert a bool value directly to a float type without
  // OpConvert*ToF; however, the translator generates OpSelect+OpConvert*ToF,
  // so we do the same.
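  // E.g. for a scalar bool %b, (float)%b is emitted as:
  //   %i = OpSelect %intTy %b %one %zero
  //   %f = OpConvert[S|U]ToF %floatTy %i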
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
    unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
    SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
    if (ResType->getOpcode() == SPIRV::OpTypeVector) {
      const unsigned NumElts = ResType->getOperand(2).getImm();
      TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
    }
    SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    selectSelect(SrcReg, TmpType, I, false);
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
}

bool SPIRVInstructionSelector::selectExt(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I, bool IsSigned) const {
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool))
    return selectSelect(ResVReg, ResType, I, IsSigned);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}

bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
                                               Register ResVReg,
                                               MachineInstr &I,
                                               const SPIRVType *IntTy,
                                               const SPIRVType *BoolTy) const {
  // To truncate to a bool, we AND with 1 (OpBitwiseAnd) and then compare the
  // result against zero (OpINotEqual).
  Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
  unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
  Register Zero = buildZerosVal(IntTy, I);
  Register One = buildOnesVal(false, IntTy, I);
  MachineBasicBlock &BB = *I.getParent();
  BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(BitIntReg)
      .addUse(GR.getSPIRVTypeID(IntTy))
      .addUse(IntReg)
      .addUse(One)
      .constrainAllUses(TII, TRI, RBI);
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(BoolTy))
      .addUse(BitIntReg)
      .addUse(Zero)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) {
    Register IntReg = I.getOperand(1).getReg();
    const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
    return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
  }
  bool IsSigned = GR.isScalarOrVectorSigned(ResType);
  unsigned Opcode = IsSigned ?
      SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}

bool SPIRVInstructionSelector::selectConst(Register ResVReg,
                                           const SPIRVType *ResType,
                                           const APInt &Imm,
                                           MachineInstr &I) const {
  unsigned TyOpcode = ResType->getOpcode();
  assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
  MachineBasicBlock &BB = *I.getParent();
  if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
      Imm.isZero())
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .constrainAllUses(TII, TRI, RBI);
  if (TyOpcode == SPIRV::OpTypeInt) {
    assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
    Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
    if (Reg == ResVReg)
      return true;
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  // <=32-bit integers should be caught by the sdag pattern.
  assert(Imm.getBitWidth() > 32);
  addNumImm(Imm, MIB);
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  assert(MO.isReg());
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  if (TypeInst->getOpcode() != SPIRV::ASSIGN_TYPE)
    return false;
  assert(TypeInst->getOperand(1).isReg());
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
}

static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
  return ImmInst->getOperand(1).getCImm()->getZExtValue();
}

bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // Object to insert.
                 .addUse(I.getOperand(3).getReg())
                 // Composite to insert into.
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 4; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 3; i <
                          I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (isImm(I.getOperand(4), MRI))
    return selectInsertVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  if (isImm(I.getOperand(3), MRI))
    return selectExtractVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  // In general we should also support OpAccessChain instructions here (i.e.
  // not PtrAccessChain), but the SPIRV-LLVM Translator doesn't emit them at
  // all, so neither do we, to stay compliant with its tests and, more
  // importantly, with its consumers.
  unsigned Opcode = I.getOperand(2).getImm() ? SPIRV::OpInBoundsPtrAccessChain
                                             : SPIRV::OpPtrAccessChain;
  auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // Object to get a pointer to.
                 .addUse(I.getOperand(3).getReg());
  // Adding indices.
  for (unsigned i = 4; i < I.getNumExplicitOperands(); ++i)
    Res.addUse(I.getOperand(i).getReg());
  return Res.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  switch (cast<GIntrinsic>(I).getIntrinsicID()) {
  case Intrinsic::spv_load:
    return selectLoad(ResVReg, ResType, I);
  case Intrinsic::spv_store:
    return selectStore(I);
  case Intrinsic::spv_extractv:
    return selectExtractVal(ResVReg, ResType, I);
  case Intrinsic::spv_insertv:
    return selectInsertVal(ResVReg, ResType, I);
  case Intrinsic::spv_extractelt:
    return selectExtractElt(ResVReg, ResType, I);
  case Intrinsic::spv_insertelt:
    return selectInsertElt(ResVReg, ResType, I);
  case Intrinsic::spv_gep:
    return selectGEP(ResVReg, ResType, I);
  case Intrinsic::spv_unref_global:
  case Intrinsic::spv_init_global: {
    MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
    MachineInstr *Init = I.getNumExplicitOperands() > 2
                             ?
                                 MRI->getVRegDef(I.getOperand(2).getReg())
                             : nullptr;
    assert(MI);
    return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
  }
  case Intrinsic::spv_undef: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_const_composite: {
    // If no values are attached, the composite is a null constant.
    bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
    unsigned Opcode =
        IsNull ? SPIRV::OpConstantNull : SPIRV::OpConstantComposite;
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    // Skip the type metadata node that we already used when generating
    // assign.type for this composite.
    if (!IsNull) {
      for (unsigned i = I.getNumExplicitDefs() + 1;
           i < I.getNumExplicitOperands(); ++i) {
        MIB.addUse(I.getOperand(i).getReg());
      }
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_assign_name: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
    MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
    for (unsigned i = I.getNumExplicitDefs() + 2;
         i < I.getNumExplicitOperands(); ++i) {
      MIB.addImm(I.getOperand(i).getImm());
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_switch: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      if (I.getOperand(i).isReg())
        MIB.addReg(I.getOperand(i).getReg());
      else if (I.getOperand(i).isCImm())
        addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
      else if (I.getOperand(i).isMBB())
        MIB.addMBB(I.getOperand(i).getMBB());
      else
        llvm_unreachable("Unexpected OpSwitch operand");
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_cmpxchg:
    return selectAtomicCmpXchg(ResVReg, ResType, I);
  case Intrinsic::spv_unreachable:
    BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
    break;
  case Intrinsic::spv_alloca:
    return selectFrameIndex(ResVReg, ResType, I);
  default:
    llvm_unreachable("Intrinsic selection not implemented");
  }
  return true;
}

bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. We can use
  // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
  // first, so we can generate an OpBranchConditional here. If there is no
  // G_BRCOND, we just use OpBranch for a regular unconditional branch.
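  // I.e. for the pair  G_BRCOND %cond, %bb.true;  G_BR %bb.false
  // this emits a single OpBranchConditional %cond %bb.true %bb.false.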
  const MachineInstr *PrevI = I.getPrevNode();
  MachineBasicBlock &MBB = *I.getParent();
  if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
    return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
        .addUse(PrevI->getOperand(0).getReg())
        .addMBB(PrevI->getOperand(1).getMBB())
        .addMBB(I.getOperand(0).getMBB())
        .constrainAllUses(TII, TRI, RBI);
  }
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
      .addMBB(I.getOperand(0).getMBB())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. For an
  // explicit conditional branch with no fallthrough, we use both a G_BR and a
  // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
  // generate the OpBranchConditional in selectBranch above.
  //
  // If an OpBranchConditional has been generated, we simply return, as the
  // work is already done. If there is no OpBranchConditional, LLVM must be
  // relying on implicit fallthrough to the next basic block, so we need to
  // create an OpBranchConditional with an explicit "false" argument pointing
  // to the next basic block that LLVM would fall through to.
  const MachineInstr *NextI = I.getNextNode();
  // Check if this has already been successfully selected.
  if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
    return true;
  // Must be relying on implicit block fallthrough, so generate an
  // OpBranchConditional with the "next" basic block as the "false" target.
  MachineBasicBlock &MBB = *I.getParent();
  unsigned NextMBBNum = MBB.getNextNode()->getNumber();
  MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
      .addUse(I.getOperand(0).getReg())
      .addMBB(I.getOperand(1).getMBB())
      .addMBB(NextMBB)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  const unsigned NumOps = I.getNumOperands();
  for (unsigned i = 1; i < NumOps; i += 2) {
    MIB.addUse(I.getOperand(i + 0).getReg());
    MIB.addMBB(I.getOperand(i + 1).getMBB());
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectGlobalValue(
    Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
  // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
  MachineIRBuilder MIRBuilder(I);
  const GlobalValue *GV = I.getOperand(1).getGlobal();
  SPIRVType *ResType = GR.getOrCreateSPIRVType(
      GV->getType(), MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);

  std::string GlobalIdent = GV->getGlobalIdentifier();
  // We have functions as operands in tests with blocks of instructions, e.g.
  // in transcoding/global_block.ll. These operands are not used and should be
  // substituted by zero constants. Their type is expected to always be
  // OpTypePointer Function %uchar.
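  // The null constant built below is registered in the duplicate tracker, so
  // later references to the same function reuse it through a COPY.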
  if (isa<Function>(GV)) {
    const Constant *ConstVal = GV;
    MachineBasicBlock &BB = *I.getParent();
    Register NewReg = GR.find(ConstVal, GR.CurMF);
    if (!NewReg.isValid()) {
      SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
      ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII);
      NewReg = ResVReg;
      GR.add(ConstVal, GR.CurMF, NewReg);
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
          .addDef(NewReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .constrainAllUses(TII, TRI, RBI);
    }
    assert(NewReg != ResVReg);
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(NewReg)
        .constrainAllUses(TII, TRI, RBI);
  }
  auto GlobalVar = cast<GlobalVariable>(GV);
  assert(GlobalVar->getName() != "llvm.global.annotations");

  bool HasInit = GlobalVar->hasInitializer() &&
                 !isa<UndefValue>(GlobalVar->getInitializer());
  // Skip empty declarations for GVs with initializers until we get the
  // declaration with the initializer passed in.
  if (HasInit && !Init)
    return true;

  unsigned AddrSpace = GV->getAddressSpace();
  SPIRV::StorageClass::StorageClass Storage =
      addressSpaceToStorageClass(AddrSpace);
  bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
                  Storage != SPIRV::StorageClass::Function;
  SPIRV::LinkageType::LinkageType LnkType =
      (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
          ? SPIRV::LinkageType::Import
          : SPIRV::LinkageType::Export;

  Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
                                        Storage, Init, GlobalVar->isConstant(),
                                        HasLnkTy, LnkType, MIRBuilder, true);
  return Reg.isValid();
}

namespace llvm {
InstructionSelector *
createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                               const SPIRVSubtarget &Subtarget,
                               const RegisterBankInfo &RBI) {
  return new SPIRVInstructionSelector(TM, Subtarget, RBI);
}
} // namespace llvm