1 //===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the targeting of the InstructionSelector class for 10 // SPIRV. 11 // TODO: This should be generated by TableGen. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "MCTargetDesc/SPIRVMCTargetDesc.h" 16 #include "SPIRV.h" 17 #include "SPIRVGlobalRegistry.h" 18 #include "SPIRVInstrInfo.h" 19 #include "SPIRVRegisterBankInfo.h" 20 #include "SPIRVRegisterInfo.h" 21 #include "SPIRVTargetMachine.h" 22 #include "SPIRVUtils.h" 23 #include "llvm/ADT/APFloat.h" 24 #include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h" 25 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h" 26 #include "llvm/CodeGen/GlobalISel/InstructionSelector.h" 27 #include "llvm/CodeGen/MachineInstrBuilder.h" 28 #include "llvm/CodeGen/MachineRegisterInfo.h" 29 #include "llvm/IR/IntrinsicsSPIRV.h" 30 #include "llvm/Support/Debug.h" 31 32 #define DEBUG_TYPE "spirv-isel" 33 34 using namespace llvm; 35 namespace CL = SPIRV::OpenCLExtInst; 36 namespace GL = SPIRV::GLSLExtInst; 37 38 using ExtInstList = 39 std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>; 40 41 namespace { 42 43 #define GET_GLOBALISEL_PREDICATE_BITSET 44 #include "SPIRVGenGlobalISel.inc" 45 #undef GET_GLOBALISEL_PREDICATE_BITSET 46 47 class SPIRVInstructionSelector : public InstructionSelector { 48 const SPIRVSubtarget &STI; 49 const SPIRVInstrInfo &TII; 50 const SPIRVRegisterInfo &TRI; 51 const RegisterBankInfo &RBI; 52 SPIRVGlobalRegistry &GR; 53 MachineRegisterInfo *MRI; 54 55 public: 56 SPIRVInstructionSelector(const SPIRVTargetMachine &TM, 57 const SPIRVSubtarget &ST, 58 const RegisterBankInfo &RBI); 59 void setupMF(MachineFunction &MF, GISelKnownBits *KB, 60 CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI, 61 BlockFrequencyInfo *BFI) override; 62 // Common selection code. Instruction-specific selection occurs in spvSelect. 63 bool select(MachineInstr &I) override; 64 static const char *getName() { return DEBUG_TYPE; } 65 66 #define GET_GLOBALISEL_PREDICATES_DECL 67 #include "SPIRVGenGlobalISel.inc" 68 #undef GET_GLOBALISEL_PREDICATES_DECL 69 70 #define GET_GLOBALISEL_TEMPORARIES_DECL 71 #include "SPIRVGenGlobalISel.inc" 72 #undef GET_GLOBALISEL_TEMPORARIES_DECL 73 74 private: 75 // tblgen-erated 'select' implementation, used as the initial selector for 76 // the patterns that don't require complex C++. 77 bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const; 78 79 // All instruction-specific selection that didn't happen in "select()". 80 // Is basically a large Switch/Case delegating to all other select method. 81 bool spvSelect(Register ResVReg, const SPIRVType *ResType, 82 MachineInstr &I) const; 83 84 bool selectGlobalValue(Register ResVReg, MachineInstr &I, 85 const MachineInstr *Init = nullptr) const; 86 87 bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType, 88 MachineInstr &I, Register SrcReg, 89 unsigned Opcode) const; 90 bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I, 91 unsigned Opcode) const; 92 93 bool selectLoad(Register ResVReg, const SPIRVType *ResType, 94 MachineInstr &I) const; 95 bool selectStore(MachineInstr &I) const; 96 97 bool selectMemOperation(Register ResVReg, MachineInstr &I) const; 98 99 bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType, 100 MachineInstr &I, unsigned NewOpcode) const; 101 102 bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType, 103 MachineInstr &I) const; 104 105 bool selectFence(MachineInstr &I) const; 106 107 bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType, 108 MachineInstr &I) const; 109 110 bool selectBitreverse(Register ResVReg, const SPIRVType *ResType, 111 MachineInstr &I) const; 112 113 bool selectConstVector(Register ResVReg, const SPIRVType *ResType, 114 MachineInstr &I) const; 115 116 bool selectCmp(Register ResVReg, const SPIRVType *ResType, 117 unsigned comparisonOpcode, MachineInstr &I) const; 118 119 bool selectICmp(Register ResVReg, const SPIRVType *ResType, 120 MachineInstr &I) const; 121 bool selectFCmp(Register ResVReg, const SPIRVType *ResType, 122 MachineInstr &I) const; 123 124 void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I, 125 int OpIdx) const; 126 void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I, 127 int OpIdx) const; 128 129 bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm, 130 MachineInstr &I) const; 131 132 bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I, 133 bool IsSigned) const; 134 bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I, 135 bool IsSigned, unsigned Opcode) const; 136 bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I, 137 bool IsSigned) const; 138 139 bool selectTrunc(Register ResVReg, const SPIRVType *ResType, 140 MachineInstr &I) const; 141 142 bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I, 143 const SPIRVType *intTy, const SPIRVType *boolTy) const; 144 145 bool selectOpUndef(Register ResVReg, const SPIRVType *ResType, 146 MachineInstr &I) const; 147 bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType, 148 MachineInstr &I) const; 149 bool selectExtractVal(Register ResVReg, const SPIRVType *ResType, 150 MachineInstr &I) const; 151 bool selectInsertVal(Register ResVReg, const SPIRVType *ResType, 152 MachineInstr &I) const; 153 bool selectExtractElt(Register ResVReg, const SPIRVType *ResType, 154 MachineInstr &I) const; 155 bool selectInsertElt(Register ResVReg, const SPIRVType *ResType, 156 MachineInstr &I) const; 157 bool selectGEP(Register ResVReg, const SPIRVType *ResType, 158 MachineInstr &I) const; 159 160 bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType, 161 MachineInstr &I) const; 162 163 bool selectBranch(MachineInstr &I) const; 164 bool selectBranchCond(MachineInstr &I) const; 165 166 bool selectPhi(Register ResVReg, const SPIRVType *ResType, 167 MachineInstr &I) const; 168 169 bool selectExtInst(Register ResVReg, const SPIRVType *ResType, 170 MachineInstr &I, CL::OpenCLExtInst CLInst) const; 171 bool selectExtInst(Register ResVReg, const SPIRVType *ResType, 172 MachineInstr &I, CL::OpenCLExtInst CLInst, 173 GL::GLSLExtInst GLInst) const; 174 bool selectExtInst(Register ResVReg, const SPIRVType *ResType, 175 MachineInstr &I, const ExtInstList &ExtInsts) const; 176 177 Register buildI32Constant(uint32_t Val, MachineInstr &I, 178 const SPIRVType *ResType = nullptr) const; 179 180 Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const; 181 Register buildOnesVal(bool AllOnes, const SPIRVType *ResType, 182 MachineInstr &I) const; 183 }; 184 185 } // end anonymous namespace 186 187 #define GET_GLOBALISEL_IMPL 188 #include "SPIRVGenGlobalISel.inc" 189 #undef GET_GLOBALISEL_IMPL 190 191 SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM, 192 const SPIRVSubtarget &ST, 193 const RegisterBankInfo &RBI) 194 : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()), 195 TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()), 196 #define GET_GLOBALISEL_PREDICATES_INIT 197 #include "SPIRVGenGlobalISel.inc" 198 #undef GET_GLOBALISEL_PREDICATES_INIT 199 #define GET_GLOBALISEL_TEMPORARIES_INIT 200 #include "SPIRVGenGlobalISel.inc" 201 #undef GET_GLOBALISEL_TEMPORARIES_INIT 202 { 203 } 204 205 void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB, 206 CodeGenCoverage *CoverageInfo, 207 ProfileSummaryInfo *PSI, 208 BlockFrequencyInfo *BFI) { 209 MRI = &MF.getRegInfo(); 210 GR.setCurrentFunc(MF); 211 InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI); 212 } 213 214 static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI); 215 216 // Defined in SPIRVLegalizerInfo.cpp. 217 extern bool isTypeFoldingSupported(unsigned Opcode); 218 219 bool SPIRVInstructionSelector::select(MachineInstr &I) { 220 assert(I.getParent() && "Instruction should be in a basic block!"); 221 assert(I.getParent()->getParent() && "Instruction should be in a function!"); 222 223 Register Opcode = I.getOpcode(); 224 // If it's not a GMIR instruction, we've selected it already. 225 if (!isPreISelGenericOpcode(Opcode)) { 226 if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more. 227 auto *Def = MRI->getVRegDef(I.getOperand(1).getReg()); 228 if (isTypeFoldingSupported(Def->getOpcode())) { 229 auto Res = selectImpl(I, *CoverageInfo); 230 assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT); 231 if (Res) 232 return Res; 233 } 234 MRI->replaceRegWith(I.getOperand(1).getReg(), I.getOperand(0).getReg()); 235 I.removeFromParent(); 236 return true; 237 } else if (I.getNumDefs() == 1) { 238 // Make all vregs 32 bits (for SPIR-V IDs). 239 MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32)); 240 } 241 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); 242 } 243 244 if (I.getNumOperands() != I.getNumExplicitOperands()) { 245 LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n"); 246 return false; 247 } 248 249 // Common code for getting return reg+type, and removing selected instr 250 // from parent occurs here. Instr-specific selection happens in spvSelect(). 251 bool HasDefs = I.getNumDefs() > 0; 252 Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0); 253 SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr; 254 assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE); 255 if (spvSelect(ResVReg, ResType, I)) { 256 if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs). 257 MRI->setType(ResVReg, LLT::scalar(32)); 258 I.removeFromParent(); 259 return true; 260 } 261 return false; 262 } 263 264 bool SPIRVInstructionSelector::spvSelect(Register ResVReg, 265 const SPIRVType *ResType, 266 MachineInstr &I) const { 267 assert(!isTypeFoldingSupported(I.getOpcode()) || 268 I.getOpcode() == TargetOpcode::G_CONSTANT); 269 const unsigned Opcode = I.getOpcode(); 270 switch (Opcode) { 271 case TargetOpcode::G_CONSTANT: 272 return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(), 273 I); 274 case TargetOpcode::G_GLOBAL_VALUE: 275 return selectGlobalValue(ResVReg, I); 276 case TargetOpcode::G_IMPLICIT_DEF: 277 return selectOpUndef(ResVReg, ResType, I); 278 279 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: 280 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS: 281 return selectIntrinsic(ResVReg, ResType, I); 282 case TargetOpcode::G_BITREVERSE: 283 return selectBitreverse(ResVReg, ResType, I); 284 285 case TargetOpcode::G_BUILD_VECTOR: 286 return selectConstVector(ResVReg, ResType, I); 287 288 case TargetOpcode::G_SHUFFLE_VECTOR: { 289 MachineBasicBlock &BB = *I.getParent(); 290 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle)) 291 .addDef(ResVReg) 292 .addUse(GR.getSPIRVTypeID(ResType)) 293 .addUse(I.getOperand(1).getReg()) 294 .addUse(I.getOperand(2).getReg()); 295 for (auto V : I.getOperand(3).getShuffleMask()) 296 MIB.addImm(V); 297 return MIB.constrainAllUses(TII, TRI, RBI); 298 } 299 case TargetOpcode::G_MEMMOVE: 300 case TargetOpcode::G_MEMCPY: 301 case TargetOpcode::G_MEMSET: 302 return selectMemOperation(ResVReg, I); 303 304 case TargetOpcode::G_ICMP: 305 return selectICmp(ResVReg, ResType, I); 306 case TargetOpcode::G_FCMP: 307 return selectFCmp(ResVReg, ResType, I); 308 309 case TargetOpcode::G_FRAME_INDEX: 310 return selectFrameIndex(ResVReg, ResType, I); 311 312 case TargetOpcode::G_LOAD: 313 return selectLoad(ResVReg, ResType, I); 314 case TargetOpcode::G_STORE: 315 return selectStore(I); 316 317 case TargetOpcode::G_BR: 318 return selectBranch(I); 319 case TargetOpcode::G_BRCOND: 320 return selectBranchCond(I); 321 322 case TargetOpcode::G_PHI: 323 return selectPhi(ResVReg, ResType, I); 324 325 case TargetOpcode::G_FPTOSI: 326 return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS); 327 case TargetOpcode::G_FPTOUI: 328 return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU); 329 330 case TargetOpcode::G_SITOFP: 331 return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF); 332 case TargetOpcode::G_UITOFP: 333 return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF); 334 335 case TargetOpcode::G_CTPOP: 336 return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount); 337 case TargetOpcode::G_SMIN: 338 return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin); 339 case TargetOpcode::G_UMIN: 340 return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin); 341 342 case TargetOpcode::G_SMAX: 343 return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax); 344 case TargetOpcode::G_UMAX: 345 return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax); 346 347 case TargetOpcode::G_FMA: 348 return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma); 349 350 case TargetOpcode::G_FPOW: 351 return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow); 352 case TargetOpcode::G_FPOWI: 353 return selectExtInst(ResVReg, ResType, I, CL::pown); 354 355 case TargetOpcode::G_FEXP: 356 return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp); 357 case TargetOpcode::G_FEXP2: 358 return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2); 359 360 case TargetOpcode::G_FLOG: 361 return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log); 362 case TargetOpcode::G_FLOG2: 363 return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2); 364 case TargetOpcode::G_FLOG10: 365 return selectExtInst(ResVReg, ResType, I, CL::log10); 366 367 case TargetOpcode::G_FABS: 368 return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs); 369 case TargetOpcode::G_ABS: 370 return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs); 371 372 case TargetOpcode::G_FMINNUM: 373 case TargetOpcode::G_FMINIMUM: 374 return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::FMin); 375 case TargetOpcode::G_FMAXNUM: 376 case TargetOpcode::G_FMAXIMUM: 377 return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::FMax); 378 379 case TargetOpcode::G_FCOPYSIGN: 380 return selectExtInst(ResVReg, ResType, I, CL::copysign); 381 382 case TargetOpcode::G_FCEIL: 383 return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil); 384 case TargetOpcode::G_FFLOOR: 385 return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor); 386 387 case TargetOpcode::G_FCOS: 388 return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos); 389 case TargetOpcode::G_FSIN: 390 return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin); 391 392 case TargetOpcode::G_FSQRT: 393 return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt); 394 395 case TargetOpcode::G_CTTZ: 396 case TargetOpcode::G_CTTZ_ZERO_UNDEF: 397 return selectExtInst(ResVReg, ResType, I, CL::ctz); 398 case TargetOpcode::G_CTLZ: 399 case TargetOpcode::G_CTLZ_ZERO_UNDEF: 400 return selectExtInst(ResVReg, ResType, I, CL::clz); 401 402 case TargetOpcode::G_INTRINSIC_ROUND: 403 return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round); 404 case TargetOpcode::G_INTRINSIC_ROUNDEVEN: 405 return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven); 406 case TargetOpcode::G_INTRINSIC_TRUNC: 407 return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc); 408 case TargetOpcode::G_FRINT: 409 case TargetOpcode::G_FNEARBYINT: 410 return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven); 411 412 case TargetOpcode::G_SMULH: 413 return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi); 414 case TargetOpcode::G_UMULH: 415 return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi); 416 417 case TargetOpcode::G_SEXT: 418 return selectExt(ResVReg, ResType, I, true); 419 case TargetOpcode::G_ANYEXT: 420 case TargetOpcode::G_ZEXT: 421 return selectExt(ResVReg, ResType, I, false); 422 case TargetOpcode::G_TRUNC: 423 return selectTrunc(ResVReg, ResType, I); 424 case TargetOpcode::G_FPTRUNC: 425 case TargetOpcode::G_FPEXT: 426 return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert); 427 428 case TargetOpcode::G_PTRTOINT: 429 return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU); 430 case TargetOpcode::G_INTTOPTR: 431 return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr); 432 case TargetOpcode::G_BITCAST: 433 return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast); 434 case TargetOpcode::G_ADDRSPACE_CAST: 435 return selectAddrSpaceCast(ResVReg, ResType, I); 436 case TargetOpcode::G_PTR_ADD: { 437 // Currently, we get G_PTR_ADD only as a result of translating 438 // global variables, initialized with constant expressions like GV + Const 439 // (see test opencl/basic/progvar_prog_scope_init.ll). 440 // TODO: extend the handler once we have other cases. 441 assert(I.getOperand(1).isReg() && I.getOperand(2).isReg()); 442 Register GV = I.getOperand(1).getReg(); 443 MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV); 444 assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE || 445 (*II).getOpcode() == TargetOpcode::COPY || 446 (*II).getOpcode() == SPIRV::OpVariable) && 447 isImm(I.getOperand(2), MRI)); 448 Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I); 449 MachineBasicBlock &BB = *I.getParent(); 450 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp)) 451 .addDef(ResVReg) 452 .addUse(GR.getSPIRVTypeID(ResType)) 453 .addImm(static_cast<uint32_t>( 454 SPIRV::Opcode::InBoundsPtrAccessChain)) 455 .addUse(GV) 456 .addUse(Idx) 457 .addUse(I.getOperand(2).getReg()); 458 return MIB.constrainAllUses(TII, TRI, RBI); 459 } 460 461 case TargetOpcode::G_ATOMICRMW_OR: 462 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr); 463 case TargetOpcode::G_ATOMICRMW_ADD: 464 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd); 465 case TargetOpcode::G_ATOMICRMW_AND: 466 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd); 467 case TargetOpcode::G_ATOMICRMW_MAX: 468 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax); 469 case TargetOpcode::G_ATOMICRMW_MIN: 470 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin); 471 case TargetOpcode::G_ATOMICRMW_SUB: 472 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub); 473 case TargetOpcode::G_ATOMICRMW_XOR: 474 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor); 475 case TargetOpcode::G_ATOMICRMW_UMAX: 476 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax); 477 case TargetOpcode::G_ATOMICRMW_UMIN: 478 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin); 479 case TargetOpcode::G_ATOMICRMW_XCHG: 480 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange); 481 case TargetOpcode::G_ATOMIC_CMPXCHG: 482 return selectAtomicCmpXchg(ResVReg, ResType, I); 483 484 case TargetOpcode::G_FENCE: 485 return selectFence(I); 486 487 default: 488 return false; 489 } 490 } 491 492 bool SPIRVInstructionSelector::selectExtInst(Register ResVReg, 493 const SPIRVType *ResType, 494 MachineInstr &I, 495 CL::OpenCLExtInst CLInst) const { 496 return selectExtInst(ResVReg, ResType, I, 497 {{SPIRV::InstructionSet::OpenCL_std, CLInst}}); 498 } 499 500 bool SPIRVInstructionSelector::selectExtInst(Register ResVReg, 501 const SPIRVType *ResType, 502 MachineInstr &I, 503 CL::OpenCLExtInst CLInst, 504 GL::GLSLExtInst GLInst) const { 505 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst}, 506 {SPIRV::InstructionSet::GLSL_std_450, GLInst}}; 507 return selectExtInst(ResVReg, ResType, I, ExtInsts); 508 } 509 510 bool SPIRVInstructionSelector::selectExtInst(Register ResVReg, 511 const SPIRVType *ResType, 512 MachineInstr &I, 513 const ExtInstList &Insts) const { 514 515 for (const auto &Ex : Insts) { 516 SPIRV::InstructionSet::InstructionSet Set = Ex.first; 517 uint32_t Opcode = Ex.second; 518 if (STI.canUseExtInstSet(Set)) { 519 MachineBasicBlock &BB = *I.getParent(); 520 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst)) 521 .addDef(ResVReg) 522 .addUse(GR.getSPIRVTypeID(ResType)) 523 .addImm(static_cast<uint32_t>(Set)) 524 .addImm(Opcode); 525 const unsigned NumOps = I.getNumOperands(); 526 for (unsigned i = 1; i < NumOps; ++i) 527 MIB.add(I.getOperand(i)); 528 return MIB.constrainAllUses(TII, TRI, RBI); 529 } 530 } 531 return false; 532 } 533 534 bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg, 535 const SPIRVType *ResType, 536 MachineInstr &I, 537 Register SrcReg, 538 unsigned Opcode) const { 539 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode)) 540 .addDef(ResVReg) 541 .addUse(GR.getSPIRVTypeID(ResType)) 542 .addUse(SrcReg) 543 .constrainAllUses(TII, TRI, RBI); 544 } 545 546 bool SPIRVInstructionSelector::selectUnOp(Register ResVReg, 547 const SPIRVType *ResType, 548 MachineInstr &I, 549 unsigned Opcode) const { 550 return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(), 551 Opcode); 552 } 553 554 static SPIRV::Scope::Scope getScope(SyncScope::ID Ord) { 555 switch (Ord) { 556 case SyncScope::SingleThread: 557 return SPIRV::Scope::Invocation; 558 case SyncScope::System: 559 return SPIRV::Scope::Device; 560 default: 561 llvm_unreachable("Unsupported synchronization Scope ID."); 562 } 563 } 564 565 static void addMemoryOperands(MachineMemOperand *MemOp, 566 MachineInstrBuilder &MIB) { 567 uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None); 568 if (MemOp->isVolatile()) 569 SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile); 570 if (MemOp->isNonTemporal()) 571 SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal); 572 if (MemOp->getAlign().value()) 573 SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned); 574 575 if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) { 576 MIB.addImm(SpvMemOp); 577 if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned)) 578 MIB.addImm(MemOp->getAlign().value()); 579 } 580 } 581 582 static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) { 583 uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None); 584 if (Flags & MachineMemOperand::Flags::MOVolatile) 585 SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile); 586 if (Flags & MachineMemOperand::Flags::MONonTemporal) 587 SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal); 588 589 if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) 590 MIB.addImm(SpvMemOp); 591 } 592 593 bool SPIRVInstructionSelector::selectLoad(Register ResVReg, 594 const SPIRVType *ResType, 595 MachineInstr &I) const { 596 unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0; 597 Register Ptr = I.getOperand(1 + OpOffset).getReg(); 598 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad)) 599 .addDef(ResVReg) 600 .addUse(GR.getSPIRVTypeID(ResType)) 601 .addUse(Ptr); 602 if (!I.getNumMemOperands()) { 603 assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS || 604 I.getOpcode() == 605 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS); 606 addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB); 607 } else { 608 addMemoryOperands(*I.memoperands_begin(), MIB); 609 } 610 return MIB.constrainAllUses(TII, TRI, RBI); 611 } 612 613 bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const { 614 unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0; 615 Register StoreVal = I.getOperand(0 + OpOffset).getReg(); 616 Register Ptr = I.getOperand(1 + OpOffset).getReg(); 617 MachineBasicBlock &BB = *I.getParent(); 618 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore)) 619 .addUse(Ptr) 620 .addUse(StoreVal); 621 if (!I.getNumMemOperands()) { 622 assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS || 623 I.getOpcode() == 624 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS); 625 addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB); 626 } else { 627 addMemoryOperands(*I.memoperands_begin(), MIB); 628 } 629 return MIB.constrainAllUses(TII, TRI, RBI); 630 } 631 632 bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg, 633 MachineInstr &I) const { 634 MachineBasicBlock &BB = *I.getParent(); 635 Register SrcReg = I.getOperand(1).getReg(); 636 if (I.getOpcode() == TargetOpcode::G_MEMSET) { 637 assert(I.getOperand(1).isReg() && I.getOperand(2).isReg()); 638 unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI); 639 unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI); 640 SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII); 641 SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII); 642 Register Const = GR.getOrCreateConsIntArray(Val, I, ArrTy, TII); 643 SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType( 644 ArrTy, I, TII, SPIRV::StorageClass::UniformConstant); 645 // TODO: check if we have such GV, add init, use buildGlobalVariable. 646 Type *LLVMArrTy = ArrayType::get( 647 IntegerType::get(GR.CurMF->getFunction().getContext(), 8), Num); 648 GlobalVariable *GV = 649 new GlobalVariable(LLVMArrTy, true, GlobalValue::InternalLinkage); 650 Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(32)); 651 GR.add(GV, GR.CurMF, VarReg); 652 653 buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {}); 654 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable)) 655 .addDef(VarReg) 656 .addUse(GR.getSPIRVTypeID(VarTy)) 657 .addImm(SPIRV::StorageClass::UniformConstant) 658 .addUse(Const) 659 .constrainAllUses(TII, TRI, RBI); 660 SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType( 661 ValTy, I, TII, SPIRV::StorageClass::UniformConstant); 662 SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(32)); 663 selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast); 664 } 665 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized)) 666 .addUse(I.getOperand(0).getReg()) 667 .addUse(SrcReg) 668 .addUse(I.getOperand(2).getReg()); 669 if (I.getNumMemOperands()) 670 addMemoryOperands(*I.memoperands_begin(), MIB); 671 bool Result = MIB.constrainAllUses(TII, TRI, RBI); 672 if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg()) 673 BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg) 674 .addUse(MIB->getOperand(0).getReg()); 675 return Result; 676 } 677 678 bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg, 679 const SPIRVType *ResType, 680 MachineInstr &I, 681 unsigned NewOpcode) const { 682 assert(I.hasOneMemOperand()); 683 const MachineMemOperand *MemOp = *I.memoperands_begin(); 684 uint32_t Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID())); 685 Register ScopeReg = buildI32Constant(Scope, I); 686 687 Register Ptr = I.getOperand(1).getReg(); 688 // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll 689 // auto ScSem = 690 // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)); 691 AtomicOrdering AO = MemOp->getSuccessOrdering(); 692 uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO)); 693 Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I); 694 695 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode)) 696 .addDef(ResVReg) 697 .addUse(GR.getSPIRVTypeID(ResType)) 698 .addUse(Ptr) 699 .addUse(ScopeReg) 700 .addUse(MemSemReg) 701 .addUse(I.getOperand(2).getReg()) 702 .constrainAllUses(TII, TRI, RBI); 703 } 704 705 bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const { 706 AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm()); 707 uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO)); 708 Register MemSemReg = buildI32Constant(MemSem, I); 709 SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm()); 710 uint32_t Scope = static_cast<uint32_t>(getScope(Ord)); 711 Register ScopeReg = buildI32Constant(Scope, I); 712 MachineBasicBlock &BB = *I.getParent(); 713 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier)) 714 .addUse(ScopeReg) 715 .addUse(MemSemReg) 716 .constrainAllUses(TII, TRI, RBI); 717 } 718 719 bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg, 720 const SPIRVType *ResType, 721 MachineInstr &I) const { 722 Register ScopeReg; 723 Register MemSemEqReg; 724 Register MemSemNeqReg; 725 Register Ptr = I.getOperand(2).getReg(); 726 if (!isa<GIntrinsic>(I)) { 727 assert(I.hasOneMemOperand()); 728 const MachineMemOperand *MemOp = *I.memoperands_begin(); 729 unsigned Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID())); 730 ScopeReg = buildI32Constant(Scope, I); 731 732 unsigned ScSem = static_cast<uint32_t>( 733 getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr))); 734 AtomicOrdering AO = MemOp->getSuccessOrdering(); 735 unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem; 736 MemSemEqReg = buildI32Constant(MemSemEq, I); 737 AtomicOrdering FO = MemOp->getFailureOrdering(); 738 unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem; 739 MemSemNeqReg = 740 MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I); 741 } else { 742 ScopeReg = I.getOperand(5).getReg(); 743 MemSemEqReg = I.getOperand(6).getReg(); 744 MemSemNeqReg = I.getOperand(7).getReg(); 745 } 746 747 Register Cmp = I.getOperand(3).getReg(); 748 Register Val = I.getOperand(4).getReg(); 749 SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val); 750 Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass); 751 const DebugLoc &DL = I.getDebugLoc(); 752 bool Result = 753 BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange)) 754 .addDef(ACmpRes) 755 .addUse(GR.getSPIRVTypeID(SpvValTy)) 756 .addUse(Ptr) 757 .addUse(ScopeReg) 758 .addUse(MemSemEqReg) 759 .addUse(MemSemNeqReg) 760 .addUse(Val) 761 .addUse(Cmp) 762 .constrainAllUses(TII, TRI, RBI); 763 Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass); 764 SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII); 765 Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual)) 766 .addDef(CmpSuccReg) 767 .addUse(GR.getSPIRVTypeID(BoolTy)) 768 .addUse(ACmpRes) 769 .addUse(Cmp) 770 .constrainAllUses(TII, TRI, RBI); 771 Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass); 772 Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert)) 773 .addDef(TmpReg) 774 .addUse(GR.getSPIRVTypeID(ResType)) 775 .addUse(ACmpRes) 776 .addUse(GR.getOrCreateUndef(I, ResType, TII)) 777 .addImm(0) 778 .constrainAllUses(TII, TRI, RBI); 779 Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert)) 780 .addDef(ResVReg) 781 .addUse(GR.getSPIRVTypeID(ResType)) 782 .addUse(CmpSuccReg) 783 .addUse(TmpReg) 784 .addImm(1) 785 .constrainAllUses(TII, TRI, RBI); 786 return Result; 787 } 788 789 static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) { 790 switch (SC) { 791 case SPIRV::StorageClass::Workgroup: 792 case SPIRV::StorageClass::CrossWorkgroup: 793 case SPIRV::StorageClass::Function: 794 return true; 795 default: 796 return false; 797 } 798 } 799 800 // In SPIR-V address space casting can only happen to and from the Generic 801 // storage class. We can also only case Workgroup, CrossWorkgroup, or Function 802 // pointers to and from Generic pointers. As such, we can convert e.g. from 803 // Workgroup to Function by going via a Generic pointer as an intermediary. All 804 // other combinations can only be done by a bitcast, and are probably not safe. 805 bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg, 806 const SPIRVType *ResType, 807 MachineInstr &I) const { 808 // If the AddrSpaceCast user is single and in OpConstantComposite or 809 // OpVariable, we should select OpSpecConstantOp. 810 auto UIs = MRI->use_instructions(ResVReg); 811 if (!UIs.empty() && ++UIs.begin() == UIs.end() && 812 (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite || 813 UIs.begin()->getOpcode() == SPIRV::OpVariable || 814 isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) { 815 Register NewReg = I.getOperand(1).getReg(); 816 MachineBasicBlock &BB = *I.getParent(); 817 SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII); 818 ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII, 819 SPIRV::StorageClass::Generic); 820 bool Result = 821 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp)) 822 .addDef(ResVReg) 823 .addUse(GR.getSPIRVTypeID(ResType)) 824 .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric)) 825 .addUse(NewReg) 826 .constrainAllUses(TII, TRI, RBI); 827 return Result; 828 } 829 Register SrcPtr = I.getOperand(1).getReg(); 830 SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr); 831 SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr); 832 SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg); 833 834 // Casting from an eligable pointer to Generic. 835 if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC)) 836 return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric); 837 // Casting from Generic to an eligable pointer. 838 if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC)) 839 return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr); 840 // Casting between 2 eligable pointers using Generic as an intermediary. 841 if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) { 842 Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass); 843 SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType( 844 SrcPtrTy, I, TII, SPIRV::StorageClass::Generic); 845 MachineBasicBlock &BB = *I.getParent(); 846 const DebugLoc &DL = I.getDebugLoc(); 847 bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric)) 848 .addDef(Tmp) 849 .addUse(GR.getSPIRVTypeID(GenericPtrTy)) 850 .addUse(SrcPtr) 851 .constrainAllUses(TII, TRI, RBI); 852 return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr)) 853 .addDef(ResVReg) 854 .addUse(GR.getSPIRVTypeID(ResType)) 855 .addUse(Tmp) 856 .constrainAllUses(TII, TRI, RBI); 857 } 858 // TODO Should this case just be disallowed completely? 859 // We're casting 2 other arbitrary address spaces, so have to bitcast. 860 return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast); 861 } 862 863 static unsigned getFCmpOpcode(unsigned PredNum) { 864 auto Pred = static_cast<CmpInst::Predicate>(PredNum); 865 switch (Pred) { 866 case CmpInst::FCMP_OEQ: 867 return SPIRV::OpFOrdEqual; 868 case CmpInst::FCMP_OGE: 869 return SPIRV::OpFOrdGreaterThanEqual; 870 case CmpInst::FCMP_OGT: 871 return SPIRV::OpFOrdGreaterThan; 872 case CmpInst::FCMP_OLE: 873 return SPIRV::OpFOrdLessThanEqual; 874 case CmpInst::FCMP_OLT: 875 return SPIRV::OpFOrdLessThan; 876 case CmpInst::FCMP_ONE: 877 return SPIRV::OpFOrdNotEqual; 878 case CmpInst::FCMP_ORD: 879 return SPIRV::OpOrdered; 880 case CmpInst::FCMP_UEQ: 881 return SPIRV::OpFUnordEqual; 882 case CmpInst::FCMP_UGE: 883 return SPIRV::OpFUnordGreaterThanEqual; 884 case CmpInst::FCMP_UGT: 885 return SPIRV::OpFUnordGreaterThan; 886 case CmpInst::FCMP_ULE: 887 return SPIRV::OpFUnordLessThanEqual; 888 case CmpInst::FCMP_ULT: 889 return SPIRV::OpFUnordLessThan; 890 case CmpInst::FCMP_UNE: 891 return SPIRV::OpFUnordNotEqual; 892 case CmpInst::FCMP_UNO: 893 return SPIRV::OpUnordered; 894 default: 895 llvm_unreachable("Unknown predicate type for FCmp"); 896 } 897 } 898 899 static unsigned getICmpOpcode(unsigned PredNum) { 900 auto Pred = static_cast<CmpInst::Predicate>(PredNum); 901 switch (Pred) { 902 case CmpInst::ICMP_EQ: 903 return SPIRV::OpIEqual; 904 case CmpInst::ICMP_NE: 905 return SPIRV::OpINotEqual; 906 case CmpInst::ICMP_SGE: 907 return SPIRV::OpSGreaterThanEqual; 908 case CmpInst::ICMP_SGT: 909 return SPIRV::OpSGreaterThan; 910 case CmpInst::ICMP_SLE: 911 return SPIRV::OpSLessThanEqual; 912 case CmpInst::ICMP_SLT: 913 return SPIRV::OpSLessThan; 914 case CmpInst::ICMP_UGE: 915 return SPIRV::OpUGreaterThanEqual; 916 case CmpInst::ICMP_UGT: 917 return SPIRV::OpUGreaterThan; 918 case CmpInst::ICMP_ULE: 919 return SPIRV::OpULessThanEqual; 920 case CmpInst::ICMP_ULT: 921 return SPIRV::OpULessThan; 922 default: 923 llvm_unreachable("Unknown predicate type for ICmp"); 924 } 925 } 926 927 static unsigned getPtrCmpOpcode(unsigned Pred) { 928 switch (static_cast<CmpInst::Predicate>(Pred)) { 929 case CmpInst::ICMP_EQ: 930 return SPIRV::OpPtrEqual; 931 case CmpInst::ICMP_NE: 932 return SPIRV::OpPtrNotEqual; 933 default: 934 llvm_unreachable("Unknown predicate type for pointer comparison"); 935 } 936 } 937 938 // Return the logical operation, or abort if none exists. 939 static unsigned getBoolCmpOpcode(unsigned PredNum) { 940 auto Pred = static_cast<CmpInst::Predicate>(PredNum); 941 switch (Pred) { 942 case CmpInst::ICMP_EQ: 943 return SPIRV::OpLogicalEqual; 944 case CmpInst::ICMP_NE: 945 return SPIRV::OpLogicalNotEqual; 946 default: 947 llvm_unreachable("Unknown predicate type for Bool comparison"); 948 } 949 } 950 951 bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg, 952 const SPIRVType *ResType, 953 MachineInstr &I) const { 954 MachineBasicBlock &BB = *I.getParent(); 955 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse)) 956 .addDef(ResVReg) 957 .addUse(GR.getSPIRVTypeID(ResType)) 958 .addUse(I.getOperand(1).getReg()) 959 .constrainAllUses(TII, TRI, RBI); 960 } 961 962 bool SPIRVInstructionSelector::selectConstVector(Register ResVReg, 963 const SPIRVType *ResType, 964 MachineInstr &I) const { 965 // TODO: only const case is supported for now. 966 assert(std::all_of( 967 I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) { 968 if (MO.isDef()) 969 return true; 970 if (!MO.isReg()) 971 return false; 972 SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg()); 973 assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE && 974 ConstTy->getOperand(1).isReg()); 975 Register ConstReg = ConstTy->getOperand(1).getReg(); 976 const MachineInstr *Const = this->MRI->getVRegDef(ConstReg); 977 assert(Const); 978 return (Const->getOpcode() == TargetOpcode::G_CONSTANT || 979 Const->getOpcode() == TargetOpcode::G_FCONSTANT); 980 })); 981 982 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), 983 TII.get(SPIRV::OpConstantComposite)) 984 .addDef(ResVReg) 985 .addUse(GR.getSPIRVTypeID(ResType)); 986 for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i) 987 MIB.addUse(I.getOperand(i).getReg()); 988 return MIB.constrainAllUses(TII, TRI, RBI); 989 } 990 991 bool SPIRVInstructionSelector::selectCmp(Register ResVReg, 992 const SPIRVType *ResType, 993 unsigned CmpOpc, 994 MachineInstr &I) const { 995 Register Cmp0 = I.getOperand(2).getReg(); 996 Register Cmp1 = I.getOperand(3).getReg(); 997 assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() == 998 GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() && 999 "CMP operands should have the same type"); 1000 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc)) 1001 .addDef(ResVReg) 1002 .addUse(GR.getSPIRVTypeID(ResType)) 1003 .addUse(Cmp0) 1004 .addUse(Cmp1) 1005 .constrainAllUses(TII, TRI, RBI); 1006 } 1007 1008 bool SPIRVInstructionSelector::selectICmp(Register ResVReg, 1009 const SPIRVType *ResType, 1010 MachineInstr &I) const { 1011 auto Pred = I.getOperand(1).getPredicate(); 1012 unsigned CmpOpc; 1013 1014 Register CmpOperand = I.getOperand(2).getReg(); 1015 if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer)) 1016 CmpOpc = getPtrCmpOpcode(Pred); 1017 else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool)) 1018 CmpOpc = getBoolCmpOpcode(Pred); 1019 else 1020 CmpOpc = getICmpOpcode(Pred); 1021 return selectCmp(ResVReg, ResType, CmpOpc, I); 1022 } 1023 1024 void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB, 1025 const MachineInstr &I, 1026 int OpIdx) const { 1027 assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 && 1028 "Expected G_FCONSTANT"); 1029 const ConstantFP *FPImm = I.getOperand(1).getFPImm(); 1030 addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB); 1031 } 1032 1033 void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB, 1034 const MachineInstr &I, 1035 int OpIdx) const { 1036 assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 && 1037 "Expected G_CONSTANT"); 1038 addNumImm(I.getOperand(1).getCImm()->getValue(), MIB); 1039 } 1040 1041 Register 1042 SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I, 1043 const SPIRVType *ResType) const { 1044 Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32); 1045 const SPIRVType *SpvI32Ty = 1046 ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII); 1047 // Find a constant in DT or build a new one. 1048 auto ConstInt = ConstantInt::get(LLVMTy, Val); 1049 Register NewReg = GR.find(ConstInt, GR.CurMF); 1050 if (!NewReg.isValid()) { 1051 NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32)); 1052 GR.add(ConstInt, GR.CurMF, NewReg); 1053 MachineInstr *MI; 1054 MachineBasicBlock &BB = *I.getParent(); 1055 if (Val == 0) { 1056 MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull)) 1057 .addDef(NewReg) 1058 .addUse(GR.getSPIRVTypeID(SpvI32Ty)); 1059 } else { 1060 MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI)) 1061 .addDef(NewReg) 1062 .addUse(GR.getSPIRVTypeID(SpvI32Ty)) 1063 .addImm(APInt(32, Val).getZExtValue()); 1064 } 1065 constrainSelectedInstRegOperands(*MI, TII, TRI, RBI); 1066 } 1067 return NewReg; 1068 } 1069 1070 bool SPIRVInstructionSelector::selectFCmp(Register ResVReg, 1071 const SPIRVType *ResType, 1072 MachineInstr &I) const { 1073 unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate()); 1074 return selectCmp(ResVReg, ResType, CmpOp, I); 1075 } 1076 1077 Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType, 1078 MachineInstr &I) const { 1079 if (ResType->getOpcode() == SPIRV::OpTypeVector) 1080 return GR.getOrCreateConsIntVector(0, I, ResType, TII); 1081 return GR.getOrCreateConstInt(0, I, ResType, TII); 1082 } 1083 1084 Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes, 1085 const SPIRVType *ResType, 1086 MachineInstr &I) const { 1087 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType); 1088 APInt One = 1089 AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0); 1090 if (ResType->getOpcode() == SPIRV::OpTypeVector) 1091 return GR.getOrCreateConsIntVector(One.getZExtValue(), I, ResType, TII); 1092 return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII); 1093 } 1094 1095 bool SPIRVInstructionSelector::selectSelect(Register ResVReg, 1096 const SPIRVType *ResType, 1097 MachineInstr &I, 1098 bool IsSigned) const { 1099 // To extend a bool, we need to use OpSelect between constants. 1100 Register ZeroReg = buildZerosVal(ResType, I); 1101 Register OneReg = buildOnesVal(IsSigned, ResType, I); 1102 bool IsScalarBool = 1103 GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool); 1104 unsigned Opcode = 1105 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond; 1106 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode)) 1107 .addDef(ResVReg) 1108 .addUse(GR.getSPIRVTypeID(ResType)) 1109 .addUse(I.getOperand(1).getReg()) 1110 .addUse(OneReg) 1111 .addUse(ZeroReg) 1112 .constrainAllUses(TII, TRI, RBI); 1113 } 1114 1115 bool SPIRVInstructionSelector::selectIToF(Register ResVReg, 1116 const SPIRVType *ResType, 1117 MachineInstr &I, bool IsSigned, 1118 unsigned Opcode) const { 1119 Register SrcReg = I.getOperand(1).getReg(); 1120 // We can convert bool value directly to float type without OpConvert*ToF, 1121 // however the translator generates OpSelect+OpConvert*ToF, so we do the same. 1122 if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) { 1123 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType); 1124 SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII); 1125 if (ResType->getOpcode() == SPIRV::OpTypeVector) { 1126 const unsigned NumElts = ResType->getOperand(2).getImm(); 1127 TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII); 1128 } 1129 SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass); 1130 selectSelect(SrcReg, TmpType, I, false); 1131 } 1132 return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode); 1133 } 1134 1135 bool SPIRVInstructionSelector::selectExt(Register ResVReg, 1136 const SPIRVType *ResType, 1137 MachineInstr &I, bool IsSigned) const { 1138 if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) 1139 return selectSelect(ResVReg, ResType, I, IsSigned); 1140 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert; 1141 return selectUnOp(ResVReg, ResType, I, Opcode); 1142 } 1143 1144 bool SPIRVInstructionSelector::selectIntToBool(Register IntReg, 1145 Register ResVReg, 1146 MachineInstr &I, 1147 const SPIRVType *IntTy, 1148 const SPIRVType *BoolTy) const { 1149 // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero. 1150 Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass); 1151 bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector; 1152 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS; 1153 Register Zero = buildZerosVal(IntTy, I); 1154 Register One = buildOnesVal(false, IntTy, I); 1155 MachineBasicBlock &BB = *I.getParent(); 1156 BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode)) 1157 .addDef(BitIntReg) 1158 .addUse(GR.getSPIRVTypeID(IntTy)) 1159 .addUse(IntReg) 1160 .addUse(One) 1161 .constrainAllUses(TII, TRI, RBI); 1162 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual)) 1163 .addDef(ResVReg) 1164 .addUse(GR.getSPIRVTypeID(BoolTy)) 1165 .addUse(BitIntReg) 1166 .addUse(Zero) 1167 .constrainAllUses(TII, TRI, RBI); 1168 } 1169 1170 bool SPIRVInstructionSelector::selectTrunc(Register ResVReg, 1171 const SPIRVType *ResType, 1172 MachineInstr &I) const { 1173 if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) { 1174 Register IntReg = I.getOperand(1).getReg(); 1175 const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg); 1176 return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType); 1177 } 1178 bool IsSigned = GR.isScalarOrVectorSigned(ResType); 1179 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert; 1180 return selectUnOp(ResVReg, ResType, I, Opcode); 1181 } 1182 1183 bool SPIRVInstructionSelector::selectConst(Register ResVReg, 1184 const SPIRVType *ResType, 1185 const APInt &Imm, 1186 MachineInstr &I) const { 1187 unsigned TyOpcode = ResType->getOpcode(); 1188 assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero()); 1189 MachineBasicBlock &BB = *I.getParent(); 1190 if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) && 1191 Imm.isZero()) 1192 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull)) 1193 .addDef(ResVReg) 1194 .addUse(GR.getSPIRVTypeID(ResType)) 1195 .constrainAllUses(TII, TRI, RBI); 1196 if (TyOpcode == SPIRV::OpTypeInt) { 1197 assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!"); 1198 Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII); 1199 if (Reg == ResVReg) 1200 return true; 1201 return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY)) 1202 .addDef(ResVReg) 1203 .addUse(Reg) 1204 .constrainAllUses(TII, TRI, RBI); 1205 } 1206 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI)) 1207 .addDef(ResVReg) 1208 .addUse(GR.getSPIRVTypeID(ResType)); 1209 // <=32-bit integers should be caught by the sdag pattern. 1210 assert(Imm.getBitWidth() > 32); 1211 addNumImm(Imm, MIB); 1212 return MIB.constrainAllUses(TII, TRI, RBI); 1213 } 1214 1215 bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg, 1216 const SPIRVType *ResType, 1217 MachineInstr &I) const { 1218 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef)) 1219 .addDef(ResVReg) 1220 .addUse(GR.getSPIRVTypeID(ResType)) 1221 .constrainAllUses(TII, TRI, RBI); 1222 } 1223 1224 static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) { 1225 assert(MO.isReg()); 1226 const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg()); 1227 if (TypeInst->getOpcode() != SPIRV::ASSIGN_TYPE) 1228 return false; 1229 assert(TypeInst->getOperand(1).isReg()); 1230 MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg()); 1231 return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT; 1232 } 1233 1234 static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) { 1235 const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg()); 1236 MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg()); 1237 assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT); 1238 return ImmInst->getOperand(1).getCImm()->getZExtValue(); 1239 } 1240 1241 bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg, 1242 const SPIRVType *ResType, 1243 MachineInstr &I) const { 1244 MachineBasicBlock &BB = *I.getParent(); 1245 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert)) 1246 .addDef(ResVReg) 1247 .addUse(GR.getSPIRVTypeID(ResType)) 1248 // object to insert 1249 .addUse(I.getOperand(3).getReg()) 1250 // composite to insert into 1251 .addUse(I.getOperand(2).getReg()); 1252 for (unsigned i = 4; i < I.getNumOperands(); i++) 1253 MIB.addImm(foldImm(I.getOperand(i), MRI)); 1254 return MIB.constrainAllUses(TII, TRI, RBI); 1255 } 1256 1257 bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg, 1258 const SPIRVType *ResType, 1259 MachineInstr &I) const { 1260 MachineBasicBlock &BB = *I.getParent(); 1261 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract)) 1262 .addDef(ResVReg) 1263 .addUse(GR.getSPIRVTypeID(ResType)) 1264 .addUse(I.getOperand(2).getReg()); 1265 for (unsigned i = 3; i < I.getNumOperands(); i++) 1266 MIB.addImm(foldImm(I.getOperand(i), MRI)); 1267 return MIB.constrainAllUses(TII, TRI, RBI); 1268 } 1269 1270 bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg, 1271 const SPIRVType *ResType, 1272 MachineInstr &I) const { 1273 if (isImm(I.getOperand(4), MRI)) 1274 return selectInsertVal(ResVReg, ResType, I); 1275 MachineBasicBlock &BB = *I.getParent(); 1276 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic)) 1277 .addDef(ResVReg) 1278 .addUse(GR.getSPIRVTypeID(ResType)) 1279 .addUse(I.getOperand(2).getReg()) 1280 .addUse(I.getOperand(3).getReg()) 1281 .addUse(I.getOperand(4).getReg()) 1282 .constrainAllUses(TII, TRI, RBI); 1283 } 1284 1285 bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg, 1286 const SPIRVType *ResType, 1287 MachineInstr &I) const { 1288 if (isImm(I.getOperand(3), MRI)) 1289 return selectExtractVal(ResVReg, ResType, I); 1290 MachineBasicBlock &BB = *I.getParent(); 1291 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic)) 1292 .addDef(ResVReg) 1293 .addUse(GR.getSPIRVTypeID(ResType)) 1294 .addUse(I.getOperand(2).getReg()) 1295 .addUse(I.getOperand(3).getReg()) 1296 .constrainAllUses(TII, TRI, RBI); 1297 } 1298 1299 bool SPIRVInstructionSelector::selectGEP(Register ResVReg, 1300 const SPIRVType *ResType, 1301 MachineInstr &I) const { 1302 const bool IsGEPInBounds = I.getOperand(2).getImm(); 1303 1304 // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator only 1305 // relies on PtrAccessChain, so we'll try not to deviate. For Vulkan however, 1306 // we have to use Op[InBounds]AccessChain. 1307 const unsigned Opcode = STI.isVulkanEnv() 1308 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain 1309 : SPIRV::OpAccessChain) 1310 : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain 1311 : SPIRV::OpPtrAccessChain); 1312 1313 auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode)) 1314 .addDef(ResVReg) 1315 .addUse(GR.getSPIRVTypeID(ResType)) 1316 // Object to get a pointer to. 1317 .addUse(I.getOperand(3).getReg()); 1318 // Adding indices. 1319 const unsigned StartingIndex = 1320 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain) 1321 ? 5 1322 : 4; 1323 for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i) 1324 Res.addUse(I.getOperand(i).getReg()); 1325 return Res.constrainAllUses(TII, TRI, RBI); 1326 } 1327 1328 bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg, 1329 const SPIRVType *ResType, 1330 MachineInstr &I) const { 1331 MachineBasicBlock &BB = *I.getParent(); 1332 switch (cast<GIntrinsic>(I).getIntrinsicID()) { 1333 case Intrinsic::spv_load: 1334 return selectLoad(ResVReg, ResType, I); 1335 case Intrinsic::spv_store: 1336 return selectStore(I); 1337 case Intrinsic::spv_extractv: 1338 return selectExtractVal(ResVReg, ResType, I); 1339 case Intrinsic::spv_insertv: 1340 return selectInsertVal(ResVReg, ResType, I); 1341 case Intrinsic::spv_extractelt: 1342 return selectExtractElt(ResVReg, ResType, I); 1343 case Intrinsic::spv_insertelt: 1344 return selectInsertElt(ResVReg, ResType, I); 1345 case Intrinsic::spv_gep: 1346 return selectGEP(ResVReg, ResType, I); 1347 case Intrinsic::spv_unref_global: 1348 case Intrinsic::spv_init_global: { 1349 MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg()); 1350 MachineInstr *Init = I.getNumExplicitOperands() > 2 1351 ? MRI->getVRegDef(I.getOperand(2).getReg()) 1352 : nullptr; 1353 assert(MI); 1354 return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init); 1355 } 1356 case Intrinsic::spv_undef: { 1357 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef)) 1358 .addDef(ResVReg) 1359 .addUse(GR.getSPIRVTypeID(ResType)); 1360 return MIB.constrainAllUses(TII, TRI, RBI); 1361 } 1362 case Intrinsic::spv_const_composite: { 1363 // If no values are attached, the composite is null constant. 1364 bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands(); 1365 unsigned Opcode = 1366 IsNull ? SPIRV::OpConstantNull : SPIRV::OpConstantComposite; 1367 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode)) 1368 .addDef(ResVReg) 1369 .addUse(GR.getSPIRVTypeID(ResType)); 1370 // skip type MD node we already used when generated assign.type for this 1371 if (!IsNull) { 1372 for (unsigned i = I.getNumExplicitDefs() + 1; 1373 i < I.getNumExplicitOperands(); ++i) { 1374 MIB.addUse(I.getOperand(i).getReg()); 1375 } 1376 } 1377 return MIB.constrainAllUses(TII, TRI, RBI); 1378 } 1379 case Intrinsic::spv_assign_name: { 1380 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName)); 1381 MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg()); 1382 for (unsigned i = I.getNumExplicitDefs() + 2; 1383 i < I.getNumExplicitOperands(); ++i) { 1384 MIB.addImm(I.getOperand(i).getImm()); 1385 } 1386 return MIB.constrainAllUses(TII, TRI, RBI); 1387 } 1388 case Intrinsic::spv_switch: { 1389 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch)); 1390 for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) { 1391 if (I.getOperand(i).isReg()) 1392 MIB.addReg(I.getOperand(i).getReg()); 1393 else if (I.getOperand(i).isCImm()) 1394 addNumImm(I.getOperand(i).getCImm()->getValue(), MIB); 1395 else if (I.getOperand(i).isMBB()) 1396 MIB.addMBB(I.getOperand(i).getMBB()); 1397 else 1398 llvm_unreachable("Unexpected OpSwitch operand"); 1399 } 1400 return MIB.constrainAllUses(TII, TRI, RBI); 1401 } 1402 case Intrinsic::spv_cmpxchg: 1403 return selectAtomicCmpXchg(ResVReg, ResType, I); 1404 case Intrinsic::spv_unreachable: 1405 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable)); 1406 break; 1407 case Intrinsic::spv_alloca: 1408 return selectFrameIndex(ResVReg, ResType, I); 1409 case Intrinsic::spv_assume: 1410 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR)) 1411 .addUse(I.getOperand(1).getReg()); 1412 break; 1413 case Intrinsic::spv_expect: 1414 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR)) 1415 .addDef(ResVReg) 1416 .addUse(GR.getSPIRVTypeID(ResType)) 1417 .addUse(I.getOperand(2).getReg()) 1418 .addUse(I.getOperand(3).getReg()); 1419 break; 1420 default: 1421 llvm_unreachable("Intrinsic selection not implemented"); 1422 } 1423 return true; 1424 } 1425 1426 bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg, 1427 const SPIRVType *ResType, 1428 MachineInstr &I) const { 1429 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable)) 1430 .addDef(ResVReg) 1431 .addUse(GR.getSPIRVTypeID(ResType)) 1432 .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function)) 1433 .constrainAllUses(TII, TRI, RBI); 1434 } 1435 1436 bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const { 1437 // InstructionSelector walks backwards through the instructions. We can use 1438 // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR 1439 // first, so can generate an OpBranchConditional here. If there is no 1440 // G_BRCOND, we just use OpBranch for a regular unconditional branch. 1441 const MachineInstr *PrevI = I.getPrevNode(); 1442 MachineBasicBlock &MBB = *I.getParent(); 1443 if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) { 1444 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional)) 1445 .addUse(PrevI->getOperand(0).getReg()) 1446 .addMBB(PrevI->getOperand(1).getMBB()) 1447 .addMBB(I.getOperand(0).getMBB()) 1448 .constrainAllUses(TII, TRI, RBI); 1449 } 1450 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch)) 1451 .addMBB(I.getOperand(0).getMBB()) 1452 .constrainAllUses(TII, TRI, RBI); 1453 } 1454 1455 bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const { 1456 // InstructionSelector walks backwards through the instructions. For an 1457 // explicit conditional branch with no fallthrough, we use both a G_BR and a 1458 // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and 1459 // generate the OpBranchConditional in selectBranch above. 1460 // 1461 // If an OpBranchConditional has been generated, we simply return, as the work 1462 // is alread done. If there is no OpBranchConditional, LLVM must be relying on 1463 // implicit fallthrough to the next basic block, so we need to create an 1464 // OpBranchConditional with an explicit "false" argument pointing to the next 1465 // basic block that LLVM would fall through to. 1466 const MachineInstr *NextI = I.getNextNode(); 1467 // Check if this has already been successfully selected. 1468 if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional) 1469 return true; 1470 // Must be relying on implicit block fallthrough, so generate an 1471 // OpBranchConditional with the "next" basic block as the "false" target. 1472 MachineBasicBlock &MBB = *I.getParent(); 1473 unsigned NextMBBNum = MBB.getNextNode()->getNumber(); 1474 MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum); 1475 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional)) 1476 .addUse(I.getOperand(0).getReg()) 1477 .addMBB(I.getOperand(1).getMBB()) 1478 .addMBB(NextMBB) 1479 .constrainAllUses(TII, TRI, RBI); 1480 } 1481 1482 bool SPIRVInstructionSelector::selectPhi(Register ResVReg, 1483 const SPIRVType *ResType, 1484 MachineInstr &I) const { 1485 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi)) 1486 .addDef(ResVReg) 1487 .addUse(GR.getSPIRVTypeID(ResType)); 1488 const unsigned NumOps = I.getNumOperands(); 1489 for (unsigned i = 1; i < NumOps; i += 2) { 1490 MIB.addUse(I.getOperand(i + 0).getReg()); 1491 MIB.addMBB(I.getOperand(i + 1).getMBB()); 1492 } 1493 return MIB.constrainAllUses(TII, TRI, RBI); 1494 } 1495 1496 bool SPIRVInstructionSelector::selectGlobalValue( 1497 Register ResVReg, MachineInstr &I, const MachineInstr *Init) const { 1498 // FIXME: don't use MachineIRBuilder here, replace it with BuildMI. 1499 MachineIRBuilder MIRBuilder(I); 1500 const GlobalValue *GV = I.getOperand(1).getGlobal(); 1501 Type *GVType = GV->getValueType(); 1502 SPIRVType *PointerBaseType; 1503 if (GVType->isArrayTy()) { 1504 SPIRVType *ArrayElementType = 1505 GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder, 1506 SPIRV::AccessQualifier::ReadWrite, false); 1507 PointerBaseType = GR.getOrCreateSPIRVArrayType( 1508 ArrayElementType, GVType->getArrayNumElements(), I, TII); 1509 } else { 1510 PointerBaseType = GR.getOrCreateSPIRVType( 1511 GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false); 1512 } 1513 SPIRVType *ResType = GR.getOrCreateSPIRVPointerType( 1514 PointerBaseType, I, TII, 1515 addressSpaceToStorageClass(GV->getAddressSpace())); 1516 std::string GlobalIdent = GV->getGlobalIdentifier(); 1517 // We have functions as operands in tests with blocks of instruction e.g. in 1518 // transcoding/global_block.ll. These operands are not used and should be 1519 // substituted by zero constants. Their type is expected to be always 1520 // OpTypePointer Function %uchar. 1521 if (isa<Function>(GV)) { 1522 const Constant *ConstVal = GV; 1523 MachineBasicBlock &BB = *I.getParent(); 1524 Register NewReg = GR.find(ConstVal, GR.CurMF); 1525 if (!NewReg.isValid()) { 1526 Register NewReg = ResVReg; 1527 GR.add(ConstVal, GR.CurMF, NewReg); 1528 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull)) 1529 .addDef(NewReg) 1530 .addUse(GR.getSPIRVTypeID(ResType)) 1531 .constrainAllUses(TII, TRI, RBI); 1532 } 1533 assert(NewReg != ResVReg); 1534 return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY)) 1535 .addDef(ResVReg) 1536 .addUse(NewReg) 1537 .constrainAllUses(TII, TRI, RBI); 1538 } 1539 auto GlobalVar = cast<GlobalVariable>(GV); 1540 assert(GlobalVar->getName() != "llvm.global.annotations"); 1541 1542 bool HasInit = GlobalVar->hasInitializer() && 1543 !isa<UndefValue>(GlobalVar->getInitializer()); 1544 // Skip empty declaration for GVs with initilaizers till we get the decl with 1545 // passed initializer. 1546 if (HasInit && !Init) 1547 return true; 1548 1549 unsigned AddrSpace = GV->getAddressSpace(); 1550 SPIRV::StorageClass::StorageClass Storage = 1551 addressSpaceToStorageClass(AddrSpace); 1552 bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage && 1553 Storage != SPIRV::StorageClass::Function; 1554 SPIRV::LinkageType::LinkageType LnkType = 1555 (GV->isDeclaration() || GV->hasAvailableExternallyLinkage()) 1556 ? SPIRV::LinkageType::Import 1557 : SPIRV::LinkageType::Export; 1558 1559 Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV, 1560 Storage, Init, GlobalVar->isConstant(), 1561 HasLnkTy, LnkType, MIRBuilder, true); 1562 return Reg.isValid(); 1563 } 1564 1565 namespace llvm { 1566 InstructionSelector * 1567 createSPIRVInstructionSelector(const SPIRVTargetMachine &TM, 1568 const SPIRVSubtarget &Subtarget, 1569 const RegisterBankInfo &RBI) { 1570 return new SPIRVInstructionSelector(TM, Subtarget, RBI); 1571 } 1572 } // namespace llvm 1573