//===-- SILowerControlFlow.cpp - Use predicates for control flow ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack. Each Scalar ALU controls the operations of 64 Vector
/// ALUs. The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single vector ALU). Typically, for predicates, a vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one for each
/// Vector ALU) and then the Scalar ALU will AND the VCC register with the
/// EXEC to update the predicates.
///
/// For example:
/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
/// %SGPR0 = SI_IF %VCC
///   %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
/// %SGPR0 = SI_ELSE %SGPR0
///   %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
/// SI_END_CF %SGPR0
///
/// becomes:
///
/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC   // Save and update the exec mask
/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC   // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0             // This instruction is an optional
///                                    // optimization which allows us to
///                                    // branch if all the bits of
///                                    // EXEC are zero.
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0  // Do the IF block of the branch
///
/// label0:
/// %SGPR0 = S_OR_SAVEEXEC_B64 %SGPR0  // Restore the exec mask for the ELSE block
/// %EXEC = S_XOR_B64 %SGPR0, %EXEC    // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label1             // Use our branch optimization
///                                    // instruction again.
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0  // Do the ELSE block
/// label1:
/// %EXEC = S_OR_B64 %EXEC, %SGPR0     // Re-enable saved exec mask bits
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"

using namespace llvm;

namespace {

class SILowerControlFlowPass : public MachineFunctionPass {

private:
  static const unsigned SkipThreshold = 12;

  static char ID;
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  bool shouldSkip(MachineBasicBlock *From, MachineBasicBlock *To);

  void Skip(MachineInstr &From, MachineOperand &To);
  void SkipIfDead(MachineInstr &MI);

  void If(MachineInstr &MI);
  void Else(MachineInstr &MI);
  void Break(MachineInstr &MI);
  void IfBreak(MachineInstr &MI);
  void ElseBreak(MachineInstr &MI);
  void Loop(MachineInstr &MI);
  void EndCf(MachineInstr &MI);

  void Kill(MachineInstr &MI);
  void Branch(MachineInstr &MI);

  void LoadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset = 0);
  void computeIndirectRegAndOffset(unsigned VecReg, unsigned &Reg, int &Offset);
  void IndirectSrc(MachineInstr &MI);
  void IndirectDst(MachineInstr &MI);

public:
  SILowerControlFlowPass(TargetMachine &tm) :
    MachineFunctionPass(ID), TRI(nullptr), TII(nullptr) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Lower control flow instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace

char SILowerControlFlowPass::ID = 0;

FunctionPass *llvm::createSILowerControlFlowPass(TargetMachine &tm) {
  return new SILowerControlFlowPass(tm);
}

bool SILowerControlFlowPass::shouldSkip(MachineBasicBlock *From,
                                        MachineBasicBlock *To) {

  unsigned NumInstr = 0;

  for (MachineBasicBlock *MBB = From; MBB != To && !MBB->succ_empty();
       MBB = *MBB->succ_begin()) {

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         NumInstr < SkipThreshold && I != E; ++I) {

      if (I->isBundle() || !I->isBundled())
        if (++NumInstr >= SkipThreshold)
          return true;
    }
  }

  return false;
}

void SILowerControlFlowPass::Skip(MachineInstr &From, MachineOperand &To) {

  if (!shouldSkip(*From.getParent()->succ_begin(), To.getMBB()))
    return;

  DebugLoc DL = From.getDebugLoc();
  BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    .addOperand(To);
}

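// For pixel shaders, insert an early exit after \p MI when the remaining code
// is long enough to be worth skipping: if EXEC is zero at this point, export
// to the NULL target and terminate the wavefront instead of executing the
// rest of the shader.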
void SILowerControlFlowPass::SkipIfDead(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  if (MBB.getParent()->getInfo<SIMachineFunctionInfo>()->getShaderType() !=
      ShaderType::PIXEL ||
      !shouldSkip(&MBB, &MBB.getParent()->back()))
    return;

  MachineBasicBlock::iterator Insert = &MI;
  ++Insert;

  // If the exec mask is non-zero, skip the next two instructions
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addImm(3);

  // Exec mask is zero: Export to NULL target...
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::EXP))
    .addImm(0)
    .addImm(0x09) // V_008DFC_SQ_EXP_NULL
    .addImm(0)
    .addImm(1)
    .addImm(1)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0);

  // ... and terminate wavefront
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));
}

void SILowerControlFlowPass::If(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg)
    .addReg(Vcc);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

void SILowerControlFlowPass::Else(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst)
    .addReg(Src); // Saved EXEC

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Dst);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

void SILowerControlFlowPass::Break(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::IfBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Vcc)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::ElseBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Saved = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Saved)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::Loop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Src = MI.getOperand(0).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addOperand(MI.getOperand(1));

  MI.eraseFromParent();
}

void SILowerControlFlowPass::EndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::Branch(MachineInstr &MI) {
  if (MI.getOperand(0).getMBB() == MI.getParent()->getNextNode())
    MI.eraseFromParent();

  // If these aren't equal, this is probably an infinite loop.
}

void SILowerControlFlowPass::Kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  const MachineOperand &Op = MI.getOperand(0);

#ifndef NDEBUG
  const SIMachineFunctionInfo *MFI
    = MBB.getParent()->getInfo<SIMachineFunctionInfo>();
  // Kill is only allowed in pixel / geometry shaders.
  assert(MFI->getShaderType() == ShaderType::PIXEL ||
         MFI->getShaderType() == ShaderType::GEOMETRY);
#endif

  // Clear this thread from the exec mask if the operand is negative
  if (Op.isImm()) {
    // Constant operand: Set exec mask to 0 or do nothing
    if (Op.getImm() & 0x80000000) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
        .addImm(0);
    }
  } else {
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32))
      .addImm(0)
      .addOperand(Op);
  }

  MI.eraseFromParent();
}

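// Load M0 with the index required by \p MovRel and insert \p MovRel before
// \p MI.  A uniform (SGPR) index needs only a move (plus an add when a
// constant \p Offset is present).  A divergent (VGPR) index is handled with
// a "waterfall" loop; the emitted sequence is roughly the following sketch
// (the real code branches by a fixed instruction offset instead of a label):
//
//   s_mov_b64           Save, exec
// loop:
//   v_readfirstlane_b32 vcc_lo, Idx      // index of the first active lane
//   s_mov_b32           m0, vcc_lo
//   v_cmp_eq_u32        vcc, m0, Idx     // lanes that use the same index
//   s_and_saveexec_b64  vcc, vcc         // restrict exec to those lanes
//   <movrel>                             // do the move for those lanes
//   s_xor_b64           exec, exec, vcc  // retire the handled lanes
//   s_cbranch_execnz    loop             // repeat while any lanes remain
//   s_mov_b64           exec, Save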
void SILowerControlFlowPass::LoadM0(MachineInstr &MI, MachineInstr *MovRel,
                                    int Offset) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I = MI;

  unsigned Save = MI.getOperand(1).getReg();
  unsigned Idx = MI.getOperand(3).getReg();

  if (AMDGPU::SReg_32RegClass.contains(Idx)) {
    if (Offset) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(Idx)
        .addImm(Offset);
    } else {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .addReg(Idx);
    }
    MBB.insert(I, MovRel);
  } else {

    assert(AMDGPU::SReg_64RegClass.contains(Save));
    assert(AMDGPU::VGPR_32RegClass.contains(Idx));

    // Save the EXEC mask
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), Save)
      .addReg(AMDGPU::EXEC);

    // Read the next variant into VCC (lower 32 bits) <- also loop target
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32),
            AMDGPU::VCC_LO)
      .addReg(Idx);

    // Move index from VCC into M0
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addReg(AMDGPU::VCC_LO);

    // Compare the just read M0 value to all possible Idx values
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32))
      .addReg(AMDGPU::M0)
      .addReg(Idx);

    // Update EXEC, save the original EXEC value to VCC
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
      .addReg(AMDGPU::VCC);

    if (Offset) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(AMDGPU::M0)
        .addImm(Offset);
    }
    // Do the actual move
    MBB.insert(I, MovRel);

    // Update EXEC, switch all done bits to 0 and all todo bits to 1
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
      .addReg(AMDGPU::EXEC)
      .addReg(AMDGPU::VCC);

    // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
      .addImm(-7);

    // Restore EXEC
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
      .addReg(Save);

  }
  MI.eraseFromParent();
}

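/// \brief Compute the base register and remaining constant offset to use for
/// an indirect register access into the vector starting at \p VecReg.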
/// \param @VecReg The register which holds element zero of the vector being
///                addressed into.
/// \param[out] @Reg The base register to use in the indirect addressing
///                  instruction.
/// \param[in,out] @Offset As an input, this is the constant offset part of
///                        the indirect Index, e.g. v0 = v[VecReg + Offset].
///                        As an output, this is a constant value that needs
///                        to be added to the value stored in M0.
void SILowerControlFlowPass::computeIndirectRegAndOffset(unsigned VecReg,
                                                         unsigned &Reg,
                                                         int &Offset) {
  unsigned SubReg = TRI->getSubReg(VecReg, AMDGPU::sub0);
  if (!SubReg)
    SubReg = VecReg;

  const TargetRegisterClass *RC = TRI->getPhysRegClass(SubReg);
  int RegIdx = TRI->getHWRegIndex(SubReg) + Offset;

  if (RegIdx < 0) {
    Offset = RegIdx;
    RegIdx = 0;
  } else {
    Offset = 0;
  }

  Reg = RC->getRegister(RegIdx);
}

void SILowerControlFlowPass::IndirectSrc(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vec = MI.getOperand(2).getReg();
  int Off = MI.getOperand(4).getImm();
  unsigned Reg;

  computeIndirectRegAndOffset(Vec, Reg, Off);

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
      .addReg(Reg)
      .addReg(AMDGPU::M0, RegState::Implicit)
      .addReg(Vec, RegState::Implicit);

  LoadM0(MI, MovRel, Off);
}

void SILowerControlFlowPass::IndirectDst(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  int Off = MI.getOperand(4).getImm();
  unsigned Val = MI.getOperand(5).getReg();
  unsigned Reg;

  computeIndirectRegAndOffset(Dst, Reg, Off);

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32))
      .addReg(Reg, RegState::Define)
      .addReg(Val)
      .addReg(AMDGPU::M0, RegState::Implicit)
      .addReg(Dst, RegState::Implicit);

  LoadM0(MI, MovRel, Off);
}

bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  TRI =
      static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  bool HaveKill = false;
  bool NeedWQM = false;
  bool NeedFlat = false;
  unsigned Depth = 0;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);

      MachineInstr &MI = *I;
      if (TII->isWQM(MI.getOpcode()) || TII->isDS(MI.getOpcode()))
        NeedWQM = true;

      // Flat uses m0 in case it needs to access LDS.
      if (TII->isFLAT(MI.getOpcode()))
        NeedFlat = true;

      switch (MI.getOpcode()) {
        default: break;
        case AMDGPU::SI_IF:
          ++Depth;
          If(MI);
          break;

        case AMDGPU::SI_ELSE:
          Else(MI);
          break;

        case AMDGPU::SI_BREAK:
          Break(MI);
          break;

        case AMDGPU::SI_IF_BREAK:
          IfBreak(MI);
          break;

        case AMDGPU::SI_ELSE_BREAK:
          ElseBreak(MI);
          break;

        case AMDGPU::SI_LOOP:
          ++Depth;
          Loop(MI);
          break;

        case AMDGPU::SI_END_CF:
          if (--Depth == 0 && HaveKill) {
            SkipIfDead(MI);
            HaveKill = false;
          }
          EndCf(MI);
          break;

        case AMDGPU::SI_KILL:
          if (Depth == 0)
            SkipIfDead(MI);
          else
            HaveKill = true;
          Kill(MI);
          break;

        case AMDGPU::S_BRANCH:
          Branch(MI);
          break;

        case AMDGPU::SI_INDIRECT_SRC_V1:
        case AMDGPU::SI_INDIRECT_SRC_V2:
        case AMDGPU::SI_INDIRECT_SRC_V4:
        case AMDGPU::SI_INDIRECT_SRC_V8:
        case AMDGPU::SI_INDIRECT_SRC_V16:
          IndirectSrc(MI);
          break;

        case AMDGPU::SI_INDIRECT_DST_V1:
        case AMDGPU::SI_INDIRECT_DST_V2:
        case AMDGPU::SI_INDIRECT_DST_V4:
        case AMDGPU::SI_INDIRECT_DST_V8:
        case AMDGPU::SI_INDIRECT_DST_V16:
          IndirectDst(MI);
          break;
      }
    }
  }

  if (NeedWQM && MFI->getShaderType() == ShaderType::PIXEL) {
    MachineBasicBlock &MBB = MF.front();
    BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
            AMDGPU::EXEC).addReg(AMDGPU::EXEC);
  }

  // FIXME: This seems inappropriate to do here.
  if (NeedFlat && MFI->IsKernel) {
    // Insert the prologue initializing the SGPRs pointing to the scratch space
    // for flat accesses.
    const MachineFrameInfo *FrameInfo = MF.getFrameInfo();

    // TODO: What to use with function calls?

    // FIXME: This is reporting stack size that is used in a scratch buffer
    // rather than registers as well.
    uint64_t StackSizeBytes = FrameInfo->getStackSize();

    int IndirectBegin
      = static_cast<const AMDGPUInstrInfo*>(TII)->getIndirectIndexBegin(MF);
    // Convert register index to 256-byte unit.
    uint64_t StackOffset = IndirectBegin < 0 ? 0 : (4 * IndirectBegin / 256);

    assert((StackSizeBytes < 0xffff) && StackOffset < 0xffff &&
           "Stack limits should be smaller than 16-bits");

    // Initialize the flat scratch register pair.
    // TODO: Can we use one s_mov_b64 here?

    // Offset is in units of 256-bytes.
    MachineBasicBlock &MBB = MF.front();
    DebugLoc NoDL;
    MachineBasicBlock::iterator Start = MBB.getFirstNonPHI();
    const MCInstrDesc &SMovK = TII->get(AMDGPU::S_MOVK_I32);

    assert(isInt<16>(StackOffset) && isInt<16>(StackSizeBytes));

    BuildMI(MBB, Start, NoDL, SMovK, AMDGPU::FLAT_SCR_LO)
      .addImm(StackOffset);

    // Documentation says size is "per-thread scratch size in bytes"
    BuildMI(MBB, Start, NoDL, SMovK, AMDGPU::FLAT_SCR_HI)
      .addImm(StackSizeBytes);
  }

  return true;
}