//===-- SIFoldOperands.cpp - Fold operands -------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  int ShrinkOpcode;
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false,
                int ShrinkOp = -1) :
    UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
    Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg() || FoldOp->isGlobal());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isGlobal() const { return Kind == MachineOperand::MO_GlobalAddress; }

  bool isCommuted() const {
    return Commuted;
  }

  bool needsShrink() const {
    return ShrinkOpcode != -1;
  }

  int getShrinkOpcode() const {
    return ShrinkOpcode;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;
  const SIMachineFunctionInfo *MFI;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   int UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.
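
// A minimal sketch of the core transformation this pass performs, shown as
// illustrative MIR (register numbers and operand details are hypothetical):
//
//   %0:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
//   %1:vgpr_32 = V_ADD_I32_e32 %0, %2, implicit-def $vcc, implicit $exec
//
// is rewritten so the use reads the constant directly, making the mov dead:
//
//   %1:vgpr_32 = V_ADD_I32_e32 1, %2, implicit-def $vcc, implicit $exec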

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_FMAC_F32_e64:
  case AMDGPU::V_FMAC_F16_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F16_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F32_e64;

      unsigned Opc = IsFMA ?
        (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :
        (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      const MCInstrDesc &MadDesc = TII->get(Opc);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
    return false;
  }
  default:
    return false;
  }
}

// TODO: Add a heuristic that the frame index might not fit in the addressing
// mode immediate offset, to avoid materializing it in loops.
static bool frameIndexMayFold(const SIInstrInfo *TII,
                              const MachineInstr &UseMI,
                              int OpNo,
                              const MachineOperand &OpToFold) {
  return OpToFold.isFI() &&
    (TII->isMUBUF(UseMI) || TII->isFLATScratch(UseMI)) &&
    OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(), AMDGPU::OpName::vaddr);
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

static bool updateOperand(FoldCandidate &Fold,
                          const SIInstrInfo &TII,
                          const TargetRegisterInfo &TRI,
                          const GCNSubtarget &ST) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
        !(MI->getDesc().TSFlags & SIInstrFlags::IsMAI) &&
        AMDGPU::isInlinableLiteralV216(static_cast<uint16_t>(Fold.ImmToFold),
                                       ST.hasInv2PiInlineImm())) {
      // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
      // already set.
      unsigned Opcode = MI->getOpcode();
      int OpNo = MI->getOperandNo(&Old);
      int ModIdx = -1;
      if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
        ModIdx = AMDGPU::OpName::src0_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
        ModIdx = AMDGPU::OpName::src1_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
        ModIdx = AMDGPU::OpName::src2_modifiers;
      assert(ModIdx != -1);
      ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
      MachineOperand &Mod = MI->getOperand(ModIdx);
      unsigned Val = Mod.getImm();
      if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1))
        return false;
      // Only apply the following transformation if that operand requires
      // a packed immediate.
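      // For example (illustrative): to fold the 32-bit literal 0x12340000
      // into a packed operand, note that the low half is zero, so the code
      // below selects the high half via OP_SEL_0 and folds 0x1234 as the
      // immediate.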
      switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
      case AMDGPU::OPERAND_REG_IMM_V2FP16:
      case AMDGPU::OPERAND_REG_IMM_V2INT16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
        // If upper part is all zero we do not need op_sel_hi.
        if (!isUInt<16>(Fold.ImmToFold)) {
          if (!(Fold.ImmToFold & 0xffff)) {
            Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
            Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
            Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
            return true;
          }
          Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
          Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
          return true;
        }
        break;
      default:
        break;
      }
    }
  }

  if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
    MachineBasicBlock *MBB = MI->getParent();
    auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI);
    if (Liveness != MachineBasicBlock::LQR_Dead)
      return false;

    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
    int Op32 = Fold.getShrinkOpcode();
    MachineOperand &Dst0 = MI->getOperand(0);
    MachineOperand &Dst1 = MI->getOperand(1);
    assert(Dst0.isDef() && Dst1.isDef());

    bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());

    const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
    Register NewReg0 = MRI.createVirtualRegister(Dst0RC);

    MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);

    if (HaveNonDbgCarryUse) {
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
        .addReg(AMDGPU::VCC, RegState::Kill);
    }

    // Keep the old instruction around to avoid breaking iterators, but
    // replace it with a dummy instruction to remove uses.
    //
    // FIXME: We should not invert how this pass looks at operands to avoid
    // this. Should track set of foldable movs instead of looking for uses
    // when looking at a use.
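    // (The IMPLICIT_DEF substituted below defines a fresh virtual register
    // that nothing reads, so the old instruction becomes inert.)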
    Dst0.setReg(NewReg0);
    for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
      MI->RemoveOperand(I);
    MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));

    if (Fold.isCommuted())
      TII.commuteInstruction(*Inst32, false);
    return true;
  }

  assert(!Fold.needsShrink() && "not handled");

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isGlobal()) {
    Old.ChangeToGA(Fold.OpToFold->getGlobal(), Fold.OpToFold->getOffset(),
                   Fold.OpToFold->getTargetFlags());
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
  Old.setIsUndef(New->isUndef());
  return true;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
         Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F16_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F32_e64;
      unsigned NewOpc = IsFMA ?
        (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :
        (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(NewOpc));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    unsigned CommuteOpNo = OpNo;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        CommuteOpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        CommuteOpNo = CommuteIdx0;
    }

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call of commuteInstruction() below. Such situations are
    // avoided here explicitly as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
      if ((Opc == AMDGPU::V_ADD_I32_e64 ||
           Opc == AMDGPU::V_SUB_I32_e64 ||
           Opc == AMDGPU::V_SUBREV_I32_e64) && // FIXME
          (OpToFold->isImm() || OpToFold->isFI() || OpToFold->isGlobal())) {
        MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

        // Verify the other operand is a VGPR, otherwise we would violate the
        // constant bus restriction.
        unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1
                                                       : CommuteIdx0;
        MachineOperand &OtherOp = MI->getOperand(OtherIdx);
        if (!OtherOp.isReg() ||
            !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
          return false;

        assert(MI->getOperand(1).isDef());

        // Make sure to get the 32-bit version of the commuted opcode.
        unsigned MaybeCommutedOpc = MI->getOpcode();
        int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);

        FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true,
                                         Op32));
        return true;
      }

      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true));
    return true;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef() && !TII->isSDWA(MI);
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

static bool tryToFoldACImm(const SIInstrInfo *TII,
                           const MachineOperand &OpToFold,
                           MachineInstr *UseMI,
                           unsigned UseOpIdx,
                           SmallVectorImpl<FoldCandidate> &FoldList) {
  const MCInstrDesc &Desc = UseMI->getDesc();
  const MCOperandInfo *OpInfo = Desc.OpInfo;
  if (!OpInfo || UseOpIdx >= Desc.getNumOperands())
    return false;

  uint8_t OpTy = OpInfo[UseOpIdx].OperandType;
  if (OpTy < AMDGPU::OPERAND_REG_INLINE_AC_FIRST ||
      OpTy > AMDGPU::OPERAND_REG_INLINE_AC_LAST)
    return false;

  if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy) &&
      TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) {
    UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm());
    return true;
  }

  if (!OpToFold.isReg())
    return false;

  Register UseReg = OpToFold.getReg();
  if (!Register::isVirtualRegister(UseReg))
    return false;

  if (llvm::find_if(FoldList, [UseMI](const FoldCandidate &FC) {
        return FC.UseMI == UseMI; }) != FoldList.end())
    return false;

  MachineRegisterInfo &MRI = UseMI->getParent()->getParent()->getRegInfo();
  const MachineInstr *Def = MRI.getUniqueVRegDef(UseReg);
  if (!Def || !Def->isRegSequence())
    return false;

  int64_t Imm;
  MachineOperand *Op;
  for (unsigned I = 1, E = Def->getNumExplicitOperands(); I < E; I += 2) {
    const MachineOperand &Sub = Def->getOperand(I);
    if (!Sub.isReg() || Sub.getSubReg())
      return false;
    MachineInstr *SubDef = MRI.getUniqueVRegDef(Sub.getReg());
    while (SubDef && !SubDef->isMoveImmediate() &&
           !SubDef->getOperand(1).isImm() && TII->isFoldableCopy(*SubDef))
      SubDef = MRI.getUniqueVRegDef(SubDef->getOperand(1).getReg());
    if (!SubDef || !SubDef->isMoveImmediate() || !SubDef->getOperand(1).isImm())
      return false;
    Op = &SubDef->getOperand(1);
    auto SubImm = Op->getImm();
    if (I == 1) {
      if (!TII->isInlineConstant(SubDef->getOperand(1), OpTy))
        return false;

      Imm = SubImm;
      continue;
    }
    if (Imm != SubImm)
      return false; // Can only fold splat constants
  }

  if (!TII->isOperandLegal(*UseMI, UseOpIdx, Op))
    return false;

  FoldList.push_back(FoldCandidate(UseMI, UseOpIdx, Op));
  return true;
}

void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  int UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands: a subregister use
    // tied to a full register def doesn't really make sense. e.g. don't fold:
    //
    // %1 = COPY %0:sub1
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %1<tied0>
    //
    // into
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    Register RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    MachineRegisterInfo::use_iterator Next;
    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; RSUse = Next) {
      Next = std::next(RSUse);

      MachineInstr *RSUseMI = RSUse->getParent();

      if (tryToFoldACImm(TII, UseMI->getOperand(0), RSUseMI,
                         RSUse.getOperandNo(), FoldList))
        continue;

      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  if (tryToFoldACImm(TII, OpToFold, UseMI, UseOpIdx, FoldList))
    return;

  if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
    // Sanity check that this is a stack access.
    // FIXME: Should probably use stack pseudos before frame lowering.
    MachineOperand *SOff = TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
    if (!SOff->isReg() || (SOff->getReg() != MFI->getScratchWaveOffsetReg() &&
                           SOff->getReg() != MFI->getStackPtrOffsetReg()))
      return;

    if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
        MFI->getScratchRSrcReg())
      return;

    // A frame index will resolve to a positive constant, so it should always
    // be safe to fold the addressing mode, even pre-GFX9.
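    // e.g. (illustrative; operand details elided):
    //   %0 = V_MOV_B32_e32 %stack.0, implicit $exec
    //   BUFFER_STORE_DWORD_OFFEN %val, %0, ...
    // becomes
    //   BUFFER_STORE_DWORD_OFFEN %val, %stack.0, ...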
    UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());
    SOff->setReg(MFI->getStackPtrOffsetReg());
    return;
  }

  bool FoldingImmLike =
      OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

  if (FoldingImmLike && UseMI->isCopy()) {
    Register DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC = Register::isVirtualRegister(DestReg)
                                            ? MRI->getRegClass(DestReg)
                                            : TRI->getPhysRegClass(DestReg);

    Register SrcReg = UseMI->getOperand(1).getReg();
    if (Register::isVirtualRegister(DestReg) &&
        Register::isVirtualRegister(SrcReg)) {
      const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
      if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
        MachineRegisterInfo::use_iterator NextUse;
        SmallVector<FoldCandidate, 4> CopyUses;
        for (MachineRegisterInfo::use_iterator
               Use = MRI->use_begin(DestReg), E = MRI->use_end();
             Use != E; Use = NextUse) {
          NextUse = std::next(Use);
          FoldCandidate FC = FoldCandidate(Use->getParent(),
                                           Use.getOperandNo(),
                                           &UseMI->getOperand(1));
          CopyUses.push_back(FC);
        }
        for (auto &F : CopyUses) {
          foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo,
                      FoldList, CopiesToReplace);
        }
      }
    }

    if (DestRC == &AMDGPU::AGPR_32RegClass &&
        TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
      UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
      UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
      CopiesToReplace.push_back(UseMI);
      return;
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    MachineInstr::mop_iterator ImpOpI = UseMI->implicit_operands().begin();
    MachineInstr::mop_iterator ImpOpE = UseMI->implicit_operands().end();
    while (ImpOpI != ImpOpE) {
      MachineInstr::mop_iterator Tmp = ImpOpI;
      ImpOpI++;
      UseMI->RemoveOperand(UseMI->getOperandNo(Tmp));
    }
    CopiesToReplace.push_back(UseMI);
  } else {
    if (UseMI->isCopy() && OpToFold.isReg() &&
        Register::isVirtualRegister(UseMI->getOperand(0).getReg()) &&
        TRI->isVectorRegister(*MRI, UseMI->getOperand(0).getReg()) &&
        TRI->isVectorRegister(*MRI, UseMI->getOperand(1).getReg()) &&
        !UseMI->getOperand(1).getSubReg()) {
      unsigned Size = TII->getOpSize(*UseMI, 1);
      UseMI->getOperand(1).setReg(OpToFold.getReg());
      UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
      UseMI->getOperand(1).setIsKill(false);
      CopiesToReplace.push_back(UseMI);
      OpToFold.setIsKill(false);
      if (Size != 4)
        return;
      if (TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
          TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
      else if (TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
               TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32));
      return;
    }

    unsigned UseOpc = UseMI->getOpcode();
    if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
        (UseOpc == AMDGPU::V_READLANE_B32 &&
         (int)UseOpIdx ==
             AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
      // %vgpr = V_MOV_B32 imm
      // %sgpr = V_READFIRSTLANE_B32 %vgpr
      // =>
      // %sgpr = S_MOV_B32 imm
      if (FoldingImmLike) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       *UseMI))
          return;

        UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));

        // FIXME: ChangeToImmediate should clear subreg
        UseMI->getOperand(1).setSubReg(0);
        if (OpToFold.isImm())
          UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
        else
          UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }

      if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       *UseMI))
          return;

        // %vgpr = COPY %sgpr0
        // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
        // =>
        // %sgpr1 = COPY %sgpr0
        UseMI->setDesc(TII->get(AMDGPU::COPY));
        UseMI->getOperand(1).setReg(OpToFold.getReg());
        UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
        UseMI->getOperand(1).setIsKill(false);
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }
    }

    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseOp.isImplicit() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImmLike) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
      TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    Register UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
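    // e.g. a shift amount of 35 behaves like 35 & 31 = 3.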
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister ||
        !Register::isVirtualRegister(Op.getReg()))
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  if (MI->getOpcode() == AMDGPU::V_LSHL_OR_B32) {
    if (Src0->isImm() && Src0->getImm() == 0) {
      // v_lshl_or_b32 0, X, Y -> copy Y
      // v_lshl_or_b32 0, X, K -> v_mov_b32 K
      bool UseCopy = TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->isReg();
      MI->RemoveOperand(Src1Idx);
      MI->RemoveOperand(Src0Idx);

      MI->setDesc(TII->get(UseCopy ?
                           AMDGPU::COPY : AMDGPU::V_MOV_B32_e32));
      return true;
    }
  }

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1  -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32 ||
      Opc == AMDGPU::V_CNDMASK_B32_e64 ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
    int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    if (Src1->isIdenticalTo(*Src0) &&
        (Src1ModIdx == -1 || !MI->getOperand(Src1ModIdx).getImm()) &&
        (Src0ModIdx == -1 || !MI->getOperand(Src0ModIdx).getImm())) {
      LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
      auto &NewDesc =
          TII->get(Src0->isReg() ?
                   (unsigned)AMDGPU::COPY : getMovOpc(false));
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      if (Src1ModIdx != -1)
        MI->RemoveOperand(Src1ModIdx);
      if (Src0ModIdx != -1)
        MI->RemoveOperand(Src0ModIdx);
      mutateCopyOp(*MI, NewDesc);
      LLVM_DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        FoldList.clear();
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //   s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //   v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else if (frameIndexMayFold(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList,
                    CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    SmallVector<MachineRegisterInfo::use_iterator, 4> UsesToProcess;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      UsesToProcess.push_back(Use);
    }
    for (auto U : UsesToProcess) {
      MachineInstr *UseMI = U->getParent();

      foldOperand(OpToFold, UseMI, U.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (updateOperand(Fold, *TII, *TRI, *ST)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                        << static_cast<int>(Fold.UseOpNo) << " of "
                        << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restore the instruction's original operand order if the fold failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

// Clamp patterns are canonically selected to v_max_* instructions, so only
// handle them.
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64:
  case AMDGPU::V_PK_MAX_F16: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;

    unsigned Src0Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
    unsigned Src1Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();

    // Having a 0 op_sel_hi would require swizzling the output in the source
    // instruction, which we can't do.
    unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ?
                         SISrcMods::OP_SEL_1 : 0u;
    if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

// FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());

  // The type of clamp must be compatible.
  if (TII->getClampMask(*Def) != TII->getClampMask(MI))
    return false;

  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
                    << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
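    // The fold this recognition enables (performed in tryFoldOMod) looks
    // like, in illustrative MIR with hypothetical register numbers:
    //   %1 = V_ADD_F32_e64 0, %a, 0, %b, 0, 0
    //   %2 = V_MUL_F32_e64 0, 2.0, 0, %1, 0, 0
    // -> the multiply is deleted, %1's def gets omod:MUL2, and uses of %2
    //    are replaced with %1.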
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

// FIXME: Does this need to check IEEE bit on function?
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // FIXME: Also need to check strictfp
  bool IsIEEEMode = MFI->getMode().IEEE;
  bool HasNSZ = MFI->hasNoSignedZerosFPMath();

  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineBasicBlock::iterator I, Next;
    for (I = MBB->begin(); I != MBB->end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        // TODO: Omod might be OK if there is NSZ only on the source
        // instruction, and not the omod multiply.
        if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
            !tryFoldOMod(MI))
          tryFoldClamp(MI);
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm =
          OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() && !Register::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %3 = COPY %vgpr0; VGPR_32:%3
      //    ...
      //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() && !Register::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return false;
}