//===-- SIFoldOperands.cpp - Fold operands ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  int ShrinkOpcode;
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false,
                int ShrinkOp = -1) :
    UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
    Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isCommuted() const {
    return Commuted;
  }

  bool needsShrink() const {
    return ShrinkOpcode != -1;
  }

  int getShrinkOpcode() const {
    return ShrinkOpcode;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   unsigned UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_FMAC_F32_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;

      unsigned Opc = IsFMA ?
        AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      const MCInstrDesc &MadDesc = TII->get(Opc);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
    return false;
  }
  default:
    return false;
  }
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

static bool updateOperand(FoldCandidate &Fold,
                          const SIInstrInfo &TII,
                          const TargetRegisterInfo &TRI,
                          const GCNSubtarget &ST) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
        AMDGPU::isInlinableLiteralV216(static_cast<uint16_t>(Fold.ImmToFold),
                                       ST.hasInv2PiInlineImm())) {
      // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
      // already set.
      unsigned Opcode = MI->getOpcode();
      int OpNo = MI->getOperandNo(&Old);
      int ModIdx = -1;
      if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
        ModIdx = AMDGPU::OpName::src0_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
        ModIdx = AMDGPU::OpName::src1_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
        ModIdx = AMDGPU::OpName::src2_modifiers;
      assert(ModIdx != -1);
      ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
      MachineOperand &Mod = MI->getOperand(ModIdx);
      unsigned Val = Mod.getImm();
      if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1))
        return false;
      // Only apply the following transformation if that operand requires
      // a packed immediate.
      switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
      case AMDGPU::OPERAND_REG_IMM_V2FP16:
      case AMDGPU::OPERAND_REG_IMM_V2INT16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
        // If upper part is all zero we do not need op_sel_hi.
        if (!isUInt<16>(Fold.ImmToFold)) {
          if (!(Fold.ImmToFold & 0xffff)) {
            Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
            Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
            Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
            return true;
          }
          Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
          Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
          return true;
        }
        break;
      default:
        break;
      }
    }
  }

  if ((Fold.isImm() || Fold.isFI()) && Fold.needsShrink()) {
    MachineBasicBlock *MBB = MI->getParent();
    auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI);
    if (Liveness != MachineBasicBlock::LQR_Dead)
      return false;

    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
    int Op32 = Fold.getShrinkOpcode();
    MachineOperand &Dst0 = MI->getOperand(0);
    MachineOperand &Dst1 = MI->getOperand(1);
    assert(Dst0.isDef() && Dst1.isDef());

    bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());

    const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
    unsigned NewReg0 = MRI.createVirtualRegister(Dst0RC);

    MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);

    if (HaveNonDbgCarryUse) {
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
        .addReg(AMDGPU::VCC, RegState::Kill);
    }

    // Keep the old instruction around to avoid breaking iterators, but
    // replace it with a dummy instruction to remove uses.
    //
    // FIXME: We should not invert how this pass looks at operands to avoid
    // this. Should track set of foldable movs instead of looking for uses
    // when looking at a use.
    Dst0.setReg(NewReg0);
    for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
      MI->RemoveOperand(I);
    MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));

    if (Fold.isCommuted())
      TII.commuteInstruction(*Inst32, false);
    return true;
  }

  assert(!Fold.needsShrink() && "not handled");

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
  Old.setIsUndef(New->isUndef());
  return true;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {

    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
         Opc == AMDGPU::V_FMAC_F32_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
      unsigned NewOpc = IsFMA ?
        AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
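      // Swap in the mad/fma descriptor and retry the fold; if it still fails,
      // the original opcode is restored below.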
      MI->setDesc(TII->get(NewOpc));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    unsigned CommuteOpNo = OpNo;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        CommuteOpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        CommuteOpNo = CommuteIdx0;
    }

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call to commuteInstruction() below. Such situations are
    // avoided here explicitly as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
      if ((Opc == AMDGPU::V_ADD_I32_e64 ||
           Opc == AMDGPU::V_SUB_I32_e64 ||
           Opc == AMDGPU::V_SUBREV_I32_e64) && // FIXME
          (OpToFold->isImm() || OpToFold->isFI())) {
        MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

        // Verify the other operand is a VGPR, otherwise we would violate the
        // constant bus restriction.
        unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1 : CommuteIdx0;
        MachineOperand &OtherOp = MI->getOperand(OtherIdx);
        if (!OtherOp.isReg() ||
            !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
          return false;

        assert(MI->getOperand(1).isDef());

        // Make sure to get the 32-bit version of the commuted opcode.
        unsigned MaybeCommutedOpc = MI->getOpcode();
        int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);

        FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true,
                                         Op32));
        return true;
      }

      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true));
    return true;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef() && !TII->isSDWA(MI);
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  unsigned UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands unless it is a full
    // copy, since a subregister use tied to a full register def doesn't really
    // make sense. e.g. don't fold:
    //
    // %1 = COPY %0:sub1
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %1<tied0>
    //
    // into
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  bool FoldingImm = OpToFold.isImm();

  if (FoldingImm && UseMI->isCopy()) {
    unsigned DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC
      = TargetRegisterInfo::isVirtualRegister(DestReg) ?
        MRI->getRegClass(DestReg) :
        TRI->getPhysRegClass(DestReg);

    unsigned SrcReg = UseMI->getOperand(1).getReg();
    if (TargetRegisterInfo::isVirtualRegister(DestReg) &&
        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
      if (TRI->isSGPRClass(SrcRC) && TRI->hasVGPRs(DestRC)) {
        MachineRegisterInfo::use_iterator NextUse;
        SmallVector<FoldCandidate, 4> CopyUses;
        for (MachineRegisterInfo::use_iterator
               Use = MRI->use_begin(DestReg), E = MRI->use_end();
             Use != E; Use = NextUse) {
          NextUse = std::next(Use);
          FoldCandidate FC = FoldCandidate(Use->getParent(),
                                           Use.getOperandNo(),
                                           &UseMI->getOperand(1));
          CopyUses.push_back(FC);
        }
        for (auto &F : CopyUses) {
          foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo,
                      FoldList, CopiesToReplace);
        }
      }
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.
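    // The immediate itself is substituted later by updateOperand(); here we
    // only rewrite the descriptor so that the immediate operand becomes legal.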

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    CopiesToReplace.push_back(UseMI);
  } else {
    if (UseMI->isCopy() && OpToFold.isReg() &&
        TargetRegisterInfo::isVirtualRegister(UseMI->getOperand(0).getReg()) &&
        TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
        TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()) &&
        !UseMI->getOperand(1).getSubReg()) {
      UseMI->getOperand(1).setReg(OpToFold.getReg());
      UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
      UseMI->getOperand(1).setIsKill(false);
      CopiesToReplace.push_back(UseMI);
      OpToFold.setIsKill(false);
      return;
    }

    unsigned UseOpc = UseMI->getOpcode();
    if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
        (UseOpc == AMDGPU::V_READLANE_B32 &&
         (int)UseOpIdx ==
           AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
      // %vgpr = V_MOV_B32 imm
      // %sgpr = V_READFIRSTLANE_B32 %vgpr
      // =>
      // %sgpr = S_MOV_B32 imm
      if (FoldingImm) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       UseMI))
          return;

        UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));
        UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }

      if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       UseMI))
          return;

        // %vgpr = COPY %sgpr0
        // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
        // =>
        // %sgpr1 = COPY %sgpr0
        UseMI->setDesc(TII->get(AMDGPU::COPY));
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }
    }

    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseOp.isImplicit() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImm) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
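  // e.g. a use of %x.sub0, where %x is defined by a 64-bit move of a
  // constant, can fold just the low 32 bits of that constant.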
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister ||
        !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  if (MI->getOpcode() == AMDGPU::V_LSHL_OR_B32) {
    if (Src0->isImm() && Src0->getImm() == 0) {
      // v_lshl_or_b32 0, X, Y -> copy Y
      // v_lshl_or_b32 0, X, K -> v_mov_b32 K
      bool UseCopy = TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->isReg();
      MI->RemoveOperand(Src1Idx);
      MI->RemoveOperand(Src0Idx);

      MI->setDesc(TII->get(UseCopy ? AMDGPU::COPY : AMDGPU::V_MOV_B32_e32));
      return true;
    }
  }

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
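    // (getImmOrMaterializedImm may have returned the operand of the defining
    // move, so index into this MI rather than writing through *Src0.)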
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32 ||
      Opc == AMDGPU::V_CNDMASK_B32_e64 ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
    int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    if (Src1->isIdenticalTo(*Src0) &&
        (Src1ModIdx == -1 || !MI->getOperand(Src1ModIdx).getImm()) &&
        (Src0ModIdx == -1 || !MI->getOperand(Src0ModIdx).getImm())) {
      LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
      auto &NewDesc =
        TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      if (Src1ModIdx != -1)
        MI->RemoveOperand(Src1ModIdx);
      if (Src0ModIdx != -1)
        MI->RemoveOperand(Src0ModIdx);
      mutateCopyOp(*MI, NewDesc);
      LLVM_DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        FoldList.clear();
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    SmallVector<MachineRegisterInfo::use_iterator, 4> UsesToProcess;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      UsesToProcess.push_back(Use);
    }
    for (auto U : UsesToProcess) {
      MachineInstr *UseMI = U->getParent();

      foldOperand(OpToFold, UseMI, U.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (updateOperand(Fold, *TII, *TRI, *ST)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                        << static_cast<int>(Fold.UseOpNo) << " of "
                        << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restore the instruction's original operand order if the fold failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

// Clamp patterns are canonically selected to v_max_* instructions, so only
// handle them.
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64:
  case AMDGPU::V_PK_MAX_F16: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;

    unsigned Src0Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
    unsigned Src1Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();

    // Having a 0 op_sel_hi would require swizzling the output in the source
    // instruction, which we can't do.
    unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
                                                      : 0u;
    if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// A clamp pattern uses the register twice within the same instruction, so
// count distinct non-debug use instructions rather than use operands.
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

// FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());

  // The type of clamp must be compatible.
  if (TII->getClampMask(*Def) != TII->getClampMask(MI))
    return false;

  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
                    << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
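  // Move the clamp onto the defining instruction and let its result replace
  // the now-redundant v_max.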
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

// FIXME: Does this need to check IEEE bit on function?
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // FIXME: Also need to check strictfp
  bool IsIEEEMode = MFI->getMode().IEEE;
  bool HasNSZ = MFI->hasNoSignedZerosFPMath();

  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineBasicBlock::iterator I, Next;
    for (I = MBB->begin(); I != MBB->end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        // TODO: Omod might be OK if there is NSZ only on the source
        // instruction, and not the omod multiply.
        if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
            !tryFoldOMod(MI))
          tryFoldClamp(MI);
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 here:
      //
      // %3 = COPY %vgpr0; VGPR_32:%3
      // ...
      // %vgpr0 = V_MOV_B32_e32 1, implicit %exec
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return false;
}