//===-- SIFoldOperands.cpp - Fold operands --------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
/// Fold immediates, frame indexes, and copied registers directly into their
/// uses, and simplify the resulting instructions where possible.
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  int ShrinkOpcode;
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false,
                int ShrinkOp = -1) :
    UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
    Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isCommuted() const {
    return Commuted;
  }

  bool needsShrink() const {
    return ShrinkOpcode != -1;
  }

  int getShrinkOpcode() const {
    return ShrinkOpcode;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   unsigned UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.
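// A representative example of the basic transformation this pass performs
// (illustrative MIR, not from a real test):
//
//   %0:sgpr_32 = S_MOV_B32 42
//   %1:vgpr_32 = V_OR_B32_e32 %0, %2
// -->
//   %1:vgpr_32 = V_OR_B32_e32 42, %2
//
// If the mov then has no remaining uses, later passes can delete it.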

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_FMAC_F32_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;

      unsigned MadOpc = IsFMA ?
        AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      const MCInstrDesc &MadDesc = TII->get(MadOpc);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
    return false;
  }
  default:
    return false;
  }
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

static bool updateOperand(FoldCandidate &Fold,
                          const SIInstrInfo &TII,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked) {
      // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
      // already set.
      unsigned Opcode = MI->getOpcode();
      int OpNo = MI->getOperandNo(&Old);
      int ModIdx = -1;
      if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
        ModIdx = AMDGPU::OpName::src0_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
        ModIdx = AMDGPU::OpName::src1_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
        ModIdx = AMDGPU::OpName::src2_modifiers;
      assert(ModIdx != -1);
      ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
      MachineOperand &Mod = MI->getOperand(ModIdx);
      unsigned Val = Mod.getImm();
      if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1))
        return false;
      // Only apply the following transformation if that operand requires
      // a packed immediate.
      switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
      case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
        // If upper part is all zero we do not need op_sel_hi.
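        // A worked example (illustrative values): folding 0x00010000, whose
        // low half is zero, selects the high half instead: op_sel is set,
        // op_sel_hi is cleared, and the operand becomes the literal 0x0001.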
        if (!isUInt<16>(Fold.ImmToFold)) {
          if (!(Fold.ImmToFold & 0xffff)) {
            Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
            Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
            Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
            return true;
          }
          Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
        }
        break;
      default:
        break;
      }
    }

    if (Fold.needsShrink()) {
      MachineBasicBlock *MBB = MI->getParent();
      auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI);
      if (Liveness != MachineBasicBlock::LQR_Dead)
        return false;

      MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
      int Op32 = Fold.getShrinkOpcode();
      MachineOperand &Dst0 = MI->getOperand(0);
      MachineOperand &Dst1 = MI->getOperand(1);
      assert(Dst0.isDef() && Dst1.isDef());

      bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());

      const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
      unsigned NewReg0 = MRI.createVirtualRegister(Dst0RC);
      const TargetRegisterClass *Dst1RC = MRI.getRegClass(Dst1.getReg());
      unsigned NewReg1 = MRI.createVirtualRegister(Dst1RC);

      MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);

      if (HaveNonDbgCarryUse) {
        BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY),
                Dst1.getReg())
          .addReg(AMDGPU::VCC, RegState::Kill);
      }

      // Keep the old instruction around to avoid breaking iterators, but
      // replace the outputs with dummy registers.
      Dst0.setReg(NewReg0);
      Dst1.setReg(NewReg1);

      if (Fold.isCommuted())
        TII.commuteInstruction(*Inst32, false);
      return true;
    }

    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  assert(!Fold.needsShrink() && "not handled");

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);

    Old.setIsUndef(New->isUndef());
    return true;
  }

  // FIXME: Handle physical registers.

  return false;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {

    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
         Opc == AMDGPU::V_FMAC_F32_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
      unsigned NewOpc = IsFMA ?
        AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
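      // (V_MAC/V_FMAC tie src2 to the destination, so src2 cannot take an
      // arbitrary operand directly; the untied V_MAD/V_FMA form may.)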
      MI->setDesc(TII->get(NewOpc));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    unsigned CommuteOpNo = OpNo;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        CommuteOpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        CommuteOpNo = CommuteIdx0;
    }

    // One of the operands might be an immediate, and OpNo may refer to it
    // after the call to commuteInstruction() below. Such situations are
    // avoided here explicitly as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
      if ((Opc == AMDGPU::V_ADD_I32_e64 ||
           Opc == AMDGPU::V_SUB_I32_e64 ||
           Opc == AMDGPU::V_SUBREV_I32_e64) && // FIXME
          OpToFold->isImm()) {
        MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

        // Verify the other operand is a VGPR, otherwise we would violate the
        // constant bus restriction.
        unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1
                                                       : CommuteIdx0;
        MachineOperand &OtherOp = MI->getOperand(OtherIdx);
        if (!OtherOp.isReg() ||
            !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
          return false;

        assert(MI->getOperand(1).isDef());

        int Op32 = AMDGPU::getVOPe32(Opc);
        FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true,
                                         Op32));
        return true;
      }

      // Commuting did not help; restore the original operand order.
      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true));
    return true;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
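// (An undef use can appear, for example, as a placeholder operand in indirect
// register-indexing idioms, where the register number matters but the value
// does not.)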
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef() && !TII->isSDWA(MI);
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  unsigned UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands; only fold a full
    // copy, since a subregister use tied to a full register def doesn't really
    // make sense. e.g. don't fold:
    //
    // %1 = COPY %0:sub1
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %1<tied0>
    //
    // into
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  bool FoldingImm = OpToFold.isImm();

  if (FoldingImm && UseMI->isCopy()) {
    unsigned DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC
      = TargetRegisterInfo::isVirtualRegister(DestReg) ?
        MRI->getRegClass(DestReg) :
        TRI->getPhysRegClass(DestReg);

    unsigned SrcReg = UseMI->getOperand(1).getReg();
    if (TargetRegisterInfo::isVirtualRegister(DestReg) &&
        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
      if (TRI->isSGPRClass(SrcRC) && TRI->hasVGPRs(DestRC)) {
        MachineRegisterInfo::use_iterator NextUse;
        SmallVector<FoldCandidate, 4> CopyUses;
        for (MachineRegisterInfo::use_iterator
               Use = MRI->use_begin(DestReg), E = MRI->use_end();
             Use != E; Use = NextUse) {
          NextUse = std::next(Use);
          FoldCandidate FC = FoldCandidate(Use->getParent(),
                                           Use.getOperandNo(),
                                           &UseMI->getOperand(1));
          CopyUses.push_back(FC);
        }
        for (auto &F : CopyUses) {
          foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo,
                      FoldList, CopiesToReplace);
        }
      }
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.
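    // e.g. (illustrative): when folding the immediate 42,
    //   %1:vgpr_32 = COPY %0:sgpr_32
    // is rewritten to
    //   %1:vgpr_32 = V_MOV_B32_e32 42, implicit $exec
    // once the fold below succeeds.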
    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    CopiesToReplace.push_back(UseMI);
  } else {
    if (UseMI->isCopy() && OpToFold.isReg() &&
        TargetRegisterInfo::isVirtualRegister(UseMI->getOperand(0).getReg()) &&
        TargetRegisterInfo::isVirtualRegister(UseMI->getOperand(1).getReg()) &&
        TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
        TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()) &&
        !UseMI->getOperand(1).getSubReg()) {
      UseMI->getOperand(1).setReg(OpToFold.getReg());
      UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
      UseMI->getOperand(1).setIsKill(false);
      CopiesToReplace.push_back(UseMI);
      OpToFold.setIsKill(false);
      return;
    }

    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseOp.isImplicit() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImm) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
        MRI->getRegClass(UseReg) :
        TRI->getPhysRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
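    // e.g. a shift amount of 35 behaves as 35 & 31 = 3.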
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister ||
        !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  if (MI->getOpcode() == AMDGPU::V_LSHL_OR_B32) {
    if (Src0->isImm() && Src0->getImm() == 0) {
      // v_lshl_or_b32 0, X, Y -> copy Y
      // v_lshl_or_b32 0, X, K -> v_mov_b32 K
      bool UseCopy = TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->isReg();
      MI->RemoveOperand(Src1Idx);
      MI->RemoveOperand(Src0Idx);

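      // After the two removals above, src2 is left as operand 1, which is
      // exactly the source slot a COPY or V_MOV_B32 expects.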
      MI->setDesc(TII->get(UseCopy ? AMDGPU::COPY : AMDGPU::V_MOV_B32_e32));
      return true;
    }
  }

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand; src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one.
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32 ||
      Opc == AMDGPU::V_CNDMASK_B32_e64 ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
    int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    if (Src1->isIdenticalTo(*Src0) &&
        (Src1ModIdx == -1 || !MI->getOperand(Src1ModIdx).getImm()) &&
        (Src0ModIdx == -1 || !MI->getOperand(Src0ModIdx).getImm())) {
      LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
      auto &NewDesc =
        TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
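      // Identical sources make the select independent of the condition;
      // e.g. (illustrative) v_cndmask_b32 %a, %a, %cc simplifies to a copy
      // of %a (or a v_mov_b32 if the common source is an immediate).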
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      if (Src1ModIdx != -1)
        MI->RemoveOperand(Src1ModIdx);
      if (Src0ModIdx != -1)
        MI->RemoveOperand(Src0ModIdx);
      mutateCopyOp(*MI, NewDesc);
      LLVM_DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        FoldList.clear();
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //   s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //   v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
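    // Collect the uses up front; foldOperand may add or remove uses, which
    // would otherwise invalidate the iteration.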
    SmallVector<MachineRegisterInfo::use_iterator, 4> UsesToProcess;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      UsesToProcess.push_back(Use);
    }
    for (auto U : UsesToProcess) {
      MachineInstr *UseMI = U->getParent();

      foldOperand(OpToFold, UseMI, U.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (updateOperand(Fold, *TII, *TRI)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                        << static_cast<int>(Fold.UseOpNo) << " of "
                        << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restoring instruction's original operand order if fold has failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

// Clamp patterns are canonically selected to v_max_* instructions, so only
// handle them.
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64:
  case AMDGPU::V_PK_MAX_F16: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;

    unsigned Src0Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
    unsigned Src1Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();

    // Having a 0 op_sel_hi would require swizzling the output in the source
    // instruction, which we can't do.
    unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
                                                      : 0u;
    if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

// FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
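// An illustrative example of the clamp fold: given
//   %a = V_ADD_F32_e64 %x, %y        ; clamp clear
//   %b = V_MAX_F32_e64 %a, %a, clamp ; the canonical clamp pattern
// the clamp bit is set on the add, and all uses of %b are rewritten to %a.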
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());

  // The type of clamp must be compatible.
  if (TII->getClampMask(*Def) != TII->getClampMask(MI))
    return false;

  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
                    << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
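    // e.g. (illustrative) a single-use %y = V_ADD_F32_e64 %x, %x can fold
    // into the instruction defining %x as an omod MUL2.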
    if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

// FIXME: Does this need to check IEEE bit on function?
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // FIXME: Also need to check strictfp
  bool IsIEEEMode = MFI->getMode().IEEE;
  bool HasNSZ = MFI->hasNoSignedZerosFPMath();

  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineBasicBlock::iterator I, Next;
    for (I = MBB->begin(); I != MBB->end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        // TODO: Omod might be OK if there is NSZ only on the source
        // instruction, and not the omod multiply.
        if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
            !tryFoldOMod(MI))
          tryFoldClamp(MI);
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //   %3 = COPY %vgpr0; VGPR_32:%3
      //   ...
      //   %vgpr0 = V_MOV_B32_e32 1, implicit %exec
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return false;
}