//===-- SIFoldOperands.cpp - Fold operands ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  int ShrinkOpcode;
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false,
                int ShrinkOp = -1) :
    UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
    Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg() || FoldOp->isGlobal());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isGlobal() const { return Kind == MachineOperand::MO_GlobalAddress; }

  bool isCommuted() const {
    return Commuted;
  }

  bool needsShrink() const {
    return ShrinkOpcode != -1;
  }

  int getShrinkOpcode() const {
    return ShrinkOpcode;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;
  const SIMachineFunctionInfo *MFI;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   int UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_FMAC_F32_e64:
  case AMDGPU::V_FMAC_F16_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F16_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F32_e64;

      unsigned Opc = IsFMA ?
        (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :
        (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      const MCInstrDesc &MadDesc = TII->get(Opc);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
    return false;
  }
  default:
    return false;
  }
}

// TODO: Add heuristic that the frame index might not fit in the addressing
// mode immediate offset to avoid materializing in loops.
static bool frameIndexMayFold(const SIInstrInfo *TII,
                              const MachineInstr &UseMI,
                              int OpNo,
                              const MachineOperand &OpToFold) {
  return OpToFold.isFI() &&
    (TII->isMUBUF(UseMI) || TII->isFLATScratch(UseMI)) &&
    OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(), AMDGPU::OpName::vaddr);
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

static bool updateOperand(FoldCandidate &Fold,
                          const SIInstrInfo &TII,
                          const TargetRegisterInfo &TRI,
                          const GCNSubtarget &ST) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
        !(MI->getDesc().TSFlags & SIInstrFlags::IsMAI) &&
        AMDGPU::isInlinableLiteralV216(static_cast<uint16_t>(Fold.ImmToFold),
                                       ST.hasInv2PiInlineImm())) {
      // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
      // already set.
      unsigned Opcode = MI->getOpcode();
      int OpNo = MI->getOperandNo(&Old);
      int ModIdx = -1;
      if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
        ModIdx = AMDGPU::OpName::src0_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
        ModIdx = AMDGPU::OpName::src1_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
        ModIdx = AMDGPU::OpName::src2_modifiers;
      assert(ModIdx != -1);
      ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
      MachineOperand &Mod = MI->getOperand(ModIdx);
      unsigned Val = Mod.getImm();
      if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1))
        return false;
      // Only apply the following transformation if that operand requires
      // a packed immediate.
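      // For example (illustrative values, not from the original source): a
      // v2f16 literal such as 0x3c000000 (lo = 0.0, hi = 1.0) has a zero low
      // half, so the switch below can fold it as the 16-bit immediate 0x3c00
      // with op_sel/op_sel_hi adjusted to route each lane to the right half.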
      switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
      case AMDGPU::OPERAND_REG_IMM_V2FP16:
      case AMDGPU::OPERAND_REG_IMM_V2INT16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
        // If upper part is all zero we do not need op_sel_hi.
        if (!isUInt<16>(Fold.ImmToFold)) {
          if (!(Fold.ImmToFold & 0xffff)) {
            Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
            Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
            Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
            return true;
          }
          Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
          Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
          return true;
        }
        break;
      default:
        break;
      }
    }
  }

  if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
    MachineBasicBlock *MBB = MI->getParent();
    auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI, 16);
    if (Liveness != MachineBasicBlock::LQR_Dead) {
      LLVM_DEBUG(dbgs() << "Not shrinking " << MI << " due to vcc liveness\n");
      return false;
    }

    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
    int Op32 = Fold.getShrinkOpcode();
    MachineOperand &Dst0 = MI->getOperand(0);
    MachineOperand &Dst1 = MI->getOperand(1);
    assert(Dst0.isDef() && Dst1.isDef());

    bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());

    const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
    Register NewReg0 = MRI.createVirtualRegister(Dst0RC);

    MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);

    if (HaveNonDbgCarryUse) {
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
        .addReg(AMDGPU::VCC, RegState::Kill);
    }

    // Keep the old instruction around to avoid breaking iterators, but
    // replace it with a dummy instruction to remove uses.
    //
    // FIXME: We should not invert how this pass looks at operands to avoid
    // this. Should track set of foldable movs instead of looking for uses
    // when looking at a use.
    Dst0.setReg(NewReg0);
    for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
      MI->RemoveOperand(I);
    MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));

    if (Fold.isCommuted())
      TII.commuteInstruction(*Inst32, false);
    return true;
  }

  assert(!Fold.needsShrink() && "not handled");

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isGlobal()) {
    Old.ChangeToGA(Fold.OpToFold->getGlobal(), Fold.OpToFold->getOffset(),
                   Fold.OpToFold->getTargetFlags());
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
  Old.setIsUndef(New->isUndef());
  return true;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

static void appendFoldCandidate(SmallVectorImpl<FoldCandidate> &FoldList,
                                MachineInstr *MI, unsigned OpNo,
                                MachineOperand *FoldOp, bool Commuted = false,
                                int ShrinkOp = -1) {
  // Skip additional folding on the same operand.
  for (FoldCandidate &Fold : FoldList)
    if (Fold.UseMI == MI && Fold.UseOpNo == OpNo)
      return;
  LLVM_DEBUG(dbgs() << "Append " << (Commuted ? "commuted" : "normal")
                    << " operand " << OpNo << "\n  " << *MI << '\n');
  FoldList.push_back(FoldCandidate(MI, OpNo, FoldOp, Commuted, ShrinkOp));
}

static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
         Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F16_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F32_e64;
      unsigned NewOpc = IsFMA ?
        (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :
        (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(NewOpc));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    unsigned CommuteOpNo = OpNo;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        CommuteOpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        CommuteOpNo = CommuteIdx0;
    }

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call of commuteInstruction() below. Such situations are
    // avoided here explicitly as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
      if ((Opc == AMDGPU::V_ADD_I32_e64 ||
           Opc == AMDGPU::V_SUB_I32_e64 ||
           Opc == AMDGPU::V_SUBREV_I32_e64) && // FIXME
          (OpToFold->isImm() || OpToFold->isFI() || OpToFold->isGlobal())) {
        MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

        // Verify the other operand is a VGPR, otherwise we would violate the
        // constant bus restriction.
        unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1
                                                       : CommuteIdx0;
        MachineOperand &OtherOp = MI->getOperand(OtherIdx);
        if (!OtherOp.isReg() ||
            !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
          return false;

        assert(MI->getOperand(1).isDef());

        // Make sure to get the 32-bit version of the commuted opcode.
        unsigned MaybeCommutedOpc = MI->getOpcode();
        int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);

        appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true, Op32);
        return true;
      }

      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true);
    return true;
  }

  // Check the case where we might introduce a second constant operand to a
  // scalar instruction
  if (TII->isSALU(MI->getOpcode())) {
    const MCInstrDesc &InstDesc = MI->getDesc();
    const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
    const SIRegisterInfo &SRI = TII->getRegisterInfo();

    // Fine if the operand can be encoded as an inline constant
    if (OpToFold->isImm()) {
      if (!SRI.opCanUseInlineConstant(OpInfo.OperandType) ||
          !TII->isInlineConstant(*OpToFold, OpInfo)) {
        // Otherwise check for another constant
        for (unsigned i = 0, e = InstDesc.getNumOperands(); i != e; ++i) {
          auto &Op = MI->getOperand(i);
          if (OpNo != i &&
              TII->isLiteralConstantLike(Op, OpInfo)) {
            return false;
          }
        }
      }
    }
  }

  appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef() && !TII->isSDWA(MI);
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

// Find a def of the UseReg, check if it is a reg_sequence and find
// initializers for each subreg, tracking each back to a foldable inline
// immediate if possible. Returns true on success.
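// A typical pattern this recognizes looks like (sketch, register classes
// elided):
//   %lo = S_MOV_B32 0
//   %hi = S_MOV_B32 0
//   %vec = REG_SEQUENCE %lo, %subreg.sub0, %hi, %subreg.sub1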
static bool getRegSeqInit(
    SmallVectorImpl<std::pair<MachineOperand*, unsigned>> &Defs,
    Register UseReg, uint8_t OpTy,
    const SIInstrInfo *TII, const MachineRegisterInfo &MRI) {
  MachineInstr *Def = MRI.getUniqueVRegDef(UseReg);
  if (!Def || !Def->isRegSequence())
    return false;

  for (unsigned I = 1, E = Def->getNumExplicitOperands(); I < E; I += 2) {
    MachineOperand *Sub = &Def->getOperand(I);
    assert(Sub->isReg());

    for (MachineInstr *SubDef = MRI.getUniqueVRegDef(Sub->getReg());
         SubDef && Sub->isReg() && !Sub->getSubReg() &&
         TII->isFoldableCopy(*SubDef);
         SubDef = MRI.getUniqueVRegDef(Sub->getReg())) {
      MachineOperand *Op = &SubDef->getOperand(1);
      if (Op->isImm()) {
        if (TII->isInlineConstant(*Op, OpTy))
          Sub = Op;
        break;
      }
      if (!Op->isReg())
        break;
      Sub = Op;
    }

    Defs.push_back(std::make_pair(Sub, Def->getOperand(I + 1).getImm()));
  }

  return true;
}

static bool tryToFoldACImm(const SIInstrInfo *TII,
                           const MachineOperand &OpToFold,
                           MachineInstr *UseMI,
                           unsigned UseOpIdx,
                           SmallVectorImpl<FoldCandidate> &FoldList) {
  const MCInstrDesc &Desc = UseMI->getDesc();
  const MCOperandInfo *OpInfo = Desc.OpInfo;
  if (!OpInfo || UseOpIdx >= Desc.getNumOperands())
    return false;

  uint8_t OpTy = OpInfo[UseOpIdx].OperandType;
  if (OpTy < AMDGPU::OPERAND_REG_INLINE_AC_FIRST ||
      OpTy > AMDGPU::OPERAND_REG_INLINE_AC_LAST)
    return false;

  if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy) &&
      TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) {
    UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm());
    return true;
  }

  if (!OpToFold.isReg())
    return false;

  Register UseReg = OpToFold.getReg();
  if (!Register::isVirtualRegister(UseReg))
    return false;

  if (llvm::find_if(FoldList, [UseMI](const FoldCandidate &FC) {
        return FC.UseMI == UseMI; }) != FoldList.end())
    return false;

  MachineRegisterInfo &MRI = UseMI->getParent()->getParent()->getRegInfo();
  SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
  if (!getRegSeqInit(Defs, UseReg, OpTy, TII, MRI))
    return false;

  int32_t Imm;
  for (unsigned I = 0, E = Defs.size(); I != E; ++I) {
    const MachineOperand *Op = Defs[I].first;
    if (!Op->isImm())
      return false;

    auto SubImm = Op->getImm();
    if (!I) {
      Imm = SubImm;
      if (!TII->isInlineConstant(*Op, OpTy) ||
          !TII->isOperandLegal(*UseMI, UseOpIdx, Op))
        return false;

      continue;
    }
    if (Imm != SubImm)
      return false; // Can only fold splat constants
  }

  appendFoldCandidate(FoldList, UseMI, UseOpIdx, Defs[0].first);
  return true;
}

void SIFoldOperands::foldOperand(
    MachineOperand &OpToFold,
    MachineInstr *UseMI,
    int UseOpIdx,
    SmallVectorImpl<FoldCandidate> &FoldList,
    SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
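  // e.g. (sketch):
  //   %imm = V_MOV_B32_e32 K
  //   %vec = REG_SEQUENCE %imm, %subreg.sub0, ...
  //   ... use of %vec.sub0 ...
  // K is folded into each qualifying use of %vec.sub0 rather than into the
  // REG_SEQUENCE itself.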
  if (UseMI->isRegSequence()) {
    Register RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    MachineRegisterInfo::use_iterator Next;
    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; RSUse = Next) {
      Next = std::next(RSUse);

      MachineInstr *RSUseMI = RSUse->getParent();

      if (tryToFoldACImm(TII, UseMI->getOperand(0), RSUseMI,
                         RSUse.getOperandNo(), FoldList))
        continue;

      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  if (tryToFoldACImm(TII, OpToFold, UseMI, UseOpIdx, FoldList))
    return;

  if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
    // Sanity check that this is a stack access.
    // FIXME: Should probably use stack pseudos before frame lowering.

    if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
        MFI->getScratchRSrcReg())
      return;

    // Ensure this is either relative to the current frame or the current wave.
    MachineOperand &SOff =
        *TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
    if ((!SOff.isReg() || SOff.getReg() != MFI->getStackPtrOffsetReg()) &&
        (!SOff.isImm() || SOff.getImm() != 0))
      return;

    // A frame index will resolve to a positive constant, so it should always
    // be safe to fold the addressing mode, even pre-GFX9.
    UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());

    // If this is relative to the current wave, update it to be relative to
    // the current frame.
    if (SOff.isImm())
      SOff.ChangeToRegister(MFI->getStackPtrOffsetReg(), false);
    return;
  }

  bool FoldingImmLike =
      OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

  if (FoldingImmLike && UseMI->isCopy()) {
    Register DestReg = UseMI->getOperand(0).getReg();

    // Don't fold into a copy to a physical register. Doing so would interfere
    // with the register coalescer's logic which would avoid redundant
    // initializations.
    if (DestReg.isPhysical())
      return;

    const TargetRegisterClass *DestRC = MRI->getRegClass(DestReg);

    Register SrcReg = UseMI->getOperand(1).getReg();
    if (SrcReg.isVirtual()) { // XXX - This can be an assert?
      const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
      if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
        MachineRegisterInfo::use_iterator NextUse;
        SmallVector<FoldCandidate, 4> CopyUses;
        for (MachineRegisterInfo::use_iterator
               Use = MRI->use_begin(DestReg), E = MRI->use_end();
             Use != E; Use = NextUse) {
          NextUse = std::next(Use);
          FoldCandidate FC = FoldCandidate(Use->getParent(),
                                           Use.getOperandNo(),
                                           &UseMI->getOperand(1));
          CopyUses.push_back(FC);
        }
        for (auto &F : CopyUses) {
          foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo,
                      FoldList, CopiesToReplace);
        }
      }
    }

    if (DestRC == &AMDGPU::AGPR_32RegClass &&
        TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
      UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
      UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
      CopiesToReplace.push_back(UseMI);
      return;
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.
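    // (COPY has no immediate form, so rewrite it to the mov opcode that
    // getMovOpcode() selects for the destination register class, e.g.
    // V_MOV_B32_e32 or S_MOV_B32.)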

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    MachineInstr::mop_iterator ImpOpI = UseMI->implicit_operands().begin();
    MachineInstr::mop_iterator ImpOpE = UseMI->implicit_operands().end();
    while (ImpOpI != ImpOpE) {
      MachineInstr::mop_iterator Tmp = ImpOpI;
      ImpOpI++;
      UseMI->RemoveOperand(UseMI->getOperandNo(Tmp));
    }
    CopiesToReplace.push_back(UseMI);
  } else {
    if (UseMI->isCopy() && OpToFold.isReg() &&
        UseMI->getOperand(0).getReg().isVirtual() &&
        !UseMI->getOperand(1).getSubReg()) {
      LLVM_DEBUG(dbgs() << "Folding " << OpToFold
                        << "\n into " << *UseMI << '\n');
      unsigned Size = TII->getOpSize(*UseMI, 1);
      Register UseReg = OpToFold.getReg();
      UseMI->getOperand(1).setReg(UseReg);
      UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
      UseMI->getOperand(1).setIsKill(false);
      CopiesToReplace.push_back(UseMI);
      OpToFold.setIsKill(false);

      // It is very tricky to store a value into an AGPR: v_accvgpr_write_b32
      // can only accept a VGPR or inline immediate. Recreate a reg_sequence
      // with its initializers right here, so we will rematerialize immediates
      // and avoid copies via different reg classes.
      SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
      if (Size > 4 && TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
          getRegSeqInit(Defs, UseReg, AMDGPU::OPERAND_REG_INLINE_C_INT32, TII,
                        *MRI)) {
        const DebugLoc &DL = UseMI->getDebugLoc();
        MachineBasicBlock &MBB = *UseMI->getParent();

        UseMI->setDesc(TII->get(AMDGPU::REG_SEQUENCE));
        for (unsigned I = UseMI->getNumOperands() - 1; I > 0; --I)
          UseMI->RemoveOperand(I);

        MachineInstrBuilder B(*MBB.getParent(), UseMI);
        DenseMap<TargetInstrInfo::RegSubRegPair, Register> VGPRCopies;
        SmallSetVector<TargetInstrInfo::RegSubRegPair, 32> SeenAGPRs;
        for (unsigned I = 0; I < Size / 4; ++I) {
          MachineOperand *Def = Defs[I].first;
          TargetInstrInfo::RegSubRegPair CopyToVGPR;
          if (Def->isImm() &&
              TII->isInlineConstant(*Def, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
            int64_t Imm = Def->getImm();

            auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
            BuildMI(MBB, UseMI, DL,
                    TII->get(AMDGPU::V_ACCVGPR_WRITE_B32), Tmp).addImm(Imm);
            B.addReg(Tmp);
          } else if (Def->isReg() && TRI->isAGPR(*MRI, Def->getReg())) {
            auto Src = getRegSubRegPair(*Def);
            Def->setIsKill(false);
            if (!SeenAGPRs.insert(Src)) {
              // We cannot build a reg_sequence out of the same registers, they
              // must be copied. Better do it here before copyPhysReg() creates
              // several reads to do the AGPR->VGPR->AGPR copy.
              CopyToVGPR = Src;
            } else {
              B.addReg(Src.Reg, Def->isUndef() ? RegState::Undef : 0,
                       Src.SubReg);
            }
          } else {
            assert(Def->isReg());
            Def->setIsKill(false);
            auto Src = getRegSubRegPair(*Def);

            // Direct copy from SGPR to AGPR is not possible. To avoid creation
            // of exploded copies SGPR->VGPR->AGPR in the copyPhysReg() later,
            // create a copy here and track if we already have such a copy.
            if (TRI->isSGPRReg(*MRI, Src.Reg)) {
              CopyToVGPR = Src;
            } else {
              auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
              BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Tmp).add(*Def);
              B.addReg(Tmp);
            }
          }

          if (CopyToVGPR.Reg) {
            Register Vgpr;
            if (VGPRCopies.count(CopyToVGPR)) {
              Vgpr = VGPRCopies[CopyToVGPR];
            } else {
              Vgpr = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
              BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Vgpr).add(*Def);
              VGPRCopies[CopyToVGPR] = Vgpr;
            }
            auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
            BuildMI(MBB, UseMI, DL,
                    TII->get(AMDGPU::V_ACCVGPR_WRITE_B32), Tmp).addReg(Vgpr);
            B.addReg(Tmp);
          }

          B.addImm(Defs[I].second);
        }
        LLVM_DEBUG(dbgs() << "Folded " << *UseMI << '\n');
        return;
      }
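
      // Otherwise this is a plain 32-bit copy; if it crosses the AGPR/VGPR
      // boundary, switch it to the dedicated accvgpr read/write opcode.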
      if (Size != 4)
        return;
      if (TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
          TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
      else if (TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
               TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32));
      return;
    }

    unsigned UseOpc = UseMI->getOpcode();
    if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
        (UseOpc == AMDGPU::V_READLANE_B32 &&
         (int)UseOpIdx ==
             AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
      // %vgpr = V_MOV_B32 imm
      // %sgpr = V_READFIRSTLANE_B32 %vgpr
      // =>
      // %sgpr = S_MOV_B32 imm
      if (FoldingImmLike) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       *UseMI))
          return;

        UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));

        // FIXME: ChangeToImmediate should clear subreg
        UseMI->getOperand(1).setSubReg(0);
        if (OpToFold.isImm())
          UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
        else
          UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }

      if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       *UseMI))
          return;

        // %vgpr = COPY %sgpr0
        // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
        // =>
        // %sgpr1 = COPY %sgpr0
        UseMI->setDesc(TII->get(AMDGPU::COPY));
        UseMI->getOperand(1).setReg(OpToFold.getReg());
        UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
        UseMI->getOperand(1).setIsKill(false);
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }
    }

    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseOp.isImplicit() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImmLike) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }
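
  // e.g. (sketch): given %imm64 = S_MOV_B64 K, a use of %imm64.sub0 can fold
  // the low 32 bits of K, and a use of %imm64.sub1 the high 32 bits.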

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
      TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    Register UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}
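
// Look through a virtual register to the immediate materialized by its
// defining move instruction, if any; otherwise return the operand itself.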
static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister ||
        !Register::isVirtualRegister(Op.getReg()))
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  if (MI->getOpcode() == AMDGPU::V_LSHL_OR_B32) {
    if (Src0->isImm() && Src0->getImm() == 0) {
      // v_lshl_or_b32 0, X, Y -> copy Y
      // v_lshl_or_b32 0, X, K -> v_mov_b32 K
      bool UseCopy = TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->isReg();
      MI->RemoveOperand(Src1Idx);
      MI->RemoveOperand(Src0Idx);

      MI->setDesc(TII->get(UseCopy ? AMDGPU::COPY : AMDGPU::V_MOV_B32_e32));
      return true;
    }
  }

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
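    // (getImmOrMaterializedImm may have returned an operand of the defining
    // move rather than of MI, so index into MI instead of writing through
    // Src0.)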
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one
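// e.g. a select with identical operands and no modifiers reduces to a copy
// (sketch):
//   %y = V_CNDMASK_B32_e64 0, %x, 0, %x, %cc  ->  %y = COPY %x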
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32 ||
      Opc == AMDGPU::V_CNDMASK_B32_e64 ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
    int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    if (Src1->isIdenticalTo(*Src0) &&
        (Src1ModIdx == -1 || !MI->getOperand(Src1ModIdx).getImm()) &&
        (Src0ModIdx == -1 || !MI->getOperand(Src0ModIdx).getImm())) {
      LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
      auto &NewDesc =
          TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      if (Src1ModIdx != -1)
        MI->RemoveOperand(Src1ModIdx);
      if (Src0ModIdx != -1)
        MI->RemoveOperand(Src0ModIdx);
      mutateCopyOp(*MI, NewDesc);
      LLVM_DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        FoldList.clear();
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else if (frameIndexMayFold(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList,
                    CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
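    // Collect the uses first; foldOperand() may erase or rewrite users, which
    // would invalidate a live use_iterator.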
    SmallVector<MachineRegisterInfo::use_iterator, 4> UsesToProcess;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      UsesToProcess.push_back(Use);
    }
    for (auto U : UsesToProcess) {
      MachineInstr *UseMI = U->getParent();

      foldOperand(OpToFold, UseMI, U.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    assert(!Fold.isReg() || Fold.OpToFold);
    if (Fold.isReg() && Register::isVirtualRegister(Fold.OpToFold->getReg())) {
      Register Reg = Fold.OpToFold->getReg();
      MachineInstr *DefMI = Fold.OpToFold->getParent();
      if (DefMI->readsRegister(AMDGPU::EXEC, TRI) &&
          execMayBeModifiedBeforeUse(*MRI, Reg, *DefMI, *Fold.UseMI))
        continue;
    }
    if (updateOperand(Fold, *TII, *TRI, *ST)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                        << static_cast<int>(Fold.UseOpNo) << " of "
                        << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restore the instruction's original operand order if the fold failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

// Clamp patterns are canonically selected to v_max_* instructions, so only
// handle them.
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64:
  case AMDGPU::V_PK_MAX_F16: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;

    unsigned Src0Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
    unsigned Src1Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();

    // Having a 0 op_sel_hi would require swizzling the output in the source
    // instruction, which we can't do.
    unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
                                                      : 0u;
    if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
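// Count using instructions rather than operands so that pattern still counts
// as a single use.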
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

// FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());

  // The type of clamp must be compatible.
  if (TII->getClampMask(*Def) != TII->getClampMask(MI))
    return false;

  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
                    << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}
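
// The omod folding below rewrites a multiply by 0.5/2.0/4.0 (or an add x, x
// canonicalized from fmul x, 2) into the output modifier of the defining
// instruction, e.g. (sketch, operands abbreviated):
//   %x = V_ADD_F32_e64 %a, %b
//   %y = V_MUL_F32_e64 2.0, %x
// -->
//   %x = V_ADD_F32_e64 %a, %b omod:MUL2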

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && MFI->getMode().FP32OutputDenormals) ||
        (Op == AMDGPU::V_MUL_F16_e64 && MFI->getMode().FP64FP16OutputDenormals))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && MFI->getMode().FP32OutputDenormals) ||
        (Op == AMDGPU::V_ADD_F16_e64 && MFI->getMode().FP64FP16OutputDenormals))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

// FIXME: Does this need to check IEEE bit on function?
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // FIXME: Also need to check strictfp
  bool IsIEEEMode = MFI->getMode().IEEE;
  bool HasNSZ = MFI->hasNoSignedZerosFPMath();

  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineBasicBlock::iterator I, Next;

    MachineOperand *CurrentKnownM0Val = nullptr;
    for (I = MBB->begin(); I != MBB->end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        // Saw an unknown clobber of m0, so we no longer know what it is.
        if (CurrentKnownM0Val && MI.modifiesRegister(AMDGPU::M0, TRI))
          CurrentKnownM0Val = nullptr;

        // TODO: Omod might be OK if there is NSZ only on the source
        // instruction, and not the omod multiply.
        if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
            !tryFoldOMod(MI))
          tryFoldClamp(MI);

        continue;
      }

      // Specially track simple redefs of m0 to the same value in a block, so
      // we can erase the later ones.
      if (MI.getOperand(0).getReg() == AMDGPU::M0) {
        MachineOperand &NewM0Val = MI.getOperand(1);
        if (CurrentKnownM0Val && CurrentKnownM0Val->isIdenticalTo(NewM0Val)) {
          MI.eraseFromParent();
          continue;
        }

        // We aren't tracking other physical registers
        CurrentKnownM0Val = (NewM0Val.isReg() && NewM0Val.getReg().isPhysical()) ?
          nullptr : &NewM0Val;
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm =
          OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() && !Register::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %3 = COPY %vgpr0; VGPR_32:%3
      //    ...
      //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() && !Register::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return true;
}