//===-- SIFoldOperands.cpp - Fold operands --------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineOperand.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  int ShrinkOpcode;
  unsigned UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false,
                int ShrinkOp = -1) :
    UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
    Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg() || FoldOp->isGlobal());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isGlobal() const { return Kind == MachineOperand::MO_GlobalAddress; }

  bool needsShrink() const { return ShrinkOpcode != -1; }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;
  const SIMachineFunctionInfo *MFI;

  bool frameIndexMayFold(const MachineInstr &UseMI, int OpNo,
                         const MachineOperand &OpToFold) const;

  bool updateOperand(FoldCandidate &Fold) const;

  bool canUseImmWithOpSel(FoldCandidate &Fold) const;

  bool tryFoldImmWithOpSel(FoldCandidate &Fold) const;

  bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                        MachineInstr *MI, unsigned OpNo,
                        MachineOperand *OpToFold) const;
  bool isUseSafeToFold(const MachineInstr &MI,
                       const MachineOperand &UseMO) const;
  bool
  getRegSeqInit(SmallVectorImpl<std::pair<MachineOperand *, unsigned>> &Defs,
                Register UseReg, uint8_t OpTy) const;
  bool tryToFoldACImm(const MachineOperand &OpToFold, MachineInstr *UseMI,
                      unsigned UseOpIdx,
                      SmallVectorImpl<FoldCandidate> &FoldList) const;
  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   int UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  MachineOperand *getImmOrMaterializedImm(MachineOperand &Op) const;
  bool tryConstantFoldOp(MachineInstr *MI) const;
  bool tryFoldCndMask(MachineInstr &MI) const;
  bool tryFoldZeroHighBits(MachineInstr &MI) const;
  bool foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;
  bool tryFoldFoldableCopy(MachineInstr &MI,
                           MachineOperand *&CurrentKnownM0Val) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);
  bool tryFoldRegSequence(MachineInstr &MI);
  bool tryFoldPhiAGPR(MachineInstr &MI);
  bool tryFoldLoad(MachineInstr &MI);

  bool tryOptimizeAGPRPhis(MachineBasicBlock &MBB);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

static const TargetRegisterClass *getRegOpRC(const MachineRegisterInfo &MRI,
                                             const TargetRegisterInfo &TRI,
                                             const MachineOperand &MO) {
  const TargetRegisterClass *RC = MRI.getRegClass(MO.getReg());
  if (const TargetRegisterClass *SubRC =
          TRI.getSubRegisterClass(RC, MO.getSubReg()))
    RC = SubRC;
  return RC;
}

// Map multiply-accumulate opcode to corresponding multiply-add opcode if any.
static unsigned macToMad(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
    return AMDGPU::V_MAD_F32_e64;
  case AMDGPU::V_MAC_F16_e64:
    return AMDGPU::V_MAD_F16_e64;
  case AMDGPU::V_FMAC_F32_e64:
    return AMDGPU::V_FMA_F32_e64;
  case AMDGPU::V_FMAC_F16_e64:
    return AMDGPU::V_FMA_F16_gfx9_e64;
  case AMDGPU::V_FMAC_F16_t16_e64:
    return AMDGPU::V_FMA_F16_gfx9_e64;
  case AMDGPU::V_FMAC_LEGACY_F32_e64:
    return AMDGPU::V_FMA_LEGACY_F32_e64;
  case AMDGPU::V_FMAC_F64_e64:
    return AMDGPU::V_FMA_F64_e64;
  }
  return AMDGPU::INSTRUCTION_LIST_END;
}

// TODO: Add heuristic that the frame index might not fit in the addressing mode
// immediate offset to avoid materializing in loops.
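// Returns true if it is profitable and legal to fold a frame index in
// OpToFold into operand OpNo of UseMI: the vaddr of a MUBUF scratch access,
// or the saddr (or the vaddr of an saddr-less form) of a scratch FLAT access.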
bool SIFoldOperands::frameIndexMayFold(const MachineInstr &UseMI, int OpNo,
                                       const MachineOperand &OpToFold) const {
  if (!OpToFold.isFI())
    return false;

  const unsigned Opc = UseMI.getOpcode();
  if (TII->isMUBUF(UseMI))
    return OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr);
  if (!TII->isFLATScratch(UseMI))
    return false;

  int SIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr);
  if (OpNo == SIdx)
    return true;

  int VIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr);
  return OpNo == VIdx && SIdx == -1;
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

bool SIFoldOperands::canUseImmWithOpSel(FoldCandidate &Fold) const {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  const uint64_t TSFlags = MI->getDesc().TSFlags;

  assert(Old.isReg() && Fold.isImm());

  if (!(TSFlags & SIInstrFlags::IsPacked) || (TSFlags & SIInstrFlags::IsMAI) ||
      (TSFlags & SIInstrFlags::IsWMMA) || (TSFlags & SIInstrFlags::IsSWMMAC) ||
      (ST->hasDOTOpSelHazard() && (TSFlags & SIInstrFlags::IsDOT)))
    return false;

  unsigned Opcode = MI->getOpcode();
  int OpNo = MI->getOperandNo(&Old);
  uint8_t OpType = TII->get(Opcode).operands()[OpNo].OperandType;
  switch (OpType) {
  default:
    return false;
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
  case AMDGPU::OPERAND_REG_IMM_V2BF16:
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
    break;
  }

  return true;
}

bool SIFoldOperands::tryFoldImmWithOpSel(FoldCandidate &Fold) const {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  unsigned Opcode = MI->getOpcode();
  int OpNo = MI->getOperandNo(&Old);
  uint8_t OpType = TII->get(Opcode).operands()[OpNo].OperandType;

  // If the literal can be inlined as-is, apply it and short-circuit the
  // tests below. The main motivation for this is to avoid unintuitive
  // uses of opsel.
  if (AMDGPU::isInlinableLiteralV216(Fold.ImmToFold, OpType)) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  // Refer to op_sel/op_sel_hi and check if we can change the immediate and
  // op_sel in a way that allows an inline constant.
  int ModIdx = -1;
  unsigned SrcIdx = ~0;
  if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0)) {
    ModIdx = AMDGPU::OpName::src0_modifiers;
    SrcIdx = 0;
  } else if (OpNo ==
             AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1)) {
    ModIdx = AMDGPU::OpName::src1_modifiers;
    SrcIdx = 1;
  } else if (OpNo ==
             AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2)) {
    ModIdx = AMDGPU::OpName::src2_modifiers;
    SrcIdx = 2;
  }
  assert(ModIdx != -1);
  ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
  MachineOperand &Mod = MI->getOperand(ModIdx);
  unsigned ModVal = Mod.getImm();

  uint16_t ImmLo = static_cast<uint16_t>(
      Fold.ImmToFold >> (ModVal & SISrcMods::OP_SEL_0 ? 16 : 0));
  uint16_t ImmHi = static_cast<uint16_t>(
      Fold.ImmToFold >> (ModVal & SISrcMods::OP_SEL_1 ? 16 : 0));
  uint32_t Imm = (static_cast<uint32_t>(ImmHi) << 16) | ImmLo;
  unsigned NewModVal = ModVal & ~(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1);

  // Helper function that attempts to inline the given value with a newly
  // chosen opsel pattern.
  auto tryFoldToInline = [&](uint32_t Imm) -> bool {
    if (AMDGPU::isInlinableLiteralV216(Imm, OpType)) {
      Mod.setImm(NewModVal | SISrcMods::OP_SEL_1);
      Old.ChangeToImmediate(Imm);
      return true;
    }

    // Try to shuffle the halves around and leverage opsel to get an inline
    // constant.
    uint16_t Lo = static_cast<uint16_t>(Imm);
    uint16_t Hi = static_cast<uint16_t>(Imm >> 16);
    if (Lo == Hi) {
      if (AMDGPU::isInlinableLiteralV216(Lo, OpType)) {
        Mod.setImm(NewModVal);
        Old.ChangeToImmediate(Lo);
        return true;
      }

      if (static_cast<int16_t>(Lo) < 0) {
        int32_t SExt = static_cast<int16_t>(Lo);
        if (AMDGPU::isInlinableLiteralV216(SExt, OpType)) {
          Mod.setImm(NewModVal);
          Old.ChangeToImmediate(SExt);
          return true;
        }
      }

      // This check is only useful for integer instructions
      if (OpType == AMDGPU::OPERAND_REG_IMM_V2INT16 ||
          OpType == AMDGPU::OPERAND_REG_INLINE_AC_V2INT16) {
        if (AMDGPU::isInlinableLiteralV216(Lo << 16, OpType)) {
          Mod.setImm(NewModVal | SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1);
          Old.ChangeToImmediate(static_cast<uint32_t>(Lo) << 16);
          return true;
        }
      }
    } else {
      uint32_t Swapped = (static_cast<uint32_t>(Lo) << 16) | Hi;
      if (AMDGPU::isInlinableLiteralV216(Swapped, OpType)) {
        Mod.setImm(NewModVal | SISrcMods::OP_SEL_0);
        Old.ChangeToImmediate(Swapped);
        return true;
      }
    }

    return false;
  };

  if (tryFoldToInline(Imm))
    return true;

  // Replace integer addition by subtraction and vice versa if it allows
  // folding the immediate to an inline constant.
  //
  // We should only ever get here for SrcIdx == 1 due to canonicalization
  // earlier in the pipeline, but we double-check here to be safe / fully
  // general.
  bool IsUAdd = Opcode == AMDGPU::V_PK_ADD_U16;
  bool IsUSub = Opcode == AMDGPU::V_PK_SUB_U16;
  if (SrcIdx == 1 && (IsUAdd || IsUSub)) {
    unsigned ClampIdx =
        AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::clamp);
    bool Clamp = MI->getOperand(ClampIdx).getImm() != 0;

    if (!Clamp) {
      uint16_t NegLo = -static_cast<uint16_t>(Imm);
      uint16_t NegHi = -static_cast<uint16_t>(Imm >> 16);
      uint32_t NegImm = (static_cast<uint32_t>(NegHi) << 16) | NegLo;

      if (tryFoldToInline(NegImm)) {
        unsigned NegOpcode =
            IsUAdd ? AMDGPU::V_PK_SUB_U16 : AMDGPU::V_PK_ADD_U16;
        MI->setDesc(TII->get(NegOpcode));
        return true;
      }
    }
  }

  return false;
}

bool SIFoldOperands::updateOperand(FoldCandidate &Fold) const {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm() && canUseImmWithOpSel(Fold)) {
    if (tryFoldImmWithOpSel(Fold))
      return true;

    // We can't represent the candidate as an inline constant. Try as a literal
    // with the original opsel, checking constant bus limitations.
    MachineOperand New = MachineOperand::CreateImm(Fold.ImmToFold);
    int OpNo = MI->getOperandNo(&Old);
    if (!TII->isOperandLegal(*MI, OpNo, &New))
      return false;
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
    MachineBasicBlock *MBB = MI->getParent();
    auto Liveness = MBB->computeRegisterLiveness(TRI, AMDGPU::VCC, MI, 16);
    if (Liveness != MachineBasicBlock::LQR_Dead) {
      LLVM_DEBUG(dbgs() << "Not shrinking " << MI << " due to vcc liveness\n");
      return false;
    }

    int Op32 = Fold.ShrinkOpcode;
    MachineOperand &Dst0 = MI->getOperand(0);
    MachineOperand &Dst1 = MI->getOperand(1);
    assert(Dst0.isDef() && Dst1.isDef());

    bool HaveNonDbgCarryUse = !MRI->use_nodbg_empty(Dst1.getReg());

    const TargetRegisterClass *Dst0RC = MRI->getRegClass(Dst0.getReg());
    Register NewReg0 = MRI->createVirtualRegister(Dst0RC);

    MachineInstr *Inst32 = TII->buildShrunkInst(*MI, Op32);

    if (HaveNonDbgCarryUse) {
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(AMDGPU::COPY),
              Dst1.getReg())
          .addReg(AMDGPU::VCC, RegState::Kill);
    }

    // Keep the old instruction around to avoid breaking iterators, but
    // replace it with a dummy instruction to remove uses.
    //
    // FIXME: We should not invert how this pass looks at operands to avoid
    // this. Should track set of foldable movs instead of looking for uses
    // when looking at a use.
    Dst0.setReg(NewReg0);
    for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
      MI->removeOperand(I);
    MI->setDesc(TII->get(AMDGPU::IMPLICIT_DEF));

    if (Fold.Commuted)
      TII->commuteInstruction(*Inst32, false);
    return true;
  }

  assert(!Fold.needsShrink() && "not handled");

  if (Fold.isImm()) {
    if (Old.isTied()) {
      int NewMFMAOpc = AMDGPU::getMFMAEarlyClobberOp(MI->getOpcode());
      if (NewMFMAOpc == -1)
        return false;
      MI->setDesc(TII->get(NewMFMAOpc));
      MI->untieRegOperand(0);
    }
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isGlobal()) {
    Old.ChangeToGA(Fold.OpToFold->getGlobal(), Fold.OpToFold->getOffset(),
                   Fold.OpToFold->getTargetFlags());
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  Old.substVirtReg(New->getReg(), New->getSubReg(), *TRI);
  Old.setIsUndef(New->isUndef());
  return true;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  return any_of(FoldList, [&](const auto &C) { return C.UseMI == MI; });
}

static void appendFoldCandidate(SmallVectorImpl<FoldCandidate> &FoldList,
                                MachineInstr *MI, unsigned OpNo,
                                MachineOperand *FoldOp, bool Commuted = false,
                                int ShrinkOp = -1) {
  // Skip additional folding on the same operand.
  for (FoldCandidate &Fold : FoldList)
    if (Fold.UseMI == MI && Fold.UseOpNo == OpNo)
      return;
"commuted" : "normal") 463fe6060f1SDimitry Andric << " operand " << OpNo << "\n " << *MI); 464fe6060f1SDimitry Andric FoldList.emplace_back(MI, OpNo, FoldOp, Commuted, ShrinkOp); 465480093f4SDimitry Andric } 466480093f4SDimitry Andric 467bdd1243dSDimitry Andric bool SIFoldOperands::tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList, 4680b57cec5SDimitry Andric MachineInstr *MI, unsigned OpNo, 469bdd1243dSDimitry Andric MachineOperand *OpToFold) const { 4705f757f3fSDimitry Andric const unsigned Opc = MI->getOpcode(); 4715f757f3fSDimitry Andric 4725f757f3fSDimitry Andric auto tryToFoldAsFMAAKorMK = [&]() { 4735f757f3fSDimitry Andric if (!OpToFold->isImm()) 4745f757f3fSDimitry Andric return false; 4755f757f3fSDimitry Andric 4765f757f3fSDimitry Andric const bool TryAK = OpNo == 3; 4775f757f3fSDimitry Andric const unsigned NewOpc = TryAK ? AMDGPU::S_FMAAK_F32 : AMDGPU::S_FMAMK_F32; 4785f757f3fSDimitry Andric MI->setDesc(TII->get(NewOpc)); 4795f757f3fSDimitry Andric 4805f757f3fSDimitry Andric // We have to fold into operand which would be Imm not into OpNo. 4815f757f3fSDimitry Andric bool FoldAsFMAAKorMK = 4825f757f3fSDimitry Andric tryAddToFoldList(FoldList, MI, TryAK ? 3 : 2, OpToFold); 4835f757f3fSDimitry Andric if (FoldAsFMAAKorMK) { 4845f757f3fSDimitry Andric // Untie Src2 of fmac. 4855f757f3fSDimitry Andric MI->untieRegOperand(3); 4865f757f3fSDimitry Andric // For fmamk swap operands 1 and 2 if OpToFold was meant for operand 1. 4875f757f3fSDimitry Andric if (OpNo == 1) { 4885f757f3fSDimitry Andric MachineOperand &Op1 = MI->getOperand(1); 4895f757f3fSDimitry Andric MachineOperand &Op2 = MI->getOperand(2); 4905f757f3fSDimitry Andric Register OldReg = Op1.getReg(); 4915f757f3fSDimitry Andric // Operand 2 might be an inlinable constant 4925f757f3fSDimitry Andric if (Op2.isImm()) { 4935f757f3fSDimitry Andric Op1.ChangeToImmediate(Op2.getImm()); 4945f757f3fSDimitry Andric Op2.ChangeToRegister(OldReg, false); 4955f757f3fSDimitry Andric } else { 4965f757f3fSDimitry Andric Op1.setReg(Op2.getReg()); 4975f757f3fSDimitry Andric Op2.setReg(OldReg); 4985f757f3fSDimitry Andric } 4995f757f3fSDimitry Andric } 5005f757f3fSDimitry Andric return true; 5015f757f3fSDimitry Andric } 5025f757f3fSDimitry Andric MI->setDesc(TII->get(Opc)); 5035f757f3fSDimitry Andric return false; 5045f757f3fSDimitry Andric }; 5055f757f3fSDimitry Andric 5065f757f3fSDimitry Andric bool IsLegal = TII->isOperandLegal(*MI, OpNo, OpToFold); 5075f757f3fSDimitry Andric if (!IsLegal && OpToFold->isImm()) { 5085f757f3fSDimitry Andric FoldCandidate Fold(MI, OpNo, OpToFold); 5095f757f3fSDimitry Andric IsLegal = canUseImmWithOpSel(Fold); 5105f757f3fSDimitry Andric } 5115f757f3fSDimitry Andric 5125f757f3fSDimitry Andric if (!IsLegal) { 5130b57cec5SDimitry Andric // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2 514e8d8bef9SDimitry Andric unsigned NewOpc = macToMad(Opc); 515e8d8bef9SDimitry Andric if (NewOpc != AMDGPU::INSTRUCTION_LIST_END) { 5160b57cec5SDimitry Andric // Check if changing this to a v_mad_{f16, f32} instruction will allow us 5170b57cec5SDimitry Andric // to fold the operand. 
      MI->setDesc(TII->get(NewOpc));
      bool AddOpSel = !AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel) &&
                      AMDGPU::hasNamedOperand(NewOpc, AMDGPU::OpName::op_sel);
      if (AddOpSel)
        MI->addOperand(MachineOperand::CreateImm(0));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      if (AddOpSel)
        MI->removeOperand(MI->getNumExplicitOperands() - 1);
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_fmac_f32 if we are trying to fold into Src2.
    // By transforming into fmaak we can untie Src2 and make folding legal.
    if (Opc == AMDGPU::S_FMAC_F32 && OpNo == 3) {
      if (tryToFoldAsFMAAKorMK())
        return true;
    }

    // Special case for s_setreg_b32
    if (OpToFold->isImm()) {
      unsigned ImmOpc = 0;
      if (Opc == AMDGPU::S_SETREG_B32)
        ImmOpc = AMDGPU::S_SETREG_IMM32_B32;
      else if (Opc == AMDGPU::S_SETREG_B32_mode)
        ImmOpc = AMDGPU::S_SETREG_IMM32_B32_mode;
      if (ImmOpc) {
        MI->setDesc(TII->get(ImmOpc));
        appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
        return true;
      }
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteOpNo = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, OpNo, CommuteOpNo);
    if (!CanCommute)
      return false;

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call of commuteInstruction() below. Such situations are
    // avoided here explicitly as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (!MI->getOperand(OpNo).isReg() || !MI->getOperand(CommuteOpNo).isReg())
      return false;

    if (!TII->commuteInstruction(*MI, false, OpNo, CommuteOpNo))
      return false;

    int Op32 = -1;
    if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
      if ((Opc != AMDGPU::V_ADD_CO_U32_e64 && Opc != AMDGPU::V_SUB_CO_U32_e64 &&
           Opc != AMDGPU::V_SUBREV_CO_U32_e64) || // FIXME
          (!OpToFold->isImm() && !OpToFold->isFI() && !OpToFold->isGlobal())) {
        TII->commuteInstruction(*MI, false, OpNo, CommuteOpNo);
        return false;
      }

      // Verify the other operand is a VGPR, otherwise we would violate the
      // constant bus restriction.
      MachineOperand &OtherOp = MI->getOperand(OpNo);
      if (!OtherOp.isReg() ||
          !TII->getRegisterInfo().isVGPR(*MRI, OtherOp.getReg()))
        return false;

      assert(MI->getOperand(1).isDef());

      // Make sure to get the 32-bit version of the commuted opcode.
      unsigned MaybeCommutedOpc = MI->getOpcode();
      Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);
    }

    appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true, Op32);
    return true;
  }

  // An inlinable constant might already have been folded into the Imm operand
  // of fmaak or fmamk while we are trying to fold a non-inlinable constant.
  if ((Opc == AMDGPU::S_FMAAK_F32 || Opc == AMDGPU::S_FMAMK_F32) &&
      !OpToFold->isReg() && !TII->isInlineConstant(*OpToFold)) {
    unsigned ImmIdx = Opc == AMDGPU::S_FMAAK_F32 ? 3 : 2;
    MachineOperand &OpImm = MI->getOperand(ImmIdx);
    if (!OpImm.isReg() &&
        TII->isInlineConstant(*MI, MI->getOperand(OpNo), OpImm))
      return tryToFoldAsFMAAKorMK();
  }

  // Special case for s_fmac_f32 if we are trying to fold into Src0 or Src1.
  // By changing into fmamk we can untie Src2.
  // If folding for Src0 happens first and Src0 is identical to Src1, avoid
  // transforming into fmamk: that requires commuting, which would make the
  // later fold into Src1 fail because the wrong OpNo would be used.
  if (Opc == AMDGPU::S_FMAC_F32 &&
      (OpNo != 1 || !MI->getOperand(1).isIdenticalTo(MI->getOperand(2)))) {
    if (tryToFoldAsFMAAKorMK())
      return true;
  }

  // Check the case where we might introduce a second constant operand to a
  // scalar instruction
  if (TII->isSALU(MI->getOpcode())) {
    const MCInstrDesc &InstDesc = MI->getDesc();
    const MCOperandInfo &OpInfo = InstDesc.operands()[OpNo];

    // Fine if the operand can be encoded as an inline constant
    if (!OpToFold->isReg() && !TII->isInlineConstant(*OpToFold, OpInfo)) {
      // Otherwise check for another constant
      for (unsigned i = 0, e = InstDesc.getNumOperands(); i != e; ++i) {
        auto &Op = MI->getOperand(i);
        if (OpNo != i && !Op.isReg() &&
            !TII->isInlineConstant(Op, InstDesc.operands()[i]))
          return false;
      }
    }
  }

  appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
  return true;
}

bool SIFoldOperands::isUseSafeToFold(const MachineInstr &MI,
                                     const MachineOperand &UseMO) const {
  // Operands of SDWA instructions must be registers.
  return !TII->isSDWA(MI);
}

// Find a def of the UseReg, check if it is a reg_sequence and find initializers
// for each subreg, tracking it to a foldable inline immediate if possible.
// Returns true on success.
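// For example, given
//   %1:sgpr_32 = S_MOV_B32 0
//   %2:sreg_64 = REG_SEQUENCE %1, %subreg.sub0, %1, %subreg.sub1
// both subregister initializers are traced back to the inline immediate 0.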
bool SIFoldOperands::getRegSeqInit(
    SmallVectorImpl<std::pair<MachineOperand *, unsigned>> &Defs,
    Register UseReg, uint8_t OpTy) const {
  MachineInstr *Def = MRI->getVRegDef(UseReg);
  if (!Def || !Def->isRegSequence())
    return false;

  for (unsigned I = 1, E = Def->getNumExplicitOperands(); I < E; I += 2) {
    MachineOperand *Sub = &Def->getOperand(I);
    assert(Sub->isReg());

    for (MachineInstr *SubDef = MRI->getVRegDef(Sub->getReg());
         SubDef && Sub->isReg() && Sub->getReg().isVirtual() &&
         !Sub->getSubReg() && TII->isFoldableCopy(*SubDef);
         SubDef = MRI->getVRegDef(Sub->getReg())) {
      MachineOperand *Op = &SubDef->getOperand(1);
      if (Op->isImm()) {
        if (TII->isInlineConstant(*Op, OpTy))
          Sub = Op;
        break;
      }
      if (!Op->isReg() || Op->getReg().isPhysical())
        break;
      Sub = Op;
    }

    Defs.emplace_back(Sub, Def->getOperand(I + 1).getImm());
  }

  return true;
}

bool SIFoldOperands::tryToFoldACImm(
    const MachineOperand &OpToFold, MachineInstr *UseMI, unsigned UseOpIdx,
    SmallVectorImpl<FoldCandidate> &FoldList) const {
  const MCInstrDesc &Desc = UseMI->getDesc();
  if (UseOpIdx >= Desc.getNumOperands())
    return false;

  if (!AMDGPU::isSISrcInlinableOperand(Desc, UseOpIdx))
    return false;

  uint8_t OpTy = Desc.operands()[UseOpIdx].OperandType;
  if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy) &&
      TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) {
    UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm());
    return true;
  }

  if (!OpToFold.isReg())
    return false;

  Register UseReg = OpToFold.getReg();
  if (!UseReg.isVirtual())
    return false;

  if (isUseMIInFoldList(FoldList, UseMI))
    return false;

  // Maybe it is just a COPY of an immediate itself.
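  // For example, with
  //   %0:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
  // a use of %0 in an operand that accepts an inline constant is rewritten
  // to use the immediate 0 directly.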
  MachineInstr *Def = MRI->getVRegDef(UseReg);
  MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
  if (!UseOp.getSubReg() && Def && TII->isFoldableCopy(*Def)) {
    MachineOperand &DefOp = Def->getOperand(1);
    if (DefOp.isImm() && TII->isInlineConstant(DefOp, OpTy) &&
        TII->isOperandLegal(*UseMI, UseOpIdx, &DefOp)) {
      UseMI->getOperand(UseOpIdx).ChangeToImmediate(DefOp.getImm());
      return true;
    }
  }

  SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
  if (!getRegSeqInit(Defs, UseReg, OpTy))
    return false;

  int32_t Imm;
  for (unsigned I = 0, E = Defs.size(); I != E; ++I) {
    const MachineOperand *Op = Defs[I].first;
    if (!Op->isImm())
      return false;

    auto SubImm = Op->getImm();
    if (!I) {
      Imm = SubImm;
      if (!TII->isInlineConstant(*Op, OpTy) ||
          !TII->isOperandLegal(*UseMI, UseOpIdx, Op))
        return false;

      continue;
    }
    if (Imm != SubImm)
      return false; // Can only fold splat constants
  }

  appendFoldCandidate(FoldList, UseMI, UseOpIdx, Defs[0].first);
  return true;
}

void SIFoldOperands::foldOperand(
    MachineOperand &OpToFold,
    MachineInstr *UseMI,
    int UseOpIdx,
    SmallVectorImpl<FoldCandidate> &FoldList,
    SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand *UseOp = &UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(*UseMI, *UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp->isReg() && OpToFold.isReg() &&
      (UseOp->isImplicit() || UseOp->getSubReg() != AMDGPU::NoSubRegister))
    return;

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
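  // For example, a literal feeding
  //   %2:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1
  // is instead folded into each user of %2 that reads the matching
  // subregister.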
  if (UseMI->isRegSequence()) {
    Register RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    // Grab the use operands first
    SmallVector<MachineOperand *, 4> UsesToProcess;
    for (auto &Use : MRI->use_nodbg_operands(RegSeqDstReg))
      UsesToProcess.push_back(&Use);
    for (auto *RSUse : UsesToProcess) {
      MachineInstr *RSUseMI = RSUse->getParent();

      if (tryToFoldACImm(UseMI->getOperand(0), RSUseMI,
                         RSUseMI->getOperandNo(RSUse), FoldList))
        continue;

      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUseMI->getOperandNo(RSUse), FoldList,
                  CopiesToReplace);
    }
    return;
  }

  if (tryToFoldACImm(OpToFold, UseMI, UseOpIdx, FoldList))
    return;

  if (frameIndexMayFold(*UseMI, UseOpIdx, OpToFold)) {
    // Verify that this is a stack access.
    // FIXME: Should probably use stack pseudos before frame lowering.

    if (TII->isMUBUF(*UseMI)) {
      if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
          MFI->getScratchRSrcReg())
        return;

      // Ensure this is either relative to the current frame or the current
      // wave.
      MachineOperand &SOff =
          *TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
      if (!SOff.isImm() || SOff.getImm() != 0)
        return;
    }

    // A frame index will resolve to a positive constant, so it should always be
    // safe to fold the addressing mode, even pre-GFX9.
    UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());

    const unsigned Opc = UseMI->getOpcode();
    if (TII->isFLATScratch(*UseMI) &&
        AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::vaddr) &&
        !AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::saddr)) {
      unsigned NewOpc = AMDGPU::getFlatScratchInstSSfromSV(Opc);
      UseMI->setDesc(TII->get(NewOpc));
    }

    return;
  }

  bool FoldingImmLike =
      OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

  if (FoldingImmLike && UseMI->isCopy()) {
    Register DestReg = UseMI->getOperand(0).getReg();
    Register SrcReg = UseMI->getOperand(1).getReg();
    assert(SrcReg.isVirtual());

    const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);

    // Don't fold into a copy to a physical register with the same class. Doing
    // so would interfere with the register coalescer's logic which would avoid
    // redundant initializations.
    if (DestReg.isPhysical() && SrcRC->contains(DestReg))
      return;

    const TargetRegisterClass *DestRC = TRI->getRegClassForReg(*MRI, DestReg);
    if (!DestReg.isPhysical()) {
      if (DestRC == &AMDGPU::AGPR_32RegClass &&
          TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64));
        UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
        CopiesToReplace.push_back(UseMI);
        return;
      }
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.
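    // Only the opcode is rewritten here (e.g. COPY -> V_MOV_B32_e32); the
    // immediate itself is substituted by the generic fold-candidate handling
    // further below.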

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    MachineInstr::mop_iterator ImpOpI = UseMI->implicit_operands().begin();
    MachineInstr::mop_iterator ImpOpE = UseMI->implicit_operands().end();
    while (ImpOpI != ImpOpE) {
      MachineInstr::mop_iterator Tmp = ImpOpI;
      ImpOpI++;
      UseMI->removeOperand(UseMI->getOperandNo(Tmp));
    }
    UseMI->setDesc(TII->get(MovOp));

    if (MovOp == AMDGPU::V_MOV_B16_t16_e64) {
      const auto &SrcOp = UseMI->getOperand(UseOpIdx);
      MachineOperand NewSrcOp(SrcOp);
      MachineFunction *MF = UseMI->getParent()->getParent();
      UseMI->removeOperand(1);
      UseMI->addOperand(*MF, MachineOperand::CreateImm(0)); // src0_modifiers
      UseMI->addOperand(NewSrcOp);                          // src0
      UseMI->addOperand(*MF, MachineOperand::CreateImm(0)); // op_sel
      UseOpIdx = 2;
      UseOp = &UseMI->getOperand(UseOpIdx);
    }
    CopiesToReplace.push_back(UseMI);
  } else {
    if (UseMI->isCopy() && OpToFold.isReg() &&
        UseMI->getOperand(0).getReg().isVirtual() &&
        !UseMI->getOperand(1).getSubReg()) {
      LLVM_DEBUG(dbgs() << "Folding " << OpToFold << "\n into " << *UseMI);
      unsigned Size = TII->getOpSize(*UseMI, 1);
      Register UseReg = OpToFold.getReg();
      UseMI->getOperand(1).setReg(UseReg);
      UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
      UseMI->getOperand(1).setIsKill(false);
      CopiesToReplace.push_back(UseMI);
      OpToFold.setIsKill(false);

      // Remove kill flags as kills may now be out of order with uses.
      MRI->clearKillFlags(OpToFold.getReg());

      // It is very tricky to store a value into an AGPR: v_accvgpr_write_b32
      // can only accept a VGPR or an inline immediate. Recreate a reg_sequence
      // with its initializers right here, so we will rematerialize immediates
      // and avoid copies via different reg classes.
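      // Each inline-immediate initializer then becomes a V_ACCVGPR_WRITE_B32
      // of a fresh AGPR temporary, and the copy is rebuilt as a REG_SEQUENCE
      // of those temporaries.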
      SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
      if (Size > 4 && TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
          getRegSeqInit(Defs, UseReg, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
        const DebugLoc &DL = UseMI->getDebugLoc();
        MachineBasicBlock &MBB = *UseMI->getParent();

        UseMI->setDesc(TII->get(AMDGPU::REG_SEQUENCE));
        for (unsigned I = UseMI->getNumOperands() - 1; I > 0; --I)
          UseMI->removeOperand(I);

        MachineInstrBuilder B(*MBB.getParent(), UseMI);
        DenseMap<TargetInstrInfo::RegSubRegPair, Register> VGPRCopies;
        SmallSetVector<TargetInstrInfo::RegSubRegPair, 32> SeenAGPRs;
        for (unsigned I = 0; I < Size / 4; ++I) {
          MachineOperand *Def = Defs[I].first;
          TargetInstrInfo::RegSubRegPair CopyToVGPR;
          if (Def->isImm() &&
              TII->isInlineConstant(*Def, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
            int64_t Imm = Def->getImm();

            auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
            BuildMI(MBB, UseMI, DL,
                    TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), Tmp).addImm(Imm);
            B.addReg(Tmp);
          } else if (Def->isReg() && TRI->isAGPR(*MRI, Def->getReg())) {
            auto Src = getRegSubRegPair(*Def);
            Def->setIsKill(false);
            if (!SeenAGPRs.insert(Src)) {
              // We cannot build a reg_sequence out of the same registers, they
              // must be copied. Better do it here before copyPhysReg() creates
              // several reads to do the AGPR->VGPR->AGPR copy.
              CopyToVGPR = Src;
            } else {
              B.addReg(Src.Reg, Def->isUndef() ? RegState::Undef : 0,
                       Src.SubReg);
            }
          } else {
            assert(Def->isReg());
            Def->setIsKill(false);
            auto Src = getRegSubRegPair(*Def);

            // Direct copy from SGPR to AGPR is not possible. To avoid creation
            // of exploded copies SGPR->VGPR->AGPR in the copyPhysReg() later,
            // create a copy here and track if we already have such a copy.
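            // That is, emit
            //   %vgpr = COPY %sgpr
            //   %agpr = V_ACCVGPR_WRITE_B32 %vgpr
            // reusing the single VGPR copy when the same source repeats.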
            if (TRI->isSGPRReg(*MRI, Src.Reg)) {
              CopyToVGPR = Src;
            } else {
              auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
              BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Tmp).add(*Def);
              B.addReg(Tmp);
            }
          }

          if (CopyToVGPR.Reg) {
            Register Vgpr;
            if (VGPRCopies.count(CopyToVGPR)) {
              Vgpr = VGPRCopies[CopyToVGPR];
            } else {
              Vgpr = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
              BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Vgpr).add(*Def);
              VGPRCopies[CopyToVGPR] = Vgpr;
            }
            auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
            BuildMI(MBB, UseMI, DL,
                    TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), Tmp).addReg(Vgpr);
            B.addReg(Tmp);
          }

          B.addImm(Defs[I].second);
        }
        LLVM_DEBUG(dbgs() << "Folded " << *UseMI);
        return;
      }

      if (Size != 4)
        return;

      Register Reg0 = UseMI->getOperand(0).getReg();
      Register Reg1 = UseMI->getOperand(1).getReg();
      if (TRI->isAGPR(*MRI, Reg0) && TRI->isVGPR(*MRI, Reg1))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64));
      else if (TRI->isVGPR(*MRI, Reg0) && TRI->isAGPR(*MRI, Reg1))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64));
      else if (ST->hasGFX90AInsts() && TRI->isAGPR(*MRI, Reg0) &&
               TRI->isAGPR(*MRI, Reg1))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_MOV_B32));
      return;
    }

    unsigned UseOpc = UseMI->getOpcode();
    if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
        (UseOpc == AMDGPU::V_READLANE_B32 &&
         (int)UseOpIdx ==
             AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
      // %vgpr = V_MOV_B32 imm
      // %sgpr = V_READFIRSTLANE_B32 %vgpr
      // =>
      // %sgpr = S_MOV_B32 imm
      if (FoldingImmLike) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       *UseMI))
          return;

        UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));

        if (OpToFold.isImm())
UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm()); 10170b57cec5SDimitry Andric else 10180b57cec5SDimitry Andric UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex()); 101981ad6265SDimitry Andric UseMI->removeOperand(2); // Remove exec read (or src1 for readlane) 10200b57cec5SDimitry Andric return; 10210b57cec5SDimitry Andric } 10220b57cec5SDimitry Andric 10230b57cec5SDimitry Andric if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) { 10240b57cec5SDimitry Andric if (execMayBeModifiedBeforeUse(*MRI, 10250b57cec5SDimitry Andric UseMI->getOperand(UseOpIdx).getReg(), 10260b57cec5SDimitry Andric *OpToFold.getParent(), 10270b57cec5SDimitry Andric *UseMI)) 10280b57cec5SDimitry Andric return; 10290b57cec5SDimitry Andric 10300b57cec5SDimitry Andric // %vgpr = COPY %sgpr0 10310b57cec5SDimitry Andric // %sgpr1 = V_READFIRSTLANE_B32 %vgpr 10320b57cec5SDimitry Andric // => 10330b57cec5SDimitry Andric // %sgpr1 = COPY %sgpr0 10340b57cec5SDimitry Andric UseMI->setDesc(TII->get(AMDGPU::COPY)); 10358bcb0991SDimitry Andric UseMI->getOperand(1).setReg(OpToFold.getReg()); 10368bcb0991SDimitry Andric UseMI->getOperand(1).setSubReg(OpToFold.getSubReg()); 10378bcb0991SDimitry Andric UseMI->getOperand(1).setIsKill(false); 103881ad6265SDimitry Andric UseMI->removeOperand(2); // Remove exec read (or src1 for readlane) 10390b57cec5SDimitry Andric return; 10400b57cec5SDimitry Andric } 10410b57cec5SDimitry Andric } 10420b57cec5SDimitry Andric 10430b57cec5SDimitry Andric const MCInstrDesc &UseDesc = UseMI->getDesc(); 10440b57cec5SDimitry Andric 10450b57cec5SDimitry Andric // Don't fold into target independent nodes. Target independent opcodes 10460b57cec5SDimitry Andric // don't have defined register classes. 10470fca6ea1SDimitry Andric if (UseDesc.isVariadic() || UseOp->isImplicit() || 1048bdd1243dSDimitry Andric UseDesc.operands()[UseOpIdx].RegClass == -1) 10490b57cec5SDimitry Andric return; 10500b57cec5SDimitry Andric } 10510b57cec5SDimitry Andric 10520b57cec5SDimitry Andric if (!FoldingImmLike) { 105381ad6265SDimitry Andric if (OpToFold.isReg() && ST->needsAlignedVGPRs()) { 105481ad6265SDimitry Andric // Don't fold if OpToFold doesn't hold an aligned register. 105581ad6265SDimitry Andric const TargetRegisterClass *RC = 105681ad6265SDimitry Andric TRI->getRegClassForReg(*MRI, OpToFold.getReg()); 10570fca6ea1SDimitry Andric assert(RC); 105881ad6265SDimitry Andric if (TRI->hasVectorRegisters(RC) && OpToFold.getSubReg()) { 105981ad6265SDimitry Andric unsigned SubReg = OpToFold.getSubReg(); 1060bdd1243dSDimitry Andric if (const TargetRegisterClass *SubRC = 1061bdd1243dSDimitry Andric TRI->getSubRegisterClass(RC, SubReg)) 106281ad6265SDimitry Andric RC = SubRC; 106381ad6265SDimitry Andric } 106481ad6265SDimitry Andric 106581ad6265SDimitry Andric if (!RC || !TRI->isProperlyAlignedRC(*RC)) 106681ad6265SDimitry Andric return; 106781ad6265SDimitry Andric } 106881ad6265SDimitry Andric 1069bdd1243dSDimitry Andric tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold); 10700b57cec5SDimitry Andric 10710b57cec5SDimitry Andric // FIXME: We could try to change the instruction from 64-bit to 32-bit 1072349cc55cSDimitry Andric // to enable more folding opportunities. The shrink operands pass 10730b57cec5SDimitry Andric // already does this. 
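    // (That rewrite, e.g. V_ADD_CO_U32_e64 -> V_ADD_CO_U32_e32, is what the
    // SIShrinkInstructions pass performs.)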
10740b57cec5SDimitry Andric return; 10750b57cec5SDimitry Andric } 10760b57cec5SDimitry Andric 10770b57cec5SDimitry Andric 10780b57cec5SDimitry Andric const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc(); 10790b57cec5SDimitry Andric const TargetRegisterClass *FoldRC = 1080bdd1243dSDimitry Andric TRI->getRegClass(FoldDesc.operands()[0].RegClass); 10810b57cec5SDimitry Andric 10820b57cec5SDimitry Andric // Split 64-bit constants into 32-bits for folding. 10830fca6ea1SDimitry Andric if (UseOp->getSubReg() && AMDGPU::getRegBitWidth(*FoldRC) == 64) { 10840fca6ea1SDimitry Andric Register UseReg = UseOp->getReg(); 10850b57cec5SDimitry Andric const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg); 108606c3fb27SDimitry Andric if (AMDGPU::getRegBitWidth(*UseRC) != 64) 10870b57cec5SDimitry Andric return; 10880b57cec5SDimitry Andric 10890b57cec5SDimitry Andric APInt Imm(64, OpToFold.getImm()); 10900fca6ea1SDimitry Andric if (UseOp->getSubReg() == AMDGPU::sub0) { 10910b57cec5SDimitry Andric Imm = Imm.getLoBits(32); 10920b57cec5SDimitry Andric } else { 10930fca6ea1SDimitry Andric assert(UseOp->getSubReg() == AMDGPU::sub1); 10940b57cec5SDimitry Andric Imm = Imm.getHiBits(32); 10950b57cec5SDimitry Andric } 10960b57cec5SDimitry Andric 10970b57cec5SDimitry Andric MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue()); 1098bdd1243dSDimitry Andric tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp); 10990b57cec5SDimitry Andric return; 11000b57cec5SDimitry Andric } 11010b57cec5SDimitry Andric 1102bdd1243dSDimitry Andric tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold); 11030b57cec5SDimitry Andric } 11040b57cec5SDimitry Andric 11050b57cec5SDimitry Andric static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result, 11060b57cec5SDimitry Andric uint32_t LHS, uint32_t RHS) { 11070b57cec5SDimitry Andric switch (Opcode) { 11080b57cec5SDimitry Andric case AMDGPU::V_AND_B32_e64: 11090b57cec5SDimitry Andric case AMDGPU::V_AND_B32_e32: 11100b57cec5SDimitry Andric case AMDGPU::S_AND_B32: 11110b57cec5SDimitry Andric Result = LHS & RHS; 11120b57cec5SDimitry Andric return true; 11130b57cec5SDimitry Andric case AMDGPU::V_OR_B32_e64: 11140b57cec5SDimitry Andric case AMDGPU::V_OR_B32_e32: 11150b57cec5SDimitry Andric case AMDGPU::S_OR_B32: 11160b57cec5SDimitry Andric Result = LHS | RHS; 11170b57cec5SDimitry Andric return true; 11180b57cec5SDimitry Andric case AMDGPU::V_XOR_B32_e64: 11190b57cec5SDimitry Andric case AMDGPU::V_XOR_B32_e32: 11200b57cec5SDimitry Andric case AMDGPU::S_XOR_B32: 11210b57cec5SDimitry Andric Result = LHS ^ RHS; 11220b57cec5SDimitry Andric return true; 11235ffd83dbSDimitry Andric case AMDGPU::S_XNOR_B32: 11245ffd83dbSDimitry Andric Result = ~(LHS ^ RHS); 11255ffd83dbSDimitry Andric return true; 11265ffd83dbSDimitry Andric case AMDGPU::S_NAND_B32: 11275ffd83dbSDimitry Andric Result = ~(LHS & RHS); 11285ffd83dbSDimitry Andric return true; 11295ffd83dbSDimitry Andric case AMDGPU::S_NOR_B32: 11305ffd83dbSDimitry Andric Result = ~(LHS | RHS); 11315ffd83dbSDimitry Andric return true; 11325ffd83dbSDimitry Andric case AMDGPU::S_ANDN2_B32: 11335ffd83dbSDimitry Andric Result = LHS & ~RHS; 11345ffd83dbSDimitry Andric return true; 11355ffd83dbSDimitry Andric case AMDGPU::S_ORN2_B32: 11365ffd83dbSDimitry Andric Result = LHS | ~RHS; 11375ffd83dbSDimitry Andric return true; 11380b57cec5SDimitry Andric case AMDGPU::V_LSHL_B32_e64: 11390b57cec5SDimitry Andric case AMDGPU::V_LSHL_B32_e32: 11400b57cec5SDimitry Andric case AMDGPU::S_LSHL_B32: 11410b57cec5SDimitry Andric // The 
instruction ignores the high bits for out of bounds shifts. 11420b57cec5SDimitry Andric Result = LHS << (RHS & 31); 11430b57cec5SDimitry Andric return true; 11440b57cec5SDimitry Andric case AMDGPU::V_LSHLREV_B32_e64: 11450b57cec5SDimitry Andric case AMDGPU::V_LSHLREV_B32_e32: 11460b57cec5SDimitry Andric Result = RHS << (LHS & 31); 11470b57cec5SDimitry Andric return true; 11480b57cec5SDimitry Andric case AMDGPU::V_LSHR_B32_e64: 11490b57cec5SDimitry Andric case AMDGPU::V_LSHR_B32_e32: 11500b57cec5SDimitry Andric case AMDGPU::S_LSHR_B32: 11510b57cec5SDimitry Andric Result = LHS >> (RHS & 31); 11520b57cec5SDimitry Andric return true; 11530b57cec5SDimitry Andric case AMDGPU::V_LSHRREV_B32_e64: 11540b57cec5SDimitry Andric case AMDGPU::V_LSHRREV_B32_e32: 11550b57cec5SDimitry Andric Result = RHS >> (LHS & 31); 11560b57cec5SDimitry Andric return true; 11570b57cec5SDimitry Andric case AMDGPU::V_ASHR_I32_e64: 11580b57cec5SDimitry Andric case AMDGPU::V_ASHR_I32_e32: 11590b57cec5SDimitry Andric case AMDGPU::S_ASHR_I32: 11600b57cec5SDimitry Andric Result = static_cast<int32_t>(LHS) >> (RHS & 31); 11610b57cec5SDimitry Andric return true; 11620b57cec5SDimitry Andric case AMDGPU::V_ASHRREV_I32_e64: 11630b57cec5SDimitry Andric case AMDGPU::V_ASHRREV_I32_e32: 11640b57cec5SDimitry Andric Result = static_cast<int32_t>(RHS) >> (LHS & 31); 11650b57cec5SDimitry Andric return true; 11660b57cec5SDimitry Andric default: 11670b57cec5SDimitry Andric return false; 11680b57cec5SDimitry Andric } 11690b57cec5SDimitry Andric } 11700b57cec5SDimitry Andric 11710b57cec5SDimitry Andric static unsigned getMovOpc(bool IsScalar) { 11720b57cec5SDimitry Andric return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; 11730b57cec5SDimitry Andric } 11740b57cec5SDimitry Andric 1175bdd1243dSDimitry Andric static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) { 1176bdd1243dSDimitry Andric MI.setDesc(NewDesc); 1177bdd1243dSDimitry Andric 1178bdd1243dSDimitry Andric // Remove any leftover implicit operands from mutating the instruction. e.g. 1179bdd1243dSDimitry Andric // if we replace an s_and_b32 with a copy, we don't need the implicit scc def 1180bdd1243dSDimitry Andric // anymore. 11810b57cec5SDimitry Andric const MCInstrDesc &Desc = MI.getDesc(); 1182bdd1243dSDimitry Andric unsigned NumOps = Desc.getNumOperands() + Desc.implicit_uses().size() + 1183bdd1243dSDimitry Andric Desc.implicit_defs().size(); 11840b57cec5SDimitry Andric 11850b57cec5SDimitry Andric for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I) 118681ad6265SDimitry Andric MI.removeOperand(I); 11870b57cec5SDimitry Andric } 11880b57cec5SDimitry Andric 1189bdd1243dSDimitry Andric MachineOperand * 1190bdd1243dSDimitry Andric SIFoldOperands::getImmOrMaterializedImm(MachineOperand &Op) const { 11910b57cec5SDimitry Andric // If this has a subregister, it obviously is a register source. 
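  // For example, given (illustrative MIR):
  //   %0:vgpr_32 = V_MOV_B32_e32 42, implicit %exec
  // a use of plain %0 is looked through to the immediate 42, while a physical
  // register or a subregister use is returned unchanged.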
1192bdd1243dSDimitry Andric if (!Op.isReg() || Op.getSubReg() != AMDGPU::NoSubRegister || 1193bdd1243dSDimitry Andric !Op.getReg().isVirtual()) 11940b57cec5SDimitry Andric return &Op; 11950b57cec5SDimitry Andric 1196bdd1243dSDimitry Andric MachineInstr *Def = MRI->getVRegDef(Op.getReg()); 11970b57cec5SDimitry Andric if (Def && Def->isMoveImmediate()) { 11980b57cec5SDimitry Andric MachineOperand &ImmSrc = Def->getOperand(1); 11990b57cec5SDimitry Andric if (ImmSrc.isImm()) 12000b57cec5SDimitry Andric return &ImmSrc; 12010b57cec5SDimitry Andric } 12020b57cec5SDimitry Andric 12030b57cec5SDimitry Andric return &Op; 12040b57cec5SDimitry Andric } 12050b57cec5SDimitry Andric 12060b57cec5SDimitry Andric // Try to simplify operations with a constant that may appear after instruction 12070b57cec5SDimitry Andric // selection. 12080b57cec5SDimitry Andric // TODO: See if a frame index with a fixed offset can fold. 1209bdd1243dSDimitry Andric bool SIFoldOperands::tryConstantFoldOp(MachineInstr *MI) const { 12105f757f3fSDimitry Andric if (!MI->allImplicitDefsAreDead()) 12115f757f3fSDimitry Andric return false; 12125f757f3fSDimitry Andric 12130b57cec5SDimitry Andric unsigned Opc = MI->getOpcode(); 1214fe6060f1SDimitry Andric 1215fe6060f1SDimitry Andric int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 1216fe6060f1SDimitry Andric if (Src0Idx == -1) 1217fe6060f1SDimitry Andric return false; 1218bdd1243dSDimitry Andric MachineOperand *Src0 = getImmOrMaterializedImm(MI->getOperand(Src0Idx)); 1219fe6060f1SDimitry Andric 1220fe6060f1SDimitry Andric if ((Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 || 1221fe6060f1SDimitry Andric Opc == AMDGPU::S_NOT_B32) && 1222fe6060f1SDimitry Andric Src0->isImm()) { 1223fe6060f1SDimitry Andric MI->getOperand(1).ChangeToImmediate(~Src0->getImm()); 12240b57cec5SDimitry Andric mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32))); 12250b57cec5SDimitry Andric return true; 12260b57cec5SDimitry Andric } 12270b57cec5SDimitry Andric 12280b57cec5SDimitry Andric int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 12290b57cec5SDimitry Andric if (Src1Idx == -1) 12300b57cec5SDimitry Andric return false; 1231bdd1243dSDimitry Andric MachineOperand *Src1 = getImmOrMaterializedImm(MI->getOperand(Src1Idx)); 12320b57cec5SDimitry Andric 12330b57cec5SDimitry Andric if (!Src0->isImm() && !Src1->isImm()) 12340b57cec5SDimitry Andric return false; 12350b57cec5SDimitry Andric 12360b57cec5SDimitry Andric // and k0, k1 -> v_mov_b32 (k0 & k1) 12370b57cec5SDimitry Andric // or k0, k1 -> v_mov_b32 (k0 | k1) 12380b57cec5SDimitry Andric // xor k0, k1 -> v_mov_b32 (k0 ^ k1) 12390b57cec5SDimitry Andric if (Src0->isImm() && Src1->isImm()) { 12400b57cec5SDimitry Andric int32_t NewImm; 12410b57cec5SDimitry Andric if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm())) 12420b57cec5SDimitry Andric return false; 12430b57cec5SDimitry Andric 1244bdd1243dSDimitry Andric bool IsSGPR = TRI->isSGPRReg(*MRI, MI->getOperand(0).getReg()); 12450b57cec5SDimitry Andric 12460b57cec5SDimitry Andric // Be careful to change the right operand, src0 may belong to a different 12470b57cec5SDimitry Andric // instruction. 
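    // (After getImmOrMaterializedImm, Src0 may point into the defining V_MOV
    // rather than into MI, so write the immediate through MI's own operand
    // index.)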
12480b57cec5SDimitry Andric MI->getOperand(Src0Idx).ChangeToImmediate(NewImm); 124981ad6265SDimitry Andric MI->removeOperand(Src1Idx); 12500b57cec5SDimitry Andric mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR))); 12510b57cec5SDimitry Andric return true; 12520b57cec5SDimitry Andric } 12530b57cec5SDimitry Andric 12540b57cec5SDimitry Andric if (!MI->isCommutable()) 12550b57cec5SDimitry Andric return false; 12560b57cec5SDimitry Andric 12570b57cec5SDimitry Andric if (Src0->isImm() && !Src1->isImm()) { 12580b57cec5SDimitry Andric std::swap(Src0, Src1); 12590b57cec5SDimitry Andric std::swap(Src0Idx, Src1Idx); 12600b57cec5SDimitry Andric } 12610b57cec5SDimitry Andric 12620b57cec5SDimitry Andric int32_t Src1Val = static_cast<int32_t>(Src1->getImm()); 12630b57cec5SDimitry Andric if (Opc == AMDGPU::V_OR_B32_e64 || 12640b57cec5SDimitry Andric Opc == AMDGPU::V_OR_B32_e32 || 12650b57cec5SDimitry Andric Opc == AMDGPU::S_OR_B32) { 12660b57cec5SDimitry Andric if (Src1Val == 0) { 12670b57cec5SDimitry Andric // y = or x, 0 => y = copy x 126881ad6265SDimitry Andric MI->removeOperand(Src1Idx); 12690b57cec5SDimitry Andric mutateCopyOp(*MI, TII->get(AMDGPU::COPY)); 12700b57cec5SDimitry Andric } else if (Src1Val == -1) { 12710b57cec5SDimitry Andric // y = or x, -1 => y = v_mov_b32 -1 127281ad6265SDimitry Andric MI->removeOperand(Src1Idx); 12730b57cec5SDimitry Andric mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32))); 12740b57cec5SDimitry Andric } else 12750b57cec5SDimitry Andric return false; 12760b57cec5SDimitry Andric 12770b57cec5SDimitry Andric return true; 12780b57cec5SDimitry Andric } 12790b57cec5SDimitry Andric 1280bdd1243dSDimitry Andric if (Opc == AMDGPU::V_AND_B32_e64 || Opc == AMDGPU::V_AND_B32_e32 || 1281bdd1243dSDimitry Andric Opc == AMDGPU::S_AND_B32) { 12820b57cec5SDimitry Andric if (Src1Val == 0) { 12830b57cec5SDimitry Andric // y = and x, 0 => y = v_mov_b32 0 128481ad6265SDimitry Andric MI->removeOperand(Src0Idx); 12850b57cec5SDimitry Andric mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32))); 12860b57cec5SDimitry Andric } else if (Src1Val == -1) { 12870b57cec5SDimitry Andric // y = and x, -1 => y = copy x 128881ad6265SDimitry Andric MI->removeOperand(Src1Idx); 12890b57cec5SDimitry Andric mutateCopyOp(*MI, TII->get(AMDGPU::COPY)); 12900b57cec5SDimitry Andric } else 12910b57cec5SDimitry Andric return false; 12920b57cec5SDimitry Andric 12930b57cec5SDimitry Andric return true; 12940b57cec5SDimitry Andric } 12950b57cec5SDimitry Andric 1296bdd1243dSDimitry Andric if (Opc == AMDGPU::V_XOR_B32_e64 || Opc == AMDGPU::V_XOR_B32_e32 || 1297bdd1243dSDimitry Andric Opc == AMDGPU::S_XOR_B32) { 12980b57cec5SDimitry Andric if (Src1Val == 0) { 12990b57cec5SDimitry Andric // y = xor x, 0 => y = copy x 130081ad6265SDimitry Andric MI->removeOperand(Src1Idx); 13010b57cec5SDimitry Andric mutateCopyOp(*MI, TII->get(AMDGPU::COPY)); 13020b57cec5SDimitry Andric return true; 13030b57cec5SDimitry Andric } 13040b57cec5SDimitry Andric } 13050b57cec5SDimitry Andric 13060b57cec5SDimitry Andric return false; 13070b57cec5SDimitry Andric } 13080b57cec5SDimitry Andric 13090b57cec5SDimitry Andric // Try to fold an instruction into a simpler one 1310fe6060f1SDimitry Andric bool SIFoldOperands::tryFoldCndMask(MachineInstr &MI) const { 1311fe6060f1SDimitry Andric unsigned Opc = MI.getOpcode(); 1312fe6060f1SDimitry Andric if (Opc != AMDGPU::V_CNDMASK_B32_e32 && Opc != AMDGPU::V_CNDMASK_B32_e64 && 1313fe6060f1SDimitry Andric Opc != AMDGPU::V_CNDMASK_B64_PSEUDO) 1314fe6060f1SDimitry Andric return false; 
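  // A select whose two sources are identical ignores the condition. As an
  // illustrative sketch:
  //   %2 = V_CNDMASK_B32_e64 0, %1, 0, %1, %cc
  // can be rewritten to a COPY of %1 (or to a move if the common source is an
  // immediate).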
13150b57cec5SDimitry Andric
1316fe6060f1SDimitry Andric   MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1317fe6060f1SDimitry Andric   MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1318fe6060f1SDimitry Andric   if (!Src1->isIdenticalTo(*Src0)) {
1319bdd1243dSDimitry Andric     auto *Src0Imm = getImmOrMaterializedImm(*Src0);
1320bdd1243dSDimitry Andric     auto *Src1Imm = getImmOrMaterializedImm(*Src1);
1321fe6060f1SDimitry Andric     if (!Src1Imm->isIdenticalTo(*Src0Imm))
1322fe6060f1SDimitry Andric       return false;
1323fe6060f1SDimitry Andric   }
1324fe6060f1SDimitry Andric
1325fe6060f1SDimitry Andric   int Src1ModIdx =
1326fe6060f1SDimitry Andric       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
1327fe6060f1SDimitry Andric   int Src0ModIdx =
1328fe6060f1SDimitry Andric       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
1329fe6060f1SDimitry Andric   if ((Src1ModIdx != -1 && MI.getOperand(Src1ModIdx).getImm() != 0) ||
1330fe6060f1SDimitry Andric       (Src0ModIdx != -1 && MI.getOperand(Src0ModIdx).getImm() != 0))
1331fe6060f1SDimitry Andric     return false;
1332fe6060f1SDimitry Andric
1333fe6060f1SDimitry Andric   LLVM_DEBUG(dbgs() << "Folded " << MI << " into ");
13340b57cec5SDimitry Andric   auto &NewDesc =
13350b57cec5SDimitry Andric       TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
13360b57cec5SDimitry Andric   int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
13370b57cec5SDimitry Andric   if (Src2Idx != -1)
133881ad6265SDimitry Andric     MI.removeOperand(Src2Idx);
133981ad6265SDimitry Andric   MI.removeOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
13400b57cec5SDimitry Andric   if (Src1ModIdx != -1)
134181ad6265SDimitry Andric     MI.removeOperand(Src1ModIdx);
13420b57cec5SDimitry Andric   if (Src0ModIdx != -1)
134381ad6265SDimitry Andric     MI.removeOperand(Src0ModIdx);
1344fe6060f1SDimitry Andric   mutateCopyOp(MI, NewDesc);
1345fe6060f1SDimitry Andric   LLVM_DEBUG(dbgs() << MI);
13460b57cec5SDimitry Andric   return true;
13470b57cec5SDimitry Andric }
1348fe6060f1SDimitry Andric
1349fe6060f1SDimitry Andric bool SIFoldOperands::tryFoldZeroHighBits(MachineInstr &MI) const {
1350fe6060f1SDimitry Andric   if (MI.getOpcode() != AMDGPU::V_AND_B32_e64 &&
1351fe6060f1SDimitry Andric       MI.getOpcode() != AMDGPU::V_AND_B32_e32)
1352fe6060f1SDimitry Andric     return false;
1353fe6060f1SDimitry Andric
1354bdd1243dSDimitry Andric   MachineOperand *Src0 = getImmOrMaterializedImm(MI.getOperand(1));
1355fe6060f1SDimitry Andric   if (!Src0->isImm() || Src0->getImm() != 0xffff)
1356fe6060f1SDimitry Andric     return false;
1357fe6060f1SDimitry Andric
1358fe6060f1SDimitry Andric   Register Src1 = MI.getOperand(2).getReg();
1359fe6060f1SDimitry Andric   MachineInstr *SrcDef = MRI->getVRegDef(Src1);
1360bdd1243dSDimitry Andric   if (!ST->zeroesHigh16BitsOfDest(SrcDef->getOpcode()))
1361bdd1243dSDimitry Andric     return false;
1362bdd1243dSDimitry Andric
1363fe6060f1SDimitry Andric   Register Dst = MI.getOperand(0).getReg();
13640fca6ea1SDimitry Andric   MRI->replaceRegWith(Dst, Src1);
13650fca6ea1SDimitry Andric   if (!MI.getOperand(2).isKill())
13660fca6ea1SDimitry Andric     MRI->clearKillFlags(Src1);
1367fe6060f1SDimitry Andric   MI.eraseFromParent();
1368fe6060f1SDimitry Andric   return true;
13690b57cec5SDimitry Andric }
13700b57cec5SDimitry Andric
137181ad6265SDimitry Andric bool SIFoldOperands::foldInstOperand(MachineInstr &MI,
13720b57cec5SDimitry Andric                                      MachineOperand &OpToFold) const {
13730b57cec5SDimitry Andric   // We need to mutate the operands of new mov instructions
to add implicit 13740b57cec5SDimitry Andric // uses of EXEC, but adding them invalidates the use_iterator, so defer 13750b57cec5SDimitry Andric // this. 13760b57cec5SDimitry Andric SmallVector<MachineInstr *, 4> CopiesToReplace; 13770b57cec5SDimitry Andric SmallVector<FoldCandidate, 4> FoldList; 13780b57cec5SDimitry Andric MachineOperand &Dst = MI.getOperand(0); 137981ad6265SDimitry Andric bool Changed = false; 13800b57cec5SDimitry Andric 1381fe6060f1SDimitry Andric if (OpToFold.isImm()) { 1382fe6060f1SDimitry Andric for (auto &UseMI : 1383fe6060f1SDimitry Andric make_early_inc_range(MRI->use_nodbg_instructions(Dst.getReg()))) { 13840b57cec5SDimitry Andric // Folding the immediate may reveal operations that can be constant 13850b57cec5SDimitry Andric // folded or replaced with a copy. This can happen for example after 13860b57cec5SDimitry Andric // frame indices are lowered to constants or from splitting 64-bit 13870b57cec5SDimitry Andric // constants. 13880b57cec5SDimitry Andric // 13890b57cec5SDimitry Andric // We may also encounter cases where one or both operands are 13900b57cec5SDimitry Andric // immediates materialized into a register, which would ordinarily not 13910b57cec5SDimitry Andric // be folded due to multiple uses or operand constraints. 1392bdd1243dSDimitry Andric if (tryConstantFoldOp(&UseMI)) { 1393fe6060f1SDimitry Andric LLVM_DEBUG(dbgs() << "Constant folded " << UseMI); 139481ad6265SDimitry Andric Changed = true; 13950b57cec5SDimitry Andric } 13960b57cec5SDimitry Andric } 13970b57cec5SDimitry Andric } 13980b57cec5SDimitry Andric 1399fe6060f1SDimitry Andric SmallVector<MachineOperand *, 4> UsesToProcess; 1400fe6060f1SDimitry Andric for (auto &Use : MRI->use_nodbg_operands(Dst.getReg())) 1401fe6060f1SDimitry Andric UsesToProcess.push_back(&Use); 1402bdd1243dSDimitry Andric for (auto *U : UsesToProcess) { 14030b57cec5SDimitry Andric MachineInstr *UseMI = U->getParent(); 140481ad6265SDimitry Andric foldOperand(OpToFold, UseMI, UseMI->getOperandNo(U), FoldList, 140581ad6265SDimitry Andric CopiesToReplace); 140681ad6265SDimitry Andric } 14070b57cec5SDimitry Andric 140881ad6265SDimitry Andric if (CopiesToReplace.empty() && FoldList.empty()) 140981ad6265SDimitry Andric return Changed; 14100b57cec5SDimitry Andric 14110b57cec5SDimitry Andric MachineFunction *MF = MI.getParent()->getParent(); 14120b57cec5SDimitry Andric // Make sure we add EXEC uses to any new v_mov instructions created. 14130b57cec5SDimitry Andric for (MachineInstr *Copy : CopiesToReplace) 14140b57cec5SDimitry Andric Copy->addImplicitDefUseOperands(*MF); 14150b57cec5SDimitry Andric 14160b57cec5SDimitry Andric for (FoldCandidate &Fold : FoldList) { 1417480093f4SDimitry Andric assert(!Fold.isReg() || Fold.OpToFold); 1418e8d8bef9SDimitry Andric if (Fold.isReg() && Fold.OpToFold->getReg().isVirtual()) { 14198bcb0991SDimitry Andric Register Reg = Fold.OpToFold->getReg(); 14208bcb0991SDimitry Andric MachineInstr *DefMI = Fold.OpToFold->getParent(); 14218bcb0991SDimitry Andric if (DefMI->readsRegister(AMDGPU::EXEC, TRI) && 14228bcb0991SDimitry Andric execMayBeModifiedBeforeUse(*MRI, Reg, *DefMI, *Fold.UseMI)) 14238bcb0991SDimitry Andric continue; 14248bcb0991SDimitry Andric } 1425bdd1243dSDimitry Andric if (updateOperand(Fold)) { 14260b57cec5SDimitry Andric // Clear kill flags. 
14270b57cec5SDimitry Andric       if (Fold.isReg()) {
14280b57cec5SDimitry Andric         assert(Fold.OpToFold && Fold.OpToFold->isReg());
14290b57cec5SDimitry Andric         // FIXME: Probably shouldn't bother trying to fold if not an
14300b57cec5SDimitry Andric         // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
14310b57cec5SDimitry Andric         // copies.
14320b57cec5SDimitry Andric         MRI->clearKillFlags(Fold.OpToFold->getReg());
14330b57cec5SDimitry Andric       }
14340b57cec5SDimitry Andric       LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
14350b57cec5SDimitry Andric                         << static_cast<int>(Fold.UseOpNo) << " of "
1436fe6060f1SDimitry Andric                         << *Fold.UseMI);
1437bdd1243dSDimitry Andric     } else if (Fold.Commuted) {
14380b57cec5SDimitry Andric       // Restore the instruction's original operand order if the fold failed.
14390b57cec5SDimitry Andric       TII->commuteInstruction(*Fold.UseMI, false);
14400b57cec5SDimitry Andric     }
14410b57cec5SDimitry Andric   }
144281ad6265SDimitry Andric   return true;
14430b57cec5SDimitry Andric }
14440b57cec5SDimitry Andric
1445bdd1243dSDimitry Andric bool SIFoldOperands::tryFoldFoldableCopy(
1446bdd1243dSDimitry Andric     MachineInstr &MI, MachineOperand *&CurrentKnownM0Val) const {
1447bdd1243dSDimitry Andric   // Specially track simple redefs of m0 to the same value in a block, so we
1448bdd1243dSDimitry Andric   // can erase the later ones.
1449bdd1243dSDimitry Andric   if (MI.getOperand(0).getReg() == AMDGPU::M0) {
1450bdd1243dSDimitry Andric     MachineOperand &NewM0Val = MI.getOperand(1);
1451bdd1243dSDimitry Andric     if (CurrentKnownM0Val && CurrentKnownM0Val->isIdenticalTo(NewM0Val)) {
1452bdd1243dSDimitry Andric       MI.eraseFromParent();
1453bdd1243dSDimitry Andric       return true;
1454bdd1243dSDimitry Andric     }
1455bdd1243dSDimitry Andric
1456bdd1243dSDimitry Andric     // We aren't tracking other physical registers.
1457bdd1243dSDimitry Andric     CurrentKnownM0Val = (NewM0Val.isReg() && NewM0Val.getReg().isPhysical())
1458bdd1243dSDimitry Andric                             ? nullptr
1459bdd1243dSDimitry Andric                             : &NewM0Val;
1460bdd1243dSDimitry Andric     return false;
1461bdd1243dSDimitry Andric   }
1462bdd1243dSDimitry Andric
1463bdd1243dSDimitry Andric   MachineOperand &OpToFold = MI.getOperand(1);
1464bdd1243dSDimitry Andric   bool FoldingImm = OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
1465bdd1243dSDimitry Andric
1466bdd1243dSDimitry Andric   // FIXME: We could also be folding things like TargetIndexes.
1467bdd1243dSDimitry Andric   if (!FoldingImm && !OpToFold.isReg())
1468bdd1243dSDimitry Andric     return false;
1469bdd1243dSDimitry Andric
1470bdd1243dSDimitry Andric   if (OpToFold.isReg() && !OpToFold.getReg().isVirtual())
1471bdd1243dSDimitry Andric     return false;
1472bdd1243dSDimitry Andric
1473bdd1243dSDimitry Andric   // Prevent folding operands backwards in the function. For example,
1474bdd1243dSDimitry Andric   // the COPY opcode must not be replaced by 1 in this example:
1475bdd1243dSDimitry Andric   //
1476bdd1243dSDimitry Andric   // %3 = COPY %vgpr0; VGPR_32:%3
1477bdd1243dSDimitry Andric   // ...
1478bdd1243dSDimitry Andric   // %vgpr0 = V_MOV_B32_e32 1, implicit %exec
1479bdd1243dSDimitry Andric   if (!MI.getOperand(0).getReg().isVirtual())
1480bdd1243dSDimitry Andric     return false;
1481bdd1243dSDimitry Andric
1482bdd1243dSDimitry Andric   bool Changed = foldInstOperand(MI, OpToFold);
1483bdd1243dSDimitry Andric
1484bdd1243dSDimitry Andric   // If we managed to fold all uses of this copy then we might as well
1485bdd1243dSDimitry Andric   // delete it now.
1486bdd1243dSDimitry Andric // The only reason we need to follow chains of copies here is that 1487bdd1243dSDimitry Andric // tryFoldRegSequence looks forward through copies before folding a 1488bdd1243dSDimitry Andric // REG_SEQUENCE into its eventual users. 1489bdd1243dSDimitry Andric auto *InstToErase = &MI; 1490bdd1243dSDimitry Andric while (MRI->use_nodbg_empty(InstToErase->getOperand(0).getReg())) { 1491bdd1243dSDimitry Andric auto &SrcOp = InstToErase->getOperand(1); 1492bdd1243dSDimitry Andric auto SrcReg = SrcOp.isReg() ? SrcOp.getReg() : Register(); 1493bdd1243dSDimitry Andric InstToErase->eraseFromParent(); 1494bdd1243dSDimitry Andric Changed = true; 1495bdd1243dSDimitry Andric InstToErase = nullptr; 1496bdd1243dSDimitry Andric if (!SrcReg || SrcReg.isPhysical()) 1497bdd1243dSDimitry Andric break; 1498bdd1243dSDimitry Andric InstToErase = MRI->getVRegDef(SrcReg); 1499bdd1243dSDimitry Andric if (!InstToErase || !TII->isFoldableCopy(*InstToErase)) 1500bdd1243dSDimitry Andric break; 1501bdd1243dSDimitry Andric } 1502bdd1243dSDimitry Andric 1503bdd1243dSDimitry Andric if (InstToErase && InstToErase->isRegSequence() && 1504bdd1243dSDimitry Andric MRI->use_nodbg_empty(InstToErase->getOperand(0).getReg())) { 1505bdd1243dSDimitry Andric InstToErase->eraseFromParent(); 1506bdd1243dSDimitry Andric Changed = true; 1507bdd1243dSDimitry Andric } 1508bdd1243dSDimitry Andric 1509bdd1243dSDimitry Andric return Changed; 1510bdd1243dSDimitry Andric } 1511bdd1243dSDimitry Andric 15120b57cec5SDimitry Andric // Clamp patterns are canonically selected to v_max_* instructions, so only 15130b57cec5SDimitry Andric // handle them. 15140b57cec5SDimitry Andric const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const { 15150b57cec5SDimitry Andric unsigned Op = MI.getOpcode(); 15160b57cec5SDimitry Andric switch (Op) { 15170b57cec5SDimitry Andric case AMDGPU::V_MAX_F32_e64: 15180b57cec5SDimitry Andric case AMDGPU::V_MAX_F16_e64: 1519bdd1243dSDimitry Andric case AMDGPU::V_MAX_F16_t16_e64: 15205f757f3fSDimitry Andric case AMDGPU::V_MAX_F16_fake16_e64: 1521e8d8bef9SDimitry Andric case AMDGPU::V_MAX_F64_e64: 15227a6dacacSDimitry Andric case AMDGPU::V_MAX_NUM_F64_e64: 15230b57cec5SDimitry Andric case AMDGPU::V_PK_MAX_F16: { 15240fca6ea1SDimitry Andric if (MI.mayRaiseFPException()) 15250fca6ea1SDimitry Andric return nullptr; 15260fca6ea1SDimitry Andric 15270b57cec5SDimitry Andric if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm()) 15280b57cec5SDimitry Andric return nullptr; 15290b57cec5SDimitry Andric 15300b57cec5SDimitry Andric // Make sure sources are identical. 15310b57cec5SDimitry Andric const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0); 15320b57cec5SDimitry Andric const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1); 15330b57cec5SDimitry Andric if (!Src0->isReg() || !Src1->isReg() || 15340b57cec5SDimitry Andric Src0->getReg() != Src1->getReg() || 15350b57cec5SDimitry Andric Src0->getSubReg() != Src1->getSubReg() || 15360b57cec5SDimitry Andric Src0->getSubReg() != AMDGPU::NoSubRegister) 15370b57cec5SDimitry Andric return nullptr; 15380b57cec5SDimitry Andric 15390b57cec5SDimitry Andric // Can't fold up if we have modifiers. 
15400b57cec5SDimitry Andric if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod)) 15410b57cec5SDimitry Andric return nullptr; 15420b57cec5SDimitry Andric 15430b57cec5SDimitry Andric unsigned Src0Mods 15440b57cec5SDimitry Andric = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm(); 15450b57cec5SDimitry Andric unsigned Src1Mods 15460b57cec5SDimitry Andric = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm(); 15470b57cec5SDimitry Andric 15480b57cec5SDimitry Andric // Having a 0 op_sel_hi would require swizzling the output in the source 15490b57cec5SDimitry Andric // instruction, which we can't do. 15500b57cec5SDimitry Andric unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1 15510b57cec5SDimitry Andric : 0u; 15520b57cec5SDimitry Andric if (Src0Mods != UnsetMods && Src1Mods != UnsetMods) 15530b57cec5SDimitry Andric return nullptr; 15540b57cec5SDimitry Andric return Src0; 15550b57cec5SDimitry Andric } 15560b57cec5SDimitry Andric default: 15570b57cec5SDimitry Andric return nullptr; 15580b57cec5SDimitry Andric } 15590b57cec5SDimitry Andric } 15600b57cec5SDimitry Andric 15610b57cec5SDimitry Andric // FIXME: Clamp for v_mad_mixhi_f16 handled during isel. 15620b57cec5SDimitry Andric bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) { 15630b57cec5SDimitry Andric const MachineOperand *ClampSrc = isClamp(MI); 1564fe6060f1SDimitry Andric if (!ClampSrc || !MRI->hasOneNonDBGUser(ClampSrc->getReg())) 15650b57cec5SDimitry Andric return false; 15660b57cec5SDimitry Andric 15670b57cec5SDimitry Andric MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg()); 15680b57cec5SDimitry Andric 15690b57cec5SDimitry Andric // The type of clamp must be compatible. 15700b57cec5SDimitry Andric if (TII->getClampMask(*Def) != TII->getClampMask(MI)) 15710b57cec5SDimitry Andric return false; 15720b57cec5SDimitry Andric 15730fca6ea1SDimitry Andric if (Def->mayRaiseFPException()) 15740fca6ea1SDimitry Andric return false; 15750fca6ea1SDimitry Andric 15760b57cec5SDimitry Andric MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp); 15770b57cec5SDimitry Andric if (!DefClamp) 15780b57cec5SDimitry Andric return false; 15790b57cec5SDimitry Andric 1580fe6060f1SDimitry Andric LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def); 15810b57cec5SDimitry Andric 15820b57cec5SDimitry Andric // Clamp is applied after omod, so it is OK if omod is set. 15830b57cec5SDimitry Andric DefClamp->setImm(1); 1584*62987288SDimitry Andric 1585*62987288SDimitry Andric Register DefReg = Def->getOperand(0).getReg(); 1586*62987288SDimitry Andric Register MIDstReg = MI.getOperand(0).getReg(); 1587*62987288SDimitry Andric if (TRI->isSGPRReg(*MRI, DefReg)) { 1588*62987288SDimitry Andric // Pseudo scalar instructions have a SGPR for dst and clamp is a v_max* 1589*62987288SDimitry Andric // instruction with a VGPR dst. 1590*62987288SDimitry Andric BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY), 1591*62987288SDimitry Andric MIDstReg) 1592*62987288SDimitry Andric .addReg(DefReg); 1593*62987288SDimitry Andric } else { 1594*62987288SDimitry Andric MRI->replaceRegWith(MIDstReg, DefReg); 1595*62987288SDimitry Andric } 15960b57cec5SDimitry Andric MI.eraseFromParent(); 1597349cc55cSDimitry Andric 1598349cc55cSDimitry Andric // Use of output modifiers forces VOP3 encoding for a VOP2 mac/fmac 1599349cc55cSDimitry Andric // instruction, so we might as well convert it to the more flexible VOP3-only 1600349cc55cSDimitry Andric // mad/fma form. 
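  // For instance (a sketch, not tied to a particular subtarget):
  //   %d = V_FMAC_F32_e32 %a, %b, %d
  // can become
  //   %d = V_FMA_F32_e64 %a, %b, %d
  // which has room for the clamp bit.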
1601349cc55cSDimitry Andric if (TII->convertToThreeAddress(*Def, nullptr, nullptr)) 1602349cc55cSDimitry Andric Def->eraseFromParent(); 1603349cc55cSDimitry Andric 16040b57cec5SDimitry Andric return true; 16050b57cec5SDimitry Andric } 16060b57cec5SDimitry Andric 16070b57cec5SDimitry Andric static int getOModValue(unsigned Opc, int64_t Val) { 16080b57cec5SDimitry Andric switch (Opc) { 16097a6dacacSDimitry Andric case AMDGPU::V_MUL_F64_e64: 16107a6dacacSDimitry Andric case AMDGPU::V_MUL_F64_pseudo_e64: { 1611fe6060f1SDimitry Andric switch (Val) { 1612fe6060f1SDimitry Andric case 0x3fe0000000000000: // 0.5 1613fe6060f1SDimitry Andric return SIOutMods::DIV2; 1614fe6060f1SDimitry Andric case 0x4000000000000000: // 2.0 1615fe6060f1SDimitry Andric return SIOutMods::MUL2; 1616fe6060f1SDimitry Andric case 0x4010000000000000: // 4.0 1617fe6060f1SDimitry Andric return SIOutMods::MUL4; 1618fe6060f1SDimitry Andric default: 1619fe6060f1SDimitry Andric return SIOutMods::NONE; 1620fe6060f1SDimitry Andric } 1621fe6060f1SDimitry Andric } 16220b57cec5SDimitry Andric case AMDGPU::V_MUL_F32_e64: { 16230b57cec5SDimitry Andric switch (static_cast<uint32_t>(Val)) { 16240b57cec5SDimitry Andric case 0x3f000000: // 0.5 16250b57cec5SDimitry Andric return SIOutMods::DIV2; 16260b57cec5SDimitry Andric case 0x40000000: // 2.0 16270b57cec5SDimitry Andric return SIOutMods::MUL2; 16280b57cec5SDimitry Andric case 0x40800000: // 4.0 16290b57cec5SDimitry Andric return SIOutMods::MUL4; 16300b57cec5SDimitry Andric default: 16310b57cec5SDimitry Andric return SIOutMods::NONE; 16320b57cec5SDimitry Andric } 16330b57cec5SDimitry Andric } 1634bdd1243dSDimitry Andric case AMDGPU::V_MUL_F16_e64: 16355f757f3fSDimitry Andric case AMDGPU::V_MUL_F16_t16_e64: 16365f757f3fSDimitry Andric case AMDGPU::V_MUL_F16_fake16_e64: { 16370b57cec5SDimitry Andric switch (static_cast<uint16_t>(Val)) { 16380b57cec5SDimitry Andric case 0x3800: // 0.5 16390b57cec5SDimitry Andric return SIOutMods::DIV2; 16400b57cec5SDimitry Andric case 0x4000: // 2.0 16410b57cec5SDimitry Andric return SIOutMods::MUL2; 16420b57cec5SDimitry Andric case 0x4400: // 4.0 16430b57cec5SDimitry Andric return SIOutMods::MUL4; 16440b57cec5SDimitry Andric default: 16450b57cec5SDimitry Andric return SIOutMods::NONE; 16460b57cec5SDimitry Andric } 16470b57cec5SDimitry Andric } 16480b57cec5SDimitry Andric default: 16490b57cec5SDimitry Andric llvm_unreachable("invalid mul opcode"); 16500b57cec5SDimitry Andric } 16510b57cec5SDimitry Andric } 16520b57cec5SDimitry Andric 16530b57cec5SDimitry Andric // FIXME: Does this really not support denormals with f16? 16540b57cec5SDimitry Andric // FIXME: Does this need to check IEEE mode bit? SNaNs are generally not 16550b57cec5SDimitry Andric // handled, so will anything other than that break? 16560b57cec5SDimitry Andric std::pair<const MachineOperand *, int> 16570b57cec5SDimitry Andric SIFoldOperands::isOMod(const MachineInstr &MI) const { 16580b57cec5SDimitry Andric unsigned Op = MI.getOpcode(); 16590b57cec5SDimitry Andric switch (Op) { 1660fe6060f1SDimitry Andric case AMDGPU::V_MUL_F64_e64: 16617a6dacacSDimitry Andric case AMDGPU::V_MUL_F64_pseudo_e64: 16620b57cec5SDimitry Andric case AMDGPU::V_MUL_F32_e64: 1663bdd1243dSDimitry Andric case AMDGPU::V_MUL_F16_t16_e64: 16645f757f3fSDimitry Andric case AMDGPU::V_MUL_F16_fake16_e64: 16650b57cec5SDimitry Andric case AMDGPU::V_MUL_F16_e64: { 16660b57cec5SDimitry Andric // If output denormals are enabled, omod is ignored. 
1667bdd1243dSDimitry Andric if ((Op == AMDGPU::V_MUL_F32_e64 && 1668bdd1243dSDimitry Andric MFI->getMode().FP32Denormals.Output != DenormalMode::PreserveSign) || 16697a6dacacSDimitry Andric ((Op == AMDGPU::V_MUL_F64_e64 || Op == AMDGPU::V_MUL_F64_pseudo_e64 || 16707a6dacacSDimitry Andric Op == AMDGPU::V_MUL_F16_e64 || Op == AMDGPU::V_MUL_F16_t16_e64 || 16715f757f3fSDimitry Andric Op == AMDGPU::V_MUL_F16_fake16_e64) && 16720fca6ea1SDimitry Andric MFI->getMode().FP64FP16Denormals.Output != 16730fca6ea1SDimitry Andric DenormalMode::PreserveSign) || 16740fca6ea1SDimitry Andric MI.mayRaiseFPException()) 1675bdd1243dSDimitry Andric return std::pair(nullptr, SIOutMods::NONE); 16760b57cec5SDimitry Andric 16770b57cec5SDimitry Andric const MachineOperand *RegOp = nullptr; 16780b57cec5SDimitry Andric const MachineOperand *ImmOp = nullptr; 16790b57cec5SDimitry Andric const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0); 16800b57cec5SDimitry Andric const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1); 16810b57cec5SDimitry Andric if (Src0->isImm()) { 16820b57cec5SDimitry Andric ImmOp = Src0; 16830b57cec5SDimitry Andric RegOp = Src1; 16840b57cec5SDimitry Andric } else if (Src1->isImm()) { 16850b57cec5SDimitry Andric ImmOp = Src1; 16860b57cec5SDimitry Andric RegOp = Src0; 16870b57cec5SDimitry Andric } else 1688bdd1243dSDimitry Andric return std::pair(nullptr, SIOutMods::NONE); 16890b57cec5SDimitry Andric 16900b57cec5SDimitry Andric int OMod = getOModValue(Op, ImmOp->getImm()); 16910b57cec5SDimitry Andric if (OMod == SIOutMods::NONE || 16920b57cec5SDimitry Andric TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) || 16930b57cec5SDimitry Andric TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) || 16940b57cec5SDimitry Andric TII->hasModifiersSet(MI, AMDGPU::OpName::omod) || 16950b57cec5SDimitry Andric TII->hasModifiersSet(MI, AMDGPU::OpName::clamp)) 1696bdd1243dSDimitry Andric return std::pair(nullptr, SIOutMods::NONE); 16970b57cec5SDimitry Andric 1698bdd1243dSDimitry Andric return std::pair(RegOp, OMod); 16990b57cec5SDimitry Andric } 1700fe6060f1SDimitry Andric case AMDGPU::V_ADD_F64_e64: 17017a6dacacSDimitry Andric case AMDGPU::V_ADD_F64_pseudo_e64: 17020b57cec5SDimitry Andric case AMDGPU::V_ADD_F32_e64: 1703bdd1243dSDimitry Andric case AMDGPU::V_ADD_F16_e64: 17045f757f3fSDimitry Andric case AMDGPU::V_ADD_F16_t16_e64: 17055f757f3fSDimitry Andric case AMDGPU::V_ADD_F16_fake16_e64: { 17060b57cec5SDimitry Andric // If output denormals are enabled, omod is ignored. 
1707bdd1243dSDimitry Andric if ((Op == AMDGPU::V_ADD_F32_e64 && 1708bdd1243dSDimitry Andric MFI->getMode().FP32Denormals.Output != DenormalMode::PreserveSign) || 17097a6dacacSDimitry Andric ((Op == AMDGPU::V_ADD_F64_e64 || Op == AMDGPU::V_ADD_F64_pseudo_e64 || 17107a6dacacSDimitry Andric Op == AMDGPU::V_ADD_F16_e64 || Op == AMDGPU::V_ADD_F16_t16_e64 || 17115f757f3fSDimitry Andric Op == AMDGPU::V_ADD_F16_fake16_e64) && 1712bdd1243dSDimitry Andric MFI->getMode().FP64FP16Denormals.Output != DenormalMode::PreserveSign)) 1713bdd1243dSDimitry Andric return std::pair(nullptr, SIOutMods::NONE); 17140b57cec5SDimitry Andric 17150b57cec5SDimitry Andric // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x 17160b57cec5SDimitry Andric const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0); 17170b57cec5SDimitry Andric const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1); 17180b57cec5SDimitry Andric 17190b57cec5SDimitry Andric if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() && 17200b57cec5SDimitry Andric Src0->getSubReg() == Src1->getSubReg() && 17210b57cec5SDimitry Andric !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) && 17220b57cec5SDimitry Andric !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) && 17230b57cec5SDimitry Andric !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) && 17240b57cec5SDimitry Andric !TII->hasModifiersSet(MI, AMDGPU::OpName::omod)) 1725bdd1243dSDimitry Andric return std::pair(Src0, SIOutMods::MUL2); 17260b57cec5SDimitry Andric 1727bdd1243dSDimitry Andric return std::pair(nullptr, SIOutMods::NONE); 17280b57cec5SDimitry Andric } 17290b57cec5SDimitry Andric default: 1730bdd1243dSDimitry Andric return std::pair(nullptr, SIOutMods::NONE); 17310b57cec5SDimitry Andric } 17320b57cec5SDimitry Andric } 17330b57cec5SDimitry Andric 17340b57cec5SDimitry Andric // FIXME: Does this need to check IEEE bit on function? 17350b57cec5SDimitry Andric bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) { 17360b57cec5SDimitry Andric const MachineOperand *RegOp; 17370b57cec5SDimitry Andric int OMod; 17380b57cec5SDimitry Andric std::tie(RegOp, OMod) = isOMod(MI); 17390b57cec5SDimitry Andric if (OMod == SIOutMods::NONE || !RegOp->isReg() || 17400b57cec5SDimitry Andric RegOp->getSubReg() != AMDGPU::NoSubRegister || 1741fe6060f1SDimitry Andric !MRI->hasOneNonDBGUser(RegOp->getReg())) 17420b57cec5SDimitry Andric return false; 17430b57cec5SDimitry Andric 17440b57cec5SDimitry Andric MachineInstr *Def = MRI->getVRegDef(RegOp->getReg()); 17450b57cec5SDimitry Andric MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod); 17460b57cec5SDimitry Andric if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE) 17470b57cec5SDimitry Andric return false; 17480b57cec5SDimitry Andric 17490fca6ea1SDimitry Andric if (Def->mayRaiseFPException()) 17500fca6ea1SDimitry Andric return false; 17510fca6ea1SDimitry Andric 17520b57cec5SDimitry Andric // Clamp is applied after omod. If the source already has clamp set, don't 17530b57cec5SDimitry Andric // fold it. 
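  // Schematically, a successful omod fold rewrites (names illustrative):
  //   %1 = V_ADD_F32_e64 %a, %b
  //   %2 = V_MUL_F32_e64 %1, 2.0
  // into
  //   %1 = V_ADD_F32_e64 %a, %b, mul:2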
17540b57cec5SDimitry Andric if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp)) 17550b57cec5SDimitry Andric return false; 17560b57cec5SDimitry Andric 1757fe6060f1SDimitry Andric LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def); 17580b57cec5SDimitry Andric 17590b57cec5SDimitry Andric DefOMod->setImm(OMod); 17600b57cec5SDimitry Andric MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg()); 17610b57cec5SDimitry Andric MI.eraseFromParent(); 1762349cc55cSDimitry Andric 1763349cc55cSDimitry Andric // Use of output modifiers forces VOP3 encoding for a VOP2 mac/fmac 1764349cc55cSDimitry Andric // instruction, so we might as well convert it to the more flexible VOP3-only 1765349cc55cSDimitry Andric // mad/fma form. 1766349cc55cSDimitry Andric if (TII->convertToThreeAddress(*Def, nullptr, nullptr)) 1767349cc55cSDimitry Andric Def->eraseFromParent(); 1768349cc55cSDimitry Andric 17690b57cec5SDimitry Andric return true; 17700b57cec5SDimitry Andric } 17710b57cec5SDimitry Andric 1772fe6060f1SDimitry Andric // Try to fold a reg_sequence with vgpr output and agpr inputs into an 1773fe6060f1SDimitry Andric // instruction which can take an agpr. So far that means a store. 1774fe6060f1SDimitry Andric bool SIFoldOperands::tryFoldRegSequence(MachineInstr &MI) { 1775fe6060f1SDimitry Andric assert(MI.isRegSequence()); 1776fe6060f1SDimitry Andric auto Reg = MI.getOperand(0).getReg(); 1777fe6060f1SDimitry Andric 1778fe6060f1SDimitry Andric if (!ST->hasGFX90AInsts() || !TRI->isVGPR(*MRI, Reg) || 1779fe6060f1SDimitry Andric !MRI->hasOneNonDBGUse(Reg)) 1780fe6060f1SDimitry Andric return false; 1781fe6060f1SDimitry Andric 1782fe6060f1SDimitry Andric SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs; 1783bdd1243dSDimitry Andric if (!getRegSeqInit(Defs, Reg, MCOI::OPERAND_REGISTER)) 1784fe6060f1SDimitry Andric return false; 1785fe6060f1SDimitry Andric 17860fca6ea1SDimitry Andric for (auto &[Op, SubIdx] : Defs) { 1787fe6060f1SDimitry Andric if (!Op->isReg()) 1788fe6060f1SDimitry Andric return false; 1789fe6060f1SDimitry Andric if (TRI->isAGPR(*MRI, Op->getReg())) 1790fe6060f1SDimitry Andric continue; 1791fe6060f1SDimitry Andric // Maybe this is a COPY from AREG 1792fe6060f1SDimitry Andric const MachineInstr *SubDef = MRI->getVRegDef(Op->getReg()); 1793fe6060f1SDimitry Andric if (!SubDef || !SubDef->isCopy() || SubDef->getOperand(1).getSubReg()) 1794fe6060f1SDimitry Andric return false; 1795fe6060f1SDimitry Andric if (!TRI->isAGPR(*MRI, SubDef->getOperand(1).getReg())) 1796fe6060f1SDimitry Andric return false; 1797fe6060f1SDimitry Andric } 1798fe6060f1SDimitry Andric 1799fe6060f1SDimitry Andric MachineOperand *Op = &*MRI->use_nodbg_begin(Reg); 1800fe6060f1SDimitry Andric MachineInstr *UseMI = Op->getParent(); 1801fe6060f1SDimitry Andric while (UseMI->isCopy() && !Op->getSubReg()) { 1802fe6060f1SDimitry Andric Reg = UseMI->getOperand(0).getReg(); 1803fe6060f1SDimitry Andric if (!TRI->isVGPR(*MRI, Reg) || !MRI->hasOneNonDBGUse(Reg)) 1804fe6060f1SDimitry Andric return false; 1805fe6060f1SDimitry Andric Op = &*MRI->use_nodbg_begin(Reg); 1806fe6060f1SDimitry Andric UseMI = Op->getParent(); 1807fe6060f1SDimitry Andric } 1808fe6060f1SDimitry Andric 1809fe6060f1SDimitry Andric if (Op->getSubReg()) 1810fe6060f1SDimitry Andric return false; 1811fe6060f1SDimitry Andric 1812fe6060f1SDimitry Andric unsigned OpIdx = Op - &UseMI->getOperand(0); 1813fe6060f1SDimitry Andric const MCInstrDesc &InstDesc = UseMI->getDesc(); 181481ad6265SDimitry Andric const TargetRegisterClass *OpRC = 
181581ad6265SDimitry Andric       TII->getRegClass(InstDesc, OpIdx, TRI, *MI.getMF());
181681ad6265SDimitry Andric   if (!OpRC || !TRI->isVectorSuperClass(OpRC))
1817fe6060f1SDimitry Andric     return false;
1818fe6060f1SDimitry Andric
1819fe6060f1SDimitry Andric   const auto *NewDstRC = TRI->getEquivalentAGPRClass(MRI->getRegClass(Reg));
1820fe6060f1SDimitry Andric   auto Dst = MRI->createVirtualRegister(NewDstRC);
1821fe6060f1SDimitry Andric   auto RS = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1822fe6060f1SDimitry Andric                     TII->get(AMDGPU::REG_SEQUENCE), Dst);
1823fe6060f1SDimitry Andric
18240fca6ea1SDimitry Andric   for (auto &[Def, SubIdx] : Defs) {
1825fe6060f1SDimitry Andric     Def->setIsKill(false);
1826fe6060f1SDimitry Andric     if (TRI->isAGPR(*MRI, Def->getReg())) {
1827fe6060f1SDimitry Andric       RS.add(*Def);
1828fe6060f1SDimitry Andric     } else { // This is a copy
1829fe6060f1SDimitry Andric       MachineInstr *SubDef = MRI->getVRegDef(Def->getReg());
1830fe6060f1SDimitry Andric       SubDef->getOperand(1).setIsKill(false);
1831fe6060f1SDimitry Andric       RS.addReg(SubDef->getOperand(1).getReg(), 0, Def->getSubReg());
1832fe6060f1SDimitry Andric     }
18330fca6ea1SDimitry Andric     RS.addImm(SubIdx);
1834fe6060f1SDimitry Andric   }
1835fe6060f1SDimitry Andric
1836fe6060f1SDimitry Andric   Op->setReg(Dst);
1837fe6060f1SDimitry Andric   if (!TII->isOperandLegal(*UseMI, OpIdx, Op)) {
1838fe6060f1SDimitry Andric     Op->setReg(Reg);
1839fe6060f1SDimitry Andric     RS->eraseFromParent();
1840fe6060f1SDimitry Andric     return false;
1841fe6060f1SDimitry Andric   }
1842fe6060f1SDimitry Andric
1843fe6060f1SDimitry Andric   LLVM_DEBUG(dbgs() << "Folded " << *RS << " into " << *UseMI);
1844fe6060f1SDimitry Andric
1845fe6060f1SDimitry Andric   // Erase the REG_SEQUENCE eagerly, unless we followed a chain of COPY users,
1846fe6060f1SDimitry Andric   // in which case we can erase them all later in runOnMachineFunction.
1847fe6060f1SDimitry Andric   if (MRI->use_nodbg_empty(MI.getOperand(0).getReg()))
18480eae32dcSDimitry Andric     MI.eraseFromParent();
1849fe6060f1SDimitry Andric   return true;
1850fe6060f1SDimitry Andric }
1851fe6060f1SDimitry Andric
185206c3fb27SDimitry Andric /// Checks whether \p Copy is an AGPR -> VGPR copy. Returns `true` on success
185306c3fb27SDimitry Andric /// and stores the AGPR register in \p OutReg and the subreg in \p OutSubReg.
185406c3fb27SDimitry Andric static bool isAGPRCopy(const SIRegisterInfo &TRI,
185506c3fb27SDimitry Andric                        const MachineRegisterInfo &MRI, const MachineInstr &Copy,
185606c3fb27SDimitry Andric                        Register &OutReg, unsigned &OutSubReg) {
185706c3fb27SDimitry Andric   assert(Copy.isCopy());
185806c3fb27SDimitry Andric
185906c3fb27SDimitry Andric   const MachineOperand &CopySrc = Copy.getOperand(1);
186006c3fb27SDimitry Andric   Register CopySrcReg = CopySrc.getReg();
186106c3fb27SDimitry Andric   if (!CopySrcReg.isVirtual())
186206c3fb27SDimitry Andric     return false;
186306c3fb27SDimitry Andric
186406c3fb27SDimitry Andric   // Common case: copy from AGPR directly, e.g.
186506c3fb27SDimitry Andric   //   %1:vgpr_32 = COPY %0:agpr_32
186606c3fb27SDimitry Andric   if (TRI.isAGPR(MRI, CopySrcReg)) {
186706c3fb27SDimitry Andric     OutReg = CopySrcReg;
186806c3fb27SDimitry Andric     OutSubReg = CopySrc.getSubReg();
186906c3fb27SDimitry Andric     return true;
187006c3fb27SDimitry Andric   }
187106c3fb27SDimitry Andric
187206c3fb27SDimitry Andric   // Sometimes it can also involve two copies, e.g.
187306c3fb27SDimitry Andric // %1:vgpr_256 = COPY %0:agpr_256 187406c3fb27SDimitry Andric // %2:vgpr_32 = COPY %1:vgpr_256.sub0 187506c3fb27SDimitry Andric const MachineInstr *CopySrcDef = MRI.getVRegDef(CopySrcReg); 187606c3fb27SDimitry Andric if (!CopySrcDef || !CopySrcDef->isCopy()) 187706c3fb27SDimitry Andric return false; 187806c3fb27SDimitry Andric 187906c3fb27SDimitry Andric const MachineOperand &OtherCopySrc = CopySrcDef->getOperand(1); 188006c3fb27SDimitry Andric Register OtherCopySrcReg = OtherCopySrc.getReg(); 188106c3fb27SDimitry Andric if (!OtherCopySrcReg.isVirtual() || 188206c3fb27SDimitry Andric CopySrcDef->getOperand(0).getSubReg() != AMDGPU::NoSubRegister || 188306c3fb27SDimitry Andric OtherCopySrc.getSubReg() != AMDGPU::NoSubRegister || 188406c3fb27SDimitry Andric !TRI.isAGPR(MRI, OtherCopySrcReg)) 188506c3fb27SDimitry Andric return false; 188606c3fb27SDimitry Andric 188706c3fb27SDimitry Andric OutReg = OtherCopySrcReg; 188806c3fb27SDimitry Andric OutSubReg = CopySrc.getSubReg(); 188906c3fb27SDimitry Andric return true; 189006c3fb27SDimitry Andric } 189106c3fb27SDimitry Andric 189206c3fb27SDimitry Andric // Try to hoist an AGPR to VGPR copy across a PHI. 1893fe6060f1SDimitry Andric // This should allow folding of an AGPR into a consumer which may support it. 1894fe6060f1SDimitry Andric // 189506c3fb27SDimitry Andric // Example 1: LCSSA PHI 189606c3fb27SDimitry Andric // loop: 189706c3fb27SDimitry Andric // %1:vreg = COPY %0:areg 189806c3fb27SDimitry Andric // exit: 189906c3fb27SDimitry Andric // %2:vreg = PHI %1:vreg, %loop 190006c3fb27SDimitry Andric // => 190106c3fb27SDimitry Andric // loop: 190206c3fb27SDimitry Andric // exit: 190306c3fb27SDimitry Andric // %1:areg = PHI %0:areg, %loop 190406c3fb27SDimitry Andric // %2:vreg = COPY %1:areg 190506c3fb27SDimitry Andric // 190606c3fb27SDimitry Andric // Example 2: PHI with multiple incoming values: 190706c3fb27SDimitry Andric // entry: 190806c3fb27SDimitry Andric // %1:vreg = GLOBAL_LOAD(..) 190906c3fb27SDimitry Andric // loop: 191006c3fb27SDimitry Andric // %2:vreg = PHI %1:vreg, %entry, %5:vreg, %loop 191106c3fb27SDimitry Andric // %3:areg = COPY %2:vreg 191206c3fb27SDimitry Andric // %4:areg = (instr using %3:areg) 191306c3fb27SDimitry Andric // %5:vreg = COPY %4:areg 191406c3fb27SDimitry Andric // => 191506c3fb27SDimitry Andric // entry: 191606c3fb27SDimitry Andric // %1:vreg = GLOBAL_LOAD(..) 191706c3fb27SDimitry Andric // %2:areg = COPY %1:vreg 191806c3fb27SDimitry Andric // loop: 191906c3fb27SDimitry Andric // %3:areg = PHI %2:areg, %entry, %X:areg, 192006c3fb27SDimitry Andric // %4:areg = (instr using %3:areg) 192106c3fb27SDimitry Andric bool SIFoldOperands::tryFoldPhiAGPR(MachineInstr &PHI) { 1922fe6060f1SDimitry Andric assert(PHI.isPHI()); 1923fe6060f1SDimitry Andric 1924fe6060f1SDimitry Andric Register PhiOut = PHI.getOperand(0).getReg(); 192506c3fb27SDimitry Andric if (!TRI->isVGPR(*MRI, PhiOut)) 1926fe6060f1SDimitry Andric return false; 1927fe6060f1SDimitry Andric 192806c3fb27SDimitry Andric // Iterate once over all incoming values of the PHI to check if this PHI is 192906c3fb27SDimitry Andric // eligible, and determine the exact AGPR RC we'll target. 
193006c3fb27SDimitry Andric   const TargetRegisterClass *ARC = nullptr;
193106c3fb27SDimitry Andric   for (unsigned K = 1; K < PHI.getNumExplicitOperands(); K += 2) {
193206c3fb27SDimitry Andric     MachineOperand &MO = PHI.getOperand(K);
193306c3fb27SDimitry Andric     MachineInstr *Copy = MRI->getVRegDef(MO.getReg());
1934fe6060f1SDimitry Andric     if (!Copy || !Copy->isCopy())
193506c3fb27SDimitry Andric       continue;
193606c3fb27SDimitry Andric
193706c3fb27SDimitry Andric     Register AGPRSrc;
193806c3fb27SDimitry Andric     unsigned AGPRRegMask = AMDGPU::NoSubRegister;
193906c3fb27SDimitry Andric     if (!isAGPRCopy(*TRI, *MRI, *Copy, AGPRSrc, AGPRRegMask))
194006c3fb27SDimitry Andric       continue;
194106c3fb27SDimitry Andric
194206c3fb27SDimitry Andric     const TargetRegisterClass *CopyInRC = MRI->getRegClass(AGPRSrc);
194306c3fb27SDimitry Andric     if (const auto *SubRC = TRI->getSubRegisterClass(CopyInRC, AGPRRegMask))
194406c3fb27SDimitry Andric       CopyInRC = SubRC;
194506c3fb27SDimitry Andric
194606c3fb27SDimitry Andric     if (ARC && !ARC->hasSubClassEq(CopyInRC))
194706c3fb27SDimitry Andric       return false;
194806c3fb27SDimitry Andric     ARC = CopyInRC;
194906c3fb27SDimitry Andric   }
195006c3fb27SDimitry Andric
195106c3fb27SDimitry Andric   if (!ARC)
1952fe6060f1SDimitry Andric     return false;
1953fe6060f1SDimitry Andric
195406c3fb27SDimitry Andric   bool IsAGPR32 = (ARC == &AMDGPU::AGPR_32RegClass);
1955fe6060f1SDimitry Andric
195606c3fb27SDimitry Andric   // Rewrite the PHI's incoming values to ARC.
195706c3fb27SDimitry Andric   LLVM_DEBUG(dbgs() << "Folding AGPR copies into: " << PHI);
195806c3fb27SDimitry Andric   for (unsigned K = 1; K < PHI.getNumExplicitOperands(); K += 2) {
195906c3fb27SDimitry Andric     MachineOperand &MO = PHI.getOperand(K);
196006c3fb27SDimitry Andric     Register Reg = MO.getReg();
196106c3fb27SDimitry Andric
196206c3fb27SDimitry Andric     MachineBasicBlock::iterator InsertPt;
196306c3fb27SDimitry Andric     MachineBasicBlock *InsertMBB = nullptr;
196406c3fb27SDimitry Andric
196506c3fb27SDimitry Andric     // Look at the def of Reg, ignoring all copies.
196606c3fb27SDimitry Andric     unsigned CopyOpc = AMDGPU::COPY;
196706c3fb27SDimitry Andric     if (MachineInstr *Def = MRI->getVRegDef(Reg)) {
196806c3fb27SDimitry Andric
196906c3fb27SDimitry Andric       // Look at pre-existing COPY instructions from ARC: Steal the operand. If
197006c3fb27SDimitry Andric       // the copy was single-use, it will be removed by DCE later.
197106c3fb27SDimitry Andric       if (Def->isCopy()) {
197206c3fb27SDimitry Andric         Register AGPRSrc;
197306c3fb27SDimitry Andric         unsigned AGPRSubReg = AMDGPU::NoSubRegister;
197406c3fb27SDimitry Andric         if (isAGPRCopy(*TRI, *MRI, *Def, AGPRSrc, AGPRSubReg)) {
197506c3fb27SDimitry Andric           MO.setReg(AGPRSrc);
197606c3fb27SDimitry Andric           MO.setSubReg(AGPRSubReg);
197706c3fb27SDimitry Andric           continue;
197806c3fb27SDimitry Andric         }
197906c3fb27SDimitry Andric
198006c3fb27SDimitry Andric         // If this is a multi-use SGPR -> VGPR copy, use V_ACCVGPR_WRITE on
198106c3fb27SDimitry Andric         // GFX908 directly instead of a COPY. Otherwise, SIFoldOperands may try
198206c3fb27SDimitry Andric         // to fold the sgpr -> vgpr -> agpr copy into an sgpr -> agpr copy
198306c3fb27SDimitry Andric         // which is unlikely to be profitable.
198406c3fb27SDimitry Andric         //
198506c3fb27SDimitry Andric         // Note that V_ACCVGPR_WRITE is only used for AGPR_32.
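        // For wider register classes, CopyOpc stays a plain COPY.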
198606c3fb27SDimitry Andric         MachineOperand &CopyIn = Def->getOperand(1);
198706c3fb27SDimitry Andric         if (IsAGPR32 && !ST->hasGFX90AInsts() && !MRI->hasOneNonDBGUse(Reg) &&
198806c3fb27SDimitry Andric             TRI->isSGPRReg(*MRI, CopyIn.getReg()))
198906c3fb27SDimitry Andric           CopyOpc = AMDGPU::V_ACCVGPR_WRITE_B32_e64;
199006c3fb27SDimitry Andric       }
199106c3fb27SDimitry Andric
199206c3fb27SDimitry Andric       InsertMBB = Def->getParent();
199306c3fb27SDimitry Andric       InsertPt = InsertMBB->SkipPHIsLabelsAndDebug(++Def->getIterator());
199406c3fb27SDimitry Andric     } else {
199506c3fb27SDimitry Andric       InsertMBB = PHI.getOperand(MO.getOperandNo() + 1).getMBB();
199606c3fb27SDimitry Andric       InsertPt = InsertMBB->getFirstTerminator();
199706c3fb27SDimitry Andric     }
199806c3fb27SDimitry Andric
1999fe6060f1SDimitry Andric     Register NewReg = MRI->createVirtualRegister(ARC);
200006c3fb27SDimitry Andric     MachineInstr *MI = BuildMI(*InsertMBB, InsertPt, PHI.getDebugLoc(),
200106c3fb27SDimitry Andric                                TII->get(CopyOpc), NewReg)
200206c3fb27SDimitry Andric                            .addReg(Reg);
200306c3fb27SDimitry Andric     MO.setReg(NewReg);
200406c3fb27SDimitry Andric
200506c3fb27SDimitry Andric     (void)MI;
200606c3fb27SDimitry Andric     LLVM_DEBUG(dbgs() << " Created COPY: " << *MI);
200706c3fb27SDimitry Andric   }
200806c3fb27SDimitry Andric
200906c3fb27SDimitry Andric   // Replace the PHI's result with a new register.
201006c3fb27SDimitry Andric   Register NewReg = MRI->createVirtualRegister(ARC);
2011fe6060f1SDimitry Andric   PHI.getOperand(0).setReg(NewReg);
2012fe6060f1SDimitry Andric
201306c3fb27SDimitry Andric   // COPY that new register back to the original PhiOut register. This COPY
201406c3fb27SDimitry Andric   // will usually be folded out later.
2015fe6060f1SDimitry Andric   MachineBasicBlock *MBB = PHI.getParent();
201606c3fb27SDimitry Andric   BuildMI(*MBB, MBB->getFirstNonPHI(), PHI.getDebugLoc(),
2017fe6060f1SDimitry Andric           TII->get(AMDGPU::COPY), PhiOut)
201806c3fb27SDimitry Andric       .addReg(NewReg);
2019fe6060f1SDimitry Andric
202006c3fb27SDimitry Andric   LLVM_DEBUG(dbgs() << " Done: Folded " << PHI);
2021fe6060f1SDimitry Andric   return true;
2022fe6060f1SDimitry Andric }
2023fe6060f1SDimitry Andric
2024fe6060f1SDimitry Andric // Attempt to convert a VGPR load to an AGPR load.
2025fe6060f1SDimitry Andric bool SIFoldOperands::tryFoldLoad(MachineInstr &MI) {
2026fe6060f1SDimitry Andric   assert(MI.mayLoad());
2027fe6060f1SDimitry Andric   if (!ST->hasGFX90AInsts() || MI.getNumExplicitDefs() != 1)
2028fe6060f1SDimitry Andric     return false;
2029fe6060f1SDimitry Andric
2030fe6060f1SDimitry Andric   MachineOperand &Def = MI.getOperand(0);
2031fe6060f1SDimitry Andric   if (!Def.isDef())
2032fe6060f1SDimitry Andric     return false;
2033fe6060f1SDimitry Andric
2034fe6060f1SDimitry Andric   Register DefReg = Def.getReg();
2035fe6060f1SDimitry Andric
2036fe6060f1SDimitry Andric   if (DefReg.isPhysical() || !TRI->isVGPR(*MRI, DefReg))
2037fe6060f1SDimitry Andric     return false;
2038fe6060f1SDimitry Andric
2039fe6060f1SDimitry Andric   SmallVector<const MachineInstr*, 8> Users;
2040fe6060f1SDimitry Andric   SmallVector<Register, 8> MoveRegs;
2041bdd1243dSDimitry Andric   for (const MachineInstr &I : MRI->use_nodbg_instructions(DefReg))
2042fe6060f1SDimitry Andric     Users.push_back(&I);
2043bdd1243dSDimitry Andric
2044fe6060f1SDimitry Andric   if (Users.empty())
2045fe6060f1SDimitry Andric     return false;
2046fe6060f1SDimitry Andric
2047fe6060f1SDimitry Andric   // Check that all uses are a copy to an AGPR or a reg_sequence producing an AGPR.
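  // For example (an illustrative chain):
  //   %v:vgpr_32 = BUFFER_LOAD_DWORD ...
  //   %a:agpr_32 = COPY %v
  // where %v has no other uses lets the load define an AGPR directly.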
  while (!Users.empty()) {
    const MachineInstr *I = Users.pop_back_val();
    if (!I->isCopy() && !I->isRegSequence())
      return false;
    Register DstReg = I->getOperand(0).getReg();
    // Physical registers may have more than one defining instruction.
    if (DstReg.isPhysical())
      return false;
    if (TRI->isAGPR(*MRI, DstReg))
      continue;
    MoveRegs.push_back(DstReg);
    for (const MachineInstr &U : MRI->use_nodbg_instructions(DstReg))
      Users.push_back(&U);
  }

  const TargetRegisterClass *RC = MRI->getRegClass(DefReg);
  MRI->setRegClass(DefReg, TRI->getEquivalentAGPRClass(RC));
  if (!TII->isOperandLegal(MI, 0, &Def)) {
    MRI->setRegClass(DefReg, RC);
    return false;
  }

  while (!MoveRegs.empty()) {
    Register Reg = MoveRegs.pop_back_val();
    MRI->setRegClass(Reg, TRI->getEquivalentAGPRClass(MRI->getRegClass(Reg)));
  }

  LLVM_DEBUG(dbgs() << "Folded " << MI);

  return true;
}

// tryFoldPhiAGPR will aggressively try to create AGPR PHIs.
// For GFX90A and later, this is pretty much always a good thing, but for
// GFX908 there are cases where it can create many more AGPR-AGPR copies,
// which are expensive on this architecture due to the lack of V_ACCVGPR_MOV.
//
// This function looks at all AGPR PHIs in a basic block and collects their
// operands. Then, it checks for registers that are used more than once across
// all PHIs and caches them in a VGPR. This prevents ExpandPostRAPseudo from
// having to create one VGPR temporary per use, which can get very messy if
// these PHIs come from a broken-up large PHI (e.g. 32 AGPR phis, one per
// vector element).
//
// Example
// a:
//   %in:agpr_256 = COPY %foo:vgpr_256
// c:
//   %x:agpr_32 = ..
// b:
//   %0:areg = PHI %in.sub0:agpr_32, %a, %x, %c
//   %1:areg = PHI %in.sub0:agpr_32, %a, %y, %c
//   %2:areg = PHI %in.sub0:agpr_32, %a, %z, %c
// =>
// a:
//   %in:agpr_256 = COPY %foo:vgpr_256
//   %tmp:vgpr_32 = V_ACCVGPR_READ_B32_e64 %in.sub0:agpr_32
//   %tmp_agpr:agpr_32 = COPY %tmp
// c:
//   %x:agpr_32 = ..
// b:
//   %0:areg = PHI %tmp_agpr, %a, %x, %c
//   %1:areg = PHI %tmp_agpr, %a, %y, %c
//   %2:areg = PHI %tmp_agpr, %a, %z, %c
bool SIFoldOperands::tryOptimizeAGPRPhis(MachineBasicBlock &MBB) {
  // This is only really needed on GFX908 where AGPR-AGPR copies are
  // unreasonably difficult.
  if (ST->hasGFX90AInsts())
    return false;

  // Look at all AGPR PHIs and collect the register + subregister used.
  DenseMap<std::pair<Register, unsigned>, std::vector<MachineOperand *>>
      RegToMO;

  for (auto &MI : MBB) {
    if (!MI.isPHI())
      break;

    if (!TRI->isAGPR(*MRI, MI.getOperand(0).getReg()))
      continue;

    for (unsigned K = 1; K < MI.getNumOperands(); K += 2) {
      MachineOperand &PhiMO = MI.getOperand(K);
      if (!PhiMO.getSubReg())
        continue;
      RegToMO[{PhiMO.getReg(), PhiMO.getSubReg()}].push_back(&PhiMO);
    }
  }

  // For each (Reg, SubReg) pair that is used more than once, cache the value
  // in a VGPR.
  bool Changed = false;
  for (const auto &[Entry, MOs] : RegToMO) {
    if (MOs.size() == 1)
      continue;

    const auto [Reg, SubReg] = Entry;
    MachineInstr *Def = MRI->getVRegDef(Reg);
    MachineBasicBlock *DefMBB = Def->getParent();

    // Create a copy in a VGPR using V_ACCVGPR_READ_B32_e64 so it's not folded
    // out.
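    // That is, roughly (mirroring the example above; vreg names hypothetical):
    //   %tmp:vgpr_32 = V_ACCVGPR_READ_B32_e64 %Reg.SubReg
    //   %tmp_agpr = COPY %tmp
    // A plain AGPR-to-AGPR COPY here could be folded straight back out.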
    const TargetRegisterClass *ARC = getRegOpRC(*MRI, *TRI, *MOs.front());
    Register TempVGPR =
        MRI->createVirtualRegister(TRI->getEquivalentVGPRClass(ARC));
    MachineInstr *VGPRCopy =
        BuildMI(*DefMBB, ++Def->getIterator(), Def->getDebugLoc(),
                TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64), TempVGPR)
            .addReg(Reg, /* flags */ 0, SubReg);

    // Copy back to an AGPR and use that instead of the AGPR subreg in all MOs.
    Register TempAGPR = MRI->createVirtualRegister(ARC);
    BuildMI(*DefMBB, ++VGPRCopy->getIterator(), Def->getDebugLoc(),
            TII->get(AMDGPU::COPY), TempAGPR)
        .addReg(TempVGPR);

    LLVM_DEBUG(dbgs() << "Caching AGPR into VGPR: " << *VGPRCopy);
    for (MachineOperand *MO : MOs) {
      MO->setReg(TempAGPR);
      MO->setSubReg(AMDGPU::NoSubRegister);
      LLVM_DEBUG(dbgs() << "  Changed PHI Operand: " << *MO << "\n");
    }

    Changed = true;
  }

  return Changed;
}

bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if the IEEE bit is enabled. omod also does
  // not correctly handle signed zeros.
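  //
  // For reference, omod folding (tryFoldOMod) rewrites a multiply of a VALU
  // result by 2.0, 4.0 or 0.5 into the defining instruction's output
  // modifier; roughly (a sketch with operand lists abridged and vreg names
  // hypothetical):
  //   %x:vgpr_32 = V_ADD_F32_e64 %a, %b
  //   %y:vgpr_32 = V_MUL_F32_e64 2.0, %x
  // =>
  //   %y:vgpr_32 = V_ADD_F32_e64 %a, %b, mul:2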
  //
  // FIXME: Also need to check strictfp
  bool IsIEEEMode = MFI->getMode().IEEE;
  bool HasNSZ = MFI->hasNoSignedZerosFPMath();

  bool Changed = false;
  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineOperand *CurrentKnownM0Val = nullptr;
    for (auto &MI : make_early_inc_range(*MBB)) {
      Changed |= tryFoldCndMask(MI);

      if (tryFoldZeroHighBits(MI)) {
        Changed = true;
        continue;
      }

      if (MI.isRegSequence() && tryFoldRegSequence(MI)) {
        Changed = true;
        continue;
      }

      if (MI.isPHI() && tryFoldPhiAGPR(MI)) {
        Changed = true;
        continue;
      }

      if (MI.mayLoad() && tryFoldLoad(MI)) {
        Changed = true;
        continue;
      }

      if (TII->isFoldableCopy(MI)) {
        Changed |= tryFoldFoldableCopy(MI, CurrentKnownM0Val);
        continue;
      }

      // Saw an unknown clobber of m0, so we no longer know what it is.
      if (CurrentKnownM0Val && MI.modifiesRegister(AMDGPU::M0, TRI))
        CurrentKnownM0Val = nullptr;

      // TODO: Omod might be OK if there is NSZ only on the source
      // instruction, and not the omod multiply.
      if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
          !tryFoldOMod(MI))
        Changed |= tryFoldClamp(MI);
    }

    Changed |= tryOptimizeAGPRPhis(*MBB);
  }

  return Changed;
}