Lines Matching defs:UseMI
25 MachineInstr *UseMI;
39 UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
78 bool frameIndexMayFold(const MachineInstr &UseMI, int OpNo,
95 bool tryToFoldACImm(const MachineOperand &OpToFold, MachineInstr *UseMI,
99 MachineInstr *UseMI,
180 bool SIFoldOperands::frameIndexMayFold(const MachineInstr &UseMI, int OpNo,
185 const unsigned Opc = UseMI.getOpcode();
186 if (TII->isMUBUF(UseMI))
188 if (!TII->isFLATScratch(UseMI))
204 MachineInstr *MI = Fold.UseMI;
234 MachineInstr *MI = Fold.UseMI;
358 MachineInstr *MI = Fold.UseMI;
451 return any_of(FoldList, [&](const auto &C) { return C.UseMI == MI; });
460 if (Fold.UseMI == MI && Fold.UseOpNo == OpNo)
690 const MachineOperand &OpToFold, MachineInstr *UseMI, unsigned UseOpIdx,
692 const MCInstrDesc &Desc = UseMI->getDesc();
701 TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) {
702 UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm());
713 if (isUseMIInFoldList(FoldList, UseMI))
718 MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
722 TII->isOperandLegal(*UseMI, UseOpIdx, &DefOp)) {
723 UseMI->getOperand(UseOpIdx).ChangeToImmediate(DefOp.getImm());
742 !TII->isOperandLegal(*UseMI, UseOpIdx, Op))
751 appendFoldCandidate(FoldList, UseMI, UseOpIdx, Defs[0].first);
757 MachineInstr *UseMI,
761 const MachineOperand *UseOp = &UseMI->getOperand(UseOpIdx);
763 if (!isUseSafeToFold(*UseMI, *UseOp))
774 if (UseMI->isRegSequence()) {
775 Register RegSeqDstReg = UseMI->getOperand(0).getReg();
776 unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();
785 if (tryToFoldACImm(UseMI->getOperand(0), RSUseMI,
798 if (tryToFoldACImm(OpToFold, UseMI, UseOpIdx, FoldList))
801 if (frameIndexMayFold(*UseMI, UseOpIdx, OpToFold)) {
805 if (TII->isMUBUF(*UseMI)) {
806 if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
813 *TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
820 UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());
822 const unsigned Opc = UseMI->getOpcode();
823 if (TII->isFLATScratch(*UseMI) &&
827 UseMI->setDesc(TII->get(NewOpc));
836 if (FoldingImmLike && UseMI->isCopy()) {
837 Register DestReg = UseMI->getOperand(0).getReg();
838 Register SrcReg = UseMI->getOperand(1).getReg();
853 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64));
854 UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
855 CopiesToReplace.push_back(UseMI);
867 MachineInstr::mop_iterator ImpOpI = UseMI->implicit_operands().begin();
868 MachineInstr::mop_iterator ImpOpE = UseMI->implicit_operands().end();
872 UseMI->removeOperand(UseMI->getOperandNo(Tmp));
874 UseMI->setDesc(TII->get(MovOp));
877 const auto &SrcOp = UseMI->getOperand(UseOpIdx);
879 MachineFunction *MF = UseMI->getParent()->getParent();
880 UseMI->removeOperand(1);
881 UseMI->addOperand(*MF, MachineOperand::CreateImm(0)); // src0_modifiers
882 UseMI->addOperand(NewSrcOp); // src0
883 UseMI->addOperand(*MF, MachineOperand::CreateImm(0)); // op_sel
885 UseOp = &UseMI->getOperand(UseOpIdx);
887 CopiesToReplace.push_back(UseMI);
889 if (UseMI->isCopy() && OpToFold.isReg() &&
890 UseMI->getOperand(0).getReg().isVirtual() &&
891 !UseMI->getOperand(1).getSubReg()) {
892 LLVM_DEBUG(dbgs() << "Folding " << OpToFold << "\n into " << *UseMI);
893 unsigned Size = TII->getOpSize(*UseMI, 1);
895 UseMI->getOperand(1).setReg(UseReg);
896 UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
897 UseMI->getOperand(1).setIsKill(false);
898 CopiesToReplace.push_back(UseMI);
909 if (Size > 4 && TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
911 const DebugLoc &DL = UseMI->getDebugLoc();
912 MachineBasicBlock &MBB = *UseMI->getParent();
914 UseMI->setDesc(TII->get(AMDGPU::REG_SEQUENCE));
915 for (unsigned I = UseMI->getNumOperands() - 1; I > 0; --I)
916 UseMI->removeOperand(I);
918 MachineInstrBuilder B(*MBB.getParent(), UseMI);
929 BuildMI(MBB, UseMI, DL,
956 BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Tmp).add(*Def);
967 BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Vgpr).add(*Def);
971 BuildMI(MBB, UseMI, DL,
978 LLVM_DEBUG(dbgs() << "Folded " << *UseMI);
985 Register Reg0 = UseMI->getOperand(0).getReg();
986 Register Reg1 = UseMI->getOperand(1).getReg();
988 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64));
990 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64));
993 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_MOV_B32));
997 unsigned UseOpc = UseMI->getOpcode();
1008 UseMI->getOperand(UseOpIdx).getReg(),
1010 *UseMI))
1013 UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));
1016 UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
1018 UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
1019 UseMI->removeOperand(2); // Remove exec read (or src1 for readlane)
1025 UseMI->getOperand(UseOpIdx).getReg(),
1027 *UseMI))
1034 UseMI->setDesc(TII->get(AMDGPU::COPY));
1035 UseMI->getOperand(1).setReg(OpToFold.getReg());
1036 UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
1037 UseMI->getOperand(1).setIsKill(false);
1038 UseMI->removeOperand(2); // Remove exec read (or src1 for readlane)
1043 const MCInstrDesc &UseDesc = UseMI->getDesc();
1069 tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold);
1098 tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp);
1102 tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold);
1382 for (auto &UseMI :
1392 if (tryConstantFoldOp(&UseMI)) {
1393 LLVM_DEBUG(dbgs() << "Constant folded " << UseMI);
1403 MachineInstr *UseMI = U->getParent();
1404 foldOperand(OpToFold, UseMI, UseMI->getOperandNo(U), FoldList,
1422 execMayBeModifiedBeforeUse(*MRI, Reg, *DefMI, *Fold.UseMI))
1436 << *Fold.UseMI);
1439 TII->commuteInstruction(*Fold.UseMI, false);
1800 MachineInstr *UseMI = Op->getParent();
1801 while (UseMI->isCopy() && !Op->getSubReg()) {
1802 Reg = UseMI->getOperand(0).getReg();
1806 UseMI = Op->getParent();
1812 unsigned OpIdx = Op - &UseMI->getOperand(0);
1813 const MCInstrDesc &InstDesc = UseMI->getDesc();
1837 if (!TII->isOperandLegal(*UseMI, OpIdx, Op)) {
1843 LLVM_DEBUG(dbgs() << "Folded " << *RS << " into " << *UseMI);
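
Read together, these matches show the two roles `UseMI` plays in SIFoldOperands: it is the `FoldCandidate` member that records which instruction will receive the folded operand (lines 25 and 39, later read back as `Fold.UseMI`), and it is the local name for the use instruction being rewritten inside `foldOperand`, `tryToFoldACImm`, and the related helpers. As a rough illustration of that record-then-apply pattern only, here is a minimal, self-contained sketch; the stub types, field names, and the `applyFolds` helper below are simplified stand-ins chosen for this example, not the actual LLVM definitions.

```cpp
// Illustrative sketch only: simplified stand-ins for the LLVM types, not the
// real SIFoldOperands implementation.
#include <cstdint>
#include <iostream>
#include <vector>

struct MachineInstrStub {            // stand-in for llvm::MachineInstr
  const char *Name;
  std::vector<int64_t> Operands;     // immediates only, for brevity
};

// Mirrors the idea behind FoldCandidate: remember which instruction (UseMI)
// and which operand slot (UseOpNo) should receive the folded value, and apply
// the change later, once every candidate has been collected and checked.
struct FoldCandidateStub {
  MachineInstrStub *UseMI;
  unsigned UseOpNo;
  int64_t ImmToFold;
};

static void applyFolds(std::vector<FoldCandidateStub> &FoldList) {
  for (FoldCandidateStub &Fold : FoldList) {
    MachineInstrStub *MI = Fold.UseMI;           // cf. "MachineInstr *MI = Fold.UseMI;"
    MI->Operands[Fold.UseOpNo] = Fold.ImmToFold; // cf. ChangeToImmediate(...)
  }
}

int main() {
  MachineInstrStub Add{"v_add_u32", {0, 0}};
  std::vector<FoldCandidateStub> FoldList;
  FoldList.push_back({&Add, 1, 42});             // fold imm 42 into operand 1
  applyFolds(FoldList);
  std::cout << Add.Name << " src1 = " << Add.Operands[1] << "\n"; // prints 42
}
```

The deferred-apply structure matters in the real pass because a candidate can still be rejected (for example when `isOperandLegal` fails or the instruction must first be commuted, as at lines 1422-1439 above), so `UseMI` is kept as a handle until the fold is known to be safe.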