/llvm-project/llvm/lib/Target/AMDGPU/ |
H A D | AMDGPUPostLegalizerCombiner.cpp | 384 MachineInstr *LoadMI = MRI.getVRegDef(LoadReg); in matchCombineSignExtendInReg() local
|
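The AMDGPU hit above (and the PowerPC peephole below) follow the same basic pattern: take a virtual register operand and walk back to its defining instruction with MachineRegisterInfo::getVRegDef, then check whether that definition is a load worth combining. A minimal sketch of the pattern in GlobalISel terms (the PowerPC peephole does the same against target load opcodes); the helper name and the simplified checks are hypothetical, not code from either file:

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"
    #include "llvm/CodeGen/TargetOpcodes.h"

    using namespace llvm;

    // Hypothetical helper: given the source register of an extend, return the
    // generic load feeding it, but only if that load has a single user so the
    // combine may later rewrite or erase it.
    static MachineInstr *getSingleUseDefiningLoad(Register SrcReg,
                                                  MachineRegisterInfo &MRI) {
      // While the function is in SSA form, getVRegDef returns the unique
      // defining MachineInstr of a virtual register.
      MachineInstr *LoadMI = MRI.getVRegDef(SrcReg);
      if (!LoadMI || LoadMI->getOpcode() != TargetOpcode::G_LOAD)
        return nullptr;
      if (!MRI.hasOneNonDBGUse(LoadMI->getOperand(0).getReg()))
        return nullptr;
      return LoadMI;
    }
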
/llvm-project/llvm/lib/CodeGen/ |
H A D | InlineSpiller.cpp | 888 foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops, MachineInstr *LoadMI) foldMemoryOperand() argument
|
H A D | TargetInstrInfo.cpp | 731 MachineInstr &LoadMI, in foldMemoryOperand() argument
|
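Both CodeGen hits sit on the two ends of the same folding API: InlineSpiller hands a load (for example a rematerialized fill) to TargetInstrInfo::foldMemoryOperand, which performs the common legality checks and then delegates to the target's foldMemoryOperandImpl override (see the SystemZ, AArch64, and X86 entries further down). A hedged sketch of the calling side, assuming TII and LIS are already in scope; the helper name is hypothetical and the liveness bookkeeping of the real caller is omitted:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/LiveIntervals.h"
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/TargetInstrInfo.h"

    using namespace llvm;

    // Hypothetical helper: try to fold the memory access performed by LoadMI
    // directly into operand OpIdx of UserMI. On success the target returns a
    // new, folded instruction and the caller retires the original user.
    static bool tryFoldLoadIntoUser(MachineInstr &UserMI, unsigned OpIdx,
                                    MachineInstr &LoadMI,
                                    const TargetInstrInfo &TII,
                                    LiveIntervals *LIS) {
      SmallVector<unsigned, 1> Ops = {OpIdx};
      MachineInstr *FoldMI = TII.foldMemoryOperand(UserMI, Ops, LoadMI, LIS);
      if (!FoldMI)
        return false; // Target declined; keep the explicit load.
      // InlineSpiller additionally updates LiveIntervals (e.g. via
      // ReplaceMachineInstrInMaps) before deleting anything; skipped here.
      UserMI.eraseFromParent();
      return true;
    }
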
/llvm-project/llvm/lib/Target/PowerPC/ |
H A D | PPCMIPeephole.cpp | 668 MachineInstr *LoadMI = MRI->getVRegDef(FeedReg1); simplifyCode() local
|
/llvm-project/llvm/lib/Target/AArch64/ |
H A D | AArch64LoadStoreOptimizer.cpp | 1298 MachineInstr &LoadMI = *I; findMatchingStore() local
|
H A D | AArch64FastISel.cpp | 4527 const auto *LoadMI = MI; optimizeIntExtLoad() local
|
H A D | AArch64InstrInfo.cpp | 5641 MachineInstr &LoadMI = *--InsertPt; foldMemoryOperandImpl() local
|
/llvm-project/llvm/lib/CodeGen/GlobalISel/ |
H A D | CombinerHelper.cpp | 593 ChoosePreferredUse(MachineInstr &LoadMI, PreferredTuple &CurrentUse, const LLT TyForCandidate, unsigned OpcodeForCandidate, MachineInstr *MIForCandidate) ChoosePreferredUse() argument
                           | 712 GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(&MI); matchCombineExtendingLoads() local
                           | 915 GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(MRI.getVRegDef(SrcReg)); matchCombineLoadWithAndMask() local
                           | 1011 if (auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser, MRI)) { matchSextTruncSextLoad() local
                           | 1324 auto *LoadMI = getOpcodeDef<GLoad>(MI.getOperand(1).getReg(), MRI); matchCombineExtractedVectorLoad() local
                           | [all...]
H A D | LegalizerHelper.cpp | 1341 auto &LoadMI = cast<GLoad>(MI); narrowScalar() local
                            | 1359 auto &LoadMI = cast<GExtLoad>(MI); narrowScalar() local
                            | 3400 lowerLoad(GAnyLoad &LoadMI) lowerLoad() argument
                            | [all...]
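
The GlobalISel combiner and legalizer hits lean on the typed generic-instruction wrappers (GAnyLoad, GLoad, GExtLoad, GSExtLoad) and on getOpcodeDef<>() instead of raw opcode comparisons. A small illustrative check, assuming only a MachineRegisterInfo still in SSA form; the real combines also verify legality and memory sizes:

    #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
    #include "llvm/CodeGen/GlobalISel/Utils.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;

    // Illustrative only: getOpcodeDef<GSExtLoad> looks through copies to the
    // defining instruction and returns it only if it is a G_SEXTLOAD. The typed
    // wrapper then gives structured access to the load, e.g. getPointerReg()
    // and getMMO(), without manual operand indexing.
    static bool isFedByNonVolatileSExtLoad(Register Reg,
                                           const MachineRegisterInfo &MRI) {
      if (GSExtLoad *LoadMI = getOpcodeDef<GSExtLoad>(Reg, MRI))
        return !LoadMI->getMMO().isVolatile();
      return false;
    }
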
/llvm-project/llvm/lib/Target/SystemZ/ |
H A D | SystemZInstrInfo.cpp | 1522 foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops, MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, LiveIntervals *LIS) const foldMemoryOperandImpl() argument
|
/llvm-project/llvm/lib/Target/AArch64/GISel/ |
H A D | AArch64InstructionSelector.cpp | 2656 auto *LoadMI = emitLoadFromConstantPool(FPImm, MIB); select() local
                                       | 3288 auto *LoadMI = getOpcodeDef(TargetOpcode::G_LOAD, SrcReg, MRI); select() local
                                       | 4120 MachineInstr *LoadMI = nullptr; emitLoadFromConstantPool() local
                                       | [all...]
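
emitLoadFromConstantPool (the first and third hits) is the usual recipe for materializing an FP immediate the selector cannot encode: intern the constant in the MachineConstantPool, materialize its address, then load from it with a constant-pool MachineMemOperand. The selector itself emits AArch64 ADRP + LDR*ui pairs; the sketch below shows the same three steps with generic opcodes instead, and it assumes a recent LLVM that provides MachineIRBuilder::buildConstantPool (G_CONSTANT_POOL) plus a 64-bit address space 0:

    #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
    #include "llvm/CodeGen/MachineConstantPool.h"
    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/CodeGen/MachineMemOperand.h"
    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DataLayout.h"

    using namespace llvm;

    // Sketch, not the selector's code: intern CPVal, build its address, load it.
    static MachineInstr *loadConstantFromPool(const Constant *CPVal,
                                              MachineIRBuilder &MIRBuilder) {
      MachineFunction &MF = MIRBuilder.getMF();
      Align Alignment = MF.getDataLayout().getPrefTypeAlign(CPVal->getType());
      unsigned CPIdx =
          MF.getConstantPool()->getConstantPoolIndex(CPVal, Alignment);

      LLT PtrTy = LLT::pointer(0, 64); // assumed pointer width / address space
      auto Addr = MIRBuilder.buildConstantPool(PtrTy, CPIdx);

      LLT MemTy = LLT::scalar(
          CPVal->getType()->getPrimitiveSizeInBits().getFixedValue());
      MachineMemOperand *MMO = MF.getMachineMemOperand(
          MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
          MemTy, Alignment);
      return MIRBuilder.buildLoad(MemTy, Addr, *MMO).getInstr();
    }
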
/llvm-project/llvm/lib/Target/X86/ |
H A D | X86InstrInfo.cpp | 7564 isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI, const MachineInstr &UserMI, const MachineFunction &MF) isNonFoldablePartialRegisterLoad() argument
                         | 7988 foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops, MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, LiveIntervals *LIS) const foldMemoryOperandImpl() argument
                         | [all...]
H A D | X86FastISel.cpp | 791 MachineInstrBuilder LoadMI = handleConstantAddresses() local
|
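The X86FastISel hit builds the load by hand with BuildMI and then appends the five X86 address operands (base, scale, index, displacement, segment); the real code fills them in via addFullAddress() from the target-private X86InstrBuilder.h. A sketch of that shape, where the opcode, register class, and the five-operand addressing convention are X86-specific assumptions:

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstrBuilder.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"
    #include "llvm/CodeGen/TargetInstrInfo.h"
    #include "llvm/CodeGen/TargetRegisterInfo.h"

    using namespace llvm;

    // Sketch: define a fresh vreg with a load opcode and spell out the X86
    // memory operands explicitly (what addFullAddress would normally do).
    static MachineInstrBuilder buildSimpleLoad(
        MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
        const TargetInstrInfo &TII, MachineRegisterInfo &MRI, unsigned Opc,
        const TargetRegisterClass &RC, Register BaseReg, int64_t Disp) {
      Register DstReg = MRI.createVirtualRegister(&RC);
      return BuildMI(MBB, InsertPt, DebugLoc(), TII.get(Opc), DstReg)
          .addReg(BaseReg) // base
          .addImm(1)       // scale
          .addReg(0)       // no index register
          .addImm(Disp)    // displacement
          .addReg(0);      // no segment register
    }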