/llvm-project/llvm/include/llvm/CodeGen/
  TargetInstrInfo.h
    1446  areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2)  [argument]
    1459  shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads)  [argument]
    1564  shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef<const MachineOperand *> BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes)  [argument]

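The three declarations above are the TargetInstrInfo memory-scheduling hooks that the backend entries further down this list (AArch64, RISCV, PowerPC, ARM, X86, AMDGPU) override or call. A minimal sketch of what a backend override of shouldClusterMemOps can look like follows; the class name MyTargetInstrInfo and the 4-op/64-byte clustering policy are illustrative assumptions, not code from any file listed here.

    #include "llvm/CodeGen/TargetInstrInfo.h"
    #include <cstdlib>
    using namespace llvm;

    // Sketch of a hypothetical backend override of the shouldClusterMemOps hook
    // declared at TargetInstrInfo.h:1564 above. MyTargetInstrInfo stands in for
    // a real backend's TargetInstrInfo subclass declared elsewhere.
    bool MyTargetInstrInfo::shouldClusterMemOps(
        ArrayRef<const MachineOperand *> BaseOps1, int64_t Offset1,
        bool OffsetIsScalable1, ArrayRef<const MachineOperand *> BaseOps2,
        int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize,
        unsigned NumBytes) const {
      // Scalable (vscale-relative) offsets cannot be compared numerically here.
      if (OffsetIsScalable1 || OffsetIsScalable2)
        return false;
      // Illustrative policy: cluster at most four ops whose constant offsets
      // fall within a 64-byte window of each other.
      return ClusterSize <= 4 && std::abs(Offset1 - Offset2) < 64;
    }

The real overrides listed below (AArch64InstrInfo.cpp, RISCVInstrInfo.cpp, PPCInstrInfo.cpp, SIInstrInfo.cpp) apply target-specific rules rather than a fixed window like this.
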
/llvm-project/llvm/lib/Target/Mips/
  MicroMipsSizeReduction.cpp
    400  int64_t Offset1, Offset2;  [local in ConsecutiveInstr()]

/llvm-project/llvm/lib/Transforms/Scalar/
  SeparateConstOffsetFromGEP.cpp
    1369  Value *Offset2 = Second->getOperand(1);  [local in swapGEPOperand()]
  ConstraintElimination.cpp
    667  int64_t Offset2 = BDec.Offset;  [local in getConstraint()]

/llvm-project/llvm/lib/CodeGen/SelectionDAG/
  ScheduleDAGSDNodes.cpp
    247  int64_t Offset1, Offset2;  [local in ClusterNeighboringLoads()]

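ClusterNeighboringLoads is the SelectionDAG scheduler routine that consumes the two SDNode-based hooks from the TargetInstrInfo.h entry at the top of this list. A minimal sketch of how that pairing is typically used, assuming an illustrative helper name (wantLoadsClustered) and load count:

    #include "llvm/CodeGen/SelectionDAGNodes.h"
    #include "llvm/CodeGen/TargetInstrInfo.h"
    using namespace llvm;

    // First ask the target whether the two loads share a base pointer and, if
    // so, recover their byte offsets; then let the target decide whether the
    // scheduler should try to issue them near each other.
    static bool wantLoadsClustered(const TargetInstrInfo &TII, SDNode *Load1,
                                   SDNode *Load2) {
      int64_t Offset1, Offset2;
      if (!TII.areLoadsFromSameBasePtr(Load1, Load2, Offset1, Offset2))
        return false;
      return TII.shouldScheduleLoadsNear(Load1, Load2, Offset1, Offset2,
                                         /*NumLoads=*/2);
    }

Both hooks default to returning false in TargetInstrInfo, so this check only fires for targets that implement them (see the ARM and X86 entries below).
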
/llvm-project/llvm/lib/IR/
  Value.cpp
    1033  APInt Offset2(DL.getIndexTypeSizeInBits(Ptr2->getType()), 0);  [local in getPointerOffsetFrom()]

/llvm-project/llvm/lib/Transforms/AggressiveInstCombine/
  AggressiveInstCombine.cpp
    677  APInt Offset2(DL.getIndexTypeSizeInBits(Load2Ptr->getType()), 0);  [local in foldLoadsRecursive()]

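The Value.cpp and AggressiveInstCombine.cpp entries above use the same idiom: the offset accumulator is an APInt whose bit width matches the pointer's index type, which is what Value::stripAndAccumulateConstantOffsets expects. A minimal sketch of that pattern, with an illustrative helper name (stripToBase):

    #include "llvm/ADT/APInt.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Value.h"
    using namespace llvm;

    // Size the accumulator to the pointer's index type, then walk through any
    // constant GEP offsets, adding them into Offset and returning the base.
    static const Value *stripToBase(const Value *Ptr, const DataLayout &DL,
                                    APInt &Offset) {
      Offset = APInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
      return Ptr->stripAndAccumulateConstantOffsets(DL, Offset,
                                                    /*AllowNonInbounds=*/true);
    }

Stripping two pointers this way and comparing the returned bases is roughly how code like getPointerOffsetFrom and foldLoadsRecursive decides whether Offset2 - Offset1 is a meaningful byte distance.
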
/llvm-project/clang/lib/StaticAnalyzer/Checkers/
  ContainerModeling.cpp
    970  SymbolRef Offset2,  [in invalidateIteratorPositions()]

/llvm-project/llvm/lib/Target/SystemZ/
  SystemZInstrInfo.cpp
    1898  int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);  [local in getOpcodeForOffset()]

/llvm-project/llvm/lib/Target/AArch64/
  AArch64InstrInfo.cpp
    4277  shouldClusterFI(const MachineFrameInfo &MFI, int FI1, int64_t Offset1, unsigned Opcode1, int FI2, int64_t Offset2, unsigned Opcode2)  [argument]
    4348  int64_t Offset2 = SecondLdSt.getOperand(2).getImm();  [local in shouldClusterMemOps()]
  AArch64ISelLowering.cpp
    22396  TypeSize Offset2 = TypeSize::getFixed(2);  [local in combineV3I8LoadExt()]
    22680  TypeSize Offset2 = TypeSize::getFixed(2);  [local in combineI8TruncStore()]
    [all ...]

/llvm-project/llvm/lib/Target/RISCV/
  RISCVInstrInfo.cpp
    2681  shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef<const MachineOperand *> BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const  [argument]
  RISCVISelLowering.cpp
    19482  unsigned Offset2 = State.AllocateStack(2, Align(2));  [local in CC_RISCV_FastCC()]

/llvm-project/llvm/lib/CodeGen/
  MachinePipeliner.cpp
    863  int64_t Offset1, Offset2;  [local in addLoopCarriedDependences()]
  CodeGenPrepare.cpp
    2409  uint64_t Offset2 = Offset.getLimitedValue();  [local in optimizeCallInst()]

/llvm-project/clang/tools/c-index-test/
  c-index-test.c
    1820  long long Offset2 = clang_Cursor_getOffsetOfField(cursor);  [local in PrintTypeSize()]

/llvm-project/llvm/lib/Target/PowerPC/
  PPCInstrInfo.cpp
    2930  int64_t Offset1 = 0, Offset2 = 0;  [local in shouldClusterMemOps()]
  PPCISelLowering.cpp
    13839  int64_t Offset1 = 0, Offset2 = 0;  [local in isConsecutiveLSLoc()]

/llvm-project/llvm/lib/Transforms/InstCombine/
  InstCombineAndOrXor.cpp
    1234  const APInt *Offset1 = nullptr, *Offset2 = nullptr;  [local in foldAndOrOfICmpsUsingRanges()]

/llvm-project/llvm/lib/Target/ARM/
  ARMBaseInstrInfo.cpp
    2016  shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const  [argument]

/llvm-project/llvm/lib/Target/X86/
  X86InstrInfo.cpp
    8778  shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const  [argument]

/llvm-project/llvm/lib/Target/AMDGPU/
  SIInstrInfo.cpp
    553  shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef<const MachineOperand *> BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const  [argument]

/llvm-project/llvm/lib/CodeGen/GlobalISel/
  CombinerHelper.cpp
    6983  std::optional<APInt> Offset2;  [local in tryFoldAndOrOrICmpsUsingRanges()]

/llvm-project/llvm/lib/Transforms/Vectorize/
  SLPVectorizer.cpp
    5950  int Offset2 = Pair2.second;  [local in canFormVector()]