/netbsd-src/external/apache2/llvm/dist/llvm/lib/CodeGen/SelectionDAG/
  ScheduleDAGSDNodes.cpp
    243   int64_t Offset1, Offset2;   in ClusterNeighboringLoads() local
    244   if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||   in ClusterNeighboringLoads()
    245   Offset1 == Offset2 ||   in ClusterNeighboringLoads()
    253   O2SMap.insert(std::make_pair(Offset2, User));   in ClusterNeighboringLoads()
    254   Offsets.push_back(Offset2);   in ClusterNeighboringLoads()
    255   if (Offset2 < Offset1)   in ClusterNeighboringLoads()
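
The ClusterNeighboringLoads() lines above collect loads that share a base pointer into an offset-keyed map so they can be reordered by offset. A minimal standalone sketch of that bookkeeping; NodeId, clusterByOffset, and the (offset, node) pairs are illustrative stand-ins, not the actual SelectionDAG types:

    #include <algorithm>
    #include <cstdint>
    #include <map>
    #include <utility>
    #include <vector>

    // Illustrative stand-in for a SelectionDAG load node; not an LLVM type.
    using NodeId = unsigned;

    // Given (offset, node) pairs for loads known to share one base pointer,
    // order them by offset so near-adjacent loads can later be emitted back
    // to back, mirroring the O2SMap/Offsets bookkeeping in the excerpt.
    std::vector<NodeId>
    clusterByOffset(const std::vector<std::pair<int64_t, NodeId>> &Loads) {
      std::map<int64_t, NodeId> O2SMap;   // offset -> load
      std::vector<int64_t> Offsets;
      for (const auto &[Off, N] : Loads) {
        if (!O2SMap.insert({Off, N}).second)
          continue;                        // duplicate offset, like Offset1 == Offset2
        Offsets.push_back(Off);
      }
      std::sort(Offsets.begin(), Offsets.end());
      std::vector<NodeId> Order;
      Order.reserve(Offsets.size());
      for (int64_t Off : Offsets)
        Order.push_back(O2SMap[Off]);
      return Order;
    }

Sorting by offset is what later lets the scheduler place loads at adjacent offsets next to each other.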
|
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Transforms/Scalar/
  ConstraintElimination.cpp
    132   int64_t Offset2 = 0;   in getConstraint() local
    180   Offset2 = BDec[0].first;   in getConstraint()
    201   R[0] = Offset1 + Offset2 + (Pred == CmpInst::ICMP_ULT ? -1 : 0);   in getConstraint()
|
  SeparateConstOffsetFromGEP.cpp
    1352  Value *Offset2 = Second->getOperand(1);   in swapGEPOperand() local
    1353  First->setOperand(1, Offset2);   in swapGEPOperand()
|
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/RISCV/
  RISCVISelDAGToDAG.cpp
    1490  uint64_t Offset2 = N->getConstantOperandVal(OffsetOpIdx);   in doPeepholeLoadStoreADDI() local
    1494  int64_t CombinedOffset = Offset1 + Offset2;   in doPeepholeLoadStoreADDI()
    1506  if (Offset2 != 0 && Alignment <= Offset2)   in doPeepholeLoadStoreADDI()
    1509  int64_t CombinedOffset = Offset1 + Offset2;   in doPeepholeLoadStoreADDI()
    1516  if (Offset2 != 0 && Alignment <= Offset2)   in doPeepholeLoadStoreADDI()
    1519  int64_t CombinedOffset = Offset1 + Offset2;   in doPeepholeLoadStoreADDI()
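
The doPeepholeLoadStoreADDI() matches fold an ADDI's immediate into the displacement of a dependent load or store when the sum still encodes. A hedged sketch of just that arithmetic, assuming the signed 12-bit displacement field of RISC-V loads and stores; isSimm12 and foldAddiIntoMemOffset are illustrative names, not the pass's API:

    #include <cstdint>
    #include <optional>

    // Illustrative check: can an ADDI's immediate (Offset1) be folded into a
    // dependent load/store's displacement (Offset2)?  The combined value must
    // still fit a signed 12-bit displacement field.
    static bool isSimm12(int64_t V) { return V >= -2048 && V <= 2047; }

    std::optional<int64_t> foldAddiIntoMemOffset(int64_t Offset1, int64_t Offset2) {
      int64_t Combined = Offset1 + Offset2;
      if (!isSimm12(Combined))
        return std::nullopt;   // would no longer encode; keep the ADDI separate
      return Combined;
    }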
|
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/Mips/
  MicroMipsSizeReduction.cpp
    400   int64_t Offset1, Offset2;   in ConsecutiveInstr() local
    403   if (!GetImm(MI2, 2, Offset2))   in ConsecutiveInstr()
    409   return ((Offset1 == (Offset2 - 4)) && (ConsecutiveRegisters(Reg1, Reg2)));   in ConsecutiveInstr()
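
Line 409 is the whole pairing test: two word accesses qualify when the second offset is exactly one word (4 bytes) past the first and the registers are consecutive. A small illustrative version of that predicate, with plain unsigned ids standing in for the real register checks:

    #include <cstdint>

    // Two 32-bit accesses can be paired when the second sits exactly one word
    // after the first and the destination registers are consecutive.
    bool isConsecutivePair(int64_t Offset1, int64_t Offset2,
                           unsigned Reg1, unsigned Reg2) {
      return Offset1 == Offset2 - 4 && Reg2 == Reg1 + 1;
    }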
|
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/X86/
  X86InstrInfo.h
    421   int64_t &Offset2) const override;
    439   int64_t Offset2,
|
  X86InstrInfo.cpp
    6549  int64_t &Offset1, int64_t &Offset2) const {   in areLoadsFromSameBasePtr()
    6742  Offset2 = Disp2->getSExtValue();   in areLoadsFromSameBasePtr()
    6747  int64_t Offset1, int64_t Offset2,   in shouldScheduleLoadsNear() argument
    6749  assert(Offset2 > Offset1);   in shouldScheduleLoadsNear()
    6750  if ((Offset2 - Offset1) / 8 > 64)   in shouldScheduleLoadsNear()
|
/netbsd-src/external/apache2/llvm/dist/llvm/tools/llvm-profgen/
  ProfiledBinary.cpp
    121   uint64_t Offset2 = virtualAddrToOffset(Address2);   in inlineContextEqual() local
    123   const FrameLocationStack &Context2 = getFrameLocationStack(Offset2);   in inlineContextEqual()
|
/netbsd-src/external/apache2/llvm/dist/clang/lib/StaticAnalyzer/Checkers/
  ContainerModeling.cpp
    141   SymbolRef Offset2,
    975   SymbolRef Offset2,   in invalidateIteratorPositions() argument
    979   compare(State, Pos.getOffset(), Offset2, Opc2);   in invalidateIteratorPositions()
|
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/ARM/
  ARMBaseInstrInfo.h
    248   int64_t &Offset2) const override;
    259   int64_t Offset1, int64_t Offset2,
|
  ARMBaseInstrInfo.cpp
    1926  int64_t &Offset2) const {   in areLoadsFromSameBasePtr()
    1987  Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();   in areLoadsFromSameBasePtr()
    2006  int64_t Offset1, int64_t Offset2,   in shouldScheduleLoadsNear() argument
    2011  assert(Offset2 > Offset1);   in shouldScheduleLoadsNear()
    2013  if ((Offset2 - Offset1) / 8 > 64)   in shouldScheduleLoadsNear()
|
/netbsd-src/external/apache2/llvm/dist/llvm/include/llvm/CodeGen/
  TargetInstrInfo.h
    1298  int64_t &Offset2) const {   in areLoadsFromSameBasePtr() argument
    1311  int64_t Offset1, int64_t Offset2,   in shouldScheduleLoadsNear() argument
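
These are the TargetInstrInfo hooks that the X86 and ARM entries above override; both overrides apply the same distance filter, (Offset2 - Offset1) / 8 > 64, before agreeing to schedule two loads from one base pointer near each other. A standalone sketch of that heuristic, with the divisor and threshold treated as illustrative constants taken from the excerpts:

    #include <cassert>
    #include <cstdint>

    // Once two loads are known to read from the same base pointer, only
    // schedule them next to each other if their offsets are reasonably close.
    bool loadsAreNearEnough(int64_t Offset1, int64_t Offset2) {
      assert(Offset2 > Offset1 && "caller orders the offsets");
      return (Offset2 - Offset1) / 8 <= 64;
    }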
|
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/SystemZ/
  SystemZInstrInfo.cpp
    1620  int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);   in getOpcodeForOffset() local
    1621  if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {   in getOpcodeForOffset()
    1631  if (isInt<20>(Offset) && isInt<20>(Offset2)) {   in getOpcodeForOffset()
|
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/AArch64/
  AArch64InstrInfo.cpp
    3060  int64_t Offset2, unsigned Opcode2) {   in shouldClusterFI() argument
    3077  ObjectOffset2 += Offset2;   in shouldClusterFI()
    3130  int64_t Offset2 = SecondLdSt.getOperand(2).getImm();   in shouldClusterMemOps() local
    3131  if (hasUnscaledLdStOffset(SecondOpc) && !scaleOffset(SecondOpc, Offset2))   in shouldClusterMemOps()
    3141  assert((!BaseOp1.isIdenticalTo(BaseOp2) || Offset1 <= Offset2) &&   in shouldClusterMemOps()
    3147  BaseOp2.getIndex(), Offset2, SecondOpc);   in shouldClusterMemOps()
    3150  assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");   in shouldClusterMemOps()
    3152  return Offset1 + 1 == Offset2;   in shouldClusterMemOps()
|
  AArch64InstrFormats.td
    9999   int Offset1, int Offset2, int Offset4, int Offset8> {
    10025  !cast<DAGOperand>("GPR64pi" # Offset2)>;
    10028  !cast<DAGOperand>("GPR64pi" # Offset2)>;
    10044  defm : SIMDLdrAliases<NAME, asm, "4h", Count, Offset2, 64>;
    10045  defm : SIMDLdrAliases<NAME, asm, "8h", Count, Offset2, 128>;
|
/netbsd-src/external/apache2/llvm/dist/llvm/lib/CodeGen/
  MachinePipeliner.cpp
    775   int64_t Offset1, Offset2;   in addLoopCarriedDependences() local
    779   TII->getMemOperandWithOffset(MI, BaseOp2, Offset2,   in addLoopCarriedDependences()
    783   (int)Offset1 < (int)Offset2) {   in addLoopCarriedDependences()
|
  CodeGenPrepare.cpp
    2118  uint64_t Offset2 = Offset.getLimitedValue();   in optimizeCallInst() local
    2119  if ((Offset2 & (PrefAlign-1)) != 0)   in optimizeCallInst()
    2123  DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)   in optimizeCallInst()
    2133  MinSize + Offset2)   in optimizeCallInst()
|
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/AMDGPU/
  SIInstrInfo.h
    186   int64_t &Offset2) const override;
|
/netbsd-src/external/apache2/llvm/dist/clang/tools/c-index-test/
  c-index-test.c
    1767  long long Offset2 = clang_Cursor_getOffsetOfField(cursor);   in PrintTypeSize() local
    1768  if (Offset == Offset2){   in PrintTypeSize()
    1772  printf(" [offsetof=%lld/%lld]", Offset, Offset2);   in PrintTypeSize()
|
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/PowerPC/
  PPCInstrInfo.cpp
    2827  int64_t Offset1 = 0, Offset2 = 0;   in shouldClusterMemOps() local
    2831  !getMemOperandWithOffsetWidth(SecondLdSt, Base2, Offset2, Width2, TRI) ||   in shouldClusterMemOps()
    2838  assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");   in shouldClusterMemOps()
    2839  return Offset1 + Width1 == Offset2;   in shouldClusterMemOps()
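
shouldClusterMemOps() here clusters two memory operations only when the first access ends exactly where the second begins. A minimal sketch of that adjacency test, with widths in bytes and offsets assumed pre-ordered by the caller:

    #include <cstdint>

    // Cluster two memory operations only when the first access (starting at
    // Offset1, Width1 bytes wide) ends exactly where the second one starts.
    bool accessesAreAdjacent(int64_t Offset1, uint64_t Width1, int64_t Offset2) {
      return Offset1 + static_cast<int64_t>(Width1) == Offset2;
    }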
|
  PPCISelLowering.cpp
    12751  int64_t Offset1 = 0, Offset2 = 0;   in isConsecutiveLSLoc() local
    12753  getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);   in isConsecutiveLSLoc()
    12754  if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))   in isConsecutiveLSLoc()
    12761  Offset2 = 0;   in isConsecutiveLSLoc()
    12763  bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);   in isConsecutiveLSLoc()
    12765  return Offset1 == (Offset2 + Dist*Bytes);   in isConsecutiveLSLoc()
|
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Analysis/
  ValueTracking.cpp
    7094  auto Offset2 = getOffsetFromIndex(GEP2, Idx, DL);   in isPointerOffset() local
    7095  if (!Offset1 || !Offset2)   in isPointerOffset()
    7097  return *Offset2 - *Offset1;   in isPointerOffset()
|
/netbsd-src/external/apache2/llvm/dist/llvm/docs/
  LangRef.rst
    5787  Offset2)`` if either ``(BaseTy1, Offset1)`` is reachable from ``(Base2,
    5788  Offset2)`` via the ``Parent`` relation or vice versa.
|