/llvm-project/llvm/lib/Analysis/
  MemoryLocation.cpp
     95  return getForSource(cast<AnyMemTransferInst>(MTI));    in getForSource() argument
     99  return getForSource(cast<AnyMemTransferInst>(MTI));    in getForSource() argument
    103  assert(MTI->getRawSource() == MTI->getArgOperand(1));    in getForSource() argument
  AliasSetTracker.cpp
    341  addMemoryLocation(MemoryLocation::getForDest(MTI), AliasSet::ModAccess);    in add() argument
    387  return add(MTI);    in add() local
  StackSafetyAnalysis.cpp
    343  if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {    in getMemIntrinsicAccessRange() local
    495  if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {    in analyzeAllUses() local
  LazyValueInfo.cpp
    692  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))    in AddNonNullPointersByInstruction() local
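The Analysis hits above share one idiom: recognize a MemTransferInst and describe the bytes it reads and writes as MemoryLocations. A minimal sketch of that pattern, written as a standalone helper rather than code from any of the files listed (the name describeTransfer is invented for illustration):

```cpp
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

// Hypothetical helper: if I is a memcpy/memmove, build MemoryLocations for
// the memory it reads and the memory it writes, the way the hits above do.
static void describeTransfer(const Instruction *I) {
  if (const auto *MTI = dyn_cast<MemTransferInst>(I)) {
    // getForSource() covers the bytes read through getRawSource();
    // getForDest() covers the bytes written through getRawDest().
    MemoryLocation Src = MemoryLocation::getForSource(MTI);
    MemoryLocation Dst = MemoryLocation::getForDest(MTI);
    (void)Src;
    (void)Dst;
  }
}
```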
/llvm-project/llvm/lib/Transforms/Utils/
  VNCoercion.cpp
    269  MemTransferInst *MTI = cast<MemTransferInst>(MI);    in analyzeLoadFromClobberingMemInst() local
    400  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);    in getMemInstValueForLoad() local
    424  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);    in getConstantMemInstValueForLoad() local
  GlobalStatus.cpp
    162  if (MTI->isVolatile())    in analyzeGlobalAux() local
/llvm-project/llvm/lib/Transforms/Scalar/
  AlignmentFromAssumptions.cpp
    249  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {    in processAssumption() local
  InferAddressSpaces.cpp
    512  if (auto *MTI = dyn_cast<MemTransferInst>(MI))    in collectFlatAddressExpressions() local
   1051  } else if (auto *MTI = dyn_cast<MemTransferInst>(MI)) {    in handleMemIntrinsicPtrUse() local
    [all...]
/llvm-project/llvm/lib/CodeGen/
  SafeStack.cpp
    258  if (MTI->getRawSource() != U && MTI->getRawDest() != U)    in IsMemIntrinsicSafe() local
  CodeGenPrepare.cpp
   2434  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {    in optimizeCallInst() local
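The SafeStack hit at line 258 asks which operand a given pointer use occupies, since only a use that is the transfer's destination (written) or source (read) matters for the safety check. A rough sketch of that classification, with an invented helper name and enum:

```cpp
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

// Hypothetical classification of a pointer value V used by a memcpy/memmove:
// is it the destination (written), the source (read), or something else
// (e.g. the length operand)?
enum class TransferRole { Dest, Source, Other };

static TransferRole classifyUse(const MemTransferInst *MTI, const Value *V) {
  if (MTI->getRawDest() == V)
    return TransferRole::Dest;
  if (MTI->getRawSource() == V)
    return TransferRole::Source;
  return TransferRole::Other; // mirrors the "!= U && != U" early-out above
}
```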
/llvm-project/llvm/lib/Target/AMDGPU/
  AMDGPUPromoteAlloca.cpp
    604  if (auto *MTI = dyn_cast<MemTransferInst>(Inst)) {    in promoteAllocaUserToVector() local
/llvm-project/llvm/lib/Transforms/InstCombine/
  InstCombineCalls.cpp
   1535  if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) {    in visitCallInst() local
   1543  if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {    in visitCallInst() local
/llvm-project/llvm/lib/Transforms/Instrumentation/
  HWAddressSanitizer.cpp
   1060  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {    in ignoreMemIntrinsic() local
  DataFlowSanitizer.cpp
   2948  auto *MTI = cast<MemTransferInst>(    in visitMemTransferInst() local
/llvm-project/llvm/lib/Target/Mips/
  MipsFastISel.cpp
   1650  const auto *MTI = cast<MemTransferInst>(II);    in fastLowerIntrinsicCall() local
/llvm-project/llvm/lib/Transforms/IPO/
  GlobalOpt.cpp
    230  } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) {    in CleanupPointerRootUsers() local
  Attributor.cpp
    910  if (!AddLocationPtr(MemoryLocation::getForSource(MTI)))    in isPotentiallyAffectedByBarrier() local
/llvm-project/llvm/lib/Target/ARM/
  ARMFastISel.cpp
   2522  if (MTI.isVolatile())    in SelectIntrinsicCall() local
    [all...]
/llvm-project/llvm/lib/IR/
  AutoUpgrade.cpp
   4785  if (auto *MTI = dyn_cast<MemTransferInst>(MemCI))    in UpgradeIntrinsicCall() local
/llvm-project/llvm/lib/Target/AArch64/
  AArch64FastISel.cpp
   3483  const auto *MTI = cast<MemTransferInst>(II);    in fastLowerIntrinsicCall() local
    [all...]
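The Mips, ARM, and AArch64 FastISel hits are variations of the same guard before open-coding a small memcpy/memmove: bail out on volatile transfers and on lengths that are not compile-time constants. A condensed sketch of that guard; the 64-byte cap is an illustrative number, not a value taken from any of those files:

```cpp
#include "llvm/IR/Constants.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

// Returns true if this memcpy/memmove looks simple enough to expand inline:
// not volatile, and a small compile-time-constant number of bytes.
static bool isSimpleSmallTransfer(const MemTransferInst *MTI) {
  if (MTI->isVolatile())
    return false;
  const auto *Len = dyn_cast<ConstantInt>(MTI->getLength());
  return Len && Len->getZExtValue() <= 64; // illustrative threshold only
}
```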