/llvm-project/llvm/tools/llvm-reduce/deltas/

  ReduceMemoryOperations.cpp
    26  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&I)) {  in removeVolatileInFunction() local
    56  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&I)) {  in reduceAtomicSyncScopesInFunction() local
    89  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&I)) {  in reduceAtomicOrderingInFunction() local

/llvm-project/llvm/lib/Transforms/Scalar/

  InferAddressSpaces.cpp
    503   else if (auto *RMW = dyn_cast<AtomicRMWInst>(&I))  in collectFlatAddressExpressions() local
    1026  if (auto *RMW = dyn_cast<AtomicRMWInst>(Inst))  in isSimplePointerUseValidToReplace() local

  LoopStrengthReduce.cpp
    1021  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) {  in isAddressUse() local
    1045  } else if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) {  in getAccessType() local

/llvm-project/llvm/lib/Target/BPF/

  BPFCheckAndAdjustIR.cpp
    509  else if (auto *RMW = dyn_cast<AtomicRMWInst>(&I))  in insertASpaceCasts() local

/llvm-project/llvm/lib/Analysis/

  AliasAnalysis.cpp
    584  getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc, AAQueryInfo &AAQI)  in getModRefInfo() argument

/llvm-project/llvm/lib/Transforms/Instrumentation/

  HWAddressSanitizer.cpp
    839  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {  in getInterestingMemoryOperands() local
    867  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))  in getPointerOperandIndex() local

  MemProfiler.cpp
    345  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {  in isInterestingMemoryAccess() local

  AddressSanitizer.cpp
    1420  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {  in getInterestingMemoryOperands() local

/llvm-project/llvm/lib/Target/AMDGPU/

  AMDGPUPromoteAlloca.cpp
    1180  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {  in collectUsesWithPtrTypes() local

  AMDGPULowerBufferFatPointers.cpp
    1099  else if (auto *RMW = dyn_cast<AtomicRMWInst>(I)) {  in handleMemoryInst() local

  SIISelLowering.cpp
    16079  emitAtomicRMWLegalRemark(const AtomicRMWInst *RMW)  in emitAtomicRMWLegalRemark() argument
    [all...]

/llvm-project/llvm/lib/CodeGen/

  TargetLoweringBase.cpp
    2456  if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&AI)) {  in getAtomicMemOperandFlags() local

  CodeGenPrepare.cpp
    5241  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {  in FindAllMemoryUses() local
    8380  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {  in optimizeInst() local

/llvm-project/llvm/lib/IR/

  AutoUpgrade.cpp
    4096  AtomicRMWInst *RMW =  in upgradeAMDGCNIntrinsicCall() local
    [all...]

/llvm-project/llvm/include/llvm/CodeGen/

  TargetLowering.h
    2319  shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW)  in shouldExpandAtomicRMWInIR() argument

/llvm-project/clang/lib/CodeGen/

  CGBuiltin.cpp
    4751   AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,  in EmitBuiltinExpr() local
    19162  llvm::AtomicRMWInst *RMW =  in EmitAMDGPUBuiltinExpr() local

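  The CGBuiltin.cpp hits create atomicrmw instructions rather than inspect them. A
  hedged sketch (not code from CGBuiltin.cpp) of how an atomicrmw xchg is built
  through IRBuilder, assuming a recent LLVM where CreateAtomicRMW takes an
  explicit alignment; emitXchg is a hypothetical helper name:

    #include "llvm/IR/IRBuilder.h"

    // Emit "atomicrmw xchg ptr %Ptr, <ty> %Val seq_cst" at the builder's
    // current insertion point. An empty MaybeAlign lets the builder derive
    // the natural alignment from Val's type.
    llvm::AtomicRMWInst *emitXchg(llvm::IRBuilder<> &Builder, llvm::Value *Ptr,
                                  llvm::Value *Val) {
      return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, Val,
                                     llvm::MaybeAlign(),
                                     llvm::AtomicOrdering::SequentiallyConsistent);
    }
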
/llvm-project/llvm/lib/Target/PowerPC/

  PPCISelLowering.cpp
    18822  Function *RMW = Intrinsic::getDeclaration(  in emitMaskedAtomicRMWIntrinsic() local

/llvm-project/llvm/lib/Target/AArch64/

  AArch64ISelLowering.cpp
    26246  if (const auto *RMW = dyn_cast<AtomicRMWInst>(I))  in isOpSuitableForLSE128() local
    [all...]
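
  Nearly every hit above is the same idiom: walk the IR and use
  dyn_cast<AtomicRMWInst> to pick out atomic read-modify-write instructions.
  A minimal self-contained sketch of that pattern; the helper name
  countVolatileAtomicRMWs is an illustrative choice, not code from any file
  listed, while the AtomicRMWInst accessors named are LLVM's public API:

    #include "llvm/IR/Function.h"
    #include "llvm/IR/Instructions.h"

    // Count the volatile atomicrmw instructions in F.
    static unsigned countVolatileAtomicRMWs(llvm::Function &F) {
      unsigned N = 0;
      for (llvm::BasicBlock &BB : F)
        for (llvm::Instruction &I : BB)
          // dyn_cast returns nullptr for every non-atomicrmw instruction.
          if (auto *RMW = llvm::dyn_cast<llvm::AtomicRMWInst>(&I))
            if (RMW->isVolatile()) // getOperation()/getOrdering() etc. also apply
              ++N;
      return N;
    }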