| /openbsd-src/gnu/llvm/llvm/lib/Transforms/Utils/ |
| H A D | VNCoercion.cpp |
    18   bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy,   in canCoerceMustAliasedValueToLoad() argument
    22   if (StoredTy == LoadTy)   in canCoerceMustAliasedValueToLoad()
    27   if (isFirstClassAggregateOrScalableType(LoadTy) ||   in canCoerceMustAliasedValueToLoad()
    38   if (StoreSize < DL.getTypeSizeInBits(LoadTy).getFixedValue())   in canCoerceMustAliasedValueToLoad()
    42   bool LoadNI = DL.isNonIntegralPointerType(LoadTy->getScalarType());   in canCoerceMustAliasedValueToLoad()
    53   LoadTy->getPointerAddressSpace()) {   in canCoerceMustAliasedValueToLoad()
    61   if (StoredNI && StoreSize != DL.getTypeSizeInBits(LoadTy).getFixedValue())   in canCoerceMustAliasedValueToLoad()
    64   if (StoredTy->isTargetExtTy() || LoadTy->isTargetExtTy())   in canCoerceMustAliasedValueToLoad()
   173   static int analyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,   in analyzeLoadFromClobberingWrite() argument
   179   if (isFirstClassAggregateOrScalableType(LoadTy))   in analyzeLoadFromClobberingWrite()
   [all …]
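The checks sampled above reduce to a size rule: a stored value can only be reused for a must-aliased load if the store covers at least as many bits as the load, and neither type is a first-class aggregate, a scalable vector, or (apart from narrow exceptions) a non-integral pointer. Below is a minimal standalone sketch of that rule; the TypeDesc struct and canCoerceStoreToLoad helper are hypothetical stand-ins, not the LLVM API, and the non-integral-pointer case is simplified to a plain rejection.

    // Standalone sketch, not the VNCoercion API: models the size rule behind
    // canCoerceMustAliasedValueToLoad. TypeDesc is a made-up simplification.
    #include <cstdint>
    #include <iostream>

    struct TypeDesc {
      uint64_t SizeInBits;    // fixed size of the type in bits
      bool IsAggregate;       // struct/array types cannot be coerced bitwise
      bool IsScalable;        // scalable vectors have no fixed size
      bool IsNonIntegralPtr;  // pointer whose bits may not be reinterpreted
    };

    bool canCoerceStoreToLoad(const TypeDesc &Stored, const TypeDesc &Load) {
      // First-class aggregates and scalable vectors are rejected outright.
      if (Stored.IsAggregate || Load.IsAggregate || Stored.IsScalable ||
          Load.IsScalable)
        return false;
      // Simplification: treat any non-integral pointer as non-coercible.
      if (Stored.IsNonIntegralPtr || Load.IsNonIntegralPtr)
        return false;
      // The store must cover at least as many bits as the load.
      return Stored.SizeInBits >= Load.SizeInBits;
    }

    int main() {
      TypeDesc I64{64, false, false, false}, I32{32, false, false, false};
      std::cout << canCoerceStoreToLoad(I64, I32) << '\n'; // 1: i64 store feeds i32 load
      std::cout << canCoerceStoreToLoad(I32, I64) << '\n'; // 0: store is too narrow
      return 0;
    }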
|
| /openbsd-src/gnu/llvm/llvm/include/llvm/Transforms/Utils/ |
| H A D | VNCoercion.h |
    37   bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy,
    54   int analyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
    62   int analyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, LoadInst *DepLI,
    70   int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
    76   Value *getStoreValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy,
    81   Type *LoadTy, const DataLayout &DL);
    87   Value *getLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, Type *LoadTy,
    92   Type *LoadTy, const DataLayout &DL);
    99   Type *LoadTy, Instruction *InsertPt,
   104   Type *LoadTy, const DataLayout &DL);
|
| /openbsd-src/gnu/llvm/llvm/lib/Transforms/Vectorize/ |
| H A D | LoadStoreVectorizer.cpp |
  1170   Type *LoadTy = nullptr;   in vectorizeLoadChain() local
  1172   LoadTy = cast<LoadInst>(V)->getType();   in vectorizeLoadChain()
  1173   if (LoadTy->isIntOrIntVectorTy())   in vectorizeLoadChain()
  1176   if (LoadTy->isPtrOrPtrVectorTy()) {   in vectorizeLoadChain()
  1177   LoadTy = Type::getIntNTy(F.getParent()->getContext(),   in vectorizeLoadChain()
  1178   DL.getTypeSizeInBits(LoadTy));   in vectorizeLoadChain()
  1182   assert(LoadTy && "Can't determine LoadInst type from chain");   in vectorizeLoadChain()
  1184   unsigned Sz = DL.getTypeSizeInBits(LoadTy);   in vectorizeLoadChain()
  1217   auto *VecLoadTy = dyn_cast<FixedVectorType>(LoadTy);   in vectorizeLoadChain()
  1219   VecTy = FixedVectorType::get(LoadTy->getScalarType(),   in vectorizeLoadChain()
  [all …]
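The vectorizeLoadChain matches rely on a common normalization: a pointer-typed load is treated as an integer of the same bit width so the whole chain can be given one element type. The sketch below exercises the same two calls that appear in the snippet, Type::getIntNTy and DataLayout::getTypeSizeInBits, in a standalone program; the data layout string and the tiny driver are assumptions made for the demo, not part of the pass.

    // Standalone demo (assumed x86_64-style data layout): map a pointer type to
    // an integer type of the same bit width, as vectorizeLoadChain does.
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      DataLayout DL("e-m:e-p:64:64-i64:64-n8:16:32:64-S128"); // assumed layout
      Type *PtrTy = PointerType::get(Ctx, /*AddressSpace=*/0);
      // Same width query the pass uses before rewriting the chain's type.
      unsigned Bits = DL.getTypeSizeInBits(PtrTy).getFixedValue();
      Type *IntTy = Type::getIntNTy(Ctx, Bits);
      IntTy->print(outs()); // prints "i64" for a 64-bit pointer
      outs() << "\n";
      return 0;
    }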
|
| H A D | VectorCombine.cpp |
   228   Type *LoadTy = Load->getType();   in vectorizeLoadInsert() local
   231   TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, AS);   in vectorizeLoadInsert()
   306   Type *LoadTy = Load->getType();   in widenSubvectorLoad() local
   314   TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, AS);   in widenSubvectorLoad()
|
| H A D | SLPVectorizer.cpp |
  6879   auto *LoadTy = FixedVectorType::get(LI->getType(), VF);   in getEntryCost() local
  6883   TTI->getMemoryOpCost(Instruction::Load, LoadTy, Alignment,   in getEntryCost()
  6888   Instruction::Load, LoadTy, LI->getPointerOperand(),   in getEntryCost()
  6895   std::nullopt, CostKind, I, LoadTy);   in getEntryCost()
|
| /openbsd-src/gnu/llvm/llvm/lib/Target/ARM/ |
| H A D | ARMParallelDSP.cpp |
   229   LoadInst* CreateWideLoad(MemInstList &Loads, IntegerType *LoadTy);
   733   IntegerType *LoadTy) {   in CreateWideLoad() argument
   773   LoadTy->getPointerTo(AddrSpace));   in CreateWideLoad()
   774   LoadInst *WideLoad = IRB.CreateAlignedLoad(LoadTy, VecPtr, Base->getAlign());   in CreateWideLoad()
   788   Value *ShiftVal = ConstantInt::get(LoadTy, OffsetTy->getBitWidth());   in CreateWideLoad()
|
| /openbsd-src/gnu/llvm/llvm/lib/Analysis/ |
| H A D | ConstantFolding.cpp |
   546   Constant *FoldReinterpretLoadFromConst(Constant *C, Type *LoadTy,   in FoldReinterpretLoadFromConst() argument
   549   if (isa<ScalableVectorType>(LoadTy))   in FoldReinterpretLoadFromConst()
   552   auto *IntType = dyn_cast<IntegerType>(LoadTy);   in FoldReinterpretLoadFromConst()
   560   if (!LoadTy->isFloatingPointTy() && !LoadTy->isPointerTy() &&   in FoldReinterpretLoadFromConst()
   561   !LoadTy->isVectorTy())   in FoldReinterpretLoadFromConst()
   565   DL.getTypeSizeInBits(LoadTy).getFixedValue());   in FoldReinterpretLoadFromConst()
   567   if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&   in FoldReinterpretLoadFromConst()
   568   !LoadTy->isX86_AMXTy())   in FoldReinterpretLoadFromConst()
   570   return Constant::getNullValue(LoadTy);   in FoldReinterpretLoadFromConst()
   571   Type *CastTy = LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy;   in FoldReinterpretLoadFromConst()
   [all …]
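FoldReinterpretLoadFromConst folds a load by reinterpreting the raw bytes of a constant initializer at the load's byte offset. A standalone little-endian sketch of that byte reassembly follows; the helper and the example buffer are invented for illustration, and the real routine also handles pointers, vectors, big-endian targets, and the bail-outs visible above (scalable vectors, x86_mmx/x86_amx).

    // Little-endian-only sketch of reassembling a loaded integer from the
    // bytes of a constant initializer; not the ConstantFolding implementation.
    #include <cstdint>
    #include <iostream>
    #include <vector>

    uint64_t foldLoadFromBytes(const std::vector<uint8_t> &Init,
                               size_t ByteOffset, unsigned LoadBytes) {
      uint64_t Result = 0;
      for (unsigned I = 0; I < LoadBytes; ++I)
        Result |= uint64_t(Init[ByteOffset + I]) << (8 * I);
      return Result;
    }

    int main() {
      // Bytes of a constant i64 initializer 0x1122334455667788 (little endian).
      std::vector<uint8_t> Init = {0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11};
      // Folding an i16 load at byte offset 2 yields 0x5566.
      std::cout << std::hex << foldLoadFromBytes(Init, 2, 2) << '\n';
      return 0;
    }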
|
| H A D | Loads.cpp |
   447   Type *LoadTy,   in areNonOverlapSameBaseLoadAndStore() argument
   459   auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));   in areNonOverlapSameBaseLoadAndStore()
|
| /openbsd-src/gnu/llvm/llvm/lib/Transforms/Scalar/ |
| H A D | ScalarizeMaskedMemIntrin.cpp |
   957   Type *LoadTy = CI->getType();   in optimizeCallInst() local
   959   LoadTy->getScalarType());   in optimizeCallInst()
   960   if (TTI.isLegalMaskedGather(LoadTy, Alignment) &&   in optimizeCallInst()
   961   !TTI.forceScalarizeMaskedGather(cast<VectorType>(LoadTy), Alignment))   in optimizeCallInst()
|
| H A D | GVN.cpp |
   984   Type *LoadTy = Load->getType();   in MaterializeAdjustedValue() local
   988   if (Res->getType() != LoadTy) {   in MaterializeAdjustedValue()
   989   Res = getStoreValueForLoad(Res, Offset, LoadTy, InsertPt, DL);   in MaterializeAdjustedValue()
   998   if (CoercedLoad->getType() == LoadTy && Offset == 0) {   in MaterializeAdjustedValue()
  1001   Res = getLoadValueForLoad(CoercedLoad, Offset, LoadTy, InsertPt, DL);   in MaterializeAdjustedValue()
  1014   Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,   in MaterializeAdjustedValue()
  1113   static Value *findDominatingValue(const MemoryLocation &Loc, Type *LoadTy,   in findDominatingValue() argument
  1129   if (LI->getPointerOperand() == Loc.Ptr && LI->getType() == LoadTy)   in findDominatingValue()
|
| H A D | SROA.cpp |
  1371   Type *LoadTy = SomeLoad->getType();   in speculatePHINodeLoads() local
  1373   PHINode *NewPN = IRB.CreatePHI(LoadTy, PN.getNumIncomingValues(),   in speculatePHINodeLoads()
  1407   LoadTy, InVal, Alignment,   in speculatePHINodeLoads()
|
| /openbsd-src/gnu/llvm/llvm/lib/Transforms/InstCombine/ |
| H A D | InstCombineLoadStoreAlloca.cpp |
   632   static bool isMinMaxWithLoads(Value *V, Type *&LoadTy) {   in isMinMaxWithLoads() argument
   646   LoadTy = L1->getType();   in isMinMaxWithLoads()
   690   Type *LoadTy = Load.getType();   in combineLoadToOperationType() local
   692   assert(!LoadTy->isX86_AMXTy() && "Load from x86_amx* should not happen!");   in combineLoadToOperationType()
   700   LoadTy->isPtrOrPtrVectorTy() == DestTy->isPtrOrPtrVectorTy() &&   in combineLoadToOperationType()
|
| /openbsd-src/gnu/llvm/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ |
| H A D | ExprEngine.h |
   692   QualType LoadTy = QualType());
   778   QualType LoadTy);
|
| /openbsd-src/gnu/llvm/llvm/lib/Target/AMDGPU/ |
| H A D | AMDGPURegisterBankInfo.cpp |
  1054   const LLT LoadTy = MRI.getType(DstReg);   in applyMappingLoad() local
  1055   unsigned LoadSize = LoadTy.getSizeInBits();   in applyMappingLoad()
  1073   (MemSize == 32 || LoadTy.isVector() || !isScalarLoadLegal(MI)))   in applyMappingLoad()
  1105   std::tie(Part64, Part32) = splitUnequalType(LoadTy, 64);   in applyMappingLoad()
  1111   LLT WiderTy = widen96To128(LoadTy);   in applyMappingLoad()
  1144   unsigned NumSplitParts = LoadTy.getSizeInBits() / MaxNonSmrdLoadSize;   in applyMappingLoad()
  1145   const LLT LoadSplitTy = LoadTy.divide(NumSplitParts);   in applyMappingLoad()
  1150   if (LoadTy.isVector()) {   in applyMappingLoad()
|
| H A D | AMDGPULegalizerInfo.cpp |
  2571   LLT LoadTy = Ty.getSizeInBits() == 32 ? PtrTy : Ty;   in legalizeGlobalValue() local
  2576   LoadTy, Align(8));   in legalizeGlobalValue()
  4584   LLT LoadTy = LLT::fixed_vector(NumLoadDWords, S32);   in legalizeBufferLoad() local
  4585   Register LoadDstReg = B.getMRI()->createGenericVirtualRegister(LoadTy);   in legalizeBufferLoad()
|
| H A D | AMDGPUISelLowering.cpp |
   734   bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy, EVT CastTy,   in isLoadBitCastBeneficial() argument
   738   assert(LoadTy.getSizeInBits() == CastTy.getSizeInBits());   in isLoadBitCastBeneficial()
   740   if (LoadTy.getScalarType() == MVT::i32)   in isLoadBitCastBeneficial()
   743   unsigned LScalarSize = LoadTy.getScalarSizeInBits();   in isLoadBitCastBeneficial()
|
| /openbsd-src/gnu/llvm/llvm/lib/Target/Hexagon/ |
| H A D | HexagonISelLowering.cpp |
  3148   MVT LoadTy = ty(Op);   in LowerUnalignedLoad() local
  3149   unsigned NeedAlign = Subtarget.getTypeAlignment(LoadTy).value();   in LowerUnalignedLoad()
  3188   assert(LoadTy.getSizeInBits() == 8*NeedAlign);   in LowerUnalignedLoad()
  3221   SDValue Load0 = DAG.getLoad(LoadTy, dl, Chain, Base0, WideMMO);   in LowerUnalignedLoad()
  3222   SDValue Load1 = DAG.getLoad(LoadTy, dl, Chain, Base1, WideMMO);   in LowerUnalignedLoad()
  3224   SDValue Aligned = DAG.getNode(HexagonISD::VALIGN, dl, LoadTy,   in LowerUnalignedLoad()
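LowerUnalignedLoad realigns a misaligned access by issuing two naturally aligned loads that bracket the address and combining them with HexagonISD::VALIGN. A plain C++ model of that combine step is below, assuming little-endian byte order and a 32-bit width chosen only for the demo; it is a sketch of the idea, not the Hexagon lowering.

    // Model of the two-aligned-loads-plus-align trick: no unaligned access is
    // ever issued; shifts play the role of the VALIGN node. Little endian.
    #include <cstdint>
    #include <cstring>
    #include <iostream>

    uint32_t unalignedLoad32(const uint8_t *Base, size_t Offset) {
      size_t Aligned = Offset & ~size_t(3);     // round down to a 4-byte boundary
      unsigned Shift = 8 * unsigned(Offset - Aligned);
      uint32_t Lo, Hi;
      std::memcpy(&Lo, Base + Aligned, 4);      // Load0: aligned word at/below
      std::memcpy(&Hi, Base + Aligned + 4, 4);  // Load1: next aligned word
      if (Shift == 0)
        return Lo;                              // already aligned
      return (Lo >> Shift) | (Hi << (32 - Shift)); // stitch the two halves
    }

    int main() {
      uint8_t Buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
      std::cout << std::hex << unalignedLoad32(Buf, 1) << '\n'; // 4030201
      return 0;
    }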
|
| H A D | HexagonISelLoweringHVX.cpp |
  3066   MVT LoadTy = MVT::getVectorVT(MVT::i8, HwLen);   in WidenHvxLoad() local
  3070   SDValue Load = DAG.getMaskedLoad(LoadTy, dl, Chain, Base, Offset, Mask,   in WidenHvxLoad()
  3071   DAG.getUNDEF(LoadTy), LoadTy, MemOp,   in WidenHvxLoad()
|
| /openbsd-src/gnu/llvm/llvm/lib/CodeGen/GlobalISel/ |
| H A D | MachineIRBuilder.cpp |
   411   LLT LoadTy = Dst.getLLTTy(*getMRI());   in buildLoadFromOffset() local
   413   getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);   in buildLoadFromOffset()
|
| H A D | LegalizerHelper.cpp |
  2956   LLT LoadTy = DstTy;   in lowerLoad() local
  2961   LoadTy = WideMemTy;   in lowerLoad()
  2966   auto NewLoad = MIRBuilder.buildLoad(LoadTy, PtrReg, *NewMMO);   in lowerLoad()
  2968   } else if (isa<GZExtLoad>(LoadMI) || WideMemTy == LoadTy) {   in lowerLoad()
  2969   auto NewLoad = MIRBuilder.buildLoad(LoadTy, PtrReg, *NewMMO);   in lowerLoad()
  2977   if (DstTy != LoadTy)   in lowerLoad()
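The lowerLoad matches show the widening path: when the in-memory type has been widened, the helper loads the wider type and then fixes up the destination (nothing extra for a zero-extending load, an explicit adjustment otherwise). A rough standalone model follows; the 24-bit field, the little-endian assumption, and the loadS24 helper are all invented for the illustration.

    // Invented example: fetch a 24-bit field with a widened 32-bit load, then
    // zero- or sign-extend it to the destination width. Little endian assumed.
    #include <cstdint>
    #include <cstring>
    #include <iostream>

    int32_t loadS24(const uint8_t *P, bool Signed) {
      uint32_t Wide;                    // the widened 32-bit load
      std::memcpy(&Wide, P, sizeof(Wide));
      uint32_t V = Wide & 0xFFFFFFu;    // keep the 24 bits that were stored
      if (Signed && (V & 0x800000u))    // sign-extend from bit 23 if requested
        V |= 0xFF000000u;
      return int32_t(V);
    }

    int main() {
      uint8_t Buf[4] = {0xFF, 0xFF, 0xFF, 0x00}; // 24-bit value -1 plus a pad byte
      std::cout << loadS24(Buf, /*Signed=*/true) << ' '    // -1
                << loadS24(Buf, /*Signed=*/false) << '\n'; // 16777215
      return 0;
    }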
|
| /openbsd-src/gnu/llvm/clang/lib/StaticAnalyzer/Core/ |
| H A D | ExprEngine.cpp |
  3637   QualType LoadTy) {   in evalLoad() argument
  3658   if (LoadTy.isNull())   in evalLoad()
  3659   LoadTy = BoundEx->getType();   in evalLoad()
  3660   V = state->getSVal(location.castAs<Loc>(), LoadTy);   in evalLoad()
|
| /openbsd-src/gnu/llvm/llvm/lib/CodeGen/SelectionDAG/ |
| H A D | LegalizeDAG.cpp |
  1549   MVT LoadTy = TLI.getRegisterType(*DAG.getContext(), MVT::i8);   in getSignAsIntValue() local
  1551   SDValue StackPtr = DAG.CreateStackTemporary(FloatVT, LoadTy);   in getSignAsIntValue()
  1576   State.IntValue = DAG.getExtLoad(ISD::EXTLOAD, DL, LoadTy, State.Chain, IntPtr,   in getSignAsIntValue()
  1578   State.SignMask = APInt::getOneBitSet(LoadTy.getScalarSizeInBits(), 7);   in getSignAsIntValue()
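getSignAsIntValue spills the float to a stack temporary and reloads a byte so the sign can be tested with integer operations. The sketch below shows the same reinterpretation idea in plain C++, with memcpy standing in for the stack slot and extending load; it is a simplified model, not the legalizer code.

    // Read the IEEE-754 sign bit through an integer view of the float bits.
    #include <cstdint>
    #include <cstring>
    #include <iostream>

    bool signBitOfFloat(float F) {
      uint32_t Bits;
      std::memcpy(&Bits, &F, sizeof(Bits)); // bit-for-bit reinterpretation
      return (Bits >> 31) & 1u;             // sign lives in the top bit
    }

    int main() {
      std::cout << signBitOfFloat(-0.0f) << ' '   // 1: negative zero has the bit set
                << signBitOfFloat(3.5f) << '\n';  // 0
      return 0;
    }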
|
| H A D | SelectionDAGBuilder.cpp |
  8026   Type *LoadTy =   in getMemCmpLoad() local
  8029   LoadTy = FixedVectorType::get(LoadTy, LoadVT.getVectorNumElements());   in getMemCmpLoad()
  8032   PointerType::getUnqual(LoadTy));   in getMemCmpLoad()
  8036   LoadTy, Builder.DAG.getDataLayout()))   in getMemCmpLoad()
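getMemCmpLoad supports expanding small, equality-only memcmp calls into loads of a single integer (or vector) of the compare width followed by one comparison. A minimal standalone model of that expansion is below; the fixed 8-byte width and the equal8 helper are assumptions made for the demo.

    // Equality-only memcmp expansion: load both buffers as one 8-byte integer
    // and compare; byte order does not matter for pure equality.
    #include <cstdint>
    #include <cstring>
    #include <iostream>

    bool equal8(const void *A, const void *B) {
      uint64_t VA, VB;                 // the two "memcmp loads"
      std::memcpy(&VA, A, sizeof(VA));
      std::memcpy(&VB, B, sizeof(VB));
      return VA == VB;
    }

    int main() {
      const char X[8] = "abcdefg", Y[8] = "abcdefg", Z[8] = "abcdefh";
      std::cout << equal8(X, Y) << ' ' << equal8(X, Z) << '\n'; // 1 0
      return 0;
    }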
|