/netbsd-src/external/apache2/llvm/dist/llvm/lib/Transforms/AggressiveInstCombine/
AggressiveInstCombine.cpp
      99  m_Sub(m_SpecificInt(Width), m_Value(SubAmt))))))) in foldGuardedFunnelShift()
     107  m_OneUse(m_c_Or(m_Shl(m_Value(ShVal0), m_Sub(m_SpecificInt(Width), in foldGuardedFunnelShift()
     332  if ((match(Op0, m_Mul(m_Value(MulOp0), m_SpecificInt(Mask01)))) && in tryToRecognizePopCount()
     333  match(Op1, m_SpecificInt(MaskShift))) { in tryToRecognizePopCount()
     336  if (match(MulOp0, m_And(m_c_Add(m_LShr(m_Value(ShiftOp0), m_SpecificInt(4)), in tryToRecognizePopCount()
     338  m_SpecificInt(Mask0F)))) { in tryToRecognizePopCount()
     342  m_c_Add(m_And(m_Value(AndOp0), m_SpecificInt(Mask33)), in tryToRecognizePopCount()
     343  m_And(m_LShr(m_Deferred(AndOp0), m_SpecificInt(2)), in tryToRecognizePopCount()
     344  m_SpecificInt(Mask33))))) { in tryToRecognizePopCount()
     348  match(SubOp1, m_And(m_LShr(m_Specific(Root), m_SpecificInt(1)), in tryToRecognizePopCount()
          [more matches not shown]
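tryToRecognizePopCount() pins each mask constant of the classic SWAR popcount sequence (and the final shift amount) with m_SpecificInt, as the matches above show. As a rough, self-contained illustration of that matching style (not the tree's code; the helper name and the i32-only restriction are invented here), the last step of the idiom, "(v * 0x01010101) >> 24", could be recognised like this:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/PatternMatch.h"

static bool looksLikePopCountFinalStep(llvm::Instruction *I,
                                       llvm::Value *&Counted) {
  using namespace llvm;
  using namespace llvm::PatternMatch;

  Value *Op0, *Op1;
  if (!match(I, m_LShr(m_Value(Op0), m_Value(Op1))))
    return false;

  unsigned BW = I->getType()->getScalarSizeInBits();
  if (BW != 32)
    return false;                       // keep the sketch to the i32 case

  // 0x01010101: one set bit per byte, the Mask01 of the matches above.
  APInt Mask01 = APInt::getSplat(BW, APInt(8, 0x01));

  // "(Counted * 0x01010101) >> 24": the multiply sums the per-byte counts
  // into the top byte, the shift moves that byte down to the low byte.
  return match(Op0, m_Mul(m_Value(Counted), m_SpecificInt(Mask01))) &&
         match(Op1, m_SpecificInt(BW - 8));
}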
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Transforms/InstCombine/
InstCombineAndOrXor.cpp
     975  m_SpecificInt(2))) && in foldIsPowerOf2()
     983  m_SpecificInt(1))) && in foldIsPowerOf2()
    2077  m_SpecificInt(Ty->getScalarSizeInBits() - 1))), in visitAnd()
    2161  if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L))))) { in matchFunnelShift()
    2181  if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) && in matchFunnelShift()
    2182  match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))) in matchFunnelShift()
    2187  if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) && in matchFunnelShift()
    2188  match(R, m_And(m_Neg(m_ZExt(m_And(m_Specific(X), m_SpecificInt(Mask)))), in matchFunnelShift()
    2189  m_SpecificInt(Mask)))) in matchFunnelShift()
    2192  if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) && in matchFunnelShift()
          [more matches not shown]
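Every matchFunnelShift() hit above is some variant of proving that the two shift amounts in "(X << L) | (Y >> R)" are complementary, i.e. R == Width - L, possibly hidden behind an 'and' mask or a zext. A minimal sketch of just the direct complementary-amount check (helper name invented; assumes L and R are the already-extracted shift amounts and Width the scalar bit width):

#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"

static bool shiftAmountsAreComplementary(llvm::Value *L, llvm::Value *R,
                                         unsigned Width) {
  using namespace llvm::PatternMatch;
  // One amount must be "Width - <the other>"; m_SpecificInt pins the
  // constant Width, m_Specific requires the very same SSA value.
  return match(R, m_Sub(m_SpecificInt(Width), m_Specific(L))) ||
         match(L, m_Sub(m_SpecificInt(Width), m_Specific(R)));
}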
InstCombineSelect.cpp
     756  match(TrueVal, m_Add(m_Specific(B), m_SpecificInt(-*C))))) in canonicalizeSaturatedSubtract()
     760  match(TrueVal, m_Add(m_Specific(A), m_SpecificInt(-*C))))) in canonicalizeSaturatedSubtract()
     865  m_Xor(m_Deferred(TrueVal), m_SpecificInt(BitWidth - 1)))) in foldSelectCtlzToCttz()
     925  if (match(ValueOnZero, m_SpecificInt(SizeOfInBits))) { in foldSelectCttzCtlz()
    1825  return match(Min, m_SpecificInt(MinVal)) && in foldOverflowingAddSubSelect()
    1826  match(Max, m_SpecificInt(MaxVal)); in foldOverflowingAddSubSelect()
    2345  if (match(SA1, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(SA0))))) in foldSelectFunnelShift()
    2347  else if (match(SA0, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(SA1))))) in foldSelectFunnelShift()
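The foldSelectCttzCtlz() hit (line 925) checks that the arm chosen when the input is zero equals the operand's bit width; for example "select (x == 0), 32, cttz(x, true)" on i32 is equivalent to cttz(x, false), since the non-undef form already returns the bit width for a zero input. A minimal sketch of just that check (helper name invented here):

#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"

static bool selectsBitWidthOnZero(llvm::Value *ValueOnZero,
                                  llvm::Type *CountTy) {
  using namespace llvm::PatternMatch;
  // Bit width of the counted type, e.g. 32 for i32.
  unsigned SizeOfInBits = CountTy->getScalarSizeInBits();
  return match(ValueOnZero, m_SpecificInt(SizeOfInBits));
}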
InstCombineCasts.cpp
     565  if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L))))) in narrowFunnelShift()
     577  if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) && in narrowFunnelShift()
     578  match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))) in narrowFunnelShift()
     582  if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) && in narrowFunnelShift()
     583  match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))) in narrowFunnelShift()
InstCombineShifts.cpp
    1031  match(Op1, m_Sub(m_SpecificInt(BitWidth - 1), m_Value(X)))) in visitShl()
    1148  if (match(Op0, m_OneUse(m_SRem(m_Value(X), m_SpecificInt(2))))) { in visitLShr()
InstCombineSimplifyDemanded.cpp
    1235  m_ExtractElt(m_Value(Vec), m_SpecificInt(IdxNo))) && in SimplifyDemandedVectorElts()
InstCombineAddSub.cpp
    2116  if (match(Op0, m_SpecificInt(Ty->getScalarSizeInBits())) && in visitSub()
InstCombineVectorOps.cpp
    1185  if (!match(Scalar, m_ExtractElt(m_Specific(X), m_SpecificInt(IdxC)))) in foldInsEltIntoIdentityShuffle()
InstCombineCalls.cpp
    1123  if (Op0 == Op1 && BitWidth == 16 && match(ShAmtC, m_SpecificInt(8))) { in visitCallInst()
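The visitCallInst() hit (line 1123) guards a funnel-shift special case: when both inputs are the same value, the type is 16 bits wide, and the shift amount is exactly 8, the rotate swaps the two bytes and can presumably be rewritten as a byte swap (e.g. fshl.i16(x, x, 8) behaving as bswap.i16(x)). A standalone predicate for the same condition (helper name invented here):

#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"

static bool isSixteenBitByteSwapRotate(llvm::Value *Op0, llvm::Value *Op1,
                                       llvm::Value *ShAmtC, unsigned BitWidth) {
  using namespace llvm::PatternMatch;
  // Both funnel-shift inputs the same value (a rotate), 16-bit type,
  // rotate amount exactly 8 == half the width: that swaps the two bytes.
  return Op0 == Op1 && BitWidth == 16 && match(ShAmtC, m_SpecificInt(8));
}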
InstCombineCompares.cpp
    4012  match(Op1, m_Mul(m_Value(Y), m_SpecificInt(*C))) && I.isEquality()) in foldICmpBinOp()
/netbsd-src/external/apache2/llvm/dist/llvm/include/llvm/IR/
PatternMatch.h
     885  inline specific_intval<false> m_SpecificInt(APInt V) { in m_SpecificInt() (definition)
     889  inline specific_intval<false> m_SpecificInt(uint64_t V) { in m_SpecificInt() (definition)
     890  return m_SpecificInt(APInt(64, V)); in m_SpecificInt()
    2348  auto LHS = m_AShr(m_Value(OpL), m_SpecificInt(ShiftWidth));
    2349  auto RHS = m_LShr(m_Neg(m_Value(OpR)), m_SpecificInt(ShiftWidth));
    2439  if (m_PtrToInt(m_OffsetGep(m_Zero(), m_SpecificInt(1))).match(V)) {
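Lines 885-890 are the whole public surface of the matcher: one overload takes an APInt, the other wraps a uint64_t as APInt(64, V); the comparison is value-based (APInt::isSameValue), so the 64-bit form still matches narrower constants of the same value. A small standalone sketch of typical usage, assuming it is compiled and linked against the LLVM libraries (it is not part of the tree):

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::PatternMatch;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  IRBuilder<> B(Ctx);

  // Build: define i32 @f(i32 %x) { %add = add i32 %x, 42 ; ret i32 %add }
  auto *FTy = FunctionType::get(B.getInt32Ty(), {B.getInt32Ty()}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", &M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  B.SetInsertPoint(BB);
  Value *Add = B.CreateAdd(F->getArg(0), B.getInt32(42), "add");
  B.CreateRet(Add);

  // m_SpecificInt(42) goes through the uint64_t overload (APInt(64, 42)) but
  // still matches the i32 constant 42, because the comparison is value-based.
  Value *X;
  if (match(Add, m_Add(m_Value(X), m_SpecificInt(42))))
    errs() << "matched: add of 42 to " << *X << "\n";
  return 0;
}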
/netbsd-src/external/apache2/llvm/dist/llvm/lib/CodeGen/
CodeGenPrepare.cpp
    7463  m_SpecificInt(HalfValBitSize)))))) in splitMergedValStore()
    7724  match(UI, m_Shr(m_Specific(X), m_SpecificInt(CmpC.logBase2())))) { in optimizeBranch()
    7736  (match(UI, m_Add(m_Specific(X), m_SpecificInt(-CmpC))) || in optimizeBranch()
    7737  match(UI, m_Sub(m_Specific(X), m_SpecificInt(CmpC))))) { in optimizeBranch()
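The optimizeBranch() hits (lines 7724-7737) appear to scan the users of the compared value X for something that already encodes the comparison against the constant CmpC: an existing "X >> log2(CmpC)" when CmpC is a power of two, or an existing "X - CmpC" / "X + (-CmpC)". A rough sketch of that user scan for the shift form (helper name invented; assumes CmpC has already been verified to be a power of two, as logBase2() requires):

#include "llvm/ADT/APInt.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PatternMatch.h"

static llvm::Instruction *findShiftOfX(llvm::Value *X, const llvm::APInt &CmpC) {
  using namespace llvm;
  using namespace llvm::PatternMatch;
  for (User *U : X->users()) {
    auto *UI = dyn_cast<Instruction>(U);
    if (!UI)
      continue;
    // m_Shr matches either lshr or ashr; the amount must be exactly log2(CmpC).
    if (match(UI, m_Shr(m_Specific(X), m_SpecificInt(CmpC.logBase2()))))
      return UI;
  }
  return nullptr;
}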
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Analysis/
InstructionSimplify.cpp
    4318  m_SpecificInt(TyAllocSize))) && in SimplifyGEPInst()
    5731  if (ScaledOne.isNonNegative() && match(Op1, m_SpecificInt(ScaledOne))) in simplifyIntrinsic()