//===----------- VectorUtils.cpp - Vectorizer utility functions ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"

#define DEBUG_TYPE "vectorutils"

using namespace llvm;
using namespace llvm::PatternMatch;

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// Return true if all of the intrinsic's arguments and return type are scalars
/// for the scalar form of the intrinsic, and vectors for the vector form of
/// the intrinsic (except operands that are marked as always being scalar by
/// isVectorIntrinsicWithScalarOpAtArg).
bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::abs: // Begin integer bit-manipulation.
  case Intrinsic::bswap:
  case Intrinsic::bitreverse:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
  case Intrinsic::sqrt: // Begin floating-point.
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::tan:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::pow:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::is_fpclass:
  case Intrinsic::powi:
  case Intrinsic::canonicalize:
  case Intrinsic::fptosi_sat:
  case Intrinsic::fptoui_sat:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
    return true;
  default:
    return false;
  }
}

/// Identifies if the vector form of the intrinsic has a scalar operand.
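/// For example, the i1 second operand of @llvm.ctlz (the is_zero_poison flag)
/// and the i32 exponent of @llvm.powi remain scalar even when the intrinsic
/// itself is vectorized.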
bool llvm::isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
                                              unsigned ScalarOpdIdx) {
  switch (ID) {
  case Intrinsic::abs:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::is_fpclass:
  case Intrinsic::powi:
    return (ScalarOpdIdx == 1);
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
    return (ScalarOpdIdx == 2);
  default:
    return false;
  }
}

bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
                                                  int OpdIdx) {
  assert(ID != Intrinsic::not_intrinsic && "Not an intrinsic!");

  switch (ID) {
  case Intrinsic::fptosi_sat:
  case Intrinsic::fptoui_sat:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
    return OpdIdx == -1 || OpdIdx == 0;
  case Intrinsic::is_fpclass:
    return OpdIdx == 0;
  case Intrinsic::powi:
    return OpdIdx == -1 || OpdIdx == 1;
  default:
    return OpdIdx == -1;
  }
}

/// Returns intrinsic ID for call.
/// For the input call instruction it finds the mapping intrinsic and returns
/// its ID; if it does not find one, it returns not_intrinsic.
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
                                                const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getIntrinsicForCallSite(*CI, TLI);
  if (ID == Intrinsic::not_intrinsic)
    return Intrinsic::not_intrinsic;

  if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
      ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
      ID == Intrinsic::experimental_noalias_scope_decl ||
      ID == Intrinsic::sideeffect || ID == Intrinsic::pseudoprobe)
    return ID;
  return Intrinsic::not_intrinsic;
}

/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then
/// extracted from the vector.
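/// Illustrative IR sketch:
///   %v = insertelement <4 x float> %vec, float %x, i32 2
/// Here findScalarElement(%v, 2) returns %x, while findScalarElement(%v, 0)
/// recurses into %vec.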
Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
  assert(V->getType()->isVectorTy() && "Not looking at a vector?");
  VectorType *VTy = cast<VectorType>(V->getType());
  // For a fixed-length vector, return poison for an out-of-range access.
  if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
    unsigned Width = FVTy->getNumElements();
    if (EltNo >= Width)
      return PoisonValue::get(FVTy->getElementType());
  }

  if (Constant *C = dyn_cast<Constant>(V))
    return C->getAggregateElement(EltNo);

  if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return nullptr;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Guard against infinite loop on malformed, unreachable IR.
    if (III == III->getOperand(0))
      return nullptr;

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return findScalarElement(III->getOperand(0), EltNo);
  }

  ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
  // Restrict the following transformation to fixed-length vectors.
  if (SVI && isa<FixedVectorType>(SVI->getType())) {
    unsigned LHSWidth =
        cast<FixedVectorType>(SVI->getOperand(0)->getType())->getNumElements();
    int InEl = SVI->getMaskValue(EltNo);
    if (InEl < 0)
      return PoisonValue::get(VTy->getElementType());
    if (InEl < (int)LHSWidth)
      return findScalarElement(SVI->getOperand(0), InEl);
    return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
  }

  // Extract a value from a vector add operation with a constant zero.
  // TODO: Use getBinOpIdentity() to generalize this.
  Value *Val; Constant *C;
  if (match(V, m_Add(m_Value(Val), m_Constant(C))))
    if (Constant *Elt = C->getAggregateElement(EltNo))
      if (Elt->isNullValue())
        return findScalarElement(Val, EltNo);

  // If the vector is a splat then we can trivially find the scalar element.
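  // (Scalable vectors only: fixed-length splats are already handled by the
  // constant and shuffle cases above.)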
  if (isa<ScalableVectorType>(VTy))
    if (Value *Splat = getSplatValue(V))
      if (EltNo < VTy->getElementCount().getKnownMinValue())
        return Splat;

  // Otherwise, we don't know.
  return nullptr;
}

int llvm::getSplatIndex(ArrayRef<int> Mask) {
  int SplatIndex = -1;
  for (int M : Mask) {
    // Ignore invalid (undefined) mask elements.
    if (M < 0)
      continue;

    // There can be only 1 non-negative mask element value if this is a splat.
    if (SplatIndex != -1 && SplatIndex != M)
      return -1;

    // Initialize the splat index to the 1st non-negative mask element.
    SplatIndex = M;
  }
  assert((SplatIndex == -1 || SplatIndex >= 0) && "Negative index?");
  return SplatIndex;
}

/// Get splat value if the input is a splat vector or return nullptr.
/// This function is not fully general. It checks only 2 cases:
/// the input value is (1) a splat constant vector or (2) a sequence
/// of instructions that broadcasts a scalar at element 0.
Value *llvm::getSplatValue(const Value *V) {
  if (isa<VectorType>(V->getType()))
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue();

  // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
  Value *Splat;
  if (match(V,
            m_Shuffle(m_InsertElt(m_Value(), m_Value(Splat), m_ZeroInt()),
                      m_Value(), m_ZeroMask())))
    return Splat;

  return nullptr;
}

bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");

  if (isa<VectorType>(V->getType())) {
    if (isa<UndefValue>(V))
      return true;
    // FIXME: We can allow undefs, but if Index was specified, we may want to
    //        check that the constant is defined at that index.
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue() != nullptr;
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V)) {
    // FIXME: We can safely allow undefs here. If Index was specified, we will
    //        check that the mask elt is defined at the required index.
    if (!all_equal(Shuf->getShuffleMask()))
      return false;

    // Match any index.
    if (Index == -1)
      return true;

    // Match a specific element. The mask should be defined at and match the
    // specified index.
    return Shuf->getMaskValue(Index) == Index;
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxAnalysisRecursionDepth)
    return false;

  // If both operands of a binop are splats, the result is a splat.
  Value *X, *Y, *Z;
  if (match(V, m_BinOp(m_Value(X), m_Value(Y))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth);

  // If all operands of a select are splats, the result is a splat.
  if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth) &&
           isSplatValue(Z, Index, Depth);

  // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops).

  return false;
}

bool llvm::getShuffleDemandedElts(int SrcWidth, ArrayRef<int> Mask,
                                  const APInt &DemandedElts, APInt &DemandedLHS,
                                  APInt &DemandedRHS, bool AllowUndefElts) {
  DemandedLHS = DemandedRHS = APInt::getZero(SrcWidth);

  // Early out if we don't demand any elements.
  if (DemandedElts.isZero())
    return true;

  // Simple case of a shuffle with zeroinitializer.
  if (all_of(Mask, [](int Elt) { return Elt == 0; })) {
    DemandedLHS.setBit(0);
    return true;
  }

  for (unsigned I = 0, E = Mask.size(); I != E; ++I) {
    int M = Mask[I];
    assert((-1 <= M) && (M < (SrcWidth * 2)) &&
           "Invalid shuffle mask constant");

    if (!DemandedElts[I] || (AllowUndefElts && (M < 0)))
      continue;

    // For undef elements, we don't know anything about the common state of
    // the shuffle result.
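    // (Only reachable when AllowUndefElts is false; a demanded undef lane
    // then makes the whole result unknown.)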
    if (M < 0)
      return false;

    if (M < SrcWidth)
      DemandedLHS.setBit(M);
    else
      DemandedRHS.setBit(M - SrcWidth);
  }

  return true;
}

void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                 SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return;
  }

  ScaledMask.clear();
  for (int MaskElt : Mask) {
    if (MaskElt >= 0) {
      assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <= INT32_MAX &&
             "Overflowed 32-bits");
    }
    for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
      ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt);
  }
}

bool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return true;
  }

  // We must map the original elements down evenly to a type with fewer
  // elements.
  int NumElts = Mask.size();
  if (NumElts % Scale != 0)
    return false;

  ScaledMask.clear();
  ScaledMask.reserve(NumElts / Scale);

  // Step through the input mask by splitting into Scale-sized slices.
  do {
    ArrayRef<int> MaskSlice = Mask.take_front(Scale);
    assert((int)MaskSlice.size() == Scale && "Expected Scale-sized slice.");

    // The first element of the slice determines how we evaluate this slice.
    int SliceFront = MaskSlice.front();
    if (SliceFront < 0) {
      // Negative values (undef or other "sentinel" values) must be equal
      // across the entire slice.
      if (!all_equal(MaskSlice))
        return false;
      ScaledMask.push_back(SliceFront);
    } else {
      // A positive mask element must be cleanly divisible.
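      // Illustrative: with Scale == 2, the slice <4,5> widens to element 2,
      // whereas <3,4> fails here (3 is not a multiple of 2) and <4,6> fails
      // the consecutiveness check below.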
      if (SliceFront % Scale != 0)
        return false;
      // Elements of the slice must be consecutive.
      for (int i = 1; i < Scale; ++i)
        if (MaskSlice[i] != SliceFront + i)
          return false;
      ScaledMask.push_back(SliceFront / Scale);
    }
    Mask = Mask.drop_front(Scale);
  } while (!Mask.empty());

  assert((int)ScaledMask.size() * Scale == NumElts && "Unexpected scaled mask");

  // All elements of the original mask can be scaled down to map to the
  // elements of a mask with wider elements.
  return true;
}

bool llvm::scaleShuffleMaskElts(unsigned NumDstElts, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &ScaledMask) {
  unsigned NumSrcElts = Mask.size();
  assert(NumSrcElts > 0 && NumDstElts > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (NumSrcElts == NumDstElts) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return true;
  }

  // Ensure we can find a whole scale factor.
  assert(((NumSrcElts % NumDstElts) == 0 || (NumDstElts % NumSrcElts) == 0) &&
         "Unexpected scaling factor");

  if (NumSrcElts > NumDstElts) {
    int Scale = NumSrcElts / NumDstElts;
    return widenShuffleMaskElts(Scale, Mask, ScaledMask);
  }

  int Scale = NumDstElts / NumSrcElts;
  narrowShuffleMaskElts(Scale, Mask, ScaledMask);
  return true;
}

void llvm::getShuffleMaskWithWidestElts(ArrayRef<int> Mask,
                                        SmallVectorImpl<int> &ScaledMask) {
  std::array<SmallVector<int, 16>, 2> TmpMasks;
  SmallVectorImpl<int> *Output = &TmpMasks[0], *Tmp = &TmpMasks[1];
  ArrayRef<int> InputMask = Mask;
  for (unsigned Scale = 2; Scale <= InputMask.size(); ++Scale) {
    while (widenShuffleMaskElts(Scale, InputMask, *Output)) {
      InputMask = *Output;
      std::swap(Output, Tmp);
    }
  }
  ScaledMask.assign(InputMask.begin(), InputMask.end());
}

void llvm::processShuffleMasks(
    ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
    unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
    function_ref<void(ArrayRef<int>, unsigned, unsigned)> SingleInputAction,
    function_ref<void(ArrayRef<int>, unsigned, unsigned)> ManyInputsAction) {
  SmallVector<SmallVector<SmallVector<int>>> Res(NumOfDestRegs);
  // Try to perform better estimation of the permutation.
  // 1. Split the source/destination vectors into real registers.
  // 2. Do the mask analysis to identify which real registers are
  // permuted.
  int Sz = Mask.size();
  unsigned SzDest = Sz / NumOfDestRegs;
  unsigned SzSrc = Sz / NumOfSrcRegs;
  for (unsigned I = 0; I < NumOfDestRegs; ++I) {
    auto &RegMasks = Res[I];
    RegMasks.assign(NumOfSrcRegs, {});
    // Check which src register each value in this dest register comes
    // from.
    for (unsigned K = 0; K < SzDest; ++K) {
      int Idx = I * SzDest + K;
      if (Idx == Sz)
        break;
      if (Mask[Idx] >= Sz || Mask[Idx] == PoisonMaskElem)
        continue;
      int SrcRegIdx = Mask[Idx] / SzSrc;
      // Add a cost of PermuteTwoSrc for each new source register permute,
      // if we have more than one source register.
      if (RegMasks[SrcRegIdx].empty())
        RegMasks[SrcRegIdx].assign(SzDest, PoisonMaskElem);
      RegMasks[SrcRegIdx][K] = Mask[Idx] % SzSrc;
    }
  }
  // Process split mask.
  for (unsigned I = 0; I < NumOfUsedRegs; ++I) {
    auto &Dest = Res[I];
    int NumSrcRegs =
        count_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
    switch (NumSrcRegs) {
    case 0:
      // No input vectors were used!
      NoInputAction();
      break;
    case 1: {
      // Find the only non-empty source mask (the single source register
      // permute).
      auto *It =
          find_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
      unsigned SrcReg = std::distance(Dest.begin(), It);
      SingleInputAction(*It, SrcReg, I);
      break;
    }
    default: {
      // The first mask is a permutation of a single register. Since we have
      // two or more input registers to shuffle, we merge the masks for the
      // first 2 registers and generate a shuffle of 2 registers rather than
      // the reordering of the first register and then shuffle with the second
      // register. Next, generate the shuffles of the resulting register + the
      // remaining registers from the list.
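      // Illustrative: with three non-empty source masks A, B and C, the loop
      // below emits shuffle(A, B), normalizes the combined mask to an
      // identity mask, then emits shuffle(result, C), and so on until a
      // single register remains.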
      auto &&CombineMasks = [](MutableArrayRef<int> FirstMask,
                               ArrayRef<int> SecondMask) {
        for (int Idx = 0, VF = FirstMask.size(); Idx < VF; ++Idx) {
          if (SecondMask[Idx] != PoisonMaskElem) {
            assert(FirstMask[Idx] == PoisonMaskElem &&
                   "Expected undefined mask element.");
            FirstMask[Idx] = SecondMask[Idx] + VF;
          }
        }
      };
      auto &&NormalizeMask = [](MutableArrayRef<int> Mask) {
        for (int Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
          if (Mask[Idx] != PoisonMaskElem)
            Mask[Idx] = Idx;
        }
      };
      int SecondIdx;
      do {
        int FirstIdx = -1;
        SecondIdx = -1;
        MutableArrayRef<int> FirstMask, SecondMask;
        for (unsigned I = 0; I < NumOfDestRegs; ++I) {
          SmallVectorImpl<int> &RegMask = Dest[I];
          if (RegMask.empty())
            continue;

          if (FirstIdx == SecondIdx) {
            FirstIdx = I;
            FirstMask = RegMask;
            continue;
          }
          SecondIdx = I;
          SecondMask = RegMask;
          CombineMasks(FirstMask, SecondMask);
          ManyInputsAction(FirstMask, FirstIdx, SecondIdx);
          NormalizeMask(FirstMask);
          RegMask.clear();
          SecondMask = FirstMask;
          SecondIdx = FirstIdx;
        }
        if (FirstIdx != SecondIdx && SecondIdx >= 0) {
          CombineMasks(SecondMask, FirstMask);
          ManyInputsAction(SecondMask, SecondIdx, FirstIdx);
          Dest[FirstIdx].clear();
          NormalizeMask(SecondMask);
        }
      } while (SecondIdx >= 0);
      break;
    }
    }
  }
}

void llvm::getHorizDemandedEltsForFirstOperand(unsigned VectorBitWidth,
                                               const APInt &DemandedElts,
                                               APInt &DemandedLHS,
                                               APInt &DemandedRHS) {
  assert(VectorBitWidth >= 128 && "Vectors smaller than 128 bit not supported");
  int NumLanes = VectorBitWidth / 128;
  int NumElts = DemandedElts.getBitWidth();
  int NumEltsPerLane = NumElts / NumLanes;
  int HalfEltsPerLane = NumEltsPerLane / 2;

  DemandedLHS = APInt::getZero(NumElts);
  DemandedRHS = APInt::getZero(NumElts);

  // Map DemandedElts to the horizontal operands.
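  // For a horizontal op, the low half of each result lane is computed from
  // adjacent pairs of LHS elements and the high half from adjacent pairs of
  // RHS elements (x86-style HADD/HSUB semantics); this records the first
  // element of each demanded pair, hence "ForFirstOperand".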
  for (int Idx = 0; Idx != NumElts; ++Idx) {
    if (!DemandedElts[Idx])
      continue;
    int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
    int LocalIdx = Idx % NumEltsPerLane;
    if (LocalIdx < HalfEltsPerLane) {
      DemandedLHS.setBit(LaneIdx + 2 * LocalIdx);
    } else {
      LocalIdx -= HalfEltsPerLane;
      DemandedRHS.setBit(LaneIdx + 2 * LocalIdx);
    }
  }
}

MapVector<Instruction *, uint64_t>
llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                               const TargetTransformInfo *TTI) {

  // DemandedBits will give us every value's live-out bits. But we want
  // to ensure no extra casts would need to be inserted, so every DAG
  // of connected values must have the same minimum bitwidth.
  EquivalenceClasses<Value *> ECs;
  SmallVector<Value *, 16> Worklist;
  SmallPtrSet<Value *, 4> Roots;
  SmallPtrSet<Value *, 16> Visited;
  DenseMap<Value *, uint64_t> DBits;
  SmallPtrSet<Instruction *, 4> InstructionSet;
  MapVector<Instruction *, uint64_t> MinBWs;

  // Determine the roots. We work bottom-up, from truncs or icmps.
  bool SeenExtFromIllegalType = false;
  for (auto *BB : Blocks)
    for (auto &I : *BB) {
      InstructionSet.insert(&I);

      if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
          !TTI->isTypeLegal(I.getOperand(0)->getType()))
        SeenExtFromIllegalType = true;

      // Only deal with non-vector integers up to 64-bits wide.
      if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
          !I.getType()->isVectorTy() &&
          I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
        // Don't make work for ourselves. If we know the loaded type is legal,
        // don't add it to the worklist.
        if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
          continue;

        Worklist.push_back(&I);
        Roots.insert(&I);
      }
    }
  // Early exit.
  if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
    return MinBWs;

  // Now proceed breadth-first, unioning values together.
  while (!Worklist.empty()) {
    Value *Val = Worklist.pop_back_val();
    Value *Leader = ECs.getOrInsertLeaderValue(Val);

    if (!Visited.insert(Val).second)
      continue;

    // Non-instructions terminate a chain successfully.
    if (!isa<Instruction>(Val))
      continue;
    Instruction *I = cast<Instruction>(Val);

    // If we encounter a type that is larger than 64 bits, we can't represent
    // it so bail out.
    if (DB.getDemandedBits(I).getBitWidth() > 64)
      return MapVector<Instruction *, uint64_t>();

    uint64_t V = DB.getDemandedBits(I).getZExtValue();
    DBits[Leader] |= V;
    DBits[I] = V;

    // Casts, loads and instructions outside of our range terminate a chain
    // successfully.
    if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
        !InstructionSet.count(I))
      continue;

    // Unsafe casts terminate a chain unsuccessfully. We can't do anything
    // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
    // transform anything that relies on them.
    if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
        !I->getType()->isIntegerTy()) {
      DBits[Leader] |= ~0ULL;
      continue;
    }

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    if (isa<PHINode>(I))
      continue;

    if (DBits[Leader] == ~0ULL)
      // All bits demanded, no point continuing.
      continue;

    for (Value *O : cast<User>(I)->operands()) {
      ECs.unionSets(Leader, O);
      Worklist.push_back(O);
    }
  }

  // Now we've discovered all values, walk them to see if there are
  // any users we didn't see. If there are, we can't optimize that
  // chain.
  for (auto &I : DBits)
    for (auto *U : I.first->users())
      if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
        DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;

  for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
    uint64_t LeaderDemandedBits = 0;
    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
      LeaderDemandedBits |= DBits[M];

    uint64_t MinBW = llvm::bit_width(LeaderDemandedBits);
    // Round up to a power of 2.
    MinBW = llvm::bit_ceil(MinBW);

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    // If we are required to shrink a PHI, abandon this entire equivalence
    // class.
    bool Abort = false;
    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
      if (isa<PHINode>(M) && MinBW < M->getType()->getScalarSizeInBits()) {
        Abort = true;
        break;
      }
    if (Abort)
      continue;

    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end())) {
      auto *MI = dyn_cast<Instruction>(M);
      if (!MI)
        continue;
      Type *Ty = M->getType();
      if (Roots.count(M))
        Ty = MI->getOperand(0)->getType();

      if (MinBW >= Ty->getScalarSizeInBits())
        continue;

      // If any of M's operands demand more bits than MinBW then M cannot be
      // performed safely in MinBW.
      if (any_of(MI->operands(), [&DB, MinBW](Use &U) {
            auto *CI = dyn_cast<ConstantInt>(U);
            // For constant shift amounts, check if the shift would result in
            // poison.
            if (CI &&
                isa<ShlOperator, LShrOperator, AShrOperator>(U.getUser()) &&
                U.getOperandNo() == 1)
              return CI->uge(MinBW);
            uint64_t BW = bit_width(DB.getDemandedBits(&U).getZExtValue());
            return bit_ceil(BW) > MinBW;
          }))
        continue;

      MinBWs[MI] = MinBW;
    }
  }

  return MinBWs;
}

/// Add all access groups in @p AccGroups to @p List.
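/// Note: an access group is represented by a distinct, operand-less MDNode,
/// while a list of access groups is an MDNode whose operands are such nodes;
/// both forms are accepted here.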
template <typename ListT>
static void addToAccessGroupList(ListT &List, MDNode *AccGroups) {
  // Interpret an access group as a list containing itself.
  if (AccGroups->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group");
    List.insert(AccGroups);
    return;
  }

  for (const auto &AccGroupListOp : AccGroups->operands()) {
    auto *Item = cast<MDNode>(AccGroupListOp.get());
    assert(isValidAsAccessGroup(Item) && "List item must be an access group");
    List.insert(Item);
  }
}

MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) {
  if (!AccGroups1)
    return AccGroups2;
  if (!AccGroups2)
    return AccGroups1;
  if (AccGroups1 == AccGroups2)
    return AccGroups1;

  SmallSetVector<Metadata *, 4> Union;
  addToAccessGroupList(Union, AccGroups1);
  addToAccessGroupList(Union, AccGroups2);

  if (Union.size() == 0)
    return nullptr;
  if (Union.size() == 1)
    return cast<MDNode>(Union.front());

  LLVMContext &Ctx = AccGroups1->getContext();
  return MDNode::get(Ctx, Union.getArrayRef());
}

MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
                                    const Instruction *Inst2) {
  bool MayAccessMem1 = Inst1->mayReadOrWriteMemory();
  bool MayAccessMem2 = Inst2->mayReadOrWriteMemory();

  if (!MayAccessMem1 && !MayAccessMem2)
    return nullptr;
  if (!MayAccessMem1)
    return Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MayAccessMem2)
    return Inst1->getMetadata(LLVMContext::MD_access_group);

  MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group);
  MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MD1 || !MD2)
    return nullptr;
  if (MD1 == MD2)
    return MD1;

  // Use set for scalable 'contains' check.
  SmallPtrSet<Metadata *, 4> AccGroupSet2;
  addToAccessGroupList(AccGroupSet2, MD2);

  SmallVector<Metadata *, 4> Intersection;
  if (MD1->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(MD1) && "Node must be an access group");
    if (AccGroupSet2.count(MD1))
      Intersection.push_back(MD1);
  } else {
    for (const MDOperand &Node : MD1->operands()) {
      auto *Item = cast<MDNode>(Node.get());
      assert(isValidAsAccessGroup(Item) && "List item must be an access group");
      if (AccGroupSet2.count(Item))
        Intersection.push_back(Item);
    }
  }

  if (Intersection.size() == 0)
    return nullptr;
  if (Intersection.size() == 1)
    return cast<MDNode>(Intersection.front());

  LLVMContext &Ctx = Inst1->getContext();
  return MDNode::get(Ctx, Intersection);
}

/// \returns \p Inst after propagating metadata from \p VL.
Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
  if (VL.empty())
    return Inst;
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                    LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
                    LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load,
                    LLVMContext::MD_access_group, LLVMContext::MD_mmra}) {
    MDNode *MD = I0->getMetadata(Kind);
    for (int J = 1, E = VL.size(); MD && J != E; ++J) {
      const Instruction *IJ = cast<Instruction>(VL[J]);
      MDNode *IMD = IJ->getMetadata(Kind);

      switch (Kind) {
      case LLVMContext::MD_mmra: {
        MD = MMRAMetadata::combine(Inst->getContext(), MD, IMD);
        break;
      }
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_nontemporal:
      case LLVMContext::MD_invariant_load:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_access_group:
        MD = intersectAccessGroups(Inst, IJ);
        break;
      default:
        llvm_unreachable("unhandled metadata");
      }
    }

    Inst->setMetadata(Kind, MD);
  }

  return Inst;
}

Constant *
llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
                           const InterleaveGroup<Instruction> &Group) {
  // All 1's means mask is not needed.
  if (Group.getNumMembers() == Group.getFactor())
    return nullptr;

  // TODO: support reversed access.
  assert(!Group.isReverse() && "Reversed group not supported.");

  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < Group.getFactor(); ++j) {
      unsigned HasMember = Group.getMember(j) ? 1 : 0;
      Mask.push_back(Builder.getInt1(HasMember));
    }

  return ConstantVector::get(Mask);
}

llvm::SmallVector<int, 16>
llvm::createReplicatedMask(unsigned ReplicationFactor, unsigned VF) {
  SmallVector<int, 16> MaskVec;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < ReplicationFactor; j++)
      MaskVec.push_back(i);

  return MaskVec;
}

llvm::SmallVector<int, 16> llvm::createInterleaveMask(unsigned VF,
                                                      unsigned NumVecs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < NumVecs; j++)
      Mask.push_back(j * VF + i);

  return Mask;
}

llvm::SmallVector<int, 16>
llvm::createStrideMask(unsigned Start, unsigned Stride, unsigned VF) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    Mask.push_back(Start + i * Stride);

  return Mask;
}

llvm::SmallVector<int, 16> llvm::createSequentialMask(unsigned Start,
                                                      unsigned NumInts,
                                                      unsigned NumUndefs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < NumInts; i++)
    Mask.push_back(Start + i);

  for (unsigned i = 0; i < NumUndefs; i++)
    Mask.push_back(-1);

  return Mask;
}

llvm::SmallVector<int, 16> llvm::createUnaryMask(ArrayRef<int> Mask,
                                                 unsigned NumElts) {
  // Avoid casts in the loop and make sure we have a reasonable number.
  int NumEltsSigned = NumElts;
  assert(NumEltsSigned > 0 && "Expected smaller or non-zero element count");

  // If the mask chooses an element from operand 1, reduce it to choose from
  // the corresponding element of operand 0. Undef mask elements are unchanged.
  SmallVector<int, 16> UnaryMask;
  for (int MaskElt : Mask) {
    assert((MaskElt < NumEltsSigned * 2) && "Expected valid shuffle mask");
    int UnaryElt = MaskElt >= NumEltsSigned ? MaskElt - NumEltsSigned : MaskElt;
    UnaryMask.push_back(UnaryElt);
  }
  return UnaryMask;
}

/// A helper function for concatenating vectors. This function concatenates two
/// vectors having the same element type. If the second vector has fewer
/// elements than the first, it is padded with undefs.
static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1,
                                    Value *V2) {
  VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
  VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
  assert(VecTy1 && VecTy2 &&
         VecTy1->getScalarType() == VecTy2->getScalarType() &&
         "Expect two vectors with the same element type");

  unsigned NumElts1 = cast<FixedVectorType>(VecTy1)->getNumElements();
  unsigned NumElts2 = cast<FixedVectorType>(VecTy2)->getNumElements();
  assert(NumElts1 >= NumElts2 &&
         "Unexpected: the first vector has fewer elements");

  if (NumElts1 > NumElts2) {
    // Extend with UNDEFs.
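    // Illustrative: when concatenating <4 x i32> with <2 x i32>, V2 is first
    // widened with mask <0,1,-1,-1>, and the final shuffle below then selects
    // elements <0,1,2,3,4,5> from the pair.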
    V2 = Builder.CreateShuffleVector(
        V2, createSequentialMask(0, NumElts2, NumElts1 - NumElts2));
  }

  return Builder.CreateShuffleVector(
      V1, V2, createSequentialMask(0, NumElts1 + NumElts2, 0));
}

Value *llvm::concatenateVectors(IRBuilderBase &Builder,
                                ArrayRef<Value *> Vecs) {
  unsigned NumVecs = Vecs.size();
  assert(NumVecs > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(Vecs.begin(), Vecs.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVecs - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVecs % 2 != 0)
      TmpList.push_back(ResList[NumVecs - 1]);

    ResList = TmpList;
    NumVecs = ResList.size();
  } while (NumVecs > 1);

  return ResList[0];
}

bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

bool llvm::maskIsAllOneOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

bool llvm::maskIsAllOneOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

bool llvm::maskContainsAllOneOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        return true;
  }
  return false;
}
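
// Examples over <4 x i1> constant masks (editor's illustration):
//   <0, 0, undef, 0> -> maskIsAllZeroOrUndef returns true.
//   <1, undef, 1, 1> -> maskIsAllOneOrUndef returns true.
//   <0, 1, 0, 0>     -> maskContainsAllOneOrUndef returns true, while the
//                       two predicates above both return false.
// Any non-constant mask makes all three predicates return false.
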
/// TODO: This is a lot like known bits, but for vectors. Is there something
/// we can unify this with?
APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {
  assert(isa<FixedVectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a fixed width vector of i1");

  const unsigned VWidth =
      cast<FixedVectorType>(Mask->getType())->getNumElements();
  APInt DemandedElts = APInt::getAllOnes(VWidth);
  if (auto *CV = dyn_cast<ConstantVector>(Mask))
    for (unsigned i = 0; i < VWidth; i++)
      if (CV->getAggregateElement(i)->isNullValue())
        DemandedElts.clearBit(i);
  return DemandedElts;
}
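
// For example (editor's illustration): given the constant mask
// <i1 1, i1 0, i1 undef, i1 1>, only lane 1 is provably zero, so the result
// has bits {0, 2, 3} set. Undef lanes stay conservatively demanded, and a
// mask that is not a ConstantVector demands every lane.
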
bool InterleavedAccessInfo::isStrided(int Stride) {
  unsigned Factor = std::abs(Stride);
  return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
}

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const DenseMap<Value *, const SCEV *> &Strides) {
  auto &DL = TheLoop->getHeader()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;
      Type *ElementTy = getLoadStoreType(&I);

      // Currently, codegen doesn't support cases where the type size doesn't
      // match the alloc size. Skip them for now.
      uint64_t Size = DL.getTypeAllocSize(ElementTy);
      if (Size * 8 != DL.getTypeSizeInBits(ElementTy))
        continue;

      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) would be
      // overly conservative. For full groups, wrapping should be fine, since
      // if we would wrap around the address space we would do a memory access
      // at nullptr even without the transformation. The wrapping checks are
      // therefore deferred until after we've formed the interleaved groups.
      int64_t Stride =
          getPtrStride(PSE, ElementTy, Ptr, TheLoop, Strides,
                       /*Assume=*/true, /*ShouldCheckWrap=*/false).value_or(0);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size,
                                              getLoadStoreAlignment(&I));
    }
}
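
// As an illustration (editor's sketch, not from the original source): for a
// loop whose body performs
//   B[i] = A[3 * i] + A[3 * i + 1];
// both loads of A get a StrideDescriptor with Stride == 3 (in units of the
// element size), and the RPO walk above records them in AccessStrideInfo in
// program order, ready for interleave-group formation.
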
// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];  // (1)
//                                A[i] = b;  // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
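
// As a concrete sketch (editor's illustration): for a stride-2 loop whose
// body computes
//   sum += A[2 * i] + A[2 * i + 1];
// the bottom-up walk first creates a load group for the later A[2*i+1]
// access and then inserts the A[2*i] load alongside it, producing a single
// load group of factor 2 with no gaps and no dependences to invalidate it.
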
void InterleavedAccessInfo::analyzeInterleaving(
    bool EnablePredicatedInterleavedMemAccesses) {
  LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
  const auto &Strides = LAI->getSymbolicStrides();

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups;
  // Groups added to this set cannot have new members added.
  SmallPtrSet<InterleaveGroup<Instruction> *, 4> CompletedLoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup<Instruction> *GroupB = nullptr;
    if (isStrided(DesB.Stride) &&
        (!isPredicated(B->getParent()) ||
         EnablePredicatedInterleavedMemAccesses)) {
      GroupB = getInterleaveGroup(B);
      if (!GroupB) {
        LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
                          << '\n');
        GroupB = createInterleaveGroup(B, DesB.Stride, DesB.Alignment);
        if (B->mayWriteToMemory())
          StoreGroups.insert(GroupB);
        else
          LoadGroups.insert(GroupB);
      }
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a;  // (1)
      //                    | A[i-1] = b;  // (2) |
      //                      A[i-3] = c;  // (3)
      //                      A[i]   = d;  // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      auto DependentMember = [&](InterleaveGroup<Instruction> *Group,
                                 StrideEntry *A) -> Instruction * {
        for (uint32_t Index = 0; Index < Group->getFactor(); ++Index) {
          Instruction *MemberOfGroupB = Group->getMember(Index);
          if (MemberOfGroupB &&
              !canReorderMemAccessesForInterleavedGroups(
                  A, &*AccessStrideInfo.find(MemberOfGroupB)))
            return MemberOfGroupB;
        }
        return nullptr;
      };

      auto GroupA = getInterleaveGroup(A);
      // If A is a load, dependencies are tolerable; there's nothing to do
      // here. If both A and B belong to the same (store) group, they are
      // independent, even if dependencies have not been recorded.
      // If both GroupA and GroupB are null, there's nothing to do here.
      if (A->mayWriteToMemory() && GroupA != GroupB) {
        Instruction *DependentInst = nullptr;
        // If GroupB is a load group, we have to compare AI against all
        // members of GroupB because if any load within GroupB has a dependency
        // on AI, we need to mark GroupB as complete and also release the
        // store GroupA (if A belongs to one). The former prevents incorrect
        // hoisting of load B above store A, while the latter prevents
        // incorrect sinking of store A below load B.
        if (GroupB && LoadGroups.contains(GroupB))
          DependentInst = DependentMember(GroupB, &*AI);
        else if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI))
          DependentInst = B;

        if (DependentInst) {
          // A has a store dependence on B (or on some load within GroupB) and
          // is part of a store group. Release A's group to prevent illegal
          // sinking of A below B. A will then be free to form another group
          // with instructions that precede it.
          if (GroupA && StoreGroups.contains(GroupA)) {
            LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to "
                                 "dependence between "
                              << *A << " and " << *DependentInst << '\n');
            StoreGroups.remove(GroupA);
            releaseGroup(GroupA);
          }
          // If B is a load and part of an interleave group, no earlier loads
          // can be added to B's interleave group, because this would mean the
          // DependentInst would move across store A. Mark the interleave group
          // as complete.
          if (GroupB && LoadGroups.contains(GroupB)) {
            LLVM_DEBUG(dbgs() << "LV: Marking interleave group for " << *B
                              << " as complete.\n");
            CompletedLoadGroups.insert(GroupB);
          }
        }
      }
      if (CompletedLoadGroups.contains(GroupB)) {
        // Skip trying to add A to B; continue looking for other conflicting
        // A's in groups to be released.
        continue;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      // Note that mayReadFromMemory() isn't mutually exclusive with
      // mayWriteToMemory() in the case of atomic loads. We shouldn't see those
      // here; canVectorizeMemory() should have returned false, except for the
      // case where we asked for optimization remarks.
      if (isInterleaved(A) ||
          (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
          (A->mayWriteToMemory() != B->mayWriteToMemory()))
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory objects of A and B don't belong to the same
      // address space.
      if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // All members of a predicated interleave-group must have the same
      // predicate, and currently must reside in the same BB.
      BasicBlock *BlockA = A->getParent();
      BasicBlock *BlockB = B->getParent();
      if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
          (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          GroupB->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (GroupB->insertMember(A, IndexA, DesA.Alignment)) {
        LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                          << "    into the interleave group with" << *B
                          << '\n');
        InterleaveGroupMap[A] = GroupB;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          GroupB->setInsertPos(A);
      }
    } // Iteration over A accesses.
  } // Iteration over B accesses.

  auto InvalidateGroupIfMemberMayWrap = [&](InterleaveGroup<Instruction> *Group,
                                            int Index,
                                            std::string FirstOrLast) -> bool {
    Instruction *Member = Group->getMember(Index);
    assert(Member && "Group member does not exist");
    Value *MemberPtr = getLoadStorePointerOperand(Member);
    Type *AccessTy = getLoadStoreType(Member);
    if (getPtrStride(PSE, AccessTy, MemberPtr, TheLoop, Strides,
                     /*Assume=*/false, /*ShouldCheckWrap=*/true).value_or(0))
      return false;
    LLVM_DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                      << FirstOrLast
                      << " group member potentially pointer-wrapping.\n");
    releaseGroup(Group);
    return true;
  };

  // Remove interleaved groups with gaps whose memory accesses may wrap around.
  // We have to revisit the getPtrStride analysis, this time with
  // ShouldCheckWrap=true, since collectConstStrideAccesses does not check
  // wrapping (see documentation there).
  // For now we use Assume=false;
  // TODO: Change to Assume=true, making sure we don't exceed the threshold
  // of runtime SCEV assumption checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer
  // doesn't wrap, then we can deduce that all pointers in the group don't
  // wrap. This means that we can forcefully peel the loop in order to only
  // have to check the first pointer for no-wrap. Once we change to
  // Assume=true, we'll only need at most one runtime check per interleaved
  // group.
  for (auto *Group : LoadGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide load would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If the first and last members of the group don't wrap, this
    // implies that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and group member Factor - 1; if the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
    if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first")))
      continue;
    if (Group->getMember(Group->getFactor() - 1))
      InvalidateGroupIfMemberMayWrap(Group, Group->getFactor() - 1,
                                     std::string("last"));
    else {
      // Case 3: A non-reversed interleaved load group with gaps: we need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      LLVM_DEBUG(
          dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }

  for (auto *Group : StoreGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide store would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // An interleave-store-group with gaps is implemented using a masked wide
    // store. Remove interleaved store groups with gaps if
    // masked-interleaved-accesses are not enabled by the target.
    if (!EnablePredicatedInterleavedMemAccesses) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved store group due "
                    "to gaps.\n");
      releaseGroup(Group);
      continue;
    }

    // Case 2: If the first and last members of the group don't wrap, this
    // implies that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and the last group member. Case 3 (scalar epilog) is not relevant for
    // stores with gaps, which are implemented with masked-store (rather than
    // speculative access, as in loads).
    if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first")))
      continue;
    for (int Index = Group->getFactor() - 1; Index > 0; Index--)
      if (Group->getMember(Index)) {
        InvalidateGroupIfMemberMayWrap(Group, Index, std::string("last"));
        break;
      }
  }
}

void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
  // If no group had triggered the requirement to create an epilogue loop,
  // there is nothing to do.
  if (!requiresScalarEpilogue())
    return;

  // Release groups requiring scalar epilogues. Note that this also removes
  // them from InterleaveGroups.
  bool ReleasedGroup = InterleaveGroups.remove_if([&](auto *Group) {
    if (!Group->requiresScalarEpilogue())
      return false;
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate candidate interleaved group due to gaps that "
           "require a scalar epilogue (not allowed under optsize) and cannot "
           "be masked (not enabled).\n");
    releaseGroupWithoutRemovingFromSet(Group);
    return true;
  });
  assert(ReleasedGroup && "At least one group must be invalidated, as a "
                          "scalar epilogue was required");
  (void)ReleasedGroup;
  RequiresScalarEpilogue = false;
}

template <typename InstT>
void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const {
  llvm_unreachable("addMetadata can only be used for Instruction");
}

namespace llvm {
template <>
void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const {
  SmallVector<Value *, 4> VL;
  std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
                 [](std::pair<int, Instruction *> p) { return p.second; });
  propagateMetadata(NewInst, VL);
}
} // namespace llvm