xref: /llvm-project/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp (revision 6c8f41d3367476d35ac730abf9f980291737193b)
1 //===- VPlanUtils.cpp - VPlan-related utilities ---------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "VPlanUtils.h"
10 #include "VPlanPatternMatch.h"
11 #include "llvm/ADT/TypeSwitch.h"
12 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
13 
14 using namespace llvm;
15 
16 bool vputils::onlyFirstLaneUsed(const VPValue *Def) {
17   return all_of(Def->users(),
18                 [Def](const VPUser *U) { return U->onlyFirstLaneUsed(Def); });
19 }
20 
21 bool vputils::onlyFirstPartUsed(const VPValue *Def) {
22   return all_of(Def->users(),
23                 [Def](const VPUser *U) { return U->onlyFirstPartUsed(Def); });
24 }
25 
26 VPValue *vputils::getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr,
27                                                 ScalarEvolution &SE) {
28   if (auto *Expanded = Plan.getSCEVExpansion(Expr))
29     return Expanded;
30   VPValue *Expanded = nullptr;
31   if (auto *E = dyn_cast<SCEVConstant>(Expr))
32     Expanded = Plan.getOrAddLiveIn(E->getValue());
33   else if (auto *E = dyn_cast<SCEVUnknown>(Expr))
34     Expanded = Plan.getOrAddLiveIn(E->getValue());
35   else {
36     Expanded = new VPExpandSCEVRecipe(Expr, SE);
37     Plan.getEntry()->appendRecipe(Expanded->getDefiningRecipe());
38   }
39   Plan.addSCEVExpansion(Expr, Expanded);
40   return Expanded;
41 }
42 
43 bool vputils::isHeaderMask(const VPValue *V, VPlan &Plan) {
44   if (isa<VPActiveLaneMaskPHIRecipe>(V))
45     return true;
46 
47   auto IsWideCanonicalIV = [](VPValue *A) {
48     return isa<VPWidenCanonicalIVRecipe>(A) ||
49            (isa<VPWidenIntOrFpInductionRecipe>(A) &&
50             cast<VPWidenIntOrFpInductionRecipe>(A)->isCanonical());
51   };
52 
53   VPValue *A, *B;
54   using namespace VPlanPatternMatch;
55 
56   if (match(V, m_ActiveLaneMask(m_VPValue(A), m_VPValue(B))))
57     return B == Plan.getTripCount() &&
58            (match(A, m_ScalarIVSteps(m_CanonicalIV(), m_SpecificInt(1))) ||
59             IsWideCanonicalIV(A));
60 
61   return match(V, m_Binary<Instruction::ICmp>(m_VPValue(A), m_VPValue(B))) &&
62          IsWideCanonicalIV(A) && B == Plan.getOrCreateBackedgeTakenCount();
63 }
64 
65 const SCEV *vputils::getSCEVExprForVPValue(VPValue *V, ScalarEvolution &SE) {
66   if (V->isLiveIn())
67     return SE.getSCEV(V->getLiveInIRValue());
68 
69   // TODO: Support constructing SCEVs for more recipes as needed.
70   return TypeSwitch<const VPRecipeBase *, const SCEV *>(V->getDefiningRecipe())
71       .Case<VPExpandSCEVRecipe>(
72           [](const VPExpandSCEVRecipe *R) { return R->getSCEV(); })
73       .Default([&SE](const VPRecipeBase *) { return SE.getCouldNotCompute(); });
74 }
75 
/// Returns true if \p V is proven uniform across all VF lanes and all UF
/// parts; values are conservatively treated as non-uniform unless one of the
/// cases below applies.
bool vputils::isUniformAcrossVFsAndUFs(VPValue *V) {
  using namespace VPlanPatternMatch;
  // Live-ins are uniform.
  if (V->isLiveIn())
    return true;

  VPRecipeBase *R = V->getDefiningRecipe();
  if (R && V->isDefinedOutsideLoopRegions()) {
    // CanonicalIVIncrementForPart yields a different value for each unroll
    // part, so it is explicitly not uniform across UFs.
    if (match(V->getDefiningRecipe(),
              m_VPInstruction<VPInstruction::CanonicalIVIncrementForPart>(
                  m_VPValue())))
      return false;
    // Other values defined outside loop regions are uniform iff all of
    // their operands are (checked recursively).
    return all_of(R->operands(),
                  [](VPValue *Op) { return isUniformAcrossVFsAndUFs(Op); });
  }

  auto *CanonicalIV = R->getParent()->getPlan()->getCanonicalIV();
  // Canonical IV chain is uniform.
  if (V == CanonicalIV || V == CanonicalIV->getBackedgeValue())
    return true;

  // Dispatch on the defining recipe's kind; anything not listed falls
  // through to the conservative default.
  return TypeSwitch<const VPRecipeBase *, bool>(R)
      .Case<VPDerivedIVRecipe>([](const auto *R) { return true; })
      .Case<VPReplicateRecipe>([](const auto *R) {
        // Loads and stores that are uniform across VF lanes are handled by
        // VPReplicateRecipe.IsUniform. They are also uniform across UF parts if
        // all their operands are invariant.
        // TODO: Further relax the restrictions.
        return R->isUniform() &&
               (isa<LoadInst, StoreInst>(R->getUnderlyingValue())) &&
               all_of(R->operands(),
                      [](VPValue *Op) { return isUniformAcrossVFsAndUFs(Op); });
      })
      .Case<VPScalarCastRecipe, VPWidenCastRecipe>([](const auto *R) {
        // A cast is uniform according to its operand.
        return isUniformAcrossVFsAndUFs(R->getOperand(0));
      })
      .Default([](const VPRecipeBase *) { // A value is considered non-uniform
                                          // unless proven otherwise.
        return false;
      });
}
118