//===- LoopVectorizationLegality.cpp --------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides loop vectorization legality analysis. Original code
// resided in LoopVectorize.cpp for a long time.
//
// At this point, it is implemented as a utility class, not as an analysis
// pass. It should be easy to create an analysis pass around it if there
// is a need (but D45420 needs to happen first).
//

#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorize.h"

using namespace llvm;
using namespace PatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));

static cl::opt<bool>
    AllowStridedPointerIVs("lv-strided-pointer-ivs", cl::init(false), cl::Hidden,
                           cl::desc("Enable recognition of non-constant strided "
                                    "pointer induction variables."));

namespace llvm {
cl::opt<bool>
    HintsAllowReordering("hints-allow-reordering", cl::init(true), cl::Hidden,
                         cl::desc("Allow enabling loop hints to reorder "
                                  "FP operations during vectorization."));
} // namespace llvm

// TODO: Move size-based thresholds out of legality checking, make cost based
// decisions instead of hard thresholds.
static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

static cl::opt<LoopVectorizeHints::ScalableForceKind>
    ForceScalableVectorization(
        "scalable-vectorization", cl::init(LoopVectorizeHints::SK_Unspecified),
        cl::Hidden,
        cl::desc("Control whether the compiler can use scalable vectors to "
                 "vectorize a loop"),
        cl::values(
            clEnumValN(LoopVectorizeHints::SK_FixedWidthOnly, "off",
                       "Scalable vectorization is disabled."),
            clEnumValN(
                LoopVectorizeHints::SK_PreferScalable, "preferred",
                "Scalable vectorization is available and favored when the "
                "cost is inconclusive."),
            clEnumValN(
                LoopVectorizeHints::SK_PreferScalable, "on",
                "Scalable vectorization is available and favored when the "
                "cost is inconclusive.")));

static cl::opt<bool> EnableHistogramVectorization(
    "enable-histogram-loop-vectorization", cl::init(false), cl::Hidden,
    cl::desc("Enables autovectorization of some loops containing histograms"));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;

namespace llvm {

bool LoopVectorizeHints::Hint::validate(unsigned Val) {
  switch (Kind) {
  case HK_WIDTH:
    return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
  case HK_INTERLEAVE:
    return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
  case HK_FORCE:
    return (Val <= 1);
  case HK_ISVECTORIZED:
  case HK_PREDICATE:
  case HK_SCALABLE:
    return (Val == 0 || Val == 1);
  }
  return false;
}

LoopVectorizeHints::LoopVectorizeHints(const Loop *L,
                                       bool InterleaveOnlyWhenForced,
                                       OptimizationRemarkEmitter &ORE,
                                       const TargetTransformInfo *TTI)
    : Width("vectorize.width", VectorizerParams::VectorizationFactor, HK_WIDTH),
      Interleave("interleave.count", InterleaveOnlyWhenForced, HK_INTERLEAVE),
      Force("vectorize.enable", FK_Undefined, HK_FORCE),
      IsVectorized("isvectorized", 0, HK_ISVECTORIZED),
      Predicate("vectorize.predicate.enable", FK_Undefined, HK_PREDICATE),
      Scalable("vectorize.scalable.enable", SK_Unspecified, HK_SCALABLE),
      TheLoop(L), ORE(ORE) {
  // Populate values with existing loop metadata.
  getHintsFromMetadata();

  // force-vector-interleave overrides DisableInterleaving.
  if (VectorizerParams::isInterleaveForced())
    Interleave.Value = VectorizerParams::VectorizationInterleave;

  // If the metadata doesn't explicitly specify whether to enable scalable
  // vectorization, then decide based on the following criteria (increasing
  // level of priority):
  //  - Target default
  //  - Metadata width
  //  - Force option (always overrides)
  if ((LoopVectorizeHints::ScalableForceKind)Scalable.Value == SK_Unspecified) {
    if (TTI)
      Scalable.Value = TTI->enableScalableVectorization() ? SK_PreferScalable
                                                          : SK_FixedWidthOnly;

    if (Width.Value)
      // If the width is set, but the metadata says nothing about the scalable
      // property, then assume it concerns only a fixed-width UserVF.
      // If width is not set, the flag takes precedence.
      Scalable.Value = SK_FixedWidthOnly;
  }

  // If the flag is set to force any use of scalable vectors, override the loop
  // hints.
  if (ForceScalableVectorization.getValue() !=
      LoopVectorizeHints::SK_Unspecified)
    Scalable.Value = ForceScalableVectorization.getValue();

  // Scalable vectorization is disabled if no preference is specified.
  if ((LoopVectorizeHints::ScalableForceKind)Scalable.Value == SK_Unspecified)
    Scalable.Value = SK_FixedWidthOnly;

  if (IsVectorized.Value != 1)
    // If the vectorization width and interleaving count are both 1 then
    // consider the loop to have been already vectorized because there's
    // nothing more that we can do.
    IsVectorized.Value =
        getWidth() == ElementCount::getFixed(1) && getInterleave() == 1;
  LLVM_DEBUG(if (InterleaveOnlyWhenForced && getInterleave() == 1) dbgs()
             << "LV: Interleaving disabled by the pass manager\n");
}

void LoopVectorizeHints::setAlreadyVectorized() {
  LLVMContext &Context = TheLoop->getHeader()->getContext();

  MDNode *IsVectorizedMD = MDNode::get(
      Context,
      {MDString::get(Context, "llvm.loop.isvectorized"),
       ConstantAsMetadata::get(ConstantInt::get(Context, APInt(32, 1)))});
  MDNode *LoopID = TheLoop->getLoopID();
  MDNode *NewLoopID =
      makePostTransformationMetadata(Context, LoopID,
                                     {Twine(Prefix(), "vectorize.").str(),
                                      Twine(Prefix(), "interleave.").str()},
                                     {IsVectorizedMD});
  TheLoop->setLoopID(NewLoopID);

  // Update internal cache.
  IsVectorized.Value = 1;
}

bool LoopVectorizeHints::allowVectorization(
    Function *F, Loop *L, bool VectorizeOnlyWhenForced) const {
  if (getForce() == LoopVectorizeHints::FK_Disabled) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
    emitRemarkWithHints();
    return false;
  }

  if (VectorizeOnlyWhenForced && getForce() != LoopVectorizeHints::FK_Enabled) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
    emitRemarkWithHints();
    return false;
  }

  if (getIsVectorized() == 1) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
    // FIXME: Add interleave.disable metadata. This will allow
    // vectorize.disable to be used without disabling the pass and errors
    // to differentiate between disabled vectorization and a width of 1.
    ORE.emit([&]() {
      return OptimizationRemarkAnalysis(vectorizeAnalysisPassName(),
                                        "AllDisabled", L->getStartLoc(),
                                        L->getHeader())
             << "loop not vectorized: vectorization and interleaving are "
                "explicitly disabled, or the loop has already been "
                "vectorized";
    });
    return false;
  }

  return true;
}

void LoopVectorizeHints::emitRemarkWithHints() const {
  using namespace ore;

  ORE.emit([&]() {
    if (Force.Value == LoopVectorizeHints::FK_Disabled)
      return OptimizationRemarkMissed(LV_NAME, "MissedExplicitlyDisabled",
                                      TheLoop->getStartLoc(),
                                      TheLoop->getHeader())
             << "loop not vectorized: vectorization is explicitly disabled";

    OptimizationRemarkMissed R(LV_NAME, "MissedDetails", TheLoop->getStartLoc(),
                               TheLoop->getHeader());
    R << "loop not vectorized";
    if (Force.Value == LoopVectorizeHints::FK_Enabled) {
      R << " (Force=" << NV("Force", true);
      if (Width.Value != 0)
        R << ", Vector Width=" << NV("VectorWidth", getWidth());
      if (getInterleave() != 0)
        R << ", Interleave Count=" << NV("InterleaveCount", getInterleave());
      R << ")";
    }
    return R;
  });
}

const char *LoopVectorizeHints::vectorizeAnalysisPassName() const {
  if (getWidth() == ElementCount::getFixed(1))
    return LV_NAME;
  if (getForce() == LoopVectorizeHints::FK_Disabled)
    return LV_NAME;
  if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth().isZero())
    return LV_NAME;
  return OptimizationRemarkAnalysis::AlwaysPrint;
}

bool LoopVectorizeHints::allowReordering() const {
  // Allow the vectorizer to change the order of operations if enabling
  // loop hints are provided.
  ElementCount EC = getWidth();
  return HintsAllowReordering &&
         (getForce() == LoopVectorizeHints::FK_Enabled ||
          EC.getKnownMinValue() > 1);
}

void LoopVectorizeHints::getHintsFromMetadata() {
  MDNode *LoopID = TheLoop->getLoopID();
  if (!LoopID)
    return;

  // First operand should refer to the loop id itself.
  assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
  assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

  for (const MDOperand &MDO : llvm::drop_begin(LoopID->operands())) {
    const MDString *S = nullptr;
    SmallVector<Metadata *, 4> Args;

    // The expected hint is either an MDString or an MDNode with the first
    // operand an MDString.
    if (const MDNode *MD = dyn_cast<MDNode>(MDO)) {
      if (!MD || MD->getNumOperands() == 0)
        continue;
      S = dyn_cast<MDString>(MD->getOperand(0));
      for (unsigned Idx = 1; Idx < MD->getNumOperands(); ++Idx)
        Args.push_back(MD->getOperand(Idx));
    } else {
      S = dyn_cast<MDString>(MDO);
      assert(Args.size() == 0 && "too many arguments for MDString");
    }

    if (!S)
      continue;

    // Check if the hint starts with the loop metadata prefix.
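    // For example, the loop metadata
    //   !0 = distinct !{!0, !1}
    //   !1 = !{!"llvm.loop.vectorize.width", i32 8}
    // yields a hint named "llvm.loop.vectorize.width" with a single argument.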
    StringRef Name = S->getString();
    if (Args.size() == 1)
      setHint(Name, Args[0]);
  }
}

void LoopVectorizeHints::setHint(StringRef Name, Metadata *Arg) {
  if (!Name.starts_with(Prefix()))
    return;
  Name = Name.substr(Prefix().size(), StringRef::npos);

  const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
  if (!C)
    return;
  unsigned Val = C->getZExtValue();

  Hint *Hints[] = {&Width, &Interleave, &Force,
                   &IsVectorized, &Predicate, &Scalable};
  for (auto *H : Hints) {
    if (Name == H->Name) {
      if (H->validate(Val))
        H->Value = Val;
      else
        LLVM_DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
      break;
    }
  }
}

// Return true if the inner loop \p Lp is uniform with regard to the outer loop
// \p OuterLp (i.e., if the outer loop is vectorized, all the vector lanes
// executing the inner loop will execute the same iterations). This check is
// very constrained for now but it will be relaxed in the future. \p Lp is
// considered uniform if it meets all the following conditions:
//   1) it has a canonical IV (starting from 0 and with stride 1),
//   2) its latch terminator is a conditional branch and,
//   3) its latch condition is a compare instruction whose operands are the
//      canonical IV and an OuterLp invariant.
// This check doesn't take into account the uniformity of other conditions not
// related to the loop latch because they don't affect the loop uniformity.
//
// NOTE: We decided to keep all these checks and their associated documentation
// together so that we can easily have a picture of the currently supported loop
// nests. However, some of the current checks don't depend on \p OuterLp and
// would be redundantly executed for each \p Lp if we invoked this function for
// different candidate outer loops. This is not the case for now because we
// don't currently have the infrastructure to evaluate multiple candidate outer
// loops and \p OuterLp will be a fixed parameter while we only support explicit
// outer loop vectorization. It's also very likely that these checks go away
// before introducing the aforementioned infrastructure. However, if this is not
// the case, we should move the \p OuterLp independent checks to a separate
// function that is only executed once for each \p Lp.
static bool isUniformLoop(Loop *Lp, Loop *OuterLp) {
  assert(Lp->getLoopLatch() && "Expected loop with a single latch.");

  // If Lp is the outer loop, it's uniform by definition.
  if (Lp == OuterLp)
    return true;
  assert(OuterLp->contains(Lp) && "OuterLp must contain Lp.");

  // 1.
  PHINode *IV = Lp->getCanonicalInductionVariable();
  if (!IV) {
    LLVM_DEBUG(dbgs() << "LV: Canonical IV not found.\n");
    return false;
  }

  // 2.
  BasicBlock *Latch = Lp->getLoopLatch();
  auto *LatchBr = dyn_cast<BranchInst>(Latch->getTerminator());
  if (!LatchBr || LatchBr->isUnconditional()) {
    LLVM_DEBUG(dbgs() << "LV: Unsupported loop latch branch.\n");
    return false;
  }

  // 3.
  auto *LatchCmp = dyn_cast<CmpInst>(LatchBr->getCondition());
  if (!LatchCmp) {
    LLVM_DEBUG(
        dbgs() << "LV: Loop latch condition is not a compare instruction.\n");
    return false;
  }

  Value *CondOp0 = LatchCmp->getOperand(0);
  Value *CondOp1 = LatchCmp->getOperand(1);
  Value *IVUpdate = IV->getIncomingValueForBlock(Latch);
  if (!(CondOp0 == IVUpdate && OuterLp->isLoopInvariant(CondOp1)) &&
      !(CondOp1 == IVUpdate && OuterLp->isLoopInvariant(CondOp0))) {
    LLVM_DEBUG(dbgs() << "LV: Loop latch condition is not uniform.\n");
    return false;
  }

  return true;
}

// Return true if \p Lp and all its nested loops are uniform with regard to \p
// OuterLp.
static bool isUniformLoopNest(Loop *Lp, Loop *OuterLp) {
  if (!isUniformLoop(Lp, OuterLp))
    return false;

  // Check if nested loops are uniform.
  for (Loop *SubLp : *Lp)
    if (!isUniformLoopNest(SubLp, OuterLp))
      return false;

  return true;
}

static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
  if (Ty->isPointerTy())
    return DL.getIntPtrType(Ty);

  // It is possible that char's or short's overflow when we ask for the loop's
  // trip count, work around this by changing the type size.
  if (Ty->getScalarSizeInBits() < 32)
    return Type::getInt32Ty(Ty->getContext());

  return Ty;
}

static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
  Ty0 = convertPointerToIntegerType(DL, Ty0);
  Ty1 = convertPointerToIntegerType(DL, Ty1);
  if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
    return Ty0;
  return Ty1;
}

/// Check that the instruction has outside loop users and is not an
/// identified reduction variable.
static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
                               SmallPtrSetImpl<Value *> &AllowedExit) {
  // Reductions, Inductions and non-header phis are allowed to have exit users.
  // All other instructions must not have external users.
  if (!AllowedExit.count(Inst))
    // Check that all of the users of the loop are inside the BB.
    for (User *U : Inst->users()) {
      Instruction *UI = cast<Instruction>(U);
      // This user may be a reduction exit value.
      if (!TheLoop->contains(UI)) {
        LLVM_DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
        return true;
      }
    }
  return false;
}

/// Returns true if A and B have the same pointer operand or the same SCEV for
/// their addresses.
static bool storeToSameAddress(ScalarEvolution *SE, StoreInst *A,
                               StoreInst *B) {
  // Compare store
  if (A == B)
    return true;

  // Otherwise Compare pointers
  Value *APtr = A->getPointerOperand();
  Value *BPtr = B->getPointerOperand();
  if (APtr == BPtr)
    return true;

  // Otherwise compare address SCEVs
  return SE->getSCEV(APtr) == SE->getSCEV(BPtr);
}

int LoopVectorizationLegality::isConsecutivePtr(Type *AccessTy,
                                                Value *Ptr) const {
  // FIXME: Currently, the set of symbolic strides is sometimes queried before
  // it's collected. This happens from canVectorizeWithIfConvert, when the
  // pointer is checked to reference consecutive elements suitable for a
  // masked access.
  const auto &Strides =
      LAI ? LAI->getSymbolicStrides() : DenseMap<Value *, const SCEV *>();

  bool CanAddPredicate = !llvm::shouldOptimizeForSize(
      TheLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  int Stride = getPtrStride(PSE, AccessTy, Ptr, TheLoop, Strides,
                            CanAddPredicate, false).value_or(0);
  if (Stride == 1 || Stride == -1)
    return Stride;
  return 0;
}

bool LoopVectorizationLegality::isInvariant(Value *V) const {
  return LAI->isInvariant(V);
}

namespace {
/// A rewriter to build the SCEVs for each of the VF lanes in the expected
/// vectorized loop, which can then be compared to detect their uniformity.
/// This is done by replacing the AddRec SCEVs of the original scalar loop
/// (TheLoop) with new AddRecs where the step is multiplied by StepMultiplier
/// and Offset * Step is added. Also checks if all sub-expressions are
/// analyzable w.r.t. uniformity.
class SCEVAddRecForUniformityRewriter
    : public SCEVRewriteVisitor<SCEVAddRecForUniformityRewriter> {
  /// Multiplier to be applied to the step of AddRecs in TheLoop.
  unsigned StepMultiplier;

  /// Offset to be added to the AddRecs in TheLoop.
  unsigned Offset;

  /// Loop for which to rewrite AddRecs.
  Loop *TheLoop;

  /// Is any sub-expression not analyzable w.r.t. uniformity?
  bool CannotAnalyze = false;

  bool canAnalyze() const { return !CannotAnalyze; }

public:
  SCEVAddRecForUniformityRewriter(ScalarEvolution &SE, unsigned StepMultiplier,
                                  unsigned Offset, Loop *TheLoop)
      : SCEVRewriteVisitor(SE), StepMultiplier(StepMultiplier), Offset(Offset),
        TheLoop(TheLoop) {}

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    assert(Expr->getLoop() == TheLoop &&
           "addrec outside of TheLoop must be invariant and should have been "
           "handled earlier");
    // Build a new AddRec by multiplying the step by StepMultiplier and
    // incrementing the start by Offset * step.
    Type *Ty = Expr->getType();
    const SCEV *Step = Expr->getStepRecurrence(SE);
    if (!SE.isLoopInvariant(Step, TheLoop)) {
      CannotAnalyze = true;
      return Expr;
    }
    const SCEV *NewStep =
        SE.getMulExpr(Step, SE.getConstant(Ty, StepMultiplier));
    const SCEV *ScaledOffset = SE.getMulExpr(Step, SE.getConstant(Ty, Offset));
    const SCEV *NewStart = SE.getAddExpr(Expr->getStart(), ScaledOffset);
    return SE.getAddRecExpr(NewStart, NewStep, TheLoop, SCEV::FlagAnyWrap);
  }

  const SCEV *visit(const SCEV *S) {
    if (CannotAnalyze || SE.isLoopInvariant(S, TheLoop))
      return S;
    return SCEVRewriteVisitor<SCEVAddRecForUniformityRewriter>::visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *S) {
    if (SE.isLoopInvariant(S, TheLoop))
      return S;
    // The value could vary across iterations.
    CannotAnalyze = true;
    return S;
  }

  const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *S) {
    // Could not analyze the expression.
    CannotAnalyze = true;
    return S;
  }

  static const SCEV *rewrite(const SCEV *S, ScalarEvolution &SE,
                             unsigned StepMultiplier, unsigned Offset,
                             Loop *TheLoop) {
    /// Bail out if the expression does not contain an UDiv expression.
    /// Uniform values which are not loop invariant require operations to strip
    /// out the lowest bits. For now just look for UDivs and use it to avoid
    /// re-writing UDIV-free expressions for other lanes to limit compile time.
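    // Note: with StepMultiplier == VF and Offset == Lane, a loop-varying
    // AddRec {Start,+,Step} is rewritten to {Start + Lane*Step,+,VF*Step}
    // (see visitAddRecExpr above); the caller treats a value as uniform for
    // VF when the rewritten expressions agree for all lanes.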
    if (!SCEVExprContains(S,
                          [](const SCEV *S) { return isa<SCEVUDivExpr>(S); }))
      return SE.getCouldNotCompute();

    SCEVAddRecForUniformityRewriter Rewriter(SE, StepMultiplier, Offset,
                                             TheLoop);
    const SCEV *Result = Rewriter.visit(S);

    if (Rewriter.canAnalyze())
      return Result;
    return SE.getCouldNotCompute();
  }
};

} // namespace

bool LoopVectorizationLegality::isUniform(Value *V, ElementCount VF) const {
  if (isInvariant(V))
    return true;
  if (VF.isScalable())
    return false;
  if (VF.isScalar())
    return true;

  // Since we rely on SCEV for uniformity, if the type is not SCEVable, it is
  // never considered uniform.
  auto *SE = PSE.getSE();
  if (!SE->isSCEVable(V->getType()))
    return false;
  const SCEV *S = SE->getSCEV(V);

  // Rewrite AddRecs in TheLoop to step by VF and check if the expression for
  // lane 0 matches the expressions for all other lanes.
  unsigned FixedVF = VF.getKnownMinValue();
  const SCEV *FirstLaneExpr =
      SCEVAddRecForUniformityRewriter::rewrite(S, *SE, FixedVF, 0, TheLoop);
  if (isa<SCEVCouldNotCompute>(FirstLaneExpr))
    return false;

  // Make sure the expressions for lanes FixedVF-1..1 match the expression for
  // lane 0. We check lanes in reverse order for compile-time, as frequently
  // checking the last lane is sufficient to rule out uniformity.
  return all_of(reverse(seq<unsigned>(1, FixedVF)), [&](unsigned I) {
    const SCEV *IthLaneExpr =
        SCEVAddRecForUniformityRewriter::rewrite(S, *SE, FixedVF, I, TheLoop);
    return FirstLaneExpr == IthLaneExpr;
  });
}

bool LoopVectorizationLegality::isUniformMemOp(Instruction &I,
                                               ElementCount VF) const {
  Value *Ptr = getLoadStorePointerOperand(&I);
  if (!Ptr)
    return false;
  // Note: There's nothing inherent which prevents predicated loads and
  // stores from being uniform. The current lowering simply doesn't handle
  // it; in particular, the cost model distinguishes scatter/gather from
  // scalar w/predication, and we currently rely on the scalar path.
  return isUniform(Ptr, VF) && !blockNeedsPredication(I.getParent());
}

bool LoopVectorizationLegality::canVectorizeOuterLoop() {
  assert(!TheLoop->isInnermost() && "We are not vectorizing an outer loop.");
  // Store the result and return it at the end instead of exiting early, in
  // case allowExtraAnalysis is used to report multiple reasons for not
  // vectorizing.
  bool Result = true;
  bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE);

  for (BasicBlock *BB : TheLoop->blocks()) {
    // Check whether the BB terminator is a BranchInst. Any other terminator is
    // not supported yet.
    auto *Br = dyn_cast<BranchInst>(BB->getTerminator());
    if (!Br) {
      reportVectorizationFailure("Unsupported basic block terminator",
                                 "loop control flow is not understood by vectorizer",
                                 "CFGNotUnderstood", ORE, TheLoop);
      if (DoExtraAnalysis)
        Result = false;
      else
        return false;
    }

    // Check whether the BranchInst is a supported one. Only unconditional
    // branches, conditional branches with an outer loop invariant condition or
    // backedges are supported.
    // FIXME: We skip these checks when VPlan predication is enabled as we
    // want to allow divergent branches. This whole check will be removed
    // once VPlan predication is on by default.
    if (Br && Br->isConditional() &&
        !TheLoop->isLoopInvariant(Br->getCondition()) &&
        !LI->isLoopHeader(Br->getSuccessor(0)) &&
        !LI->isLoopHeader(Br->getSuccessor(1))) {
      reportVectorizationFailure("Unsupported conditional branch",
                                 "loop control flow is not understood by vectorizer",
                                 "CFGNotUnderstood", ORE, TheLoop);
      if (DoExtraAnalysis)
        Result = false;
      else
        return false;
    }
  }

  // Check whether inner loops are uniform. At this point, we only support
  // simple outer loop scenarios with uniform nested loops.
  if (!isUniformLoopNest(TheLoop /*loop nest*/,
                         TheLoop /*context outer loop*/)) {
    reportVectorizationFailure("Outer loop contains divergent loops",
                               "loop control flow is not understood by vectorizer",
                               "CFGNotUnderstood", ORE, TheLoop);
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // Check whether we are able to set up outer loop induction.
  if (!setupOuterLoopInductions()) {
    reportVectorizationFailure("Unsupported outer loop Phi(s)",
                               "UnsupportedPhi", ORE, TheLoop);
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  return Result;
}

void LoopVectorizationLegality::addInductionPhi(
    PHINode *Phi, const InductionDescriptor &ID,
    SmallPtrSetImpl<Value *> &AllowedExit) {
  Inductions[Phi] = ID;

  // In case this induction also comes with casts that we know we can ignore
  // in the vectorized loop body, record them here. All casts could be recorded
  // here for ignoring, but it suffices to record only the first (as it is the
  // only one that may be used outside the cast sequence).
  const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
  if (!Casts.empty())
    InductionCastsToIgnore.insert(*Casts.begin());

  Type *PhiTy = Phi->getType();
  const DataLayout &DL = Phi->getDataLayout();

  // Get the widest type.
  if (!PhiTy->isFloatingPointTy()) {
    if (!WidestIndTy)
      WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
    else
      WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
  }

  // Int inductions are special because we only allow one IV.
  if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
      ID.getConstIntStepValue() && ID.getConstIntStepValue()->isOne() &&
      isa<Constant>(ID.getStartValue()) &&
      cast<Constant>(ID.getStartValue())->isNullValue()) {

    // Use the phi node with the widest type as induction. Use the last
    // one if there are multiple (no good reason for doing this other
    // than it is expedient). We've checked that it begins at zero and
    // steps by one, so this is a canonical induction variable.
    if (!PrimaryInduction || PhiTy == WidestIndTy)
      PrimaryInduction = Phi;
  }

  // Both the PHI node itself, and the "post-increment" value feeding
  // back into the PHI node may have external users.
  // We can allow those uses, except if the SCEVs we have for them rely
  // on predicates that only hold within the loop, since allowing the exit
  // currently means re-using this SCEV outside the loop (see PR33706 for more
  // details).
  if (PSE.getPredicate().isAlwaysTrue()) {
    AllowedExit.insert(Phi);
    AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));
  }

  LLVM_DEBUG(dbgs() << "LV: Found an induction variable.\n");
}

bool LoopVectorizationLegality::setupOuterLoopInductions() {
  BasicBlock *Header = TheLoop->getHeader();

  // Returns true if a given Phi is a supported induction.
  auto IsSupportedPhi = [&](PHINode &Phi) -> bool {
    InductionDescriptor ID;
    if (InductionDescriptor::isInductionPHI(&Phi, TheLoop, PSE, ID) &&
        ID.getKind() == InductionDescriptor::IK_IntInduction) {
      addInductionPhi(&Phi, ID, AllowedExit);
      return true;
    }
    // Bail out for any Phi in the outer loop header that is not a supported
    // induction.
    LLVM_DEBUG(
        dbgs() << "LV: Found unsupported PHI for outer loop vectorization.\n");
    return false;
  };

  return llvm::all_of(Header->phis(), IsSupportedPhi);
}

/// Checks if a function is scalarizable according to the TLI, in
/// the sense that it should be vectorized and then expanded in
/// multiple scalar calls. This is represented in the
/// TLI via mappings that do not specify a vector name, as in the
/// following example:
///
///    const VecDesc VecIntrinsics[] = {
///      {"llvm.phx.abs.i32", "", 4}
///    };
static bool isTLIScalarize(const TargetLibraryInfo &TLI, const CallInst &CI) {
  const StringRef ScalarName = CI.getCalledFunction()->getName();
  bool Scalarize = TLI.isFunctionVectorizable(ScalarName);
  // Check that all known VFs are not associated with a vector
  // function, i.e. the vector name is empty.
  if (Scalarize) {
    ElementCount WidestFixedVF, WidestScalableVF;
    TLI.getWidestVF(ScalarName, WidestFixedVF, WidestScalableVF);
    for (ElementCount VF = ElementCount::getFixed(2);
         ElementCount::isKnownLE(VF, WidestFixedVF); VF *= 2)
      Scalarize &= !TLI.isFunctionVectorizable(ScalarName, VF);
    for (ElementCount VF = ElementCount::getScalable(1);
         ElementCount::isKnownLE(VF, WidestScalableVF); VF *= 2)
      Scalarize &= !TLI.isFunctionVectorizable(ScalarName, VF);
    assert((WidestScalableVF.isZero() || !Scalarize) &&
           "Caller may decide to scalarize a variant using a scalable VF");
  }
  return Scalarize;
}

/// Returns true if the call return type `Ty` can be widened by the loop
/// vectorizer.
static bool canWidenCallReturnType(Type *Ty) {
  auto *StructTy = dyn_cast<StructType>(Ty);
  // TODO: Remove the homogeneous types restriction. This is just an initial
  // simplification. When we want to support things like the overflow
  // intrinsics we will have to lift this restriction.
  if (StructTy && !StructTy->containsHomogeneousTypes())
    return false;
  return canVectorizeTy(StructTy);
}

bool LoopVectorizationLegality::canVectorizeInstrs() {
  BasicBlock *Header = TheLoop->getHeader();

  // For each block in the loop.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // Scan the instructions in the block and look for hazards.
    for (Instruction &I : *BB) {
      if (auto *Phi = dyn_cast<PHINode>(&I)) {
        Type *PhiTy = Phi->getType();
        // Check that this PHI type is allowed.
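        // (e.g. PHIs of vector or aggregate type fail the check below.)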
        if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
            !PhiTy->isPointerTy()) {
          reportVectorizationFailure("Found a non-int non-pointer PHI",
                                     "loop control flow is not understood by vectorizer",
                                     "CFGNotUnderstood", ORE, TheLoop);
          return false;
        }

        // If this PHINode is not in the header block, then we know that we
        // can convert it to select during if-conversion. No need to check if
        // the PHIs in this block are induction or reduction variables.
        if (BB != Header) {
          // Non-header phi nodes that have outside uses can be vectorized. Add
          // them to the list of allowed exits.
          // Unsafe cyclic dependencies with header phis are identified during
          // legalization for reduction, induction and fixed order
          // recurrences.
          AllowedExit.insert(&I);
          continue;
        }

        // We only allow if-converted PHIs with exactly two incoming values.
        if (Phi->getNumIncomingValues() != 2) {
          reportVectorizationFailure("Found an invalid PHI",
                                     "loop control flow is not understood by vectorizer",
                                     "CFGNotUnderstood", ORE, TheLoop, Phi);
          return false;
        }

        RecurrenceDescriptor RedDes;
        if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes, DB, AC,
                                                 DT, PSE.getSE())) {
          Requirements->addExactFPMathInst(RedDes.getExactFPMathInst());
          AllowedExit.insert(RedDes.getLoopExitInstr());
          Reductions[Phi] = RedDes;
          continue;
        }

        // We prevent matching non-constant strided pointer IVs to preserve
        // historical vectorizer behavior after a generalization of the
        // IVDescriptor code. The intent is to remove this check, but we
        // have to fix issues around code quality for such loops first.
        auto IsDisallowedStridedPointerInduction =
            [](const InductionDescriptor &ID) {
              if (AllowStridedPointerIVs)
                return false;
              return ID.getKind() == InductionDescriptor::IK_PtrInduction &&
                     ID.getConstIntStepValue() == nullptr;
            };

        // TODO: Instead of recording the AllowedExit, it would be good to
        // record the complementary set: NotAllowedExit. These include (but may
        // not be limited to):
        // 1. Reduction phis as they represent the one-before-last value, which
        //    is not available when vectorized
        // 2. Induction phis and increment when SCEV predicates cannot be used
        //    outside the loop - see addInductionPhi
        // 3. Non-Phis with outside uses when SCEV predicates cannot be used
        //    outside the loop - see call to hasOutsideLoopUser in the non-phi
        //    handling below
        // 4. FixedOrderRecurrence phis that can possibly be handled by
        //    extraction.
        // By recording these, we can then reason about ways to vectorize each
        // of these NotAllowedExit.
        InductionDescriptor ID;
        if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID) &&
            !IsDisallowedStridedPointerInduction(ID)) {
          addInductionPhi(Phi, ID, AllowedExit);
          Requirements->addExactFPMathInst(ID.getExactFPMathInst());
          continue;
        }

        if (RecurrenceDescriptor::isFixedOrderRecurrence(Phi, TheLoop, DT)) {
          AllowedExit.insert(Phi);
          FixedOrderRecurrences.insert(Phi);
          continue;
        }

        // As a last resort, coerce the PHI to an AddRec expression
        // and re-try classifying it as an induction PHI.
        if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true) &&
            !IsDisallowedStridedPointerInduction(ID)) {
          addInductionPhi(Phi, ID, AllowedExit);
          continue;
        }

        reportVectorizationFailure("Found an unidentified PHI",
                                   "value that could not be identified as "
                                   "reduction is used outside the loop",
                                   "NonReductionValueUsedOutsideLoop", ORE,
                                   TheLoop, Phi);
        return false;
      } // end of PHI handling

      // We handle calls that:
      //   * Are debug info intrinsics.
      //   * Have a mapping to an IR intrinsic.
      //   * Have a vector version available.
      auto *CI = dyn_cast<CallInst>(&I);

      if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
          !isa<DbgInfoIntrinsic>(CI) &&
          !(CI->getCalledFunction() && TLI &&
            (!VFDatabase::getMappings(*CI).empty() ||
             isTLIScalarize(*TLI, *CI)))) {
        // If the call is a recognized math library call, it is likely that
        // we can vectorize it given loosened floating-point constraints.
        LibFunc Func;
        bool IsMathLibCall =
            TLI && CI->getCalledFunction() &&
            CI->getType()->isFloatingPointTy() &&
            TLI->getLibFunc(CI->getCalledFunction()->getName(), Func) &&
            TLI->hasOptimizedCodeGen(Func);

        if (IsMathLibCall) {
          // TODO: Ideally, we should not use clang-specific language here,
          // but it's hard to provide meaningful yet generic advice.
          // Also, should this be guarded by allowExtraAnalysis() and/or be
          // part of the returned info from isFunctionVectorizable()?
          reportVectorizationFailure(
              "Found a non-intrinsic callsite",
              "library call cannot be vectorized. "
              "Try compiling with -fno-math-errno, -ffast-math, "
              "or similar flags",
              "CantVectorizeLibcall", ORE, TheLoop, CI);
        } else {
          reportVectorizationFailure("Found a non-intrinsic callsite",
                                     "call instruction cannot be vectorized",
                                     "CantVectorizeLibcall", ORE, TheLoop, CI);
        }
        return false;
      }

      // Some intrinsics have scalar arguments, which must remain loop
      // invariant (i.e. stay scalar) for the call to be vectorized.
      if (CI) {
        auto *SE = PSE.getSE();
        Intrinsic::ID IntrinID = getVectorIntrinsicIDForCall(CI, TLI);
        for (unsigned Idx = 0; Idx < CI->arg_size(); ++Idx)
          if (isVectorIntrinsicWithScalarOpAtArg(IntrinID, Idx, TTI)) {
            if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(Idx)),
                                     TheLoop)) {
              reportVectorizationFailure("Found unvectorizable intrinsic",
                                         "intrinsic instruction cannot be vectorized",
                                         "CantVectorizeIntrinsic", ORE, TheLoop, CI);
              return false;
            }
          }
      }

      // If we found a vectorized variant of a function, note that so LV can
      // make better decisions about maximum VF.
      if (CI && !VFDatabase::getMappings(*CI).empty())
        VecCallVariantsFound = true;

      auto CanWidenInstructionTy = [this](Instruction const &Inst) {
        Type *InstTy = Inst.getType();
        if (!isa<StructType>(InstTy))
          return canVectorizeTy(InstTy);

        // For now, we only recognize struct values returned from calls where
        // all users are extractvalue as vectorizable. All element types of the
        // struct must be types that can be widened.
        if (isa<CallInst>(Inst) && canWidenCallReturnType(InstTy) &&
            all_of(Inst.users(), IsaPred<ExtractValueInst>)) {
          // TODO: Remove the `StructVecCallFound` flag once vectorizing calls
          // with struct returns is supported.
          StructVecCallFound = true;
          return true;
        }

        return false;
      };

      // Check that the instruction return type is vectorizable.
      // We can't vectorize casts from vector type to scalar type.
      // Also, we can't vectorize extractelement instructions.
      if (!CanWidenInstructionTy(I) ||
          (isa<CastInst>(I) &&
           !VectorType::isValidElementType(I.getOperand(0)->getType())) ||
          isa<ExtractElementInst>(I)) {
        reportVectorizationFailure("Found unvectorizable type",
                                   "instruction return type cannot be vectorized",
                                   "CantVectorizeInstructionReturnType", ORE,
                                   TheLoop, &I);
        return false;
      }

      // Check that the stored type is vectorizable.
      if (auto *ST = dyn_cast<StoreInst>(&I)) {
        Type *T = ST->getValueOperand()->getType();
        if (!VectorType::isValidElementType(T)) {
          reportVectorizationFailure("Store instruction cannot be vectorized",
                                     "CantVectorizeStore", ORE, TheLoop, ST);
          return false;
        }

        // For nontemporal stores, check that a nontemporal vector version is
        // supported on the target.
        if (ST->getMetadata(LLVMContext::MD_nontemporal)) {
          // Arbitrarily try a vector of 2 elements.
          auto *VecTy = FixedVectorType::get(T, /*NumElts=*/2);
          assert(VecTy && "did not find vectorized version of stored type");
          if (!TTI->isLegalNTStore(VecTy, ST->getAlign())) {
            reportVectorizationFailure(
                "nontemporal store instruction cannot be vectorized",
                "CantVectorizeNontemporalStore", ORE, TheLoop, ST);
            return false;
          }
        }

      } else if (auto *LD = dyn_cast<LoadInst>(&I)) {
        if (LD->getMetadata(LLVMContext::MD_nontemporal)) {
          // For nontemporal loads, check that a nontemporal vector version is
          // supported on the target (arbitrarily try a vector of 2 elements).
          auto *VecTy = FixedVectorType::get(I.getType(), /*NumElts=*/2);
          assert(VecTy && "did not find vectorized version of load type");
          if (!TTI->isLegalNTLoad(VecTy, LD->getAlign())) {
            reportVectorizationFailure(
                "nontemporal load instruction cannot be vectorized",
                "CantVectorizeNontemporalLoad", ORE, TheLoop, LD);
            return false;
          }
        }

        // FP instructions can allow unsafe algebra, thus vectorizable by
        // non-IEEE-754 compliant SIMD units.
        // This applies to floating-point math operations and calls, not memory
        // operations, shuffles, or casts, as they don't change precision or
        // semantics.
      } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
                 !I.isFast()) {
        LLVM_DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
        Hints->setPotentiallyUnsafe();
      }

      // Reduction instructions are allowed to have exit users.
      // All other instructions must not have external users.
      if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) {
        // We can safely vectorize loops where instructions within the loop are
        // used outside the loop only if the SCEV predicates within the loop
        // are the same as outside the loop. Allowing the exit means reusing
        // the SCEV outside the loop.
        if (PSE.getPredicate().isAlwaysTrue()) {
          AllowedExit.insert(&I);
          continue;
        }
        reportVectorizationFailure("Value cannot be used outside the loop",
                                   "ValueUsedOutsideLoop", ORE, TheLoop, &I);
        return false;
      }
    } // next instr.
  }

  if (!PrimaryInduction) {
    if (Inductions.empty()) {
      reportVectorizationFailure("Did not find one integer induction var",
                                 "loop induction variable could not be identified",
                                 "NoInductionVariable", ORE, TheLoop);
      return false;
    }
    if (!WidestIndTy) {
      reportVectorizationFailure("Did not find one integer induction var",
                                 "integer loop induction variable could not be identified",
                                 "NoIntegerInductionVariable", ORE, TheLoop);
      return false;
    }
    LLVM_DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
  }

  // Now we know the widest induction type, check if our found induction
  // is the same size. If it's not, unset it here and InnerLoopVectorizer
  // will create another.
  if (PrimaryInduction && WidestIndTy != PrimaryInduction->getType())
    PrimaryInduction = nullptr;

  return true;
}

/// Find histogram operations that match high-level code in loops:
/// \code
/// buckets[indices[i]] += step;
/// \endcode
///
/// It matches a pattern starting from \p HSt, which Stores to the 'buckets'
/// array the computed histogram. It uses a BinOp to sum all counts, storing
/// them using a loop-variant index Load from the 'indices' input array.
///
/// On successful matches it updates the STATISTIC 'HistogramsDetected',
/// regardless of hardware support. When there is support, it additionally
/// stores the BinOp/Load pairs in \p HistogramCounts, as well as the pointers
/// used to update the histogram in \p HistogramPtrs.
static bool findHistogram(LoadInst *LI, StoreInst *HSt, Loop *TheLoop,
                          const PredicatedScalarEvolution &PSE,
                          SmallVectorImpl<HistogramInfo> &Histograms) {

  // Store value must come from a Binary Operation.
  Instruction *HPtrInstr = nullptr;
  BinaryOperator *HBinOp = nullptr;
  if (!match(HSt, m_Store(m_BinOp(HBinOp), m_Instruction(HPtrInstr))))
    return false;

  // BinOp must be an Add or a Sub modifying the bucket value by a
  // loop invariant amount.
  // FIXME: We assume the loop invariant term is on the RHS.
  //        Fine for an immediate/constant, but maybe not a generic value?
  Value *HIncVal = nullptr;
  if (!match(HBinOp, m_Add(m_Load(m_Specific(HPtrInstr)), m_Value(HIncVal))) &&
      !match(HBinOp, m_Sub(m_Load(m_Specific(HPtrInstr)), m_Value(HIncVal))))
    return false;

  // Make sure the increment value is loop invariant.
  if (!TheLoop->isLoopInvariant(HIncVal))
    return false;

  // The address to store is calculated through a GEP Instruction.
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(HPtrInstr);
  if (!GEP)
    return false;

  // Restrict address calculation to constant indices except for the last term.
  Value *HIdx = nullptr;
  for (Value *Index : GEP->indices()) {
    if (HIdx)
      return false;
    if (!isa<ConstantInt>(Index))
      HIdx = Index;
  }

  if (!HIdx)
    return false;

  // Check that the index is calculated by loading from another array. Ignore
  // any extensions.
  // FIXME: Support indices from other sources than a linear load from memory?
  //        We're currently trying to match an operation looping over an array
  //        of indices, but there could be additional levels of indirection
  //        in place, or possibly some additional calculation to form the index
  //        from the loaded data.
  Value *VPtrVal;
  if (!match(HIdx, m_ZExtOrSExtOrSelf(m_Load(m_Value(VPtrVal)))))
    return false;

  // Make sure the index address varies in this loop, not an outer loop.
  const auto *AR = dyn_cast<SCEVAddRecExpr>(PSE.getSE()->getSCEV(VPtrVal));
  if (!AR || AR->getLoop() != TheLoop)
    return false;

  // Ensure we'll have the same mask by checking that all parts of the
  // histogram (gather load, update, scatter store) are in the same block.
  LoadInst *IndexedLoad = cast<LoadInst>(HBinOp->getOperand(0));
  BasicBlock *LdBB = IndexedLoad->getParent();
  if (LdBB != HBinOp->getParent() || LdBB != HSt->getParent())
    return false;

  LLVM_DEBUG(dbgs() << "LV: Found histogram for: " << *HSt << "\n");

  // Store the operations that make up the histogram.
  Histograms.emplace_back(IndexedLoad, HBinOp, HSt);
  return true;
}

bool LoopVectorizationLegality::canVectorizeIndirectUnsafeDependences() {
  // For now, we only support an IndirectUnsafe dependency that calculates
  // a histogram.
  if (!EnableHistogramVectorization)
    return false;

  // Find a single IndirectUnsafe dependency.
  const MemoryDepChecker::Dependence *IUDep = nullptr;
  const MemoryDepChecker &DepChecker = LAI->getDepChecker();
  const auto *Deps = DepChecker.getDependences();
  // If there were too many dependences, LAA abandons recording them. We can't
  // proceed safely if we don't know what the dependences are.
  if (!Deps)
    return false;

  for (const MemoryDepChecker::Dependence &Dep : *Deps) {
    // Ignore dependencies that are either known to be safe or can be
    // checked at runtime.
    if (MemoryDepChecker::Dependence::isSafeForVectorization(Dep.Type) !=
        MemoryDepChecker::VectorizationSafetyStatus::Unsafe)
      continue;

    // We're only interested in IndirectUnsafe dependencies here, where the
    // address might come from a load from memory. We also only want to handle
    // one such dependency, at least for now.
    if (Dep.Type != MemoryDepChecker::Dependence::IndirectUnsafe || IUDep)
      return false;

    IUDep = &Dep;
  }
  if (!IUDep)
    return false;

  // For now only normal loads and stores are supported.
  LoadInst *LI = dyn_cast<LoadInst>(IUDep->getSource(DepChecker));
  StoreInst *SI = dyn_cast<StoreInst>(IUDep->getDestination(DepChecker));

  if (!LI || !SI)
    return false;

  LLVM_DEBUG(dbgs() << "LV: Checking for a histogram on: " << *SI << "\n");
  return findHistogram(LI, SI, TheLoop, LAI->getPSE(), Histograms);
}

bool LoopVectorizationLegality::canVectorizeMemory() {
  LAI = &LAIs.getInfo(*TheLoop);
  const OptimizationRemarkAnalysis *LAR = LAI->getReport();
  if (LAR) {
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(Hints->vectorizeAnalysisPassName(),
                                        "loop not vectorized: ", *LAR);
    });
  }

  if (!LAI->canVectorizeMemory())
    return canVectorizeIndirectUnsafeDependences();

  if (LAI->hasLoadStoreDependenceInvolvingLoopInvariantAddress()) {
    reportVectorizationFailure("We don't allow storing to uniform addresses",
                               "write to a loop invariant address could not "
                               "be vectorized",
                               "CantVectorizeStoreToLoopInvariantAddress", ORE,
                               TheLoop);
    return false;
  }

  // We can vectorize stores to an invariant address when the final reduction
  // value is guaranteed to be stored at the end of the loop. Also, if the
  // decision to vectorize the loop is made, runtime checks are added to make
  // sure that the invariant address won't alias with any other objects.
  if (!LAI->getStoresToInvariantAddresses().empty()) {
    // For each invariant address, check if the last stored value is
    // unconditional and the address is not calculated inside the loop.
    for (StoreInst *SI : LAI->getStoresToInvariantAddresses()) {
      if (!isInvariantStoreOfReduction(SI))
        continue;

      if (blockNeedsPredication(SI->getParent())) {
        reportVectorizationFailure(
            "We don't allow storing to uniform addresses",
            "write of conditional recurring variant value to a loop "
            "invariant address could not be vectorized",
            "CantVectorizeStoreToLoopInvariantAddress", ORE, TheLoop);
        return false;
      }

      // The invariant address should be defined outside of the loop. The LICM
      // pass usually makes sure this happens, but in rare cases it does not,
      // and we do not want to overcomplicate vectorization to support this
      // case.
      if (Instruction *Ptr = dyn_cast<Instruction>(SI->getPointerOperand())) {
        if (TheLoop->contains(Ptr)) {
          reportVectorizationFailure(
              "Invariant address is calculated inside the loop",
              "write to a loop invariant address could not "
              "be vectorized",
              "CantVectorizeStoreToLoopInvariantAddress", ORE, TheLoop);
          return false;
        }
      }
    }

    if (LAI->hasStoreStoreDependenceInvolvingLoopInvariantAddress()) {
      // For each invariant address, check that its last stored value is the
      // result of one of our reductions.
      //
      // We do not check if a dependence with loads exists because that is
      // already checked via
      // hasLoadStoreDependenceInvolvingLoopInvariantAddress.
      ScalarEvolution *SE = PSE.getSE();
      SmallVector<StoreInst *, 4> UnhandledStores;
      for (StoreInst *SI : LAI->getStoresToInvariantAddresses()) {
        if (isInvariantStoreOfReduction(SI)) {
          // Earlier stores to this address are effectively deadcode.
          // With opaque pointers it is possible for one pointer to be used
          // with different sizes of stored values:
          //    store i32 0, ptr %x
          //    store i8 0, ptr %x
          // The latest store doesn't completely overwrite the first one in the
          // example. That is why we have to make sure that the types of the
          // stored values are the same.
          // TODO: Check that the bitwidth of the unhandled store is smaller
          // than the one that overwrites it and add a test.
          erase_if(UnhandledStores, [SE, SI](StoreInst *I) {
            return storeToSameAddress(SE, SI, I) &&
                   I->getValueOperand()->getType() ==
                       SI->getValueOperand()->getType();
          });
          continue;
        }
        UnhandledStores.push_back(SI);
      }

      bool IsOK = UnhandledStores.empty();
      // TODO: we should also validate against InvariantMemSets.
      if (!IsOK) {
        reportVectorizationFailure(
            "We don't allow storing to uniform addresses",
            "write to a loop invariant address could not "
            "be vectorized",
            "CantVectorizeStoreToLoopInvariantAddress", ORE, TheLoop);
        return false;
      }
    }
  }

  PSE.addPredicate(LAI->getPSE().getPredicate());
  return true;
}

bool LoopVectorizationLegality::canVectorizeFPMath(
    bool EnableStrictReductions) {

  // First check if there is any ExactFP math or if we allow reassociations.
  if (!Requirements->getExactFPInst() || Hints->allowReordering())
    return true;

  // If the above is false, we have ExactFPMath & do not allow reordering.
  // If the EnableStrictReductions flag is set, first check if we have any
  // Exact FP induction vars, which we cannot vectorize.
  if (!EnableStrictReductions ||
      any_of(getInductionVars(), [&](auto &Induction) -> bool {
        InductionDescriptor IndDesc = Induction.second;
        return IndDesc.getExactFPMathInst();
      }))
    return false;

  // We can now only vectorize if all reductions with Exact FP math also
  // have the isOrdered flag set, which indicates that we can move the
  // reduction operations in-loop.
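  // For example, a strict (non-reassociative) fadd reduction can still be
  // vectorized as an in-loop, ordered reduction.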
  return (all_of(getReductionVars(), [&](auto &Reduction) -> bool {
    const RecurrenceDescriptor &RdxDesc = Reduction.second;
    return !RdxDesc.hasExactFPMath() || RdxDesc.isOrdered();
  }));
}

bool LoopVectorizationLegality::isInvariantStoreOfReduction(StoreInst *SI) {
  return any_of(getReductionVars(), [&](auto &Reduction) -> bool {
    const RecurrenceDescriptor &RdxDesc = Reduction.second;
    return RdxDesc.IntermediateStore == SI;
  });
}

bool LoopVectorizationLegality::isInvariantAddressOfReduction(Value *V) {
  return any_of(getReductionVars(), [&](auto &Reduction) -> bool {
    const RecurrenceDescriptor &RdxDesc = Reduction.second;
    if (!RdxDesc.IntermediateStore)
      return false;

    ScalarEvolution *SE = PSE.getSE();
    Value *InvariantAddress = RdxDesc.IntermediateStore->getPointerOperand();
    return V == InvariantAddress ||
           SE->getSCEV(V) == SE->getSCEV(InvariantAddress);
  });
}

bool LoopVectorizationLegality::isInductionPhi(const Value *V) const {
  Value *In0 = const_cast<Value *>(V);
  PHINode *PN = dyn_cast_or_null<PHINode>(In0);
  if (!PN)
    return false;

  return Inductions.count(PN);
}

const InductionDescriptor *
LoopVectorizationLegality::getIntOrFpInductionDescriptor(PHINode *Phi) const {
  if (!isInductionPhi(Phi))
    return nullptr;
  auto &ID = getInductionVars().find(Phi)->second;
  if (ID.getKind() == InductionDescriptor::IK_IntInduction ||
      ID.getKind() == InductionDescriptor::IK_FpInduction)
    return &ID;
  return nullptr;
}

const InductionDescriptor *
LoopVectorizationLegality::getPointerInductionDescriptor(PHINode *Phi) const {
  if (!isInductionPhi(Phi))
    return nullptr;
  auto &ID = getInductionVars().find(Phi)->second;
  if (ID.getKind() == InductionDescriptor::IK_PtrInduction)
    return &ID;
  return nullptr;
}

bool LoopVectorizationLegality::isCastedInductionVariable(
    const Value *V) const {
  auto *Inst = dyn_cast<Instruction>(V);
  return (Inst && InductionCastsToIgnore.count(Inst));
}

bool LoopVectorizationLegality::isInductionVariable(const Value *V) const {
  return isInductionPhi(V) || isCastedInductionVariable(V);
}

bool LoopVectorizationLegality::isFixedOrderRecurrence(
    const PHINode *Phi) const {
  return FixedOrderRecurrences.count(Phi);
}

bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) const {
  // When vectorizing early exits, create predicates for the latch block only.
  // The early exiting block must be a direct predecessor of the latch at the
  // moment.
  BasicBlock *Latch = TheLoop->getLoopLatch();
  if (hasUncountableEarlyExit()) {
    assert(
        is_contained(predecessors(Latch), getUncountableEarlyExitingBlock()) &&
        "Uncountable exiting block must be a direct predecessor of latch");
    return BB == Latch;
  }
  return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
}

bool LoopVectorizationLegality::blockCanBePredicated(
    BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs,
    SmallPtrSetImpl<const Instruction *> &MaskedOp) const {
  for (Instruction &I : *BB) {
    // We can predicate blocks with calls to assume, as long as we drop them in
    // case we flatten the CFG via predication.
    if (match(&I, m_Intrinsic<Intrinsic::assume>())) {
      MaskedOp.insert(&I);
      continue;
    }

    // Do not let llvm.experimental.noalias.scope.decl block the vectorization.
    // TODO: there might be cases that it should block the vectorization. Let's
    // ignore those for now.
    if (isa<NoAliasScopeDeclInst>(&I))
      continue;

    // We can allow masked calls if there's at least one vector variant, even
    // if we end up scalarizing due to the cost model calculations.
    // TODO: Allow other calls if they have appropriate attributes... readonly
    // and argmemonly?
    if (CallInst *CI = dyn_cast<CallInst>(&I))
      if (VFDatabase::hasMaskedVariant(*CI)) {
        MaskedOp.insert(CI);
        continue;
      }

    // Loads are handled via masking (or speculated if safe to do so).
    if (auto *LI = dyn_cast<LoadInst>(&I)) {
      if (!SafePtrs.count(LI->getPointerOperand()))
        MaskedOp.insert(LI);
      continue;
    }

    // Predicated store requires some form of masking:
    // 1) masked store HW instruction,
    // 2) emulation via load-blend-store (only if safe and legal to do so,
    //    be aware of the race conditions), or
    // 3) element-by-element predicate check and scalar store.
    if (auto *SI = dyn_cast<StoreInst>(&I)) {
      MaskedOp.insert(SI);
      continue;
    }

    if (I.mayReadFromMemory() || I.mayWriteToMemory() || I.mayThrow())
      return false;
  }

  return true;
}

bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
  if (!EnableIfConversion) {
    reportVectorizationFailure("If-conversion is disabled",
                               "IfConversionDisabled", ORE, TheLoop);
    return false;
  }

  assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");

  // A list of pointers which are known to be dereferenceable within the scope
  // of the loop body for each iteration of the loop which executes. That is,
  // the memory pointed to can be dereferenced (with the access size implied by
  // the value's type) unconditionally within the loop header without
  // introducing a new fault.
  SmallPtrSet<Value *, 8> SafePointers;

  // Collect safe addresses.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (!blockNeedsPredication(BB)) {
      for (Instruction &I : *BB)
        if (auto *Ptr = getLoadStorePointerOperand(&I))
          SafePointers.insert(Ptr);
      continue;
    }

    // For a block which requires predication, an address may be safe to access
    // in the loop w/o predication if we can prove dereferenceability facts
    // sufficient to ensure it'll never fault within the loop. For the moment,
    // we restrict this to loads; stores are more complicated due to
    // concurrency restrictions.
    ScalarEvolution &SE = *PSE.getSE();
    SmallVector<const SCEVPredicate *, 4> Predicates;
    for (Instruction &I : *BB) {
      LoadInst *LI = dyn_cast<LoadInst>(&I);
      // Pass the Predicates pointer to isDereferenceableAndAlignedInLoop so
      // that it will consider loops that need guarding by SCEV checks. The
      // vectoriser will generate these checks if we decide to vectorise.
      if (LI && !LI->getType()->isVectorTy() && !mustSuppressSpeculation(*LI) &&
          isDereferenceableAndAlignedInLoop(LI, TheLoop, SE, *DT, AC,
                                            &Predicates))
        SafePointers.insert(LI->getPointerOperand());
      Predicates.clear();
    }
  }

  // Collect the blocks that need predication.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // We support only branches and switch statements as terminators inside the
    // loop.
    if (isa<SwitchInst>(BB->getTerminator())) {
      if (TheLoop->isLoopExiting(BB)) {
        reportVectorizationFailure("Loop contains an unsupported switch",
                                   "LoopContainsUnsupportedSwitch", ORE,
                                   TheLoop, BB->getTerminator());
        return false;
      }
    } else if (!isa<BranchInst>(BB->getTerminator())) {
      reportVectorizationFailure("Loop contains an unsupported terminator",
                                 "LoopContainsUnsupportedTerminator", ORE,
                                 TheLoop, BB->getTerminator());
      return false;
    }

    // We must be able to predicate all blocks that need to be predicated.
    if (blockNeedsPredication(BB) &&
        !blockCanBePredicated(BB, SafePointers, MaskedOp)) {
      reportVectorizationFailure(
          "Control flow cannot be substituted for a select", "NoCFGForSelect",
          ORE, TheLoop, BB->getTerminator());
      return false;
    }
  }

  // We can if-convert this loop.
  return true;
}

// Helper function to canVectorizeLoopNestCFG.
bool LoopVectorizationLegality::canVectorizeLoopCFG(Loop *Lp,
                                                    bool UseVPlanNativePath) {
  assert((UseVPlanNativePath || Lp->isInnermost()) &&
         "VPlan-native path is not enabled.");

  // TODO: ORE should be improved to show more accurate information when an
  // outer loop can't be vectorized because a nested loop is not understood or
  // legal. Something like: "outer_loop_location: loop not vectorized:
  // (inner_loop_location) loop control flow is not understood by vectorizer".

  // Store the result and return it at the end instead of exiting early, in case
  // allowExtraAnalysis is used to report multiple reasons for not vectorizing.
  bool Result = true;
  bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE);

  // We must have a loop in canonical form. Loops with indirectbr in them cannot
  // be canonicalized.
  if (!Lp->getLoopPreheader()) {
    reportVectorizationFailure("Loop doesn't have a legal pre-header",
        "loop control flow is not understood by vectorizer",
        "CFGNotUnderstood", ORE, TheLoop);
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // We must have a single backedge.
  if (Lp->getNumBackEdges() != 1) {
    reportVectorizationFailure("The loop must have a single backedge",
        "loop control flow is not understood by vectorizer",
        "CFGNotUnderstood", ORE, TheLoop);
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  return Result;
}

bool LoopVectorizationLegality::canVectorizeLoopNestCFG(
    Loop *Lp, bool UseVPlanNativePath) {
  // Store the result and return it at the end instead of exiting early, in case
  // allowExtraAnalysis is used to report multiple reasons for not vectorizing.
  bool Result = true;
  bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE);
  if (!canVectorizeLoopCFG(Lp, UseVPlanNativePath)) {
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // Recursively check whether the loop control flow of nested loops is
  // understood.
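  // A sub-loop that cannot be handled fails the whole nest; with extra
  // analysis remarks enabled we keep scanning so all reasons get reported.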
  for (Loop *SubLp : *Lp)
    if (!canVectorizeLoopNestCFG(SubLp, UseVPlanNativePath)) {
      if (DoExtraAnalysis)
        Result = false;
      else
        return false;
    }

  return Result;
}

bool LoopVectorizationLegality::isVectorizableEarlyExitLoop() {
  BasicBlock *LatchBB = TheLoop->getLoopLatch();
  if (!LatchBB) {
    reportVectorizationFailure("Loop does not have a latch",
                               "Cannot vectorize early exit loop",
                               "NoLatchEarlyExit", ORE, TheLoop);
    return false;
  }

  if (Reductions.size() || FixedOrderRecurrences.size()) {
    reportVectorizationFailure(
        "Found reductions or recurrences in early-exit loop",
        "Cannot vectorize early exit loop with reductions or recurrences",
        "RecurrencesInEarlyExitLoop", ORE, TheLoop);
    return false;
  }

  SmallVector<BasicBlock *, 8> ExitingBlocks;
  TheLoop->getExitingBlocks(ExitingBlocks);

  // Keep a record of all the exiting blocks.
  SmallVector<const SCEVPredicate *, 4> Predicates;
  std::optional<std::pair<BasicBlock *, BasicBlock *>> SingleUncountableEdge;
  for (BasicBlock *BB : ExitingBlocks) {
    const SCEV *EC =
        PSE.getSE()->getPredicatedExitCount(TheLoop, BB, &Predicates);
    if (isa<SCEVCouldNotCompute>(EC)) {
      SmallVector<BasicBlock *, 2> Succs(successors(BB));
      if (Succs.size() != 2) {
        reportVectorizationFailure(
            "Early exiting block does not have exactly two successors",
            "Incorrect number of successors from early exiting block",
            "EarlyExitTooManySuccessors", ORE, TheLoop);
        return false;
      }

      BasicBlock *ExitBlock;
      if (!TheLoop->contains(Succs[0]))
        ExitBlock = Succs[0];
      else {
        assert(!TheLoop->contains(Succs[1]));
        ExitBlock = Succs[1];
      }

      if (SingleUncountableEdge) {
        reportVectorizationFailure(
            "Loop has too many uncountable exits",
            "Cannot vectorize early exit loop with more than one early exit",
            "TooManyUncountableEarlyExits", ORE, TheLoop);
        return false;
      }

      SingleUncountableEdge = {BB, ExitBlock};
    } else
      CountableExitingBlocks.push_back(BB);
  }
  // We can safely ignore the predicates here because when vectorizing the loop
  // the PredicatedScalarEvolution class will keep track of all predicates
  // for each exiting block anyway. This happens when calling
  // PSE.getSymbolicMaxBackedgeTakenCount() below.
  Predicates.clear();

  if (!SingleUncountableEdge) {
    LLVM_DEBUG(dbgs() << "LV: Could not find any uncountable exits\n");
    return false;
  }

  // The only supported early exit loops so far are ones where the early
  // exiting block is a unique predecessor of the latch block.
  BasicBlock *LatchPredBB = LatchBB->getUniquePredecessor();
  if (LatchPredBB != SingleUncountableEdge->first) {
    reportVectorizationFailure("Early exit is not the latch predecessor",
                               "Cannot vectorize early exit loop",
                               "EarlyExitNotLatchPredecessor", ORE, TheLoop);
    return false;
  }

  // The latch block must have a countable exit.
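  // That is, SCEV must be able to compute an exact (possibly predicated) exit
  // count for the latch; the single uncountable exit found above remains the
  // only exit whose trip count is unknown.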
  if (isa<SCEVCouldNotCompute>(
          PSE.getSE()->getPredicatedExitCount(TheLoop, LatchBB, &Predicates))) {
    reportVectorizationFailure(
        "Cannot determine exact exit count for latch block",
        "Cannot vectorize early exit loop",
        "UnknownLatchExitCountEarlyExitLoop", ORE, TheLoop);
    return false;
  }
  assert(llvm::is_contained(CountableExitingBlocks, LatchBB) &&
         "Latch block not found in list of countable exits!");

  // Check to see if there are instructions that could potentially generate
  // exceptions or have side-effects.
  auto IsSafeOperation = [](Instruction *I) -> bool {
    switch (I->getOpcode()) {
    case Instruction::Load:
    case Instruction::Store:
    case Instruction::PHI:
    case Instruction::Br:
      // These are checked separately.
      return true;
    default:
      return isSafeToSpeculativelyExecute(I);
    }
  };

  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      if (I.mayWriteToMemory()) {
        // We don't support writes to memory.
        reportVectorizationFailure(
            "Writes to memory unsupported in early exit loops",
            "Cannot vectorize early exit loop with writes to memory",
            "WritesInEarlyExitLoop", ORE, TheLoop);
        return false;
      } else if (!IsSafeOperation(&I)) {
        reportVectorizationFailure("Early exit loop contains operations that "
                                   "cannot be speculatively executed",
                                   "UnsafeOperationsEarlyExitLoop", ORE,
                                   TheLoop);
        return false;
      }
    }

  // The vectoriser cannot handle loads that occur after the early exit block.
  assert(LatchBB->getUniquePredecessor() == SingleUncountableEdge->first &&
         "Expected latch predecessor to be the early exiting block");

  // TODO: Handle loops that may fault.
  Predicates.clear();
  if (!isDereferenceableReadOnlyLoop(TheLoop, PSE.getSE(), DT, AC,
                                     &Predicates)) {
    reportVectorizationFailure(
        "Loop may fault",
        "Cannot vectorize potentially faulting early exit loop",
        "PotentiallyFaultingEarlyExitLoop", ORE, TheLoop);
    return false;
  }

  [[maybe_unused]] const SCEV *SymbolicMaxBTC =
      PSE.getSymbolicMaxBackedgeTakenCount();
  // Since we have an exact exit count for the latch and the early exit
  // dominates the latch, this should guarantee a computed SCEV value.
  assert(!isa<SCEVCouldNotCompute>(SymbolicMaxBTC) &&
         "Failed to get symbolic expression for backedge taken count");
  LLVM_DEBUG(dbgs() << "LV: Found an early exit loop with symbolic max "
                       "backedge taken count: "
                    << *SymbolicMaxBTC << '\n');
  UncountableEdge = SingleUncountableEdge;
  return true;
}

bool LoopVectorizationLegality::canVectorize(bool UseVPlanNativePath) {
  // Store the result and return it at the end instead of exiting early, in case
  // allowExtraAnalysis is used to report multiple reasons for not vectorizing.
  bool Result = true;

  bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE);
  // Check whether the loop-related control flow in the loop nest is expected by
  // the vectorizer.
  if (!canVectorizeLoopNestCFG(TheLoop, UseVPlanNativePath)) {
    if (DoExtraAnalysis) {
      LLVM_DEBUG(dbgs() << "LV: legality check failed: loop nest\n");
      Result = false;
    } else {
      return false;
    }
  }

  // We need to have a loop header.
  LLVM_DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
                    << '\n');

  // Specific checks for outer loops. We skip the remaining legal checks at this
  // point because they don't support outer loops.
  if (!TheLoop->isInnermost()) {
    assert(UseVPlanNativePath && "VPlan-native path is not enabled.");

    if (!canVectorizeOuterLoop()) {
      reportVectorizationFailure("Unsupported outer loop",
                                 "UnsupportedOuterLoop", ORE, TheLoop);
      // TODO: Implement DoExtraAnalysis when subsequent legal checks support
      // outer loops.
      return false;
    }

    LLVM_DEBUG(dbgs() << "LV: We can vectorize this outer loop!\n");
    return Result;
  }

  assert(TheLoop->isInnermost() && "Inner loop expected.");
  // Check if we can if-convert non-single-bb loops.
  unsigned NumBlocks = TheLoop->getNumBlocks();
  if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
    LLVM_DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // Check if we can vectorize the instructions and CFG in this loop.
  if (!canVectorizeInstrs()) {
    LLVM_DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
    if (TheLoop->getExitingBlock()) {
      reportVectorizationFailure("Cannot vectorize uncountable loop",
                                 "UnsupportedUncountableLoop", ORE, TheLoop);
      if (DoExtraAnalysis)
        Result = false;
      else
        return false;
    } else {
      if (!isVectorizableEarlyExitLoop()) {
        UncountableEdge = std::nullopt;
        if (DoExtraAnalysis)
          Result = false;
        else
          return false;
      }
    }
  }

  // Go over each instruction and look at memory deps.
  if (!canVectorizeMemory()) {
    LLVM_DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  if (Result) {
    LLVM_DEBUG(dbgs() << "LV: We can vectorize this loop"
                      << (LAI->getRuntimePointerChecking()->Need
                              ? " (with a runtime bound check)"
                              : "")
                      << "!\n");
  }

  unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
  if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
    SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;

  if (PSE.getPredicate().getComplexity() > SCEVThreshold) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization not profitable "
                         "due to SCEVThreshold\n");
    reportVectorizationFailure("Too many SCEV checks needed",
        "Too many SCEV assumptions need to be made and checked at runtime",
        "TooManySCEVRunTimeChecks", ORE, TheLoop);
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // Okay! We've done all the tests. If any have failed, return false. Otherwise
  // we can vectorize, and at this point we don't have any other mem analysis
  // which may limit our maximum vectorization factor, so just return true with
  // no restrictions.
  return Result;
}

bool LoopVectorizationLegality::canFoldTailByMasking() const {

  LLVM_DEBUG(dbgs() << "LV: checking if tail can be folded by masking.\n");

  SmallPtrSet<const Value *, 8> ReductionLiveOuts;

  for (const auto &Reduction : getReductionVars())
    ReductionLiveOuts.insert(Reduction.second.getLoopExitInstr());

  // TODO: handle non-reduction outside users when tail is folded by masking.
  for (auto *AE : AllowedExit) {
    // Check that all users of allowed exit values are inside the loop or
    // are the live-out of a reduction.
    if (ReductionLiveOuts.count(AE))
      continue;
    for (User *U : AE->users()) {
      Instruction *UI = cast<Instruction>(U);
      if (TheLoop->contains(UI))
        continue;
      LLVM_DEBUG(
          dbgs()
          << "LV: Cannot fold tail by masking, loop has an outside user for "
          << *UI << "\n");
      return false;
    }
  }

  for (const auto &Entry : getInductionVars()) {
    PHINode *OrigPhi = Entry.first;
    for (User *U : OrigPhi->users()) {
      auto *UI = cast<Instruction>(U);
      if (!TheLoop->contains(UI)) {
        LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking, loop IV has an "
                             "outside user for "
                          << *UI << "\n");
        return false;
      }
    }
  }

  // The list of pointers that we can safely read and write to remains empty.
  SmallPtrSet<Value *, 8> SafePointers;

  // Check all blocks for predication, including those that ordinarily do not
  // need predication such as the header block.
  SmallPtrSet<const Instruction *, 8> TmpMaskedOp;
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (!blockCanBePredicated(BB, SafePointers, TmpMaskedOp)) {
      LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking.\n");
      return false;
    }
  }

  LLVM_DEBUG(dbgs() << "LV: can fold tail by masking.\n");

  return true;
}

void LoopVectorizationLegality::prepareToFoldTailByMasking() {
  // The list of pointers that we can safely read and write to remains empty.
  SmallPtrSet<Value *, 8> SafePointers;

  // Mark all blocks for predication, including those that ordinarily do not
  // need predication such as the header block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    [[maybe_unused]] bool R = blockCanBePredicated(BB, SafePointers, MaskedOp);
    assert(R && "Must be able to predicate block when tail-folding.");
  }
}

} // namespace llvm
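
// A rough usage sketch of the legality queries defined above (hypothetical
// driver code; the real driver is LoopVectorizePass in LoopVectorize.cpp,
// which constructs the object with the full set of analyses):
//
//   LoopVectorizationLegality LVL(/* loop, PSE, DT, TTI, TLI, ... */);
//   if (!LVL.canVectorize(/*UseVPlanNativePath=*/false))
//     return;                            // not legal to vectorize
//   if (LVL.canFoldTailByMasking())
//     LVL.prepareToFoldTailByMasking();  // record instructions needing masks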