//===-- VPlanTransforms.cpp - Utility VPlan to VPlan transforms ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements a set of utility VPlan to VPlan transformations.
///
//===----------------------------------------------------------------------===//

#include "VPlanTransforms.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanAnalysis.h"
#include "VPlanCFG.h"
#include "VPlanDominatorTree.h"
#include "VPlanPatternMatch.h"
#include "VPlanUtils.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"

using namespace llvm;

void VPlanTransforms::VPInstructionsToVPRecipes(
    VPlanPtr &Plan,
    function_ref<const InductionDescriptor *(PHINode *)>
        GetIntOrFpInductionDescriptor,
    ScalarEvolution &SE, const TargetLibraryInfo &TLI) {

  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
      Plan->getVectorLoopRegion());
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
    // Skip blocks outside region
    if (!VPBB->getParent())
      break;
    VPRecipeBase *Term = VPBB->getTerminator();
    auto EndIter = Term ? Term->getIterator() : VPBB->end();
    // Introduce each ingredient into VPlan.
    for (VPRecipeBase &Ingredient :
         make_early_inc_range(make_range(VPBB->begin(), EndIter))) {

      VPValue *VPV = Ingredient.getVPSingleValue();
      Instruction *Inst = cast<Instruction>(VPV->getUnderlyingValue());

      VPRecipeBase *NewRecipe = nullptr;
      if (auto *VPPhi = dyn_cast<VPWidenPHIRecipe>(&Ingredient)) {
        auto *Phi = cast<PHINode>(VPPhi->getUnderlyingValue());
        const auto *II = GetIntOrFpInductionDescriptor(Phi);
        if (!II)
          continue;

        VPValue *Start = Plan->getOrAddLiveIn(II->getStartValue());
        VPValue *Step =
            vputils::getOrCreateVPValueForSCEVExpr(*Plan, II->getStep(), SE);
        NewRecipe = new VPWidenIntOrFpInductionRecipe(
            Phi, Start, Step, &Plan->getVF(), *II, Ingredient.getDebugLoc());
      } else {
        assert(isa<VPInstruction>(&Ingredient) &&
               "only VPInstructions expected here");
        assert(!isa<PHINode>(Inst) && "phis should be handled above");
        // Create VPWidenMemoryRecipe for loads and stores.
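        // Note: the memory recipes below are created without a mask and with
        // conservative (false) Consecutive/Reverse flags.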
        if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
          NewRecipe = new VPWidenLoadRecipe(
              *Load, Ingredient.getOperand(0), nullptr /*Mask*/,
              false /*Consecutive*/, false /*Reverse*/,
              Ingredient.getDebugLoc());
        } else if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
          NewRecipe = new VPWidenStoreRecipe(
              *Store, Ingredient.getOperand(1), Ingredient.getOperand(0),
              nullptr /*Mask*/, false /*Consecutive*/, false /*Reverse*/,
              Ingredient.getDebugLoc());
        } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
          NewRecipe = new VPWidenGEPRecipe(GEP, Ingredient.operands());
        } else if (CallInst *CI = dyn_cast<CallInst>(Inst)) {
          NewRecipe = new VPWidenIntrinsicRecipe(
              *CI, getVectorIntrinsicIDForCall(CI, &TLI),
              {Ingredient.op_begin(), Ingredient.op_end() - 1}, CI->getType(),
              CI->getDebugLoc());
        } else if (SelectInst *SI = dyn_cast<SelectInst>(Inst)) {
          NewRecipe = new VPWidenSelectRecipe(*SI, Ingredient.operands());
        } else if (auto *CI = dyn_cast<CastInst>(Inst)) {
          NewRecipe = new VPWidenCastRecipe(
              CI->getOpcode(), Ingredient.getOperand(0), CI->getType(), *CI);
        } else {
          NewRecipe = new VPWidenRecipe(*Inst, Ingredient.operands());
        }
      }

      NewRecipe->insertBefore(&Ingredient);
      if (NewRecipe->getNumDefinedValues() == 1)
        VPV->replaceAllUsesWith(NewRecipe->getVPSingleValue());
      else
        assert(NewRecipe->getNumDefinedValues() == 0 &&
               "Only recipes with zero or one defined values expected");
      Ingredient.eraseFromParent();
    }
  }
}

static bool sinkScalarOperands(VPlan &Plan) {
  auto Iter = vp_depth_first_deep(Plan.getEntry());
  bool Changed = false;
  // First, collect the operands of all recipes in replicate blocks as seeds
  // for sinking.
  SetVector<std::pair<VPBasicBlock *, VPSingleDefRecipe *>> WorkList;
  for (VPRegionBlock *VPR : VPBlockUtils::blocksOnly<VPRegionBlock>(Iter)) {
    VPBasicBlock *EntryVPBB = VPR->getEntryBasicBlock();
    if (!VPR->isReplicator() || EntryVPBB->getSuccessors().size() != 2)
      continue;
    VPBasicBlock *VPBB = dyn_cast<VPBasicBlock>(EntryVPBB->getSuccessors()[0]);
    if (!VPBB || VPBB->getSingleSuccessor() != VPR->getExitingBasicBlock())
      continue;
    for (auto &Recipe : *VPBB) {
      for (VPValue *Op : Recipe.operands())
        if (auto *Def =
                dyn_cast_or_null<VPSingleDefRecipe>(Op->getDefiningRecipe()))
          WorkList.insert(std::make_pair(VPBB, Def));
    }
  }

  bool ScalarVFOnly = Plan.hasScalarVFOnly();
  // Try to sink each replicate or scalar IV steps recipe in the worklist.
  for (unsigned I = 0; I != WorkList.size(); ++I) {
    VPBasicBlock *SinkTo;
    VPSingleDefRecipe *SinkCandidate;
    std::tie(SinkTo, SinkCandidate) = WorkList[I];
    if (SinkCandidate->getParent() == SinkTo ||
        SinkCandidate->mayHaveSideEffects() ||
        SinkCandidate->mayReadOrWriteMemory())
      continue;
    if (auto *RepR = dyn_cast<VPReplicateRecipe>(SinkCandidate)) {
      if (!ScalarVFOnly && RepR->isUniform())
        continue;
    } else if (!isa<VPScalarIVStepsRecipe>(SinkCandidate))
      continue;

    bool NeedsDuplicating = false;
    // All recipe users of the sink candidate must be in the same block SinkTo
    // or all users outside of SinkTo must be uniform-after-vectorization
    // (i.e., only the first lane is used). In the latter case, we need to
    // duplicate SinkCandidate.
    auto CanSinkWithUser = [SinkTo, &NeedsDuplicating,
                            SinkCandidate](VPUser *U) {
      auto *UI = cast<VPRecipeBase>(U);
      if (UI->getParent() == SinkTo)
        return true;
      NeedsDuplicating = UI->onlyFirstLaneUsed(SinkCandidate);
      // We only know how to duplicate VPReplicateRecipes for now.
      return NeedsDuplicating && isa<VPReplicateRecipe>(SinkCandidate);
    };
    if (!all_of(SinkCandidate->users(), CanSinkWithUser))
      continue;

    if (NeedsDuplicating) {
      if (ScalarVFOnly)
        continue;
      Instruction *I = SinkCandidate->getUnderlyingInstr();
      auto *Clone = new VPReplicateRecipe(I, SinkCandidate->operands(), true);
      // TODO: add ".cloned" suffix to name of Clone's VPValue.

      Clone->insertBefore(SinkCandidate);
      SinkCandidate->replaceUsesWithIf(Clone, [SinkTo](VPUser &U, unsigned) {
        return cast<VPRecipeBase>(&U)->getParent() != SinkTo;
      });
    }
    SinkCandidate->moveBefore(*SinkTo, SinkTo->getFirstNonPhi());
    for (VPValue *Op : SinkCandidate->operands())
      if (auto *Def =
              dyn_cast_or_null<VPSingleDefRecipe>(Op->getDefiningRecipe()))
        WorkList.insert(std::make_pair(SinkTo, Def));
    Changed = true;
  }
  return Changed;
}

/// If \p R is a region with a VPBranchOnMaskRecipe in the entry block, return
/// the mask.
VPValue *getPredicatedMask(VPRegionBlock *R) {
  auto *EntryBB = dyn_cast<VPBasicBlock>(R->getEntry());
  if (!EntryBB || EntryBB->size() != 1 ||
      !isa<VPBranchOnMaskRecipe>(EntryBB->begin()))
    return nullptr;

  return cast<VPBranchOnMaskRecipe>(&*EntryBB->begin())->getOperand(0);
}

/// If \p R is a triangle region, return the 'then' block of the triangle.
static VPBasicBlock *getPredicatedThenBlock(VPRegionBlock *R) {
  auto *EntryBB = cast<VPBasicBlock>(R->getEntry());
  if (EntryBB->getNumSuccessors() != 2)
    return nullptr;

  auto *Succ0 = dyn_cast<VPBasicBlock>(EntryBB->getSuccessors()[0]);
  auto *Succ1 = dyn_cast<VPBasicBlock>(EntryBB->getSuccessors()[1]);
  if (!Succ0 || !Succ1)
    return nullptr;

  if (Succ0->getNumSuccessors() + Succ1->getNumSuccessors() != 1)
    return nullptr;
  if (Succ0->getSingleSuccessor() == Succ1)
    return Succ0;
  if (Succ1->getSingleSuccessor() == Succ0)
    return Succ1;
  return nullptr;
}

// Merge replicate regions into their successor region, if a replicate region
// is connected to a successor replicate region with the same predicate by a
// single, empty VPBasicBlock.
static bool mergeReplicateRegionsIntoSuccessors(VPlan &Plan) {
  SetVector<VPRegionBlock *> DeletedRegions;

  // Collect replicate regions followed by an empty block, followed by another
  // replicate region with matching masks, to process up front. This is to
  // avoid iterator invalidation issues while merging regions.
  SmallVector<VPRegionBlock *, 8> WorkList;
  for (VPRegionBlock *Region1 : VPBlockUtils::blocksOnly<VPRegionBlock>(
           vp_depth_first_deep(Plan.getEntry()))) {
    if (!Region1->isReplicator())
      continue;
    auto *MiddleBasicBlock =
        dyn_cast_or_null<VPBasicBlock>(Region1->getSingleSuccessor());
    if (!MiddleBasicBlock || !MiddleBasicBlock->empty())
      continue;

    auto *Region2 =
        dyn_cast_or_null<VPRegionBlock>(MiddleBasicBlock->getSingleSuccessor());
    if (!Region2 || !Region2->isReplicator())
      continue;

    VPValue *Mask1 = getPredicatedMask(Region1);
    VPValue *Mask2 = getPredicatedMask(Region2);
    if (!Mask1 || Mask1 != Mask2)
      continue;

    assert(Mask1 && Mask2 && "both regions must have conditions");
    WorkList.push_back(Region1);
  }

  // Move recipes from Region1 to its successor region, if both are triangles.
  for (VPRegionBlock *Region1 : WorkList) {
    if (DeletedRegions.contains(Region1))
      continue;
    auto *MiddleBasicBlock = cast<VPBasicBlock>(Region1->getSingleSuccessor());
    auto *Region2 = cast<VPRegionBlock>(MiddleBasicBlock->getSingleSuccessor());

    VPBasicBlock *Then1 = getPredicatedThenBlock(Region1);
    VPBasicBlock *Then2 = getPredicatedThenBlock(Region2);
    if (!Then1 || !Then2)
      continue;

    // Note: No fusion-preventing memory dependencies are expected in either
    // region. Such dependencies should be rejected during earlier dependence
    // checks, which guarantee accesses can be re-ordered for vectorization.
    //
    // Move recipes to the successor region.
    for (VPRecipeBase &ToMove : make_early_inc_range(reverse(*Then1)))
      ToMove.moveBefore(*Then2, Then2->getFirstNonPhi());

    auto *Merge1 = cast<VPBasicBlock>(Then1->getSingleSuccessor());
    auto *Merge2 = cast<VPBasicBlock>(Then2->getSingleSuccessor());

    // Move VPPredInstPHIRecipes from the merge block to the successor region's
    // merge block. Update all users inside the successor region to use the
    // original values.
    for (VPRecipeBase &Phi1ToMove : make_early_inc_range(reverse(*Merge1))) {
      VPValue *PredInst1 =
          cast<VPPredInstPHIRecipe>(&Phi1ToMove)->getOperand(0);
      VPValue *Phi1ToMoveV = Phi1ToMove.getVPSingleValue();
      Phi1ToMoveV->replaceUsesWithIf(PredInst1, [Then2](VPUser &U, unsigned) {
        return cast<VPRecipeBase>(&U)->getParent() == Then2;
      });

      // Remove phi recipes that are unused after merging the regions.
      if (Phi1ToMove.getVPSingleValue()->getNumUsers() == 0) {
        Phi1ToMove.eraseFromParent();
        continue;
      }
      Phi1ToMove.moveBefore(*Merge2, Merge2->begin());
    }

    // Finally, remove the first region.
    for (VPBlockBase *Pred : make_early_inc_range(Region1->getPredecessors())) {
      VPBlockUtils::disconnectBlocks(Pred, Region1);
      VPBlockUtils::connectBlocks(Pred, MiddleBasicBlock);
    }
    VPBlockUtils::disconnectBlocks(Region1, MiddleBasicBlock);
    DeletedRegions.insert(Region1);
  }

  for (VPRegionBlock *ToDelete : DeletedRegions)
    delete ToDelete;
  return !DeletedRegions.empty();
}

static VPRegionBlock *createReplicateRegion(VPReplicateRecipe *PredRecipe,
                                            VPlan &Plan) {
  Instruction *Instr = PredRecipe->getUnderlyingInstr();
  // Build the triangular if-then region.
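  // The region is a triangle: the entry block branches on the mask either to
  // the ".if" block, which holds the unmasked replicate recipe, or directly to
  // the ".continue" block.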
  std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
  assert(Instr->getParent() && "Predicated instruction not in any basic block");
  auto *BlockInMask = PredRecipe->getMask();
  auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
  auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);

  // Replace predicated replicate recipe with a replicate recipe without a
  // mask but in the replicate region.
  auto *RecipeWithoutMask = new VPReplicateRecipe(
      PredRecipe->getUnderlyingInstr(),
      make_range(PredRecipe->op_begin(), std::prev(PredRecipe->op_end())),
      PredRecipe->isUniform());
  auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", RecipeWithoutMask);

  VPPredInstPHIRecipe *PHIRecipe = nullptr;
  if (PredRecipe->getNumUsers() != 0) {
    PHIRecipe = new VPPredInstPHIRecipe(RecipeWithoutMask,
                                        RecipeWithoutMask->getDebugLoc());
    PredRecipe->replaceAllUsesWith(PHIRecipe);
    PHIRecipe->setOperand(0, RecipeWithoutMask);
  }
  PredRecipe->eraseFromParent();
  auto *Exiting = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
  VPRegionBlock *Region = new VPRegionBlock(Entry, Exiting, RegionName, true);

  // Note: first set Entry as region entry and then connect successors starting
  // from it in order, to propagate the "parent" of each VPBasicBlock.
  VPBlockUtils::insertTwoBlocksAfter(Pred, Exiting, Entry);
  VPBlockUtils::connectBlocks(Pred, Exiting);

  return Region;
}

static void addReplicateRegions(VPlan &Plan) {
  SmallVector<VPReplicateRecipe *> WorkList;
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_deep(Plan.getEntry()))) {
    for (VPRecipeBase &R : *VPBB)
      if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
        if (RepR->isPredicated())
          WorkList.push_back(RepR);
      }
  }

  unsigned BBNum = 0;
  for (VPReplicateRecipe *RepR : WorkList) {
    VPBasicBlock *CurrentBlock = RepR->getParent();
    VPBasicBlock *SplitBlock = CurrentBlock->splitAt(RepR->getIterator());

    BasicBlock *OrigBB = RepR->getUnderlyingInstr()->getParent();
    SplitBlock->setName(
        OrigBB->hasName() ? OrigBB->getName() + "." + Twine(BBNum++) : "");
    // Record predicated instructions for above packing optimizations.
    VPBlockBase *Region = createReplicateRegion(RepR, Plan);
    Region->setParent(CurrentBlock->getParent());
    VPBlockUtils::insertOnEdge(CurrentBlock, SplitBlock, Region);
  }
}

/// Remove redundant VPBasicBlocks by merging them into their predecessor if
/// the predecessor has a single successor.
static bool mergeBlocksIntoPredecessors(VPlan &Plan) {
  SmallVector<VPBasicBlock *> WorkList;
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_deep(Plan.getEntry()))) {
    // Don't fold the blocks in the skeleton of the Plan into their single
    // predecessors for now.
    // TODO: Remove restriction once more of the skeleton is modeled in VPlan.
    if (!VPBB->getParent())
      continue;
    auto *PredVPBB =
        dyn_cast_or_null<VPBasicBlock>(VPBB->getSinglePredecessor());
    if (!PredVPBB || PredVPBB->getNumSuccessors() != 1 ||
        isa<VPIRBasicBlock>(PredVPBB))
      continue;
    WorkList.push_back(VPBB);
  }

  for (VPBasicBlock *VPBB : WorkList) {
    VPBasicBlock *PredVPBB = cast<VPBasicBlock>(VPBB->getSinglePredecessor());
    for (VPRecipeBase &R : make_early_inc_range(*VPBB))
      R.moveBefore(*PredVPBB, PredVPBB->end());
    VPBlockUtils::disconnectBlocks(PredVPBB, VPBB);
    auto *ParentRegion = cast_or_null<VPRegionBlock>(VPBB->getParent());
    if (ParentRegion && ParentRegion->getExiting() == VPBB)
      ParentRegion->setExiting(PredVPBB);
    for (auto *Succ : to_vector(VPBB->successors())) {
      VPBlockUtils::disconnectBlocks(VPBB, Succ);
      VPBlockUtils::connectBlocks(PredVPBB, Succ);
    }
    delete VPBB;
  }
  return !WorkList.empty();
}

void VPlanTransforms::createAndOptimizeReplicateRegions(VPlan &Plan) {
  // Convert masked VPReplicateRecipes to if-then region blocks.
  addReplicateRegions(Plan);

  bool ShouldSimplify = true;
  while (ShouldSimplify) {
    ShouldSimplify = sinkScalarOperands(Plan);
    ShouldSimplify |= mergeReplicateRegionsIntoSuccessors(Plan);
    ShouldSimplify |= mergeBlocksIntoPredecessors(Plan);
  }
}

/// Remove redundant casts of inductions.
///
/// Such redundant casts are casts of induction variables that can be ignored,
/// because we already proved that the casted phi is equal to the uncasted phi
/// in the vectorized loop. There is no need to vectorize the cast - the same
/// value can be used for both the phi and casts in the vector loop.
static void removeRedundantInductionCasts(VPlan &Plan) {
  for (auto &Phi : Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
    auto *IV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
    if (!IV || IV->getTruncInst())
      continue;

    // A sequence of IR Casts has potentially been recorded for IV, which
    // *must be bypassed* when the IV is vectorized, because the vectorized IV
    // will produce the desired casted value. This sequence forms a def-use
    // chain and is provided in reverse order, ending with the cast that uses
    // the IV phi. Search for the recipe of the last cast in the chain and
    // replace it with the original IV. Note that only the final cast is
    // expected to have users outside the cast-chain and the dead casts left
    // over will be cleaned up later.
    auto &Casts = IV->getInductionDescriptor().getCastInsts();
    VPValue *FindMyCast = IV;
    for (Instruction *IRCast : reverse(Casts)) {
      VPSingleDefRecipe *FoundUserCast = nullptr;
      for (auto *U : FindMyCast->users()) {
        auto *UserCast = dyn_cast<VPSingleDefRecipe>(U);
        if (UserCast && UserCast->getUnderlyingValue() == IRCast) {
          FoundUserCast = UserCast;
          break;
        }
      }
      FindMyCast = FoundUserCast;
    }
    FindMyCast->replaceAllUsesWith(IV);
  }
}

/// Try to replace VPWidenCanonicalIVRecipes with a widened canonical IV
/// recipe, if it exists.
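/// A canonical VPWidenIntOrFpInductionRecipe (starting at 0 with a unit step
/// of the same type) produces the same per-lane values as the
/// VPWidenCanonicalIVRecipe, so the latter can reuse the former when it is
/// guaranteed to be generated as a vector, or only its first lane is demanded.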
static void removeRedundantCanonicalIVs(VPlan &Plan) {
  VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
  VPWidenCanonicalIVRecipe *WidenNewIV = nullptr;
  for (VPUser *U : CanonicalIV->users()) {
    WidenNewIV = dyn_cast<VPWidenCanonicalIVRecipe>(U);
    if (WidenNewIV)
      break;
  }

  if (!WidenNewIV)
    return;

  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
    auto *WidenOriginalIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);

    if (!WidenOriginalIV || !WidenOriginalIV->isCanonical())
      continue;

    // Replace WidenNewIV with WidenOriginalIV if WidenOriginalIV provides
    // everything WidenNewIV's users need. That is, WidenOriginalIV will
    // generate a vector phi or all users of WidenNewIV demand the first lane
    // only.
    if (any_of(WidenOriginalIV->users(),
               [WidenOriginalIV](VPUser *U) {
                 return !U->usesScalars(WidenOriginalIV);
               }) ||
        vputils::onlyFirstLaneUsed(WidenNewIV)) {
      WidenNewIV->replaceAllUsesWith(WidenOriginalIV);
      WidenNewIV->eraseFromParent();
      return;
    }
  }
}

/// Returns true if \p R is dead and can be removed.
static bool isDeadRecipe(VPRecipeBase &R) {
  using namespace llvm::PatternMatch;
  // Do remove conditional assume instructions as their conditions may be
  // flattened.
  auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
  bool IsConditionalAssume =
      RepR && RepR->isPredicated() &&
      match(RepR->getUnderlyingInstr(), m_Intrinsic<Intrinsic::assume>());
  if (IsConditionalAssume)
    return true;

  if (R.mayHaveSideEffects())
    return false;

  // Recipe is dead if no user keeps the recipe alive.
  return all_of(R.definedValues(),
                [](VPValue *V) { return V->getNumUsers() == 0; });
}

void VPlanTransforms::removeDeadRecipes(VPlan &Plan) {
  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
      Plan.getEntry());

  for (VPBasicBlock *VPBB :
       reverse(VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT))) {
    // The recipes in the block are processed in reverse order, to catch chains
    // of dead recipes.
    for (VPRecipeBase &R : make_early_inc_range(reverse(*VPBB))) {
      if (isDeadRecipe(R))
        R.eraseFromParent();
    }
  }
}

static VPScalarIVStepsRecipe *
createScalarIVSteps(VPlan &Plan, InductionDescriptor::InductionKind Kind,
                    Instruction::BinaryOps InductionOpcode,
                    FPMathOperator *FPBinOp, Instruction *TruncI,
                    VPValue *StartV, VPValue *Step, VPBuilder &Builder) {
  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
  VPSingleDefRecipe *BaseIV = Builder.createDerivedIV(
      Kind, FPBinOp, StartV, CanonicalIV, Step, "offset.idx");

  // Truncate base induction if needed.
  Type *CanonicalIVType = CanonicalIV->getScalarType();
  VPTypeAnalysis TypeInfo(CanonicalIVType);
  Type *ResultTy = TypeInfo.inferScalarType(BaseIV);
  if (TruncI) {
    Type *TruncTy = TruncI->getType();
    assert(ResultTy->getScalarSizeInBits() > TruncTy->getScalarSizeInBits() &&
           "Not truncating.");
    assert(ResultTy->isIntegerTy() && "Truncation requires an integer type");
    BaseIV = Builder.createScalarCast(Instruction::Trunc, BaseIV, TruncTy);
    ResultTy = TruncTy;
  }

  // Truncate step if needed.
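  // The step is loop-invariant, so a required truncate is created in the
  // vector preheader rather than in the loop body.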
  Type *StepTy = TypeInfo.inferScalarType(Step);
  if (ResultTy != StepTy) {
    assert(StepTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits() &&
           "Not truncating.");
    assert(StepTy->isIntegerTy() && "Truncation requires an integer type");
    auto *VecPreheader =
        cast<VPBasicBlock>(HeaderVPBB->getSingleHierarchicalPredecessor());
    VPBuilder::InsertPointGuard Guard(Builder);
    Builder.setInsertPoint(VecPreheader);
    Step = Builder.createScalarCast(Instruction::Trunc, Step, ResultTy);
  }
  return Builder.createScalarIVSteps(InductionOpcode, FPBinOp, BaseIV, Step);
}

/// Legalize VPWidenPointerInductionRecipe, by replacing it with a PtrAdd
/// (IndStart, ScalarIVSteps (0, Step)) if only its scalar values are used, as
/// VPWidenPointerInductionRecipe will generate vectors only. If some users
/// require vectors while others require scalars, the scalar uses need to
/// extract the scalars from the generated vectors (note that this is different
/// to how int/fp inductions are handled). Also optimize
/// VPWidenIntOrFpInductionRecipe, if any of its users needs scalar values, by
/// providing them scalar steps built on the canonical scalar IV and updating
/// the original IV's users. This is an optional optimization to reduce the
/// need for vector extracts.
static void legalizeAndOptimizeInductions(VPlan &Plan) {
  SmallVector<VPRecipeBase *> ToRemove;
  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  bool HasOnlyVectorVFs = !Plan.hasVF(ElementCount::getFixed(1));
  VPBuilder Builder(HeaderVPBB, HeaderVPBB->getFirstNonPhi());
  for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
    // Replace wide pointer inductions which have only their scalars used by
    // PtrAdd(IndStart, ScalarIVSteps (0, Step)).
    if (auto *PtrIV = dyn_cast<VPWidenPointerInductionRecipe>(&Phi)) {
      if (!PtrIV->onlyScalarsGenerated(Plan.hasScalableVF()))
        continue;

      const InductionDescriptor &ID = PtrIV->getInductionDescriptor();
      VPValue *StartV =
          Plan.getOrAddLiveIn(ConstantInt::get(ID.getStep()->getType(), 0));
      VPValue *StepV = PtrIV->getOperand(1);
      VPScalarIVStepsRecipe *Steps = createScalarIVSteps(
          Plan, InductionDescriptor::IK_IntInduction, Instruction::Add, nullptr,
          nullptr, StartV, StepV, Builder);

      VPValue *PtrAdd = Builder.createPtrAdd(PtrIV->getStartValue(), Steps,
                                             PtrIV->getDebugLoc(), "next.gep");

      PtrIV->replaceAllUsesWith(PtrAdd);
      continue;
    }

    // Replace widened induction with scalar steps for users that only use
    // scalars.
    auto *WideIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
    if (!WideIV)
      continue;
    if (HasOnlyVectorVFs && none_of(WideIV->users(), [WideIV](VPUser *U) {
          return U->usesScalars(WideIV);
        }))
      continue;

    const InductionDescriptor &ID = WideIV->getInductionDescriptor();
    VPScalarIVStepsRecipe *Steps = createScalarIVSteps(
        Plan, ID.getKind(), ID.getInductionOpcode(),
        dyn_cast_or_null<FPMathOperator>(ID.getInductionBinOp()),
        WideIV->getTruncInst(), WideIV->getStartValue(), WideIV->getStepValue(),
        Builder);

    // Update scalar users of IV to use Step instead.
    if (!HasOnlyVectorVFs)
      WideIV->replaceAllUsesWith(Steps);
    else
      WideIV->replaceUsesWithIf(Steps, [WideIV](VPUser &U, unsigned) {
        return U.usesScalars(WideIV);
      });
  }
}

/// Remove redundant ExpandSCEVRecipes in \p Plan's entry block by replacing
/// them with already existing recipes expanding the same SCEV expression.
static void removeRedundantExpandSCEVRecipes(VPlan &Plan) {
  DenseMap<const SCEV *, VPValue *> SCEV2VPV;

  for (VPRecipeBase &R :
       make_early_inc_range(*Plan.getEntry()->getEntryBasicBlock())) {
    auto *ExpR = dyn_cast<VPExpandSCEVRecipe>(&R);
    if (!ExpR)
      continue;

    auto I = SCEV2VPV.insert({ExpR->getSCEV(), ExpR});
    if (I.second)
      continue;
    ExpR->replaceAllUsesWith(I.first->second);
    ExpR->eraseFromParent();
  }
}

static void recursivelyDeleteDeadRecipes(VPValue *V) {
  SmallVector<VPValue *> WorkList;
  SmallPtrSet<VPValue *, 8> Seen;
  WorkList.push_back(V);

  while (!WorkList.empty()) {
    VPValue *Cur = WorkList.pop_back_val();
    if (!Seen.insert(Cur).second)
      continue;
    VPRecipeBase *R = Cur->getDefiningRecipe();
    if (!R)
      continue;
    if (!isDeadRecipe(*R))
      continue;
    WorkList.append(R->op_begin(), R->op_end());
    R->eraseFromParent();
  }
}

void VPlanTransforms::optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF,
                                         unsigned BestUF,
                                         PredicatedScalarEvolution &PSE) {
  assert(Plan.hasVF(BestVF) && "BestVF is not available in Plan");
  assert(Plan.hasUF(BestUF) && "BestUF is not available in Plan");
  VPBasicBlock *ExitingVPBB =
      Plan.getVectorLoopRegion()->getExitingBasicBlock();
  auto *Term = &ExitingVPBB->back();
  // Try to simplify the branch condition if TC <= VF * UF when preparing to
  // execute the plan for the main vector loop. We only do this if the
  // terminator is:
  //  1. BranchOnCount, or
  //  2. BranchOnCond where the input is Not(ActiveLaneMask).
  using namespace llvm::VPlanPatternMatch;
  if (!match(Term, m_BranchOnCount(m_VPValue(), m_VPValue())) &&
      !match(Term,
             m_BranchOnCond(m_Not(m_ActiveLaneMask(m_VPValue(), m_VPValue())))))
    return;

  ScalarEvolution &SE = *PSE.getSE();
  const SCEV *TripCount =
      vputils::getSCEVExprForVPValue(Plan.getTripCount(), SE);
  assert(!isa<SCEVCouldNotCompute>(TripCount) &&
         "Trip count SCEV must be computable");
  ElementCount NumElements = BestVF.multiplyCoefficientBy(BestUF);
  const SCEV *C = SE.getElementCount(TripCount->getType(), NumElements);
  if (TripCount->isZero() ||
      !SE.isKnownPredicate(CmpInst::ICMP_ULE, TripCount, C))
    return;

  LLVMContext &Ctx = SE.getContext();
  auto *BOC = new VPInstruction(
      VPInstruction::BranchOnCond,
      {Plan.getOrAddLiveIn(ConstantInt::getTrue(Ctx))}, Term->getDebugLoc());

  SmallVector<VPValue *> PossiblyDead(Term->operands());
  Term->eraseFromParent();
  for (VPValue *Op : PossiblyDead)
    recursivelyDeleteDeadRecipes(Op);
  ExitingVPBB->appendRecipe(BOC);
  Plan.setVF(BestVF);
  Plan.setUF(BestUF);
  // TODO: Further simplifications are possible
  //      1. Replace inductions with constants.
  //      2. Replace vector loop region with VPBasicBlock.
}

/// Sink users of \p FOR after the recipe defining the previous value \p
/// Previous of the recurrence. \returns true if all users of \p FOR could be
/// re-arranged as needed or false if it is not possible.
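/// For example, if a user of \p FOR is currently placed before \p Previous in
/// the loop body, it is moved after \p Previous, so that the splice combining
/// \p FOR and \p Previous can later be created before all such users.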
static bool
sinkRecurrenceUsersAfterPrevious(VPFirstOrderRecurrencePHIRecipe *FOR,
                                 VPRecipeBase *Previous,
                                 VPDominatorTree &VPDT) {
  // Collect recipes that need sinking.
  SmallVector<VPRecipeBase *> WorkList;
  SmallPtrSet<VPRecipeBase *, 8> Seen;
  Seen.insert(Previous);
  auto TryToPushSinkCandidate = [&](VPRecipeBase *SinkCandidate) {
    // The previous value must not depend on the users of the recurrence phi;
    // if it did, FOR would not be a fixed-order recurrence.
    if (SinkCandidate == Previous)
      return false;

    if (isa<VPHeaderPHIRecipe>(SinkCandidate) ||
        !Seen.insert(SinkCandidate).second ||
        VPDT.properlyDominates(Previous, SinkCandidate))
      return true;

    if (SinkCandidate->mayHaveSideEffects())
      return false;

    WorkList.push_back(SinkCandidate);
    return true;
  };

  // Recursively sink users of FOR after Previous.
  WorkList.push_back(FOR);
  for (unsigned I = 0; I != WorkList.size(); ++I) {
    VPRecipeBase *Current = WorkList[I];
    assert(Current->getNumDefinedValues() == 1 &&
           "only recipes with a single defined value expected");

    for (VPUser *User : Current->getVPSingleValue()->users()) {
      if (!TryToPushSinkCandidate(cast<VPRecipeBase>(User)))
        return false;
    }
  }

  // Keep recipes to sink ordered by dominance so earlier instructions are
  // processed first.
  sort(WorkList, [&VPDT](const VPRecipeBase *A, const VPRecipeBase *B) {
    return VPDT.properlyDominates(A, B);
  });

  for (VPRecipeBase *SinkCandidate : WorkList) {
    if (SinkCandidate == FOR)
      continue;

    SinkCandidate->moveAfter(Previous);
    Previous = SinkCandidate;
  }
  return true;
}

/// Try to hoist \p Previous and its operands before all users of \p FOR.
static bool hoistPreviousBeforeFORUsers(VPFirstOrderRecurrencePHIRecipe *FOR,
                                        VPRecipeBase *Previous,
                                        VPDominatorTree &VPDT) {
  if (Previous->mayHaveSideEffects() || Previous->mayReadFromMemory())
    return false;

  // Collect recipes that need hoisting.
  SmallVector<VPRecipeBase *> HoistCandidates;
  SmallPtrSet<VPRecipeBase *, 8> Visited;
  VPRecipeBase *HoistPoint = nullptr;
  // Find the closest hoist point by looking at all users of FOR and selecting
  // the recipe dominating all other users.
  for (VPUser *U : FOR->users()) {
    auto *R = cast<VPRecipeBase>(U);
    if (!HoistPoint || VPDT.properlyDominates(R, HoistPoint))
      HoistPoint = R;
  }
  assert(all_of(FOR->users(),
                [&VPDT, HoistPoint](VPUser *U) {
                  auto *R = cast<VPRecipeBase>(U);
                  return HoistPoint == R ||
                         VPDT.properlyDominates(HoistPoint, R);
                }) &&
         "HoistPoint must dominate all users of FOR");

  auto NeedsHoisting = [HoistPoint, &VPDT,
                        &Visited](VPValue *HoistCandidateV) -> VPRecipeBase * {
    VPRecipeBase *HoistCandidate = HoistCandidateV->getDefiningRecipe();
    if (!HoistCandidate)
      return nullptr;
    VPRegionBlock *EnclosingLoopRegion =
        HoistCandidate->getParent()->getEnclosingLoopRegion();
    assert((!HoistCandidate->getParent()->getParent() ||
            HoistCandidate->getParent()->getParent() == EnclosingLoopRegion) &&
           "CFG in VPlan should still be flat, without replicate regions");
    // Hoist candidate was already visited, no need to hoist.
    if (!Visited.insert(HoistCandidate).second)
      return nullptr;

    // Candidate is outside loop region or a header phi, dominates FOR users
    // w/o hoisting.
    if (!EnclosingLoopRegion || isa<VPHeaderPHIRecipe>(HoistCandidate))
      return nullptr;

    // If we reached a recipe that dominates HoistPoint, we don't need to
    // hoist the recipe.
    if (VPDT.properlyDominates(HoistCandidate, HoistPoint))
      return nullptr;
    return HoistCandidate;
  };
  auto CanHoist = [&](VPRecipeBase *HoistCandidate) {
    // Avoid hoisting candidates with side-effects, as we do not yet analyze
    // associated dependencies.
    return !HoistCandidate->mayHaveSideEffects();
  };

  if (!NeedsHoisting(Previous->getVPSingleValue()))
    return true;

  // Recursively try to hoist Previous and its operands before all users of
  // FOR.
  HoistCandidates.push_back(Previous);

  for (unsigned I = 0; I != HoistCandidates.size(); ++I) {
    VPRecipeBase *Current = HoistCandidates[I];
    assert(Current->getNumDefinedValues() == 1 &&
           "only recipes with a single defined value expected");
    if (!CanHoist(Current))
      return false;

    for (VPValue *Op : Current->operands()) {
      // If we reach FOR, it means the original Previous depends on some other
      // recurrence that in turn depends on FOR. If that is the case, we would
      // also need to hoist recipes involving the other FOR, which may break
      // dependencies.
      if (Op == FOR)
        return false;

      if (auto *R = NeedsHoisting(Op))
        HoistCandidates.push_back(R);
    }
  }

  // Order recipes to hoist by dominance so earlier instructions are processed
  // first.
  sort(HoistCandidates, [&VPDT](const VPRecipeBase *A, const VPRecipeBase *B) {
    return VPDT.properlyDominates(A, B);
  });

  for (VPRecipeBase *HoistCandidate : HoistCandidates) {
    HoistCandidate->moveBefore(*HoistPoint->getParent(),
                               HoistPoint->getIterator());
  }

  return true;
}

bool VPlanTransforms::adjustFixedOrderRecurrences(VPlan &Plan,
                                                  VPBuilder &LoopBuilder) {
  VPDominatorTree VPDT;
  VPDT.recalculate(Plan);

  SmallVector<VPFirstOrderRecurrencePHIRecipe *> RecurrencePhis;
  for (VPRecipeBase &R :
       Plan.getVectorLoopRegion()->getEntry()->getEntryBasicBlock()->phis())
    if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
      RecurrencePhis.push_back(FOR);

  for (VPFirstOrderRecurrencePHIRecipe *FOR : RecurrencePhis) {
    SmallPtrSet<VPFirstOrderRecurrencePHIRecipe *, 4> SeenPhis;
    VPRecipeBase *Previous = FOR->getBackedgeValue()->getDefiningRecipe();
    // Fixed-order recurrences do not contain cycles, so this loop is guaranteed
    // to terminate.
    while (auto *PrevPhi =
               dyn_cast_or_null<VPFirstOrderRecurrencePHIRecipe>(Previous)) {
      assert(PrevPhi->getParent() == FOR->getParent());
      assert(SeenPhis.insert(PrevPhi).second);
      Previous = PrevPhi->getBackedgeValue()->getDefiningRecipe();
    }

    if (!sinkRecurrenceUsersAfterPrevious(FOR, Previous, VPDT) &&
        !hoistPreviousBeforeFORUsers(FOR, Previous, VPDT))
      return false;

    // Introduce a recipe to combine the incoming and previous values of a
    // fixed-order recurrence.
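    // The splice takes the last lane of the value from the previous iteration
    // (the phi operand) followed by the first VF-1 lanes of the value computed
    // in the current iteration.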
    VPBasicBlock *InsertBlock = Previous->getParent();
    if (isa<VPHeaderPHIRecipe>(Previous))
      LoopBuilder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
    else
      LoopBuilder.setInsertPoint(InsertBlock,
                                 std::next(Previous->getIterator()));

    auto *RecurSplice = cast<VPInstruction>(
        LoopBuilder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
                                 {FOR, FOR->getBackedgeValue()}));

    FOR->replaceAllUsesWith(RecurSplice);
    // Set the first operand of RecurSplice to FOR again, after replacing
    // all users.
    RecurSplice->setOperand(0, FOR);
  }
  return true;
}

static SmallVector<VPUser *> collectUsersRecursively(VPValue *V) {
  SetVector<VPUser *> Users(V->user_begin(), V->user_end());
  for (unsigned I = 0; I != Users.size(); ++I) {
    VPRecipeBase *Cur = cast<VPRecipeBase>(Users[I]);
    if (isa<VPHeaderPHIRecipe>(Cur))
      continue;
    for (VPValue *V : Cur->definedValues())
      Users.insert(V->user_begin(), V->user_end());
  }
  return Users.takeVector();
}

void VPlanTransforms::clearReductionWrapFlags(VPlan &Plan) {
  for (VPRecipeBase &R :
       Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
    auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
    if (!PhiR)
      continue;
    const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
    RecurKind RK = RdxDesc.getRecurrenceKind();
    if (RK != RecurKind::Add && RK != RecurKind::Mul)
      continue;

    for (VPUser *U : collectUsersRecursively(PhiR))
      if (auto *RecWithFlags = dyn_cast<VPRecipeWithIRFlags>(U)) {
        RecWithFlags->dropPoisonGeneratingFlags();
      }
  }
}

/// Try to simplify recipe \p R.
static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) {
  using namespace llvm::VPlanPatternMatch;

  if (auto *Blend = dyn_cast<VPBlendRecipe>(&R)) {
    // Try to remove redundant blend recipes.
    SmallPtrSet<VPValue *, 4> UniqueValues;
    if (Blend->isNormalized() || !match(Blend->getMask(0), m_False()))
      UniqueValues.insert(Blend->getIncomingValue(0));
    for (unsigned I = 1; I != Blend->getNumIncomingValues(); ++I)
      if (!match(Blend->getMask(I), m_False()))
        UniqueValues.insert(Blend->getIncomingValue(I));

    if (UniqueValues.size() == 1) {
      Blend->replaceAllUsesWith(*UniqueValues.begin());
      Blend->eraseFromParent();
      return;
    }

    if (Blend->isNormalized())
      return;

    // Normalize the blend so its first incoming value is used as the initial
    // value, with the others blended into it.

    unsigned StartIndex = 0;
    for (unsigned I = 0; I != Blend->getNumIncomingValues(); ++I) {
      // If a value's mask is used only by the blend, then it can be removed as
      // dead code.
      // TODO: Find the most expensive mask that can be dead-coded, or a mask
      // that's used by multiple blends where it can be removed from them all.
      VPValue *Mask = Blend->getMask(I);
      if (Mask->getNumUsers() == 1 && !match(Mask, m_False())) {
        StartIndex = I;
        break;
      }
    }

    SmallVector<VPValue *, 4> OperandsWithMask;
    OperandsWithMask.push_back(Blend->getIncomingValue(StartIndex));

    for (unsigned I = 0; I != Blend->getNumIncomingValues(); ++I) {
      if (I == StartIndex)
        continue;
      OperandsWithMask.push_back(Blend->getIncomingValue(I));
      OperandsWithMask.push_back(Blend->getMask(I));
    }

    auto *NewBlend = new VPBlendRecipe(
        cast<PHINode>(Blend->getUnderlyingValue()), OperandsWithMask);
    NewBlend->insertBefore(&R);

    VPValue *DeadMask = Blend->getMask(StartIndex);
    Blend->replaceAllUsesWith(NewBlend);
    Blend->eraseFromParent();
    recursivelyDeleteDeadRecipes(DeadMask);
    return;
  }

  VPValue *A;
  if (match(&R, m_Trunc(m_ZExtOrSExt(m_VPValue(A))))) {
    VPValue *Trunc = R.getVPSingleValue();
    Type *TruncTy = TypeInfo.inferScalarType(Trunc);
    Type *ATy = TypeInfo.inferScalarType(A);
    if (TruncTy == ATy) {
      Trunc->replaceAllUsesWith(A);
    } else {
      // Don't replace a scalarizing recipe with a widened cast.
      if (isa<VPReplicateRecipe>(&R))
        return;
      if (ATy->getScalarSizeInBits() < TruncTy->getScalarSizeInBits()) {

        unsigned ExtOpcode = match(R.getOperand(0), m_SExt(m_VPValue()))
                                 ? Instruction::SExt
                                 : Instruction::ZExt;
        auto *VPC =
            new VPWidenCastRecipe(Instruction::CastOps(ExtOpcode), A, TruncTy);
        if (auto *UnderlyingExt = R.getOperand(0)->getUnderlyingValue()) {
          // UnderlyingExt has distinct return type, used to retain legacy cost.
          VPC->setUnderlyingValue(UnderlyingExt);
        }
        VPC->insertBefore(&R);
        Trunc->replaceAllUsesWith(VPC);
      } else if (ATy->getScalarSizeInBits() > TruncTy->getScalarSizeInBits()) {
        auto *VPC = new VPWidenCastRecipe(Instruction::Trunc, A, TruncTy);
        VPC->insertBefore(&R);
        Trunc->replaceAllUsesWith(VPC);
      }
    }
#ifndef NDEBUG
    // Verify that the cached type info for both A and its users is still
    // accurate by comparing it to freshly computed types.
    VPTypeAnalysis TypeInfo2(
        R.getParent()->getPlan()->getCanonicalIV()->getScalarType());
    assert(TypeInfo.inferScalarType(A) == TypeInfo2.inferScalarType(A));
    for (VPUser *U : A->users()) {
      auto *R = cast<VPRecipeBase>(U);
      for (VPValue *VPV : R->definedValues())
        assert(TypeInfo.inferScalarType(VPV) == TypeInfo2.inferScalarType(VPV));
    }
#endif
  }

  // Simplify (X && Y) || (X && !Y) -> X.
  // TODO: Split up into simpler, modular combines: (X && Y) || (X && Z) into X
  // && (Y || Z) and (X || !X) into true. This requires queuing newly created
  // recipes to be visited during simplification.
  VPValue *X, *Y, *X1, *Y1;
  if (match(&R,
            m_c_BinaryOr(m_LogicalAnd(m_VPValue(X), m_VPValue(Y)),
                         m_LogicalAnd(m_VPValue(X1), m_Not(m_VPValue(Y1))))) &&
      X == X1 && Y == Y1) {
    R.getVPSingleValue()->replaceAllUsesWith(X);
    R.eraseFromParent();
    return;
  }

  if (match(&R, m_c_Mul(m_VPValue(A), m_SpecificInt(1))))
    return R.getVPSingleValue()->replaceAllUsesWith(A);

  if (match(&R, m_Not(m_Not(m_VPValue(A)))))
    return R.getVPSingleValue()->replaceAllUsesWith(A);

  // Remove redundant DerivedIVs, that is 0 + A * 1 -> A and 0 + 0 * x -> 0.
  if ((match(&R,
             m_DerivedIV(m_SpecificInt(0), m_VPValue(A), m_SpecificInt(1))) ||
       match(&R,
             m_DerivedIV(m_SpecificInt(0), m_SpecificInt(0), m_VPValue()))) &&
      TypeInfo.inferScalarType(R.getOperand(1)) ==
          TypeInfo.inferScalarType(R.getVPSingleValue()))
    return R.getVPSingleValue()->replaceAllUsesWith(R.getOperand(1));
}

/// Move loop-invariant recipes out of the vector loop region in \p Plan.
static void licm(VPlan &Plan) {
  VPBasicBlock *Preheader = Plan.getVectorPreheader();

  // Return true if we do not know how to (mechanically) hoist a given recipe
  // out of a loop region. Does not address legality concerns such as aliasing
  // or speculation safety.
  auto CannotHoistRecipe = [](VPRecipeBase &R) {
    // Allocas cannot be hoisted.
    auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
    return RepR && RepR->getOpcode() == Instruction::Alloca;
  };

  // Hoist any loop invariant recipes from the vector loop region to the
  // preheader. Perform a shallow traversal of the vector loop region, to
  // exclude recipes in replicate regions.
  VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_shallow(LoopRegion->getEntry()))) {
    for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
      if (CannotHoistRecipe(R))
        continue;
      // TODO: Relax checks in the future, e.g. we could also hoist reads, if
      // their memory location is not modified in the vector loop.
      if (R.mayHaveSideEffects() || R.mayReadFromMemory() || R.isPhi() ||
          any_of(R.operands(), [](VPValue *Op) {
            return !Op->isDefinedOutsideLoopRegions();
          }))
        continue;
      R.moveBefore(*Preheader, Preheader->end());
    }
  }
}

/// Try to simplify the recipes in \p Plan.
static void simplifyRecipes(VPlan &Plan) {
  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
      Plan.getEntry());
  Type *CanonicalIVType = Plan.getCanonicalIV()->getScalarType();
  VPTypeAnalysis TypeInfo(CanonicalIVType);
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
    for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
      simplifyRecipe(R, TypeInfo);
    }
  }
}

void VPlanTransforms::truncateToMinimalBitwidths(
    VPlan &Plan, const MapVector<Instruction *, uint64_t> &MinBWs) {
#ifndef NDEBUG
  // Count the processed recipes and cross check the count later with MinBWs
  // size, to make sure all entries in MinBWs have been handled.
  unsigned NumProcessedRecipes = 0;
#endif
  // Keep track of created truncates, so they can be re-used. Note that we
  // cannot use RAUW after creating a new truncate, as this could make other
  // uses have different types for their operands, making them invalidly
  // typed.
  DenseMap<VPValue *, VPWidenCastRecipe *> ProcessedTruncs;
  Type *CanonicalIVType = Plan.getCanonicalIV()->getScalarType();
  VPTypeAnalysis TypeInfo(CanonicalIVType);
  VPBasicBlock *PH = Plan.getVectorPreheader();
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_deep(Plan.getVectorLoopRegion()))) {
    for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
      if (!isa<VPWidenRecipe, VPWidenCastRecipe, VPReplicateRecipe,
               VPWidenSelectRecipe, VPWidenLoadRecipe>(&R))
        continue;

      VPValue *ResultVPV = R.getVPSingleValue();
      auto *UI = cast_or_null<Instruction>(ResultVPV->getUnderlyingValue());
      unsigned NewResSizeInBits = MinBWs.lookup(UI);
      if (!NewResSizeInBits)
        continue;

#ifndef NDEBUG
      NumProcessedRecipes++;
#endif
      // If the value wasn't vectorized, we must maintain the original scalar
      // type. Skip those here, after incrementing NumProcessedRecipes. Also
      // skip casts which do not need to be handled explicitly here, as
      // redundant casts will be removed during recipe simplification.
      if (isa<VPReplicateRecipe, VPWidenCastRecipe>(&R)) {
#ifndef NDEBUG
        // If any of the operands is a live-in and not used by VPWidenRecipe or
        // VPWidenSelectRecipe, but in MinBWs, make sure it is counted as
        // processed as well. As MinBWs is currently constructed, there is no
        // information about whether recipes are widened or replicated, and in
        // case they are replicated the operands are not truncated. Counting
        // them here ensures we do not miss any recipes in MinBWs.
        // TODO: Remove once the analysis is done on VPlan.
        for (VPValue *Op : R.operands()) {
          if (!Op->isLiveIn())
            continue;
          auto *UV = dyn_cast_or_null<Instruction>(Op->getUnderlyingValue());
          if (UV && MinBWs.contains(UV) && !ProcessedTruncs.contains(Op) &&
              none_of(Op->users(),
                      IsaPred<VPWidenRecipe, VPWidenSelectRecipe>)) {
            // Add an entry to ProcessedTruncs to avoid counting the same
            // operand multiple times.
            ProcessedTruncs[Op] = nullptr;
            NumProcessedRecipes += 1;
          }
        }
#endif
        continue;
      }

      Type *OldResTy = TypeInfo.inferScalarType(ResultVPV);
      unsigned OldResSizeInBits = OldResTy->getScalarSizeInBits();
      assert(OldResTy->isIntegerTy() && "only integer types supported");
      (void)OldResSizeInBits;

      LLVMContext &Ctx = CanonicalIVType->getContext();
      auto *NewResTy = IntegerType::get(Ctx, NewResSizeInBits);

      // Any wrapping introduced by shrinking this operation shouldn't be
      // considered undefined behavior. So, we can't unconditionally copy
      // arithmetic wrapping flags to VPW.
      if (auto *VPW = dyn_cast<VPRecipeWithIRFlags>(&R))
        VPW->dropPoisonGeneratingFlags();

      using namespace llvm::VPlanPatternMatch;
      if (OldResSizeInBits != NewResSizeInBits &&
          !match(&R, m_Binary<Instruction::ICmp>(m_VPValue(), m_VPValue()))) {
        // Extend result to original width.
        auto *Ext =
            new VPWidenCastRecipe(Instruction::ZExt, ResultVPV, OldResTy);
        Ext->insertAfter(&R);
        ResultVPV->replaceAllUsesWith(Ext);
        Ext->setOperand(0, ResultVPV);
        assert(OldResSizeInBits > NewResSizeInBits && "Nothing to shrink?");
      } else {
        assert(
            match(&R, m_Binary<Instruction::ICmp>(m_VPValue(), m_VPValue())) &&
            "Only ICmps should not need extending the result.");
      }

      assert(!isa<VPWidenStoreRecipe>(&R) && "stores cannot be narrowed");
      if (isa<VPWidenLoadRecipe>(&R))
        continue;

      // Shrink operands by introducing truncates as needed.
      unsigned StartIdx = isa<VPWidenSelectRecipe>(&R) ? 1 : 0;
      for (unsigned Idx = StartIdx; Idx != R.getNumOperands(); ++Idx) {
        auto *Op = R.getOperand(Idx);
        unsigned OpSizeInBits =
            TypeInfo.inferScalarType(Op)->getScalarSizeInBits();
        if (OpSizeInBits == NewResSizeInBits)
          continue;
        assert(OpSizeInBits > NewResSizeInBits && "nothing to truncate");
        auto [ProcessedIter, IterIsEmpty] =
            ProcessedTruncs.insert({Op, nullptr});
        VPWidenCastRecipe *NewOp =
            IterIsEmpty
                ? new VPWidenCastRecipe(Instruction::Trunc, Op, NewResTy)
                : ProcessedIter->second;
        R.setOperand(Idx, NewOp);
        if (!IterIsEmpty)
          continue;
        ProcessedIter->second = NewOp;
        if (!Op->isLiveIn()) {
          NewOp->insertBefore(&R);
        } else {
          PH->appendRecipe(NewOp);
#ifndef NDEBUG
          auto *OpInst = dyn_cast<Instruction>(Op->getLiveInIRValue());
          bool IsContained = MinBWs.contains(OpInst);
          NumProcessedRecipes += IsContained;
#endif
        }
      }

    }
  }

  assert(MinBWs.size() == NumProcessedRecipes &&
         "some entries in MinBWs haven't been processed");
}

void VPlanTransforms::optimize(VPlan &Plan) {
  removeRedundantCanonicalIVs(Plan);
  removeRedundantInductionCasts(Plan);

  simplifyRecipes(Plan);
  legalizeAndOptimizeInductions(Plan);
  removeRedundantExpandSCEVRecipes(Plan);
  simplifyRecipes(Plan);
  removeDeadRecipes(Plan);

  createAndOptimizeReplicateRegions(Plan);
  mergeBlocksIntoPredecessors(Plan);
  licm(Plan);
}

// Add a VPActiveLaneMaskPHIRecipe and related recipes to \p Plan and replace
// the loop terminator with a branch-on-cond recipe with the negated
// active-lane-mask as operand. Note that this turns the loop into an
// uncountable one. Only the existing terminator is replaced; all other
// existing recipes/users remain unchanged, except for poison-generating flags
// being dropped from the canonical IV increment. Return the created
// VPActiveLaneMaskPHIRecipe.
//
// The function uses the following definitions:
//
//  %TripCount = DataWithControlFlowWithoutRuntimeCheck ?
//    calculate-trip-count-minus-VF (original TC) : original TC
//  %IncrementValue = DataWithControlFlowWithoutRuntimeCheck ?
//     CanonicalIVPhi : CanonicalIVIncrement
//  %StartV is the canonical induction start value.
//
// The function adds the following recipes:
//
// vector.ph:
//   %TripCount = calculate-trip-count-minus-VF (original TC)
//       [if DataWithControlFlowWithoutRuntimeCheck]
//   %EntryInc = canonical-iv-increment-for-part %StartV
//   %EntryALM = active-lane-mask %EntryInc, %TripCount
//
// vector.body:
//   ...
//   %P = active-lane-mask-phi [ %EntryALM, %vector.ph ], [ %ALM, %vector.body ]
//   ...
//   %InLoopInc = canonical-iv-increment-for-part %IncrementValue
//   %ALM = active-lane-mask %InLoopInc, TripCount
//   %Negated = Not %ALM
//   branch-on-cond %Negated
//
static VPActiveLaneMaskPHIRecipe *addVPLaneMaskPhiAndUpdateExitBranch(
    VPlan &Plan, bool DataAndControlFlowWithoutRuntimeCheck) {
  VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
  VPBasicBlock *EB = TopRegion->getExitingBasicBlock();
  auto *CanonicalIVPHI = Plan.getCanonicalIV();
  VPValue *StartV = CanonicalIVPHI->getStartValue();

  auto *CanonicalIVIncrement =
      cast<VPInstruction>(CanonicalIVPHI->getBackedgeValue());
  // TODO: Check if dropping the flags is needed if
  // !DataAndControlFlowWithoutRuntimeCheck.
  CanonicalIVIncrement->dropPoisonGeneratingFlags();
  DebugLoc DL = CanonicalIVIncrement->getDebugLoc();
  // We can't use StartV directly in the ActiveLaneMask VPInstruction, since
  // we have to take unrolling into account. Each part needs to start at
  //   Part * VF
  auto *VecPreheader = Plan.getVectorPreheader();
  VPBuilder Builder(VecPreheader);

  // Create the ActiveLaneMask instruction using the correct start values.
  VPValue *TC = Plan.getTripCount();

  VPValue *TripCount, *IncrementValue;
  if (!DataAndControlFlowWithoutRuntimeCheck) {
    // When the loop is guarded by a runtime overflow check for the loop
    // induction variable increment by VF, we can increment the value before
    // the get.active.lane.mask and use the unmodified tripcount.
    IncrementValue = CanonicalIVIncrement;
    TripCount = TC;
  } else {
    // When avoiding a runtime check, the active.lane.mask inside the loop
    // uses a modified trip count and the induction variable increment is
    // done after the active.lane.mask intrinsic is called.
    IncrementValue = CanonicalIVPHI;
    TripCount = Builder.createNaryOp(VPInstruction::CalculateTripCountMinusVF,
                                     {TC}, DL);
  }
  auto *EntryIncrement = Builder.createOverflowingOp(
      VPInstruction::CanonicalIVIncrementForPart, {StartV}, {false, false}, DL,
      "index.part.next");

  // Create the active lane mask instruction in the VPlan preheader.
  auto *EntryALM =
      Builder.createNaryOp(VPInstruction::ActiveLaneMask, {EntryIncrement, TC},
                           DL, "active.lane.mask.entry");

  // Now create the ActiveLaneMaskPhi recipe in the main loop using the
  // preheader ActiveLaneMask instruction.
  auto *LaneMaskPhi = new VPActiveLaneMaskPHIRecipe(EntryALM, DebugLoc());
  LaneMaskPhi->insertAfter(CanonicalIVPHI);

  // Create the active lane mask for the next iteration of the loop before the
  // original terminator.
  VPRecipeBase *OriginalTerminator = EB->getTerminator();
  Builder.setInsertPoint(OriginalTerminator);
  auto *InLoopIncrement =
      Builder.createOverflowingOp(VPInstruction::CanonicalIVIncrementForPart,
                                  {IncrementValue}, {false, false}, DL);
  auto *ALM = Builder.createNaryOp(VPInstruction::ActiveLaneMask,
                                   {InLoopIncrement, TripCount}, DL,
                                   "active.lane.mask.next");
  LaneMaskPhi->addOperand(ALM);

  // Replace the original terminator with BranchOnCond. We have to invert the
  // mask here because a true condition means jumping to the exit block.
  auto *NotMask = Builder.createNot(ALM, DL);
  Builder.createNaryOp(VPInstruction::BranchOnCond, {NotMask}, DL);
  OriginalTerminator->eraseFromParent();
  return LaneMaskPhi;
}

/// Collect all VPValues representing a header mask through the (ICMP_ULE,
/// WideCanonicalIV, backedge-taken-count) pattern.
/// TODO: Introduce explicit recipe for header-mask instead of searching
/// for the header-mask pattern manually.
static SmallVector<VPValue *> collectAllHeaderMasks(VPlan &Plan) {
  SmallVector<VPValue *> WideCanonicalIVs;
  auto *FoundWidenCanonicalIVUser =
      find_if(Plan.getCanonicalIV()->users(),
              [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); });
  assert(count_if(Plan.getCanonicalIV()->users(),
                  [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); }) <=
             1 &&
         "Must have at most one VPWideCanonicalIVRecipe");
  if (FoundWidenCanonicalIVUser != Plan.getCanonicalIV()->users().end()) {
    auto *WideCanonicalIV =
        cast<VPWidenCanonicalIVRecipe>(*FoundWidenCanonicalIVUser);
    WideCanonicalIVs.push_back(WideCanonicalIV);
  }

  // Also include VPWidenIntOrFpInductionRecipes that represent a widened
  // version of the canonical induction.
  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
    auto *WidenOriginalIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
    if (WidenOriginalIV && WidenOriginalIV->isCanonical())
      WideCanonicalIVs.push_back(WidenOriginalIV);
  }

  // Walk users of wide canonical IVs and collect all compares of the form
  // (ICMP_ULE, WideCanonicalIV, backedge-taken-count).
  SmallVector<VPValue *> HeaderMasks;
  for (auto *Wide : WideCanonicalIVs) {
    for (VPUser *U : SmallVector<VPUser *>(Wide->users())) {
      auto *HeaderMask = dyn_cast<VPInstruction>(U);
      if (!HeaderMask || !vputils::isHeaderMask(HeaderMask, Plan))
        continue;

      assert(HeaderMask->getOperand(0) == Wide &&
             "WidenCanonicalIV must be the first operand of the compare");
      HeaderMasks.push_back(HeaderMask);
    }
  }
  return HeaderMasks;
}

void VPlanTransforms::addActiveLaneMask(
    VPlan &Plan, bool UseActiveLaneMaskForControlFlow,
    bool DataAndControlFlowWithoutRuntimeCheck) {
  assert((!DataAndControlFlowWithoutRuntimeCheck ||
          UseActiveLaneMaskForControlFlow) &&
         "DataAndControlFlowWithoutRuntimeCheck implies "
         "UseActiveLaneMaskForControlFlow");

  auto *FoundWidenCanonicalIVUser =
      find_if(Plan.getCanonicalIV()->users(),
              [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); });
  assert(FoundWidenCanonicalIVUser &&
         "Must have widened canonical IV when tail folding!");
  auto *WideCanonicalIV =
      cast<VPWidenCanonicalIVRecipe>(*FoundWidenCanonicalIVUser);
  VPSingleDefRecipe *LaneMask;
  if (UseActiveLaneMaskForControlFlow) {
    LaneMask = addVPLaneMaskPhiAndUpdateExitBranch(
        Plan, DataAndControlFlowWithoutRuntimeCheck);
  } else {
    VPBuilder B = VPBuilder::getToInsertAfter(WideCanonicalIV);
    LaneMask = B.createNaryOp(VPInstruction::ActiveLaneMask,
                              {WideCanonicalIV, Plan.getTripCount()}, nullptr,
                              "active.lane.mask");
  }

  // Walk users of WideCanonicalIV and replace all compares of the form
  // (ICMP_ULE, WideCanonicalIV, backedge-taken-count) with an
  // active-lane-mask.
  for (VPValue *HeaderMask : collectAllHeaderMasks(Plan))
    HeaderMask->replaceAllUsesWith(LaneMask);
}

/// Try to convert \p CurRecipe to a corresponding EVL-based recipe. Returns
/// nullptr if no EVL-based recipe could be created.
/// \p HeaderMask Header Mask.
/// \p CurRecipe Recipe to be transformed.
/// \p TypeInfo VPlan-based type analysis.
/// \p AllOneMask The vector mask parameter of vector-predication intrinsics.
/// \p EVL The explicit vector length parameter of vector-predication
/// intrinsics.
static VPRecipeBase *createEVLRecipe(VPValue *HeaderMask,
                                     VPRecipeBase &CurRecipe,
                                     VPTypeAnalysis &TypeInfo,
                                     VPValue &AllOneMask, VPValue &EVL) {
  using namespace llvm::VPlanPatternMatch;
  auto GetNewMask = [&](VPValue *OrigMask) -> VPValue * {
    assert(OrigMask && "Unmasked recipe when folding tail");
    return HeaderMask == OrigMask ? nullptr : OrigMask;
  };

  return TypeSwitch<VPRecipeBase *, VPRecipeBase *>(&CurRecipe)
      .Case<VPWidenLoadRecipe>([&](VPWidenLoadRecipe *L) {
        VPValue *NewMask = GetNewMask(L->getMask());
        return new VPWidenLoadEVLRecipe(*L, EVL, NewMask);
      })
      .Case<VPWidenStoreRecipe>([&](VPWidenStoreRecipe *S) {
        VPValue *NewMask = GetNewMask(S->getMask());
        return new VPWidenStoreEVLRecipe(*S, EVL, NewMask);
      })
      .Case<VPWidenRecipe>([&](VPWidenRecipe *W) -> VPRecipeBase * {
        unsigned Opcode = W->getOpcode();
        if (!Instruction::isBinaryOp(Opcode) && !Instruction::isUnaryOp(Opcode))
          return nullptr;
        return new VPWidenEVLRecipe(*W, EVL);
      })
      .Case<VPReductionRecipe>([&](VPReductionRecipe *Red) {
        VPValue *NewMask = GetNewMask(Red->getCondOp());
        return new VPReductionEVLRecipe(*Red, EVL, NewMask);
      })
      .Case<VPWidenIntrinsicRecipe, VPWidenCastRecipe>(
          [&](auto *CR) -> VPRecipeBase * {
            Intrinsic::ID VPID = Intrinsic::not_intrinsic;
            if (auto *CallR = dyn_cast<VPWidenIntrinsicRecipe>(CR))
              VPID =
                  VPIntrinsic::getForIntrinsic(CallR->getVectorIntrinsicID());
            else if (auto *CastR = dyn_cast<VPWidenCastRecipe>(CR))
              VPID = VPIntrinsic::getForOpcode(CastR->getOpcode());
            assert(VPID != Intrinsic::not_intrinsic && "Expected VP intrinsic");
            assert(VPIntrinsic::getMaskParamPos(VPID) &&
                   VPIntrinsic::getVectorLengthParamPos(VPID) &&
                   "Expected VP intrinsic");

            SmallVector<VPValue *> Ops(CR->operands());
            Ops.push_back(&AllOneMask);
            Ops.push_back(&EVL);
            return new VPWidenIntrinsicRecipe(
                VPID, Ops, TypeInfo.inferScalarType(CR), CR->getDebugLoc());
          })
      .Case<VPWidenSelectRecipe>([&](VPWidenSelectRecipe *Sel) {
        SmallVector<VPValue *> Ops(Sel->operands());
        Ops.push_back(&EVL);
        return new VPWidenIntrinsicRecipe(Intrinsic::vp_select, Ops,
                                          TypeInfo.inferScalarType(Sel),
                                          Sel->getDebugLoc());
      })
      .Case<VPInstruction>([&](VPInstruction *VPI) -> VPRecipeBase * {
        VPValue *LHS, *RHS;
        // Transform a select with a header mask condition
        //   select(header_mask, LHS, RHS)
        // into a vector predication merge
        //   vp.merge(all-true, LHS, RHS, EVL).
        if (!match(VPI, m_Select(m_Specific(HeaderMask), m_VPValue(LHS),
                                 m_VPValue(RHS))))
          return nullptr;
        // Use all true as the condition because this transformation is
        // limited to selects whose condition is a header mask.
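        // With an all-true condition, vp.merge produces LHS for lanes below
        // EVL and RHS for lanes at or above EVL; informally, lanes at or above
        // EVL are not consumed by the EVL-based recipes created here, so this
        // preserves the behaviour of the original select (a sketch of the
        // reasoning, not a formal argument).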
        return new VPWidenIntrinsicRecipe(
            Intrinsic::vp_merge, {&AllOneMask, LHS, RHS, &EVL},
            TypeInfo.inferScalarType(LHS), VPI->getDebugLoc());
      })
      .Default([&](VPRecipeBase *R) { return nullptr; });
}

/// Replace recipes with their EVL variants.
static void transformRecipestoEVLRecipes(VPlan &Plan, VPValue &EVL) {
  Type *CanonicalIVType = Plan.getCanonicalIV()->getScalarType();
  VPTypeAnalysis TypeInfo(CanonicalIVType);
  LLVMContext &Ctx = CanonicalIVType->getContext();
  VPValue *AllOneMask = Plan.getOrAddLiveIn(ConstantInt::getTrue(Ctx));

  for (VPUser *U : Plan.getVF().users()) {
    if (auto *R = dyn_cast<VPReverseVectorPointerRecipe>(U))
      R->setOperand(1, &EVL);
  }

  SmallVector<VPRecipeBase *> ToErase;

  for (VPValue *HeaderMask : collectAllHeaderMasks(Plan)) {
    for (VPUser *U : collectUsersRecursively(HeaderMask)) {
      auto *CurRecipe = cast<VPRecipeBase>(U);
      VPRecipeBase *EVLRecipe =
          createEVLRecipe(HeaderMask, *CurRecipe, TypeInfo, *AllOneMask, EVL);
      if (!EVLRecipe)
        continue;

      [[maybe_unused]] unsigned NumDefVal = EVLRecipe->getNumDefinedValues();
      assert(NumDefVal == CurRecipe->getNumDefinedValues() &&
             "New recipe must define the same number of values as the "
             "original.");
      assert(
          NumDefVal <= 1 &&
          "Only supports recipes with a single definition or without users.");
      EVLRecipe->insertBefore(CurRecipe);
      if (isa<VPSingleDefRecipe, VPWidenLoadEVLRecipe>(EVLRecipe)) {
        VPValue *CurVPV = CurRecipe->getVPSingleValue();
        CurVPV->replaceAllUsesWith(EVLRecipe->getVPSingleValue());
      }
      // Defer erasing recipes till the end so that we don't invalidate the
      // VPTypeAnalysis cache.
      ToErase.push_back(CurRecipe);
    }
  }

  for (VPRecipeBase *R : reverse(ToErase)) {
    SmallVector<VPValue *> PossiblyDead(R->operands());
    R->eraseFromParent();
    for (VPValue *Op : PossiblyDead)
      recursivelyDeleteDeadRecipes(Op);
  }
}

/// Add a VPEVLBasedIVPHIRecipe and related recipes to \p Plan and
/// replace all uses of VPCanonicalIVPHIRecipe, except the canonical IV
/// increment, with a VPEVLBasedIVPHIRecipe. After this transformation the
/// VPCanonicalIVPHIRecipe is only used for counting loop iterations.
///
/// The function uses the following definitions:
///  %StartV is the canonical induction start value.
///
/// The function adds the following recipes:
///
/// vector.ph:
/// ...
///
/// vector.body:
/// ...
/// %EVLPhi = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ],
///                                               [ %NextEVLIV, %vector.body ]
/// %AVL = sub original TC, %EVLPhi
/// %VPEVL = EXPLICIT-VECTOR-LENGTH %AVL
/// ...
/// %NextEVLIV = add IVSize (cast i32 %VPEVL to IVSize), %EVLPhi
/// ...
///
/// If MaxSafeElements is provided, the function adds the following recipes:
/// vector.ph:
/// ...
///
/// vector.body:
/// ...
/// %EVLPhi = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ],
///                                               [ %NextEVLIV, %vector.body ]
/// %AVL = sub original TC, %EVLPhi
/// %cmp = cmp ult %AVL, MaxSafeElements
/// %SAFE_AVL = select %cmp, %AVL, MaxSafeElements
/// %VPEVL = EXPLICIT-VECTOR-LENGTH %SAFE_AVL
/// ...
/// %NextEVLIV = add IVSize (cast i32 %VPEVL to IVSize), %EVLPhi
/// ...
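///
/// For illustration only (values chosen arbitrarily): with original TC = 70
/// and MaxSafeElements = 32, the first iteration computes %AVL = 70 - 0 = 70,
/// clamps it to %SAFE_AVL = 32 and requests an explicit vector length for at
/// most 32 elements.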
///
bool VPlanTransforms::tryAddExplicitVectorLength(
    VPlan &Plan, const std::optional<unsigned> &MaxSafeElements) {
  VPBasicBlock *Header = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  // The transform updates all users of inductions to work based on EVL,
  // instead of the VF directly. At the moment, widened inductions cannot be
  // updated, so bail out if the plan contains any.
  bool ContainsWidenInductions = any_of(
      Header->phis(),
      IsaPred<VPWidenIntOrFpInductionRecipe, VPWidenPointerInductionRecipe>);
  if (ContainsWidenInductions)
    return false;

  auto *CanonicalIVPHI = Plan.getCanonicalIV();
  VPValue *StartV = CanonicalIVPHI->getStartValue();

  // Create the ExplicitVectorLengthPhi recipe in the main loop.
  auto *EVLPhi = new VPEVLBasedIVPHIRecipe(StartV, DebugLoc());
  EVLPhi->insertAfter(CanonicalIVPHI);
  VPBuilder Builder(Header, Header->getFirstNonPhi());
  // Compute original TC - IV as the AVL (application vector length).
  VPValue *AVL = Builder.createNaryOp(
      Instruction::Sub, {Plan.getTripCount(), EVLPhi}, DebugLoc(), "avl");
  if (MaxSafeElements) {
    // Clamp AVL to MaxSafeElements so a single iteration never processes more
    // elements than the maximum safe dependence distance allows.
    VPValue *AVLSafe = Plan.getOrAddLiveIn(
        ConstantInt::get(CanonicalIVPHI->getScalarType(), *MaxSafeElements));
    VPValue *Cmp = Builder.createICmp(ICmpInst::ICMP_ULT, AVL, AVLSafe);
    AVL = Builder.createSelect(Cmp, AVL, AVLSafe, DebugLoc(), "safe_avl");
  }
  auto *VPEVL = Builder.createNaryOp(VPInstruction::ExplicitVectorLength, AVL,
                                     DebugLoc());

  auto *CanonicalIVIncrement =
      cast<VPInstruction>(CanonicalIVPHI->getBackedgeValue());
  VPSingleDefRecipe *OpVPEVL = VPEVL;
  if (unsigned IVSize = CanonicalIVPHI->getScalarType()->getScalarSizeInBits();
      IVSize != 32) {
    OpVPEVL = new VPScalarCastRecipe(IVSize < 32 ? Instruction::Trunc
                                                 : Instruction::ZExt,
                                     OpVPEVL, CanonicalIVPHI->getScalarType());
    OpVPEVL->insertBefore(CanonicalIVIncrement);
  }
  auto *NextEVLIV =
      new VPInstruction(Instruction::Add, {OpVPEVL, EVLPhi},
                        {CanonicalIVIncrement->hasNoUnsignedWrap(),
                         CanonicalIVIncrement->hasNoSignedWrap()},
                        CanonicalIVIncrement->getDebugLoc(), "index.evl.next");
  NextEVLIV->insertBefore(CanonicalIVIncrement);
  EVLPhi->addOperand(NextEVLIV);

  transformRecipestoEVLRecipes(Plan, *VPEVL);

  // Replace all uses of VPCanonicalIVPHIRecipe by
  // VPEVLBasedIVPHIRecipe except for the canonical IV increment.
  CanonicalIVPHI->replaceAllUsesWith(EVLPhi);
  CanonicalIVIncrement->setOperand(0, CanonicalIVPHI);
  // TODO: support unroll factor > 1.
  Plan.setUF(1);
  return true;
}

void VPlanTransforms::dropPoisonGeneratingRecipes(
    VPlan &Plan, function_ref<bool(BasicBlock *)> BlockNeedsPredication) {
  // Collect recipes in the backward slice of `Root` that may generate a poison
  // value that is used after vectorization.
  SmallPtrSet<VPRecipeBase *, 16> Visited;
  auto CollectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
    SmallVector<VPRecipeBase *, 16> Worklist;
    Worklist.push_back(Root);

    // Traverse the backward slice of Root through its use-def chain.
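    // The loop below is a standard worklist walk: visit a recipe, drop or
    // rewrite its poison-generating flags if it carries IR flags, then enqueue
    // the defining recipes of its operands, pruning at recipes that terminate
    // the slice (memory, interleave, scalar IV steps, header phis).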
    while (!Worklist.empty()) {
      VPRecipeBase *CurRec = Worklist.pop_back_val();

      if (!Visited.insert(CurRec).second)
        continue;

      // Prune search if we find another recipe generating a widen memory
      // instruction. Widen memory instructions involved in address computation
      // will lead to gather/scatter instructions, which don't need to be
      // handled.
      if (isa<VPWidenMemoryRecipe, VPInterleaveRecipe, VPScalarIVStepsRecipe,
              VPHeaderPHIRecipe>(CurRec))
        continue;

      // This recipe contributes to the address computation of a widen
      // load/store. If the underlying instruction has poison-generating flags,
      // drop them directly.
      if (auto *RecWithFlags = dyn_cast<VPRecipeWithIRFlags>(CurRec)) {
        VPValue *A, *B;
        using namespace llvm::VPlanPatternMatch;
        // Dropping disjoint from an OR may yield incorrect results, as some
        // analysis may have converted it to an Add implicitly (e.g. SCEV used
        // for dependence analysis). Instead, replace it with an equivalent Add.
        // This is possible as all users of the disjoint OR only access lanes
        // where the operands are disjoint or poison otherwise.
        if (match(RecWithFlags, m_BinaryOr(m_VPValue(A), m_VPValue(B))) &&
            RecWithFlags->isDisjoint()) {
          VPBuilder Builder(RecWithFlags);
          VPInstruction *New = Builder.createOverflowingOp(
              Instruction::Add, {A, B}, {false, false},
              RecWithFlags->getDebugLoc());
          New->setUnderlyingValue(RecWithFlags->getUnderlyingValue());
          RecWithFlags->replaceAllUsesWith(New);
          RecWithFlags->eraseFromParent();
          CurRec = New;
        } else
          RecWithFlags->dropPoisonGeneratingFlags();
      } else {
        Instruction *Instr = dyn_cast_or_null<Instruction>(
            CurRec->getVPSingleValue()->getUnderlyingValue());
        (void)Instr;
        assert((!Instr || !Instr->hasPoisonGeneratingFlags()) &&
               "found instruction with poison generating flags not covered by "
               "VPRecipeWithIRFlags");
      }

      // Add new definitions to the worklist.
      for (VPValue *Operand : CurRec->operands())
        if (VPRecipeBase *OpDef = Operand->getDefiningRecipe())
          Worklist.push_back(OpDef);
    }
  });

  // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a
  // VPWidenMemoryRecipe or VPInterleaveRecipe.
  auto Iter = vp_depth_first_deep(Plan.getEntry());
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
    for (VPRecipeBase &Recipe : *VPBB) {
      if (auto *WidenRec = dyn_cast<VPWidenMemoryRecipe>(&Recipe)) {
        Instruction &UnderlyingInstr = WidenRec->getIngredient();
        VPRecipeBase *AddrDef = WidenRec->getAddr()->getDefiningRecipe();
        if (AddrDef && WidenRec->isConsecutive() &&
            BlockNeedsPredication(UnderlyingInstr.getParent()))
          CollectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
      } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
        VPRecipeBase *AddrDef = InterleaveRec->getAddr()->getDefiningRecipe();
        if (AddrDef) {
          // Check if any member of the interleave group needs predication.
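          // An interleave group is lowered to a single wide access covering
          // all members, so treat it as predicated if any member's block
          // needs predication.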
          const InterleaveGroup<Instruction> *InterGroup =
              InterleaveRec->getInterleaveGroup();
          bool NeedPredication = false;
          for (int I = 0, NumMembers = InterGroup->getNumMembers();
               I < NumMembers; ++I) {
            Instruction *Member = InterGroup->getMember(I);
            if (Member)
              NeedPredication |= BlockNeedsPredication(Member->getParent());
          }

          if (NeedPredication)
            CollectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
        }
      }
    }
  }
}

void VPlanTransforms::createInterleaveGroups(
    VPlan &Plan,
    const SmallPtrSetImpl<const InterleaveGroup<Instruction> *>
        &InterleaveGroups,
    VPRecipeBuilder &RecipeBuilder, bool ScalarEpilogueAllowed) {
  if (InterleaveGroups.empty())
    return;

  // Interleave memory: for each Interleave Group we marked earlier as relevant
  // for this VPlan, replace the Recipes widening its memory instructions with
  // a single VPInterleaveRecipe at its insertion point.
  VPDominatorTree VPDT;
  VPDT.recalculate(Plan);
  for (const auto *IG : InterleaveGroups) {
    SmallVector<VPValue *, 4> StoredValues;
    for (unsigned i = 0; i < IG->getFactor(); ++i)
      if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
        auto *StoreR = cast<VPWidenStoreRecipe>(RecipeBuilder.getRecipe(SI));
        StoredValues.push_back(StoreR->getStoredValue());
      }

    bool NeedsMaskForGaps =
        IG->requiresScalarEpilogue() && !ScalarEpilogueAllowed;

    Instruction *IRInsertPos = IG->getInsertPos();
    auto *InsertPos =
        cast<VPWidenMemoryRecipe>(RecipeBuilder.getRecipe(IRInsertPos));

    // Get or create the start address for the interleave group.
    auto *Start =
        cast<VPWidenMemoryRecipe>(RecipeBuilder.getRecipe(IG->getMember(0)));
    VPValue *Addr = Start->getAddr();
    VPRecipeBase *AddrDef = Addr->getDefiningRecipe();
    if (AddrDef && !VPDT.properlyDominates(AddrDef, InsertPos)) {
      // TODO: Hoist Addr's defining recipe (and any operands as needed) to
      // InsertPos or sink loads above zero members to join it.
      bool InBounds = false;
      if (auto *Gep = dyn_cast<GetElementPtrInst>(
              getLoadStorePointerOperand(IRInsertPos)->stripPointerCasts()))
        InBounds = Gep->isInBounds();

      // We cannot re-use the address of member zero because it does not
      // dominate the insert position. Instead, use the address of the insert
      // position and create a PtrAdd adjusting it to the address of member
      // zero.
      assert(IG->getIndex(IRInsertPos) != 0 &&
             "index of insert position shouldn't be zero");
      auto &DL = IRInsertPos->getDataLayout();
      APInt Offset(32,
                   DL.getTypeAllocSize(getLoadStoreType(IRInsertPos)) *
                       IG->getIndex(IRInsertPos),
                   /*IsSigned=*/true);
      VPValue *OffsetVPV = Plan.getOrAddLiveIn(
          ConstantInt::get(IRInsertPos->getParent()->getContext(), -Offset));
      VPBuilder B(InsertPos);
      Addr = InBounds ? B.createInBoundsPtrAdd(InsertPos->getAddr(), OffsetVPV)
                      : B.createPtrAdd(InsertPos->getAddr(), OffsetVPV);
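      // Worked example (illustrative only): if the insert position is member 2
      // of a group of i32 accesses, the offset is 2 * 4 = 8 bytes, so the
      // start address becomes a ptradd of the insert position's address by -8,
      // i.e. the address of member 0.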
    }
    auto *VPIG = new VPInterleaveRecipe(IG, Addr, StoredValues,
                                        InsertPos->getMask(), NeedsMaskForGaps);
    VPIG->insertBefore(InsertPos);

    unsigned J = 0;
    for (unsigned i = 0; i < IG->getFactor(); ++i)
      if (Instruction *Member = IG->getMember(i)) {
        VPRecipeBase *MemberR = RecipeBuilder.getRecipe(Member);
        if (!Member->getType()->isVoidTy()) {
          VPValue *OriginalV = MemberR->getVPSingleValue();
          OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
          J++;
        }
        MemberR->eraseFromParent();
      }
  }
}

void VPlanTransforms::convertToConcreteRecipes(VPlan &Plan) {
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_deep(Plan.getEntry()))) {
    for (VPRecipeBase &R : make_early_inc_range(VPBB->phis())) {
      if (!isa<VPCanonicalIVPHIRecipe, VPEVLBasedIVPHIRecipe>(&R))
        continue;
      auto *PhiR = cast<VPHeaderPHIRecipe>(&R);
      StringRef Name =
          isa<VPCanonicalIVPHIRecipe>(PhiR) ? "index" : "evl.based.iv";
      auto *ScalarR =
          new VPScalarPHIRecipe(PhiR->getStartValue(), PhiR->getBackedgeValue(),
                                PhiR->getDebugLoc(), Name);
      ScalarR->insertBefore(PhiR);
      PhiR->replaceAllUsesWith(ScalarR);
      PhiR->eraseFromParent();
    }
  }
}

void VPlanTransforms::handleUncountableEarlyExit(
    VPlan &Plan, ScalarEvolution &SE, Loop *OrigLoop,
    BasicBlock *UncountableExitingBlock, VPRecipeBuilder &RecipeBuilder) {
  VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
  auto *LatchVPBB = cast<VPBasicBlock>(LoopRegion->getExiting());
  VPBuilder Builder(LatchVPBB->getTerminator());
  auto *MiddleVPBB = Plan.getMiddleBlock();
  VPValue *IsEarlyExitTaken = nullptr;

  // Process the uncountable exiting block. Update IsEarlyExitTaken, which
  // tracks if the uncountable early exit has been taken. Also split the middle
  // block and have it conditionally branch to the early exit block if
  // EarlyExitTaken.
  auto *EarlyExitingBranch =
      cast<BranchInst>(UncountableExitingBlock->getTerminator());
  BasicBlock *TrueSucc = EarlyExitingBranch->getSuccessor(0);
  BasicBlock *FalseSucc = EarlyExitingBranch->getSuccessor(1);

  // The early exit block may or may not be the same as the "countable" exit
  // block. Create a new VPIRBB for the early exit block in case it is distinct
  // from the countable exit block.
  // TODO: Introduce both exit blocks during VPlan skeleton construction.
  VPIRBasicBlock *VPEarlyExitBlock;
  if (OrigLoop->getUniqueExitBlock()) {
    VPEarlyExitBlock = cast<VPIRBasicBlock>(MiddleVPBB->getSuccessors()[0]);
  } else {
    VPEarlyExitBlock = VPIRBasicBlock::fromBasicBlock(
        !OrigLoop->contains(TrueSucc) ? TrueSucc : FalseSucc);
  }

  VPValue *EarlyExitNotTakenCond = RecipeBuilder.getBlockInMask(
      OrigLoop->contains(TrueSucc) ? TrueSucc : FalseSucc);
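  // The block-in mask of the in-loop successor holds the lanes for which the
  // early exit is *not* taken; negating it below yields the lanes that do take
  // the early exit, and AnyOf reduces that to a single exit condition.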
  auto *EarlyExitTakenCond = Builder.createNot(EarlyExitNotTakenCond);
  IsEarlyExitTaken =
      Builder.createNaryOp(VPInstruction::AnyOf, {EarlyExitTakenCond});

  VPBasicBlock *NewMiddle = new VPBasicBlock("middle.split");
  VPBlockUtils::insertOnEdge(LoopRegion, MiddleVPBB, NewMiddle);
  VPBlockUtils::connectBlocks(NewMiddle, VPEarlyExitBlock);
  NewMiddle->swapSuccessors();

  VPBuilder MiddleBuilder(NewMiddle);
  MiddleBuilder.createNaryOp(VPInstruction::BranchOnCond, {IsEarlyExitTaken});

  // Replace the condition controlling the non-early exit from the vector loop
  // with one that exits if either the original latch condition is true or the
  // early exit has been taken.
  auto *LatchExitingBranch = cast<VPInstruction>(LatchVPBB->getTerminator());
  assert(LatchExitingBranch->getOpcode() == VPInstruction::BranchOnCount &&
         "Unexpected terminator");
  auto *IsLatchExitTaken =
      Builder.createICmp(CmpInst::ICMP_EQ, LatchExitingBranch->getOperand(0),
                         LatchExitingBranch->getOperand(1));
  auto *AnyExitTaken = Builder.createNaryOp(
      Instruction::Or, {IsEarlyExitTaken, IsLatchExitTaken});
  Builder.createNaryOp(VPInstruction::BranchOnCond, AnyExitTaken);
  LatchExitingBranch->eraseFromParent();
}
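// Sketch of the latch terminator produced by handleUncountableEarlyExit above
// (exposition only; operand names are placeholders):
//   %IsLatchExitTaken = icmp eq <canonical IV increment>, <vector trip count>
//   %AnyExitTaken = or %IsEarlyExitTaken, %IsLatchExitTaken
//   branch-on-cond %AnyExitTaken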