//===- VPlan.cpp - Vectorizer Plan ----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This is the LLVM vectorization plan. It represents a candidate for
/// vectorization, allowing us to plan and optimize how to vectorize a given
/// loop before generating LLVM-IR.
/// The vectorizer uses vectorization plans to estimate the costs of potential
/// candidates and, if profitable, to execute the desired plan, generating
/// vector LLVM-IR code.
///
//===----------------------------------------------------------------------===//

#include "VPlan.h"
#include "VPlanCFG.h"
#include "VPlanDominatorTree.h"
#include "VPlanPatternMatch.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GenericDomTreeConstruction.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include <cassert>
#include <string>
#include <vector>

using namespace llvm;
using namespace llvm::VPlanPatternMatch;

namespace llvm {
extern cl::opt<bool> EnableVPlanNativePath;
}

#define DEBUG_TYPE "vplan"

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
raw_ostream &llvm::operator<<(raw_ostream &OS, const VPValue &V) {
  const VPInstruction *Instr = dyn_cast<VPInstruction>(&V);
  VPSlotTracker SlotTracker(
      (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
  V.print(OS, SlotTracker);
  return OS;
}
#endif
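
// Compute an IR value for this lane at runtime: a constant lane index for
// Kind::First lanes, or an offset from the end of a scalable vector
// (RuntimeVF - (MinVF - Lane)) for Kind::ScalableLast lanes.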
Value *VPLane::getAsRuntimeExpr(IRBuilderBase &Builder,
                                const ElementCount &VF) const {
  switch (LaneKind) {
  case VPLane::Kind::ScalableLast:
    // Lane = RuntimeVF - VF.getKnownMinValue() + Lane
    return Builder.CreateSub(getRuntimeVF(Builder, Builder.getInt32Ty(), VF),
                             Builder.getInt32(VF.getKnownMinValue() - Lane));
  case VPLane::Kind::First:
    return Builder.getInt32(Lane);
  }
  llvm_unreachable("Unknown lane kind");
}

VPValue::VPValue(const unsigned char SC, Value *UV, VPDef *Def)
    : SubclassID(SC), UnderlyingVal(UV), Def(Def) {
  if (Def)
    Def->addDefinedValue(this);
}

VPValue::~VPValue() {
  assert(Users.empty() && "trying to delete a VPValue with remaining users");
  if (Def)
    Def->removeDefinedValue(this);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPValue::print(raw_ostream &OS, VPSlotTracker &SlotTracker) const {
  if (const VPRecipeBase *R = dyn_cast_or_null<VPRecipeBase>(Def))
    R->print(OS, "", SlotTracker);
  else
    printAsOperand(OS, SlotTracker);
}

void VPValue::dump() const {
  const VPRecipeBase *Instr = dyn_cast_or_null<VPRecipeBase>(this->Def);
  VPSlotTracker SlotTracker(
      (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
  print(dbgs(), SlotTracker);
  dbgs() << "\n";
}

void VPDef::dump() const {
  const VPRecipeBase *Instr = dyn_cast_or_null<VPRecipeBase>(this);
  VPSlotTracker SlotTracker(
      (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
  print(dbgs(), "", SlotTracker);
  dbgs() << "\n";
}
#endif

VPRecipeBase *VPValue::getDefiningRecipe() {
  return cast_or_null<VPRecipeBase>(Def);
}

const VPRecipeBase *VPValue::getDefiningRecipe() const {
  return cast_or_null<VPRecipeBase>(Def);
}

// Get the top-most entry block of \p Start. This is the entry block of the
// containing VPlan. This function is templated to support both const and
// non-const blocks.
template <typename T> static T *getPlanEntry(T *Start) {
  T *Next = Start;
  T *Current = Start;
  while ((Next = Next->getParent()))
    Current = Next;

  SmallSetVector<T *, 8> WorkList;
  WorkList.insert(Current);

  for (unsigned i = 0; i < WorkList.size(); i++) {
    T *Current = WorkList[i];
    if (Current->getNumPredecessors() == 0)
      return Current;
    auto &Predecessors = Current->getPredecessors();
    WorkList.insert(Predecessors.begin(), Predecessors.end());
  }

  llvm_unreachable("VPlan without any entry node without predecessors");
}

VPlan *VPBlockBase::getPlan() { return getPlanEntry(this)->Plan; }

const VPlan *VPBlockBase::getPlan() const { return getPlanEntry(this)->Plan; }
/// \return the VPBasicBlock that is the entry of Block, possibly indirectly.
const VPBasicBlock *VPBlockBase::getEntryBasicBlock() const {
  const VPBlockBase *Block = this;
  while (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getEntry();
  return cast<VPBasicBlock>(Block);
}

VPBasicBlock *VPBlockBase::getEntryBasicBlock() {
  VPBlockBase *Block = this;
  while (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getEntry();
  return cast<VPBasicBlock>(Block);
}

void VPBlockBase::setPlan(VPlan *ParentPlan) {
  assert(
      (ParentPlan->getEntry() == this || ParentPlan->getPreheader() == this) &&
      "Can only set plan on its entry or preheader block.");
  Plan = ParentPlan;
}

/// \return the VPBasicBlock that is the exit of Block, possibly indirectly.
const VPBasicBlock *VPBlockBase::getExitingBasicBlock() const {
  const VPBlockBase *Block = this;
  while (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getExiting();
  return cast<VPBasicBlock>(Block);
}

VPBasicBlock *VPBlockBase::getExitingBasicBlock() {
  VPBlockBase *Block = this;
  while (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getExiting();
  return cast<VPBasicBlock>(Block);
}

VPBlockBase *VPBlockBase::getEnclosingBlockWithSuccessors() {
  if (!Successors.empty() || !Parent)
    return this;
  assert(Parent->getExiting() == this &&
         "Block w/o successors not the exiting block of its parent.");
  return Parent->getEnclosingBlockWithSuccessors();
}

VPBlockBase *VPBlockBase::getEnclosingBlockWithPredecessors() {
  if (!Predecessors.empty() || !Parent)
    return this;
  assert(Parent->getEntry() == this &&
         "Block w/o predecessors not the entry of its parent.");
  return Parent->getEnclosingBlockWithPredecessors();
}

void VPBlockBase::deleteCFG(VPBlockBase *Entry) {
  for (VPBlockBase *Block : to_vector(vp_depth_first_shallow(Entry)))
    delete Block;
}

VPBasicBlock::iterator VPBasicBlock::getFirstNonPhi() {
  iterator It = begin();
  while (It != end() && It->isPhi())
    It++;
  return It;
}

VPTransformState::VPTransformState(ElementCount VF, unsigned UF, LoopInfo *LI,
                                   DominatorTree *DT, IRBuilderBase &Builder,
                                   InnerLoopVectorizer *ILV, VPlan *Plan,
                                   LLVMContext &Ctx)
    : VF(VF), UF(UF), CFG(DT), LI(LI), Builder(Builder), ILV(ILV), Plan(Plan),
      LVer(nullptr),
      TypeAnalysis(Plan->getCanonicalIV()->getScalarType(), Ctx) {}

Value *VPTransformState::get(VPValue *Def, const VPIteration &Instance) {
  if (Def->isLiveIn())
    return Def->getLiveInIRValue();

  if (hasScalarValue(Def, Instance)) {
    return Data
        .PerPartScalars[Def][Instance.Part][Instance.Lane.mapToCacheIndex(VF)];
  }
  if (!Instance.Lane.isFirstLane() &&
      vputils::isUniformAfterVectorization(Def) &&
      hasScalarValue(Def, {Instance.Part, VPLane::getFirstLane()})) {
    return Data.PerPartScalars[Def][Instance.Part][0];
  }

  assert(hasVectorValue(Def, Instance.Part));
  auto *VecPart = Data.PerPartOutput[Def][Instance.Part];
  if (!VecPart->getType()->isVectorTy()) {
    assert(Instance.Lane.isFirstLane() && "cannot get lane > 0 for scalar");
    return VecPart;
  }
  // TODO: Cache created scalar values.
  Value *Lane = Instance.Lane.getAsRuntimeExpr(Builder, VF);
  auto *Extract = Builder.CreateExtractElement(VecPart, Lane);
  // set(Def, Extract, Instance);
  return Extract;
}
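
// Return the IR value for \p Def and \p Part. If only per-lane scalars are
// available, the vector value is created on demand, either by broadcasting a
// uniform scalar or by packing the lanes with insertelement instructions, and
// recorded via set() so later queries for the same part reuse it.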
Value *VPTransformState::get(VPValue *Def, unsigned Part, bool NeedsScalar) {
  if (NeedsScalar) {
    assert((VF.isScalar() || Def->isLiveIn() || hasVectorValue(Def, Part) ||
            (hasScalarValue(Def, VPIteration(Part, 0)) &&
             Data.PerPartScalars[Def][Part].size() == 1)) &&
           "Trying to access a single scalar per part but has multiple scalars "
           "per part.");
    return get(Def, VPIteration(Part, 0));
  }

  // If Values have been set for this Def return the one relevant for \p Part.
  if (hasVectorValue(Def, Part))
    return Data.PerPartOutput[Def][Part];

  auto GetBroadcastInstrs = [this, Def](Value *V) {
    bool SafeToHoist = Def->isDefinedOutsideVectorRegions();
    if (VF.isScalar())
      return V;
    // Place the code for broadcasting invariant variables in the new
    // preheader.
    IRBuilder<>::InsertPointGuard Guard(Builder);
    if (SafeToHoist) {
      BasicBlock *LoopVectorPreHeader = CFG.VPBB2IRBB[cast<VPBasicBlock>(
          Plan->getVectorLoopRegion()->getSinglePredecessor())];
      if (LoopVectorPreHeader)
        Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
    }

    // Broadcast the scalar into all locations in the vector.
    Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");

    return Shuf;
  };

  if (!hasScalarValue(Def, {Part, 0})) {
    assert(Def->isLiveIn() && "expected a live-in");
    if (Part != 0)
      return get(Def, 0);
    Value *IRV = Def->getLiveInIRValue();
    Value *B = GetBroadcastInstrs(IRV);
    set(Def, B, Part);
    return B;
  }

  Value *ScalarValue = get(Def, {Part, 0});
  // If we aren't vectorizing, we can just copy the scalar map values over
  // to the vector map.
  if (VF.isScalar()) {
    set(Def, ScalarValue, Part);
    return ScalarValue;
  }

  bool IsUniform = vputils::isUniformAfterVectorization(Def);

  unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
  // Check if there is a scalar value for the selected lane.
  if (!hasScalarValue(Def, {Part, LastLane})) {
    // At the moment, VPWidenIntOrFpInductionRecipes, VPScalarIVStepsRecipes
    // and VPExpandSCEVRecipes can also be uniform.
    assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDefiningRecipe()) ||
            isa<VPScalarIVStepsRecipe>(Def->getDefiningRecipe()) ||
            isa<VPExpandSCEVRecipe>(Def->getDefiningRecipe())) &&
           "unexpected recipe found to be invariant");
    IsUniform = true;
    LastLane = 0;
  }

  auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
  // Set the insert point after the last scalarized instruction or after the
  // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
  // will directly follow the scalar definitions.
  auto OldIP = Builder.saveIP();
  auto NewIP =
      isa<PHINode>(LastInst)
          ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
          : std::next(BasicBlock::iterator(LastInst));
  Builder.SetInsertPoint(&*NewIP);

  // However, if we are vectorizing, we need to construct the vector values.
  // If the value is known to be uniform after vectorization, we can just
  // broadcast the scalar value corresponding to lane zero for each unroll
  // iteration. Otherwise, we construct the vector values using
  // insertelement instructions. Since the resulting vectors are stored in
  // State, we will only generate the insertelements once.
  Value *VectorValue = nullptr;
  if (IsUniform) {
    VectorValue = GetBroadcastInstrs(ScalarValue);
    set(Def, VectorValue, Part);
  } else {
    // Initialize packing with insertelements to start from undef.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Undef, Part);
    for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
      packScalarIntoVectorValue(Def, {Part, Lane});
    VectorValue = get(Def, Part);
  }
  Builder.restoreIP(OldIP);
  return VectorValue;
}
BasicBlock *VPTransformState::CFGState::getPreheaderBBFor(VPRecipeBase *R) {
  VPRegionBlock *LoopRegion = R->getParent()->getEnclosingLoopRegion();
  return VPBB2IRBB[LoopRegion->getPreheaderVPBB()];
}

void VPTransformState::addNewMetadata(Instruction *To,
                                      const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void VPTransformState::addMetadata(Value *To, Instruction *From) {
  // No source instruction to transfer metadata from?
  if (!From)
    return;

  if (Instruction *ToI = dyn_cast<Instruction>(To)) {
    propagateMetadata(ToI, From);
    addNewMetadata(ToI, From);
  }
}

void VPTransformState::setDebugLocFrom(DebugLoc DL) {
  const DILocation *DIL = DL;
  // When a FSDiscriminator is enabled, we don't need to add the multiply
  // factors to the discriminators.
  if (DIL &&
      Builder.GetInsertBlock()
          ->getParent()
          ->shouldEmitDebugInfoForProfiling() &&
      !EnableFSDiscriminator) {
    // FIXME: For scalable vectors, assume vscale=1.
    auto NewDIL =
        DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
    if (NewDIL)
      Builder.SetCurrentDebugLocation(*NewDIL);
    else
      LLVM_DEBUG(dbgs() << "Failed to create new discriminator: "
                        << DIL->getFilename() << " Line: " << DIL->getLine());
  } else
    Builder.SetCurrentDebugLocation(DIL);
}

void VPTransformState::packScalarIntoVectorValue(VPValue *Def,
                                                 const VPIteration &Instance) {
  Value *ScalarInst = get(Def, Instance);
  Value *VectorValue = get(Def, Instance.Part);
  VectorValue = Builder.CreateInsertElement(
      VectorValue, ScalarInst, Instance.Lane.getAsRuntimeExpr(Builder, VF));
  set(Def, VectorValue, Instance.Part);
}

BasicBlock *
VPBasicBlock::createEmptyBasicBlock(VPTransformState::CFGState &CFG) {
  // BB stands for IR BasicBlocks. VPBB stands for VPlan VPBasicBlocks.
  // Pred stands for Predecessor. Prev stands for Previous - last
  // visited/created.
  BasicBlock *PrevBB = CFG.PrevBB;
  BasicBlock *NewBB = BasicBlock::Create(PrevBB->getContext(), getName(),
                                         PrevBB->getParent(), CFG.ExitBB);
  LLVM_DEBUG(dbgs() << "LV: created " << NewBB->getName() << '\n');

  // Hook up the new basic block to its predecessors.
  for (VPBlockBase *PredVPBlock : getHierarchicalPredecessors()) {
    VPBasicBlock *PredVPBB = PredVPBlock->getExitingBasicBlock();
    auto &PredVPSuccessors = PredVPBB->getHierarchicalSuccessors();
    BasicBlock *PredBB = CFG.VPBB2IRBB[PredVPBB];

    assert(PredBB && "Predecessor basic-block not found building successor.");
    auto *PredBBTerminator = PredBB->getTerminator();
    LLVM_DEBUG(dbgs() << "LV: draw edge from " << PredBB->getName() << '\n');

    auto *TermBr = dyn_cast<BranchInst>(PredBBTerminator);
    if (isa<UnreachableInst>(PredBBTerminator)) {
      assert(PredVPSuccessors.size() == 1 &&
             "Predecessor ending w/o branch must have single successor.");
      DebugLoc DL = PredBBTerminator->getDebugLoc();
      PredBBTerminator->eraseFromParent();
      auto *Br = BranchInst::Create(NewBB, PredBB);
      Br->setDebugLoc(DL);
    } else if (TermBr && !TermBr->isConditional()) {
      TermBr->setSuccessor(0, NewBB);
    } else {
      // Set each forward successor here when it is created, excluding
      // backedges. A backward successor is set when the branch is created.
      unsigned idx = PredVPSuccessors.front() == this ? 0 : 1;
      assert(!TermBr->getSuccessor(idx) &&
             "Trying to reset an existing successor block.");
      TermBr->setSuccessor(idx, NewBB);
    }
    CFG.DTU.applyUpdates({{DominatorTree::Insert, PredBB, NewBB}});
  }
  return NewBB;
}
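
// Generate the recipes of this VPIRBasicBlock directly into the IR basic
// block it wraps, then hook up incoming branches from the already-created IR
// blocks of its predecessors.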
void VPIRBasicBlock::execute(VPTransformState *State) {
  assert(getHierarchicalSuccessors().empty() &&
         "VPIRBasicBlock cannot have successors at the moment");

  State->Builder.SetInsertPoint(getIRBasicBlock()->getTerminator());
  executeRecipes(State, getIRBasicBlock());

  for (VPBlockBase *PredVPBlock : getHierarchicalPredecessors()) {
    VPBasicBlock *PredVPBB = PredVPBlock->getExitingBasicBlock();
    BasicBlock *PredBB = State->CFG.VPBB2IRBB[PredVPBB];
    assert(PredBB && "Predecessor basic-block not found building successor.");
    LLVM_DEBUG(dbgs() << "LV: draw edge from " << PredBB->getName() << '\n');

    auto *PredBBTerminator = PredBB->getTerminator();
    auto *TermBr = cast<BranchInst>(PredBBTerminator);
    // Set each forward successor here when it is created, excluding
    // backedges. A backward successor is set when the branch is created.
    const auto &PredVPSuccessors = PredVPBB->getHierarchicalSuccessors();
    unsigned idx = PredVPSuccessors.front() == this ? 0 : 1;
    assert(!TermBr->getSuccessor(idx) &&
           "Trying to reset an existing successor block.");
    TermBr->setSuccessor(idx, IRBB);
    State->CFG.DTU.applyUpdates({{DominatorTree::Insert, PredBB, IRBB}});
  }
}
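
// Generate IR for this VPBasicBlock: create a new IR basic block unless the
// previously created one can be reused (cases A-C below), then execute all
// recipes into it.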
void VPBasicBlock::execute(VPTransformState *State) {
  bool Replica = State->Instance && !State->Instance->isFirstIteration();
  VPBasicBlock *PrevVPBB = State->CFG.PrevVPBB;
  VPBlockBase *SingleHPred = nullptr;
  BasicBlock *NewBB = State->CFG.PrevBB; // Reuse it if possible.

  auto IsLoopRegion = [](VPBlockBase *BB) {
    auto *R = dyn_cast<VPRegionBlock>(BB);
    return R && !R->isReplicator();
  };

  // 1. Create an IR basic block.
  if (PrevVPBB && /* A */
      !((SingleHPred = getSingleHierarchicalPredecessor()) &&
        SingleHPred->getExitingBasicBlock() == PrevVPBB &&
        PrevVPBB->getSingleHierarchicalSuccessor() &&
        (SingleHPred->getParent() == getEnclosingLoopRegion() &&
         !IsLoopRegion(SingleHPred))) &&         /* B */
      !(Replica && getPredecessors().empty())) { /* C */
    // The last IR basic block is reused, as an optimization, in three cases:
    // A. the first VPBB reuses the loop pre-header BB - when PrevVPBB is null;
    // B. when the current VPBB has a single (hierarchical) predecessor which
    //    is PrevVPBB and the latter has a single (hierarchical) successor, and
    //    both are in the same non-replicator region; and
    // C. when the current VPBB is an entry of a region replica - where
    //    PrevVPBB is the exiting VPBB of this region from a previous instance,
    //    or the predecessor of this region.

    NewBB = createEmptyBasicBlock(State->CFG);
    State->Builder.SetInsertPoint(NewBB);
    // Temporarily terminate with unreachable until CFG is rewired.
    UnreachableInst *Terminator = State->Builder.CreateUnreachable();
    // Register NewBB in its loop. In innermost loops it's the same for all
    // BB's.
    if (State->CurrentVectorLoop)
      State->CurrentVectorLoop->addBasicBlockToLoop(NewBB, *State->LI);
    State->Builder.SetInsertPoint(Terminator);
    State->CFG.PrevBB = NewBB;
  }

  // 2. Fill the IR basic block with IR instructions.
  executeRecipes(State, NewBB);
}

void VPBasicBlock::dropAllReferences(VPValue *NewValue) {
  for (VPRecipeBase &R : Recipes) {
    for (auto *Def : R.definedValues())
      Def->replaceAllUsesWith(NewValue);

    for (unsigned I = 0, E = R.getNumOperands(); I != E; I++)
      R.setOperand(I, NewValue);
  }
}

void VPBasicBlock::executeRecipes(VPTransformState *State, BasicBlock *BB) {
  LLVM_DEBUG(dbgs() << "LV: vectorizing VPBB:" << getName()
                    << " in BB:" << BB->getName() << '\n');

  State->CFG.VPBB2IRBB[this] = BB;
  State->CFG.PrevVPBB = this;

  for (VPRecipeBase &Recipe : Recipes)
    Recipe.execute(*State);

  LLVM_DEBUG(dbgs() << "LV: filled BB:" << *BB);
}

VPBasicBlock *VPBasicBlock::splitAt(iterator SplitAt) {
  assert((SplitAt == end() || SplitAt->getParent() == this) &&
         "can only split at a position in the same block");

  SmallVector<VPBlockBase *, 2> Succs(successors());
  // First, disconnect the current block from its successors.
  for (VPBlockBase *Succ : Succs)
    VPBlockUtils::disconnectBlocks(this, Succ);

  // Create new empty block after the block to split.
  auto *SplitBlock = new VPBasicBlock(getName() + ".split");
  VPBlockUtils::insertBlockAfter(SplitBlock, this);

  // Add successors for block to split to new block.
  for (VPBlockBase *Succ : Succs)
    VPBlockUtils::connectBlocks(SplitBlock, Succ);

  // Finally, move the recipes starting at SplitAt to new block.
  for (VPRecipeBase &ToMove :
       make_early_inc_range(make_range(SplitAt, this->end())))
    ToMove.moveBefore(*SplitBlock, SplitBlock->end());

  return SplitBlock;
}

VPRegionBlock *VPBasicBlock::getEnclosingLoopRegion() {
  VPRegionBlock *P = getParent();
  if (P && P->isReplicator()) {
    P = P->getParent();
    assert(!cast<VPRegionBlock>(P)->isReplicator() &&
           "unexpected nested replicate regions");
  }
  return P;
}
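
// Return true if \p VPBB's last recipe models a conditional branch
// (BranchOnMask, BranchOnCond or BranchOnCount), asserting that this matches
// the block having two or more successors or exiting a non-replicate region.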
static bool hasConditionalTerminator(const VPBasicBlock *VPBB) {
  if (VPBB->empty()) {
    assert(
        VPBB->getNumSuccessors() < 2 &&
        "block with multiple successors doesn't have a recipe as terminator");
    return false;
  }

  const VPRecipeBase *R = &VPBB->back();
  bool IsCondBranch = isa<VPBranchOnMaskRecipe>(R) ||
                      match(R, m_BranchOnCond(m_VPValue())) ||
                      match(R, m_BranchOnCount(m_VPValue(), m_VPValue()));
  (void)IsCondBranch;

  if (VPBB->getNumSuccessors() >= 2 ||
      (VPBB->isExiting() && !VPBB->getParent()->isReplicator())) {
    assert(IsCondBranch && "block with multiple successors not terminated by "
                           "conditional branch recipe");

    return true;
  }

  assert(
      !IsCondBranch &&
      "block with 0 or 1 successors terminated by conditional branch recipe");
  return false;
}

VPRecipeBase *VPBasicBlock::getTerminator() {
  if (hasConditionalTerminator(this))
    return &back();
  return nullptr;
}

const VPRecipeBase *VPBasicBlock::getTerminator() const {
  if (hasConditionalTerminator(this))
    return &back();
  return nullptr;
}

bool VPBasicBlock::isExiting() const {
  return getParent() && getParent()->getExitingBasicBlock() == this;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPBlockBase::printSuccessors(raw_ostream &O, const Twine &Indent) const {
  if (getSuccessors().empty()) {
    O << Indent << "No successors\n";
  } else {
    O << Indent << "Successor(s): ";
    ListSeparator LS;
    for (auto *Succ : getSuccessors())
      O << LS << Succ->getName();
    O << '\n';
  }
}

void VPBasicBlock::print(raw_ostream &O, const Twine &Indent,
                         VPSlotTracker &SlotTracker) const {
  O << Indent << getName() << ":\n";

  auto RecipeIndent = Indent + "  ";
  for (const VPRecipeBase &Recipe : *this) {
    Recipe.print(O, RecipeIndent, SlotTracker);
    O << '\n';
  }

  printSuccessors(O, Indent);
}
#endif

static std::pair<VPBlockBase *, VPBlockBase *> cloneSESE(VPBlockBase *Entry);

// Clone the CFG for all nodes in the single-entry-single-exit region reachable
// from \p Entry; this includes cloning the blocks and their recipes. Operands
// of cloned recipes will NOT be updated. Remapping of operands must be done
// separately. Returns a pair with the new entry and exiting blocks of the
// cloned region.
static std::pair<VPBlockBase *, VPBlockBase *> cloneSESE(VPBlockBase *Entry) {
  DenseMap<VPBlockBase *, VPBlockBase *> Old2NewVPBlocks;
  ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT(
      Entry);
  for (VPBlockBase *BB : RPOT) {
    VPBlockBase *NewBB = BB->clone();
    for (VPBlockBase *Pred : BB->getPredecessors())
      VPBlockUtils::connectBlocks(Old2NewVPBlocks[Pred], NewBB);

    Old2NewVPBlocks[BB] = NewBB;
  }

#if !defined(NDEBUG)
  // Verify that the order of predecessors and successors matches in the cloned
  // version.
  ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>>
      NewRPOT(Old2NewVPBlocks[Entry]);
  for (const auto &[OldBB, NewBB] : zip(RPOT, NewRPOT)) {
    for (const auto &[OldPred, NewPred] :
         zip(OldBB->getPredecessors(), NewBB->getPredecessors()))
      assert(NewPred == Old2NewVPBlocks[OldPred] && "Different predecessors");

    for (const auto &[OldSucc, NewSucc] :
         zip(OldBB->successors(), NewBB->successors()))
      assert(NewSucc == Old2NewVPBlocks[OldSucc] && "Different successors");
  }
#endif

  return std::make_pair(Old2NewVPBlocks[Entry],
                        Old2NewVPBlocks[*reverse(RPOT).begin()]);
}

VPRegionBlock *VPRegionBlock::clone() {
  const auto &[NewEntry, NewExiting] = cloneSESE(getEntry());
  auto *NewRegion =
      new VPRegionBlock(NewEntry, NewExiting, getName(), isReplicator());
  for (VPBlockBase *Block : vp_depth_first_shallow(NewEntry))
    Block->setParent(NewRegion);
  return NewRegion;
}

void VPRegionBlock::dropAllReferences(VPValue *NewValue) {
  for (VPBlockBase *Block : vp_depth_first_shallow(Entry))
    // Drop all references in VPBasicBlocks and replace all uses with
    // DummyValue.
    Block->dropAllReferences(NewValue);
}
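
// Generate IR for the region. A non-replicate region allocates and registers
// a new vector loop in LoopInfo and executes its blocks once; a replicate
// region executes its blocks once per unroll part and per vector lane.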
void VPRegionBlock::execute(VPTransformState *State) {
  ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>>
      RPOT(Entry);

  if (!isReplicator()) {
    // Create and register the new vector loop.
    Loop *PrevLoop = State->CurrentVectorLoop;
    State->CurrentVectorLoop = State->LI->AllocateLoop();
    BasicBlock *VectorPH = State->CFG.VPBB2IRBB[getPreheaderVPBB()];
    Loop *ParentLoop = State->LI->getLoopFor(VectorPH);

    // Insert the new loop into the loop nest and register the new basic blocks
    // before calling any utilities such as SCEV that require valid LoopInfo.
    if (ParentLoop)
      ParentLoop->addChildLoop(State->CurrentVectorLoop);
    else
      State->LI->addTopLevelLoop(State->CurrentVectorLoop);

    // Visit the VPBlocks connected to "this", starting from it.
    for (VPBlockBase *Block : RPOT) {
      LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
      Block->execute(State);
    }

    State->CurrentVectorLoop = PrevLoop;
    return;
  }

  assert(!State->Instance && "Replicating a Region with non-null instance.");

  // Enter replicating mode.
  State->Instance = VPIteration(0, 0);

  for (unsigned Part = 0, UF = State->UF; Part < UF; ++Part) {
    State->Instance->Part = Part;
    assert(!State->VF.isScalable() && "VF is assumed to be non scalable.");
    for (unsigned Lane = 0, VF = State->VF.getKnownMinValue(); Lane < VF;
         ++Lane) {
      State->Instance->Lane = VPLane(Lane, VPLane::Kind::First);
      // Visit the VPBlocks connected to \p this, starting from it.
      for (VPBlockBase *Block : RPOT) {
        LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
        Block->execute(State);
      }
    }
  }

  // Exit replicating mode.
  State->Instance.reset();
}
InstructionCost VPBasicBlock::cost(ElementCount VF, VPCostContext &Ctx) {
  InstructionCost Cost = 0;
  for (VPRecipeBase &R : Recipes)
    Cost += R.cost(VF, Ctx);
  return Cost;
}

InstructionCost VPRegionBlock::cost(ElementCount VF, VPCostContext &Ctx) {
  if (!isReplicator()) {
    InstructionCost Cost = 0;
    for (VPBlockBase *Block : vp_depth_first_shallow(getEntry()))
      Cost += Block->cost(VF, Ctx);
    return Cost;
  }

  // Compute the cost of a replicate region. Replicating isn't supported for
  // scalable vectors, return an invalid cost for them.
  // TODO: Discard scalable VPlans with replicate recipes earlier after
  // construction.
  if (VF.isScalable())
    return InstructionCost::getInvalid();

  // First compute the cost of the conditionally executed recipes, then account
  // for the branching cost, except if the mask is a header mask or a uniform
  // condition.
  using namespace llvm::VPlanPatternMatch;
  VPBasicBlock *Then = cast<VPBasicBlock>(getEntry()->getSuccessors()[0]);
  InstructionCost ThenCost = Then->cost(VF, Ctx);

  // Note the cost estimates below closely match the current legacy cost model.
  auto *BOM = cast<VPBranchOnMaskRecipe>(&getEntryBasicBlock()->front());
  VPValue *Cond = BOM->getOperand(0);

  // Check if Cond is a header mask and don't account for branching costs, as
  // the header mask will always be true except in the last iteration.
  if (vputils::isHeaderMask(Cond, *getPlan()))
    return ThenCost;

  // For the scalar case, we may not always execute the original predicated
  // block. Thus, scale the block's cost by the probability of executing it.
  if (VF.isScalar())
    return ThenCost / getReciprocalPredBlockProb();

  // Check if Cond is a uniform compare and don't account for branching costs,
  // as a uniform condition corresponds to a single branch per VF.
  if (vputils::isUniformBoolean(Cond))
    return ThenCost;

  // Add the cost for branches around scalarized and predicated blocks.
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  auto *Vec_i1Ty = VectorType::get(IntegerType::getInt1Ty(Ctx.LLVMCtx), VF);
  auto FixedVF = VF.getFixedValue(); // Known to be non scalable.
  InstructionCost Cost = ThenCost;
  Cost += Ctx.TTI.getScalarizationOverhead(Vec_i1Ty, APInt::getAllOnes(FixedVF),
                                           /*Insert*/ false, /*Extract*/ true,
                                           CostKind);
  Cost += Ctx.TTI.getCFInstrCost(Instruction::Br, CostKind) * FixedVF;
  return Cost;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPRegionBlock::print(raw_ostream &O, const Twine &Indent,
                          VPSlotTracker &SlotTracker) const {
  O << Indent << (isReplicator() ? "<xVFxUF> " : "<x1> ") << getName() << ": {";
  auto NewIndent = Indent + "  ";
  for (auto *BlockBase : vp_depth_first_shallow(Entry)) {
    O << '\n';
    BlockBase->print(O, NewIndent, SlotTracker);
  }
  O << Indent << "}\n";

  printSuccessors(O, Indent);
}
#endif

VPlan::~VPlan() {
  for (auto &KV : LiveOuts)
    delete KV.second;
  LiveOuts.clear();

  if (Entry) {
    VPValue DummyValue;
    for (VPBlockBase *Block : vp_depth_first_shallow(Entry))
      Block->dropAllReferences(&DummyValue);

    VPBlockBase::deleteCFG(Entry);

    Preheader->dropAllReferences(&DummyValue);
    delete Preheader;
  }
  for (VPValue *VPV : VPLiveInsToFree)
    delete VPV;
  if (BackedgeTakenCount)
    delete BackedgeTakenCount;
}

VPlanPtr VPlan::createInitialVPlan(const SCEV *TripCount, ScalarEvolution &SE,
                                   BasicBlock *PH) {
  VPIRBasicBlock *Preheader = new VPIRBasicBlock(PH);
  VPBasicBlock *VecPreheader = new VPBasicBlock("vector.ph");
  auto Plan = std::make_unique<VPlan>(Preheader, VecPreheader);
  Plan->TripCount =
      vputils::getOrCreateVPValueForSCEVExpr(*Plan, TripCount, SE);
  // Create empty VPRegionBlock, to be filled during processing later.
  auto *TopRegion = new VPRegionBlock("vector loop", false /*isReplicator*/);
  VPBlockUtils::insertBlockAfter(TopRegion, VecPreheader);
  VPBasicBlock *MiddleVPBB = new VPBasicBlock("middle.block");
  VPBlockUtils::insertBlockAfter(MiddleVPBB, TopRegion);
  return Plan;
}

void VPlan::prepareToExecute(Value *TripCountV, Value *VectorTripCountV,
                             Value *CanonicalIVStartValue,
                             VPTransformState &State) {
  // Check if the backedge taken count is needed, and if so build it.
  if (BackedgeTakenCount && BackedgeTakenCount->getNumUsers()) {
    IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
    auto *TCMO = Builder.CreateSub(TripCountV,
                                   ConstantInt::get(TripCountV->getType(), 1),
                                   "trip.count.minus.1");
    BackedgeTakenCount->setUnderlyingValue(TCMO);
  }

  VectorTripCount.setUnderlyingValue(VectorTripCountV);

  IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
  // FIXME: Model VF * UF computation completely in VPlan.
  VFxUF.setUnderlyingValue(
      createStepForVF(Builder, TripCountV->getType(), State.VF, State.UF));

  // When vectorizing the epilogue loop, the canonical induction start value
  // needs to be changed from zero to the value after the main vector loop.
  // FIXME: Improve modeling for canonical IV start values in the epilogue
  // loop.
  if (CanonicalIVStartValue) {
    VPValue *VPV = getOrAddLiveIn(CanonicalIVStartValue);
    auto *IV = getCanonicalIV();
    assert(all_of(IV->users(),
                  [](const VPUser *U) {
                    return isa<VPScalarIVStepsRecipe>(U) ||
                           isa<VPScalarCastRecipe>(U) ||
                           isa<VPDerivedIVRecipe>(U) ||
                           cast<VPInstruction>(U)->getOpcode() ==
                               Instruction::Add;
                  }) &&
           "the canonical IV should only be used by its increment or "
           "ScalarIVSteps when resetting the start value");
    IV->setOperand(0, VPV);
  }
}

/// Replace \p VPBB with a VPIRBasicBlock wrapping \p IRBB. All recipes from \p
/// VPBB are moved to the newly created VPIRBasicBlock.
static void replaceVPBBWithIRVPBB(VPBasicBlock *VPBB, BasicBlock *IRBB) {
  assert(VPBB->getNumSuccessors() == 0 && "VPBB must be a leaf node");
  VPIRBasicBlock *IRMiddleVPBB = new VPIRBasicBlock(IRBB);
  for (auto &R : make_early_inc_range(*VPBB))
    R.moveBefore(*IRMiddleVPBB, IRMiddleVPBB->end());
  VPBlockBase *PredVPBB = VPBB->getSinglePredecessor();
  VPBlockUtils::disconnectBlocks(PredVPBB, VPBB);
  VPBlockUtils::connectBlocks(PredVPBB, IRMiddleVPBB);
  delete VPBB;
}

/// Generate the code inside the preheader and body of the vectorized loop.
/// Assumes a single pre-header basic-block was created for this. Introduce
/// additional basic-blocks as needed, and fill them all.
void VPlan::execute(VPTransformState *State) {
  // Initialize CFG state.
  State->CFG.PrevVPBB = nullptr;
  State->CFG.ExitBB = State->CFG.PrevBB->getSingleSuccessor();
  BasicBlock *VectorPreHeader = State->CFG.PrevBB;
  State->Builder.SetInsertPoint(VectorPreHeader->getTerminator());
  replaceVPBBWithIRVPBB(
      cast<VPBasicBlock>(getVectorLoopRegion()->getSingleSuccessor()),
      State->CFG.ExitBB);

  // Disconnect VectorPreHeader from ExitBB in both the CFG and DT.
  cast<BranchInst>(VectorPreHeader->getTerminator())->setSuccessor(0, nullptr);
  State->CFG.DTU.applyUpdates(
      {{DominatorTree::Delete, VectorPreHeader, State->CFG.ExitBB}});

  // Generate code in the loop pre-header and body.
  for (VPBlockBase *Block : vp_depth_first_shallow(Entry))
    Block->execute(State);

  VPBasicBlock *LatchVPBB = getVectorLoopRegion()->getExitingBasicBlock();
  BasicBlock *VectorLatchBB = State->CFG.VPBB2IRBB[LatchVPBB];

  // Fix the latch value of canonical, reduction and first-order recurrence
  // phis in the vector loop.
  VPBasicBlock *Header = getVectorLoopRegion()->getEntryBasicBlock();
  for (VPRecipeBase &R : Header->phis()) {
    // Skip phi-like recipes that generate their backedge values themselves.
    if (isa<VPWidenPHIRecipe>(&R))
      continue;

    if (isa<VPWidenPointerInductionRecipe>(&R) ||
        isa<VPWidenIntOrFpInductionRecipe>(&R)) {
      PHINode *Phi = nullptr;
      if (isa<VPWidenIntOrFpInductionRecipe>(&R)) {
        Phi = cast<PHINode>(State->get(R.getVPSingleValue(), 0));
      } else {
        auto *WidenPhi = cast<VPWidenPointerInductionRecipe>(&R);
        assert(!WidenPhi->onlyScalarsGenerated(State->VF.isScalable()) &&
               "recipe generating only scalars should have been replaced");
        auto *GEP = cast<GetElementPtrInst>(State->get(WidenPhi, 0));
        Phi = cast<PHINode>(GEP->getPointerOperand());
      }

      Phi->setIncomingBlock(1, VectorLatchBB);

      // Move the last step to the end of the latch block. This ensures
      // consistent placement of all induction updates.
      Instruction *Inc = cast<Instruction>(Phi->getIncomingValue(1));
      Inc->moveBefore(VectorLatchBB->getTerminator()->getPrevNode());
      continue;
    }

    auto *PhiR = cast<VPHeaderPHIRecipe>(&R);
    // For canonical IV, first-order recurrences and in-order reduction phis,
    // only a single part is generated, which provides the last part from the
    // previous iteration. For non-ordered reductions all UF parts are
    // generated.
    bool SinglePartNeeded =
        isa<VPCanonicalIVPHIRecipe>(PhiR) ||
        isa<VPFirstOrderRecurrencePHIRecipe, VPEVLBasedIVPHIRecipe>(PhiR) ||
        (isa<VPReductionPHIRecipe>(PhiR) &&
         cast<VPReductionPHIRecipe>(PhiR)->isOrdered());
    bool NeedsScalar =
        isa<VPCanonicalIVPHIRecipe, VPEVLBasedIVPHIRecipe>(PhiR) ||
        (isa<VPReductionPHIRecipe>(PhiR) &&
         cast<VPReductionPHIRecipe>(PhiR)->isInLoop());
    unsigned LastPartForNewPhi = SinglePartNeeded ? 1 : State->UF;

    for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
      Value *Phi = State->get(PhiR, Part, NeedsScalar);
      Value *Val =
          State->get(PhiR->getBackedgeValue(),
                     SinglePartNeeded ? State->UF - 1 : Part, NeedsScalar);
      cast<PHINode>(Phi)->addIncoming(Val, VectorLatchBB);
    }
  }

  State->CFG.DTU.flush();
  assert(State->CFG.DTU.getDomTree().verify(
             DominatorTree::VerificationLevel::Fast) &&
         "DT not preserved correctly");
}

InstructionCost VPlan::cost(ElementCount VF, VPCostContext &Ctx) {
  // For now only return the cost of the vector loop region, ignoring any other
  // blocks, like the preheader or middle blocks.
  return getVectorLoopRegion()->cost(VF, Ctx);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPlan::printLiveIns(raw_ostream &O) const {
  VPSlotTracker SlotTracker(this);

  if (VFxUF.getNumUsers() > 0) {
    O << "\nLive-in ";
    VFxUF.printAsOperand(O, SlotTracker);
    O << " = VF * UF";
  }

  if (VectorTripCount.getNumUsers() > 0) {
    O << "\nLive-in ";
    VectorTripCount.printAsOperand(O, SlotTracker);
    O << " = vector-trip-count";
  }

  if (BackedgeTakenCount && BackedgeTakenCount->getNumUsers()) {
    O << "\nLive-in ";
    BackedgeTakenCount->printAsOperand(O, SlotTracker);
    O << " = backedge-taken count";
  }

  O << "\n";
  if (TripCount->isLiveIn())
    O << "Live-in ";
  TripCount->printAsOperand(O, SlotTracker);
  O << " = original trip-count";
  O << "\n";
}

LLVM_DUMP_METHOD
void VPlan::print(raw_ostream &O) const {
  VPSlotTracker SlotTracker(this);

  O << "VPlan '" << getName() << "' {";

  printLiveIns(O);

  if (!getPreheader()->empty()) {
    O << "\n";
    getPreheader()->print(O, "", SlotTracker);
  }

  for (const VPBlockBase *Block : vp_depth_first_shallow(getEntry())) {
    O << '\n';
    Block->print(O, "", SlotTracker);
  }

  if (!LiveOuts.empty())
    O << "\n";
  for (const auto &KV : LiveOuts) {
    KV.second->print(O, SlotTracker);
  }

  O << "}\n";
}

std::string VPlan::getName() const {
  std::string Out;
  raw_string_ostream RSO(Out);
  RSO << Name << " for ";
  if (!VFs.empty()) {
    RSO << "VF={" << VFs[0];
    for (ElementCount VF : drop_begin(VFs))
      RSO << "," << VF;
    RSO << "},";
  }

  if (UFs.empty()) {
    RSO << "UF>=1";
  } else {
    RSO << "UF={" << UFs[0];
    for (unsigned UF : drop_begin(UFs))
      RSO << "," << UF;
    RSO << "}";
  }

  return Out;
}

LLVM_DUMP_METHOD
void VPlan::printDOT(raw_ostream &O) const {
  VPlanPrinter Printer(O, *this);
  Printer.dump();
}

LLVM_DUMP_METHOD
void VPlan::dump() const { print(dbgs()); }
#endif

void VPlan::addLiveOut(PHINode *PN, VPValue *V) {
  assert(LiveOuts.count(PN) == 0 && "an exit value for PN already exists");
  LiveOuts.insert({PN, new VPLiveOut(PN, V)});
}
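
/// Remap operands of the recipes cloned into the blocks reachable from \p
/// NewEntry to use the cloned VPValues, recording (and then using) the
/// old-to-new mapping in \p Old2NewVPValues.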
static void remapOperands(VPBlockBase *Entry, VPBlockBase *NewEntry,
                          DenseMap<VPValue *, VPValue *> &Old2NewVPValues) {
  // Update the operands of all cloned recipes starting at NewEntry. This
  // traverses all reachable blocks. This is done in two steps, to handle
  // cycles in PHI recipes.
  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>>
      OldDeepRPOT(Entry);
  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>>
      NewDeepRPOT(NewEntry);
  // First, collect all mappings from old to new VPValues defined by cloned
  // recipes.
  for (const auto &[OldBB, NewBB] :
       zip(VPBlockUtils::blocksOnly<VPBasicBlock>(OldDeepRPOT),
           VPBlockUtils::blocksOnly<VPBasicBlock>(NewDeepRPOT))) {
    assert(OldBB->getRecipeList().size() == NewBB->getRecipeList().size() &&
           "blocks must have the same number of recipes");
    for (const auto &[OldR, NewR] : zip(*OldBB, *NewBB)) {
      assert(OldR.getNumOperands() == NewR.getNumOperands() &&
             "recipes must have the same number of operands");
      assert(OldR.getNumDefinedValues() == NewR.getNumDefinedValues() &&
             "recipes must define the same number of values");
      for (const auto &[OldV, NewV] :
           zip(OldR.definedValues(), NewR.definedValues()))
        Old2NewVPValues[OldV] = NewV;
    }
  }

  // Update all operands to use cloned VPValues.
  for (VPBasicBlock *NewBB :
       VPBlockUtils::blocksOnly<VPBasicBlock>(NewDeepRPOT)) {
    for (VPRecipeBase &NewR : *NewBB)
      for (unsigned I = 0, E = NewR.getNumOperands(); I != E; ++I) {
        VPValue *NewOp = Old2NewVPValues.lookup(NewR.getOperand(I));
        NewR.setOperand(I, NewOp);
      }
  }
}
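
// Create a deep copy of this VPlan: clone the preheader and all blocks
// reachable from the entry, remap the operands of the cloned recipes to the
// cloned VPValues, and copy over live-outs, VFs, UFs, the name and the trip
// count.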
VPlan *VPlan::duplicate() {
  // Clone blocks.
  VPBasicBlock *NewPreheader = Preheader->clone();
  const auto &[NewEntry, __] = cloneSESE(Entry);

  // Create VPlan, clone live-ins and remap operands in the cloned blocks.
  auto *NewPlan = new VPlan(NewPreheader, cast<VPBasicBlock>(NewEntry));
  DenseMap<VPValue *, VPValue *> Old2NewVPValues;
  for (VPValue *OldLiveIn : VPLiveInsToFree) {
    Old2NewVPValues[OldLiveIn] =
        NewPlan->getOrAddLiveIn(OldLiveIn->getLiveInIRValue());
  }
  Old2NewVPValues[&VectorTripCount] = &NewPlan->VectorTripCount;
  Old2NewVPValues[&VFxUF] = &NewPlan->VFxUF;
  if (BackedgeTakenCount) {
    NewPlan->BackedgeTakenCount = new VPValue();
    Old2NewVPValues[BackedgeTakenCount] = NewPlan->BackedgeTakenCount;
  }
  assert(TripCount && "trip count must be set");
  if (TripCount->isLiveIn())
    Old2NewVPValues[TripCount] =
        NewPlan->getOrAddLiveIn(TripCount->getLiveInIRValue());
  // else NewTripCount will be created and inserted into Old2NewVPValues when
  // TripCount is cloned. In any case NewPlan->TripCount is updated below.

  remapOperands(Preheader, NewPreheader, Old2NewVPValues);
  remapOperands(Entry, NewEntry, Old2NewVPValues);

  // Clone live-outs.
  for (const auto &[_, LO] : LiveOuts)
    NewPlan->addLiveOut(LO->getPhi(), Old2NewVPValues[LO->getOperand(0)]);

  // Initialize remaining fields of cloned VPlan.
  NewPlan->VFs = VFs;
  NewPlan->UFs = UFs;
  // TODO: Adjust names.
  NewPlan->Name = Name;
  assert(Old2NewVPValues.contains(TripCount) &&
         "TripCount must have been added to Old2NewVPValues");
  NewPlan->TripCount = Old2NewVPValues[TripCount];
  return NewPlan;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

Twine VPlanPrinter::getUID(const VPBlockBase *Block) {
  return (isa<VPRegionBlock>(Block) ? "cluster_N" : "N") +
         Twine(getOrCreateBID(Block));
}

Twine VPlanPrinter::getOrCreateName(const VPBlockBase *Block) {
  const std::string &Name = Block->getName();
  if (!Name.empty())
    return Name;
  return "VPB" + Twine(getOrCreateBID(Block));
}

void VPlanPrinter::dump() {
  Depth = 1;
  bumpIndent(0);
  OS << "digraph VPlan {\n";
  OS << "graph [labelloc=t, fontsize=30; label=\"Vectorization Plan";
  if (!Plan.getName().empty())
    OS << "\\n" << DOT::EscapeString(Plan.getName());

  {
    // Print live-ins.
    std::string Str;
    raw_string_ostream SS(Str);
    Plan.printLiveIns(SS);
    SmallVector<StringRef, 0> Lines;
    StringRef(Str).rtrim('\n').split(Lines, "\n");
    for (auto Line : Lines)
      OS << DOT::EscapeString(Line.str()) << "\\n";
  }

  OS << "\"]\n";
  OS << "node [shape=rect, fontname=Courier, fontsize=30]\n";
  OS << "edge [fontname=Courier, fontsize=30]\n";
  OS << "compound=true\n";

  dumpBlock(Plan.getPreheader());

  for (const VPBlockBase *Block : vp_depth_first_shallow(Plan.getEntry()))
    dumpBlock(Block);

  OS << "}\n";
}

void VPlanPrinter::dumpBlock(const VPBlockBase *Block) {
  if (const VPBasicBlock *BasicBlock = dyn_cast<VPBasicBlock>(Block))
    dumpBasicBlock(BasicBlock);
  else if (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    dumpRegion(Region);
  else
    llvm_unreachable("Unsupported kind of VPBlock.");
}

void VPlanPrinter::drawEdge(const VPBlockBase *From, const VPBlockBase *To,
                            bool Hidden, const Twine &Label) {
  // Due to "dot" we print an edge between two regions as an edge between the
  // exiting basic block and the entry basic block of the respective regions.
  const VPBlockBase *Tail = From->getExitingBasicBlock();
  const VPBlockBase *Head = To->getEntryBasicBlock();
  OS << Indent << getUID(Tail) << " -> " << getUID(Head);
  OS << " [ label=\"" << Label << '\"';
  if (Tail != From)
    OS << " ltail=" << getUID(From);
  if (Head != To)
    OS << " lhead=" << getUID(To);
  if (Hidden)
    OS << "; splines=none";
  OS << "]\n";
}

void VPlanPrinter::dumpEdges(const VPBlockBase *Block) {
  auto &Successors = Block->getSuccessors();
  if (Successors.size() == 1)
    drawEdge(Block, Successors.front(), false, "");
  else if (Successors.size() == 2) {
    drawEdge(Block, Successors.front(), false, "T");
    drawEdge(Block, Successors.back(), false, "F");
  } else {
    unsigned SuccessorNumber = 0;
    for (auto *Successor : Successors)
      drawEdge(Block, Successor, false, Twine(SuccessorNumber++));
  }
}

void VPlanPrinter::dumpBasicBlock(const VPBasicBlock *BasicBlock) {
  // Implement dot-formatted dump by performing plain-text dump into the
  // temporary storage followed by some post-processing.
  OS << Indent << getUID(BasicBlock) << " [label =\n";
  bumpIndent(1);
  std::string Str;
  raw_string_ostream SS(Str);
  // Use no indentation as we need to wrap the lines into quotes ourselves.
  BasicBlock->print(SS, "", SlotTracker);

  // We need to process each line of the output separately, so split
  // single-string plain-text dump.
  SmallVector<StringRef, 0> Lines;
  StringRef(Str).rtrim('\n').split(Lines, "\n");

  auto EmitLine = [&](StringRef Line, StringRef Suffix) {
    OS << Indent << '"' << DOT::EscapeString(Line.str()) << "\\l\"" << Suffix;
  };

  // Don't need the "+" after the last line.
  for (auto Line : make_range(Lines.begin(), Lines.end() - 1))
    EmitLine(Line, " +\n");
  EmitLine(Lines.back(), "\n");

  bumpIndent(-1);
  OS << Indent << "]\n";

  dumpEdges(BasicBlock);
}

void VPlanPrinter::dumpRegion(const VPRegionBlock *Region) {
  OS << Indent << "subgraph " << getUID(Region) << " {\n";
  bumpIndent(1);
  OS << Indent << "fontname=Courier\n"
     << Indent << "label=\""
     << DOT::EscapeString(Region->isReplicator() ? "<xVFxUF> " : "<x1> ")
     << DOT::EscapeString(Region->getName()) << "\"\n";
  // Dump the blocks of the region.
  assert(Region->getEntry() && "Region contains no inner blocks.");
  for (const VPBlockBase *Block : vp_depth_first_shallow(Region->getEntry()))
    dumpBlock(Block);
  bumpIndent(-1);
  OS << Indent << "}\n";
  dumpEdges(Region);
}

void VPlanIngredient::print(raw_ostream &O) const {
  if (auto *Inst = dyn_cast<Instruction>(V)) {
    if (!Inst->getType()->isVoidTy()) {
      Inst->printAsOperand(O, false);
      O << " = ";
    }
    O << Inst->getOpcodeName() << " ";
    unsigned E = Inst->getNumOperands();
    if (E > 0) {
      Inst->getOperand(0)->printAsOperand(O, false);
      for (unsigned I = 1; I < E; ++I)
        Inst->getOperand(I)->printAsOperand(O << ", ", false);
    }
  } else // !Inst
    V->printAsOperand(O, false);
}

#endif

template void DomTreeBuilder::Calculate<VPDominatorTree>(VPDominatorTree &DT);

void VPValue::replaceAllUsesWith(VPValue *New) {
  replaceUsesWithIf(New, [](VPUser &, unsigned) { return true; });
}
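
// Replace uses of this VPValue with \p New, restricted to the users and
// operand indices for which \p ShouldReplace returns true.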
void VPValue::replaceUsesWithIf(
    VPValue *New,
    llvm::function_ref<bool(VPUser &U, unsigned Idx)> ShouldReplace) {
  // Note that this early exit is required for correctness; the implementation
  // below relies on the number of users for this VPValue to decrease, which
  // isn't the case if this == New.
  if (this == New)
    return;

  for (unsigned J = 0; J < getNumUsers();) {
    VPUser *User = Users[J];
    bool RemovedUser = false;
    for (unsigned I = 0, E = User->getNumOperands(); I < E; ++I) {
      if (User->getOperand(I) != this || !ShouldReplace(*User, I))
        continue;

      RemovedUser = true;
      User->setOperand(I, New);
    }
    // If a user got removed after updating the current user, the next user to
    // update will be moved to the current position, so we only need to
    // increment the index if the number of users did not change.
    if (!RemovedUser)
      J++;
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPValue::printAsOperand(raw_ostream &OS, VPSlotTracker &Tracker) const {
  OS << Tracker.getOrCreateName(this);
}

void VPUser::printOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const {
  interleaveComma(operands(), O, [&O, &SlotTracker](VPValue *Op) {
    Op->printAsOperand(O, SlotTracker);
  });
}
#endif

void VPInterleavedAccessInfo::visitRegion(VPRegionBlock *Region,
                                          Old2NewTy &Old2New,
                                          InterleavedAccessInfo &IAI) {
  ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>>
      RPOT(Region->getEntry());
  for (VPBlockBase *Base : RPOT) {
    visitBlock(Base, Old2New, IAI);
  }
}

void VPInterleavedAccessInfo::visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
                                         InterleavedAccessInfo &IAI) {
  if (VPBasicBlock *VPBB = dyn_cast<VPBasicBlock>(Block)) {
    for (VPRecipeBase &VPI : *VPBB) {
      if (isa<VPWidenPHIRecipe>(&VPI))
        continue;
      assert(isa<VPInstruction>(&VPI) && "Can only handle VPInstructions");
      auto *VPInst = cast<VPInstruction>(&VPI);

      auto *Inst = dyn_cast_or_null<Instruction>(VPInst->getUnderlyingValue());
      if (!Inst)
        continue;
      auto *IG = IAI.getInterleaveGroup(Inst);
      if (!IG)
        continue;

      auto NewIGIter = Old2New.find(IG);
      if (NewIGIter == Old2New.end())
        Old2New[IG] = new InterleaveGroup<VPInstruction>(
            IG->getFactor(), IG->isReverse(), IG->getAlign());

      if (Inst == IG->getInsertPos())
        Old2New[IG]->setInsertPos(VPInst);

      InterleaveGroupMap[VPInst] = Old2New[IG];
      InterleaveGroupMap[VPInst]->insertMember(
          VPInst, IG->getIndex(Inst),
          Align(IG->isReverse() ? (-1) * int(IG->getFactor())
                                : IG->getFactor()));
    }
  } else if (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    visitRegion(Region, Old2New, IAI);
  else
    llvm_unreachable("Unsupported kind of VPBlock.");
}

VPInterleavedAccessInfo::VPInterleavedAccessInfo(VPlan &Plan,
                                                 InterleavedAccessInfo &IAI) {
  Old2NewTy Old2New;
  visitRegion(Plan.getVectorLoopRegion(), Old2New, IAI);
}
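
// Assign a printable name to \p V: a numbered "vp<%N>" slot if V has no
// underlying IR value, otherwise the underlying value's name wrapped in
// "ir<>", versioned with a ".Number" suffix if the same base name was already
// assigned to another VPValue.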
void VPSlotTracker::assignName(const VPValue *V) {
  assert(!VPValue2Name.contains(V) && "VPValue already has a name!");
  auto *UV = V->getUnderlyingValue();
  if (!UV) {
    VPValue2Name[V] = (Twine("vp<%") + Twine(NextSlot) + ">").str();
    NextSlot++;
    return;
  }

  // Use the name of the underlying Value, wrapped in "ir<>", and versioned by
  // appending ".Number" to the name if there are multiple uses.
  std::string Name;
  raw_string_ostream S(Name);
  UV->printAsOperand(S, false);
  assert(!Name.empty() && "Name cannot be empty.");
  std::string BaseName = (Twine("ir<") + Name + Twine(">")).str();

  // First assign the base name for V.
  const auto &[A, _] = VPValue2Name.insert({V, BaseName});
  // Integer or FP constants with different types will result in the same
  // string due to stripping types.
  if (V->isLiveIn() && isa<ConstantInt, ConstantFP>(UV))
    return;

  // If it is already used by C > 0 other VPValues, increase the version
  // counter C and use it for V.
  const auto &[C, UseInserted] = BaseName2Version.insert({BaseName, 0});
  if (!UseInserted) {
    C->second++;
    A->second = (BaseName + Twine(".") + Twine(C->second)).str();
  }
}

void VPSlotTracker::assignNames(const VPlan &Plan) {
  if (Plan.VFxUF.getNumUsers() > 0)
    assignName(&Plan.VFxUF);
  assignName(&Plan.VectorTripCount);
  if (Plan.BackedgeTakenCount)
    assignName(Plan.BackedgeTakenCount);
  for (VPValue *LI : Plan.VPLiveInsToFree)
    assignName(LI);
  assignNames(Plan.getPreheader());

  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<const VPBlockBase *>>
      RPOT(VPBlockDeepTraversalWrapper<const VPBlockBase *>(Plan.getEntry()));
  for (const VPBasicBlock *VPBB :
       VPBlockUtils::blocksOnly<const VPBasicBlock>(RPOT))
    assignNames(VPBB);
}

void VPSlotTracker::assignNames(const VPBasicBlock *VPBB) {
  for (const VPRecipeBase &Recipe : *VPBB)
    for (VPValue *Def : Recipe.definedValues())
      assignName(Def);
}

std::string VPSlotTracker::getOrCreateName(const VPValue *V) const {
  std::string Name = VPValue2Name.lookup(V);
  if (!Name.empty())
    return Name;

  // If no name was assigned, no VPlan was provided when creating the slot
  // tracker or it is not reachable from the provided VPlan. This can happen,
  // e.g. when trying to print a recipe that has not been inserted into a VPlan
  // in a debugger.
  // TODO: Update VPSlotTracker constructor to assign names to recipes &
  // VPValues not associated with a VPlan, instead of constructing names ad-hoc
  // here.
  const VPRecipeBase *DefR = V->getDefiningRecipe();
  (void)DefR;
  assert((!DefR || !DefR->getParent() || !DefR->getParent()->getPlan()) &&
         "VPValue defined by a recipe in a VPlan?");

  // Use the underlying value's name, if there is one.
  if (auto *UV = V->getUnderlyingValue()) {
    std::string Name;
    raw_string_ostream S(Name);
    UV->printAsOperand(S, false);
    return (Twine("ir<") + Name + ">").str();
  }

  return "<badref>";
}

bool vputils::onlyFirstLaneUsed(const VPValue *Def) {
  return all_of(Def->users(),
                [Def](const VPUser *U) { return U->onlyFirstLaneUsed(Def); });
}

bool vputils::onlyFirstPartUsed(const VPValue *Def) {
  return all_of(Def->users(),
                [Def](const VPUser *U) { return U->onlyFirstPartUsed(Def); });
}

VPValue *vputils::getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr,
                                                ScalarEvolution &SE) {
  if (auto *Expanded = Plan.getSCEVExpansion(Expr))
    return Expanded;
  VPValue *Expanded = nullptr;
  if (auto *E = dyn_cast<SCEVConstant>(Expr))
    Expanded = Plan.getOrAddLiveIn(E->getValue());
  else if (auto *E = dyn_cast<SCEVUnknown>(Expr))
    Expanded = Plan.getOrAddLiveIn(E->getValue());
  else {
    Expanded = new VPExpandSCEVRecipe(Expr, SE);
    Plan.getPreheader()->appendRecipe(Expanded->getDefiningRecipe());
  }
  Plan.addSCEVExpansion(Expr, Expanded);
  return Expanded;
}

bool vputils::isHeaderMask(VPValue *V, VPlan &Plan) {
  if (isa<VPActiveLaneMaskPHIRecipe>(V))
    return true;

  auto IsWideCanonicalIV = [](VPValue *A) {
    return isa<VPWidenCanonicalIVRecipe>(A) ||
           (isa<VPWidenIntOrFpInductionRecipe>(A) &&
            cast<VPWidenIntOrFpInductionRecipe>(A)->isCanonical()) ||
           match(A, m_ScalarIVSteps(m_CanonicalIV(), m_SpecificInt(1)));
  };

  VPValue *A, *B;
  if (match(V, m_ActiveLaneMask(m_VPValue(A), m_VPValue(B))))
    return B == Plan.getTripCount() &&
           (match(A, m_ScalarIVSteps(m_CanonicalIV(), m_SpecificInt(1))) ||
            IsWideCanonicalIV(A));

  return match(V, m_Binary<Instruction::ICmp>(m_VPValue(A), m_VPValue(B))) &&
         IsWideCanonicalIV(A) && B == Plan.getOrCreateBackedgeTakenCount();
}

bool vputils::isUniformBoolean(VPValue *Cond) {
  if (match(Cond, m_Not(m_VPValue())))
    Cond = Cond->getDefiningRecipe()->getOperand(0);
  auto *R = Cond->getDefiningRecipe();
  if (!R)
    return true;
  // TODO: match additional patterns preserving uniformity of booleans, e.g.,
  // AND/OR/etc.
  return match(R, m_Binary<Instruction::ICmp>(m_VPValue(), m_VPValue())) &&
         all_of(R->operands(), [](VPValue *Op) {
           return vputils::isUniformAfterVectorization(Op);
         });
}