//===- VPlan.cpp - Vectorizer Plan ----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This is the LLVM vectorization plan. It represents a candidate for
/// vectorization, making it possible to plan and optimize how to vectorize a
/// given loop before generating LLVM-IR.
/// The vectorizer uses vectorization plans to estimate the costs of potential
/// candidates and, if profitable, to execute the desired plan, generating
/// vector LLVM-IR code.
///
//===----------------------------------------------------------------------===//

#include "VPlan.h"
#include "VPlanDominatorTree.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GenericDomTreeConstruction.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include <cassert>
#include <string>
#include <vector>

using namespace llvm;
extern cl::opt<bool> EnableVPlanNativePath;

#define DEBUG_TYPE "vplan"

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
raw_ostream &llvm::operator<<(raw_ostream &OS, const VPValue &V) {
  const VPInstruction *Instr = dyn_cast<VPInstruction>(&V);
  VPSlotTracker SlotTracker(
      (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
  V.print(OS, SlotTracker);
  return OS;
}
#endif

Value *VPLane::getAsRuntimeExpr(IRBuilderBase &Builder,
                                const ElementCount &VF) const {
  switch (LaneKind) {
  case VPLane::Kind::ScalableLast:
    // Lane = RuntimeVF - VF.getKnownMinValue() + Lane
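    // For example, with VF = <vscale x 4> and Lane == 3 this computes
    // RuntimeVF - 1, i.e. the index of the last element of the scalable
    // vector at run time.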
    return Builder.CreateSub(getRuntimeVF(Builder, Builder.getInt32Ty(), VF),
                             Builder.getInt32(VF.getKnownMinValue() - Lane));
  case VPLane::Kind::First:
    return Builder.getInt32(Lane);
  }
  llvm_unreachable("Unknown lane kind");
}

VPValue::VPValue(const unsigned char SC, Value *UV, VPDef *Def)
    : SubclassID(SC), UnderlyingVal(UV), Def(Def) {
  if (Def)
    Def->addDefinedValue(this);
}

VPValue::~VPValue() {
  assert(Users.empty() && "trying to delete a VPValue with remaining users");
  if (Def)
    Def->removeDefinedValue(this);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPValue::print(raw_ostream &OS, VPSlotTracker &SlotTracker) const {
  if (const VPRecipeBase *R = dyn_cast_or_null<VPRecipeBase>(Def))
    R->print(OS, "", SlotTracker);
  else
    printAsOperand(OS, SlotTracker);
}

void VPValue::dump() const {
  const VPRecipeBase *Instr = dyn_cast_or_null<VPRecipeBase>(this->Def);
  VPSlotTracker SlotTracker(
      (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
  print(dbgs(), SlotTracker);
  dbgs() << "\n";
}

void VPDef::dump() const {
  const VPRecipeBase *Instr = dyn_cast_or_null<VPRecipeBase>(this);
  VPSlotTracker SlotTracker(
      (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
  print(dbgs(), "", SlotTracker);
  dbgs() << "\n";
}
#endif

// Get the top-most entry block of \p Start. This is the entry block of the
// containing VPlan. This function is templated to support both const and
// non-const blocks.
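// The search proceeds in two steps: first climb to the outermost block via
// the parent chain, then walk backwards through predecessors until a block
// without any predecessors -- the plan's entry -- is found.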
template <typename T> static T *getPlanEntry(T *Start) {
  T *Next = Start;
  T *Current = Start;
  while ((Next = Next->getParent()))
    Current = Next;

  SmallSetVector<T *, 8> WorkList;
  WorkList.insert(Current);

  for (unsigned i = 0; i < WorkList.size(); i++) {
    T *Current = WorkList[i];
    if (Current->getNumPredecessors() == 0)
      return Current;
    auto &Predecessors = Current->getPredecessors();
    WorkList.insert(Predecessors.begin(), Predecessors.end());
  }

  llvm_unreachable("VPlan without any entry node without predecessors");
}

VPlan *VPBlockBase::getPlan() { return getPlanEntry(this)->Plan; }

const VPlan *VPBlockBase::getPlan() const { return getPlanEntry(this)->Plan; }

/// \return the VPBasicBlock that is the entry of Block, possibly indirectly.
const VPBasicBlock *VPBlockBase::getEntryBasicBlock() const {
  const VPBlockBase *Block = this;
  while (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getEntry();
  return cast<VPBasicBlock>(Block);
}

VPBasicBlock *VPBlockBase::getEntryBasicBlock() {
  VPBlockBase *Block = this;
  while (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getEntry();
  return cast<VPBasicBlock>(Block);
}

void VPBlockBase::setPlan(VPlan *ParentPlan) {
  assert(ParentPlan->getEntry() == this &&
         "Can only set plan on its entry block.");
  Plan = ParentPlan;
}

/// \return the VPBasicBlock that is the exit of Block, possibly indirectly.
const VPBasicBlock *VPBlockBase::getExitBasicBlock() const {
  const VPBlockBase *Block = this;
  while (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getExit();
  return cast<VPBasicBlock>(Block);
}

VPBasicBlock *VPBlockBase::getExitBasicBlock() {
  VPBlockBase *Block = this;
  while (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getExit();
  return cast<VPBasicBlock>(Block);
}

VPBlockBase *VPBlockBase::getEnclosingBlockWithSuccessors() {
  if (!Successors.empty() || !Parent)
    return this;
  assert(Parent->getExit() == this &&
         "Block w/o successors not the exit of its parent.");
  return Parent->getEnclosingBlockWithSuccessors();
}

VPBlockBase *VPBlockBase::getEnclosingBlockWithPredecessors() {
  if (!Predecessors.empty() || !Parent)
    return this;
  assert(Parent->getEntry() == this &&
         "Block w/o predecessors not the entry of its parent.");
  return Parent->getEnclosingBlockWithPredecessors();
}

VPValue *VPBlockBase::getCondBit() {
  return CondBitUser.getSingleOperandOrNull();
}

const VPValue *VPBlockBase::getCondBit() const {
  return CondBitUser.getSingleOperandOrNull();
}

void VPBlockBase::setCondBit(VPValue *CV) { CondBitUser.resetSingleOpUser(CV); }

VPValue *VPBlockBase::getPredicate() {
  return PredicateUser.getSingleOperandOrNull();
}

const VPValue *VPBlockBase::getPredicate() const {
  return PredicateUser.getSingleOperandOrNull();
}

void VPBlockBase::setPredicate(VPValue *CV) {
  PredicateUser.resetSingleOpUser(CV);
}

void VPBlockBase::deleteCFG(VPBlockBase *Entry) {
  SmallVector<VPBlockBase *, 8> Blocks(depth_first(Entry));

  for (VPBlockBase *Block : Blocks)
    delete Block;
}

VPBasicBlock::iterator VPBasicBlock::getFirstNonPhi() {
  iterator It = begin();
  while (It != end() && It->isPhi())
    It++;
  return It;
}

Value *VPTransformState::get(VPValue *Def, const VPIteration &Instance) {
  if (!Def->getDef())
    return Def->getLiveInIRValue();

  if (hasScalarValue(Def, Instance)) {
    return Data
        .PerPartScalars[Def][Instance.Part][Instance.Lane.mapToCacheIndex(VF)];
  }

  assert(hasVectorValue(Def, Instance.Part));
  auto *VecPart = Data.PerPartOutput[Def][Instance.Part];
  if (!VecPart->getType()->isVectorTy()) {
    assert(Instance.Lane.isFirstLane() && "cannot get lane > 0 for scalar");
    return VecPart;
  }
  // TODO: Cache created scalar values.
  Value *Lane = Instance.Lane.getAsRuntimeExpr(Builder, VF);
  auto *Extract = Builder.CreateExtractElement(VecPart, Lane);
  // set(Def, Extract, Instance);
  return Extract;
}

BasicBlock *
VPBasicBlock::createEmptyBasicBlock(VPTransformState::CFGState &CFG) {
  // BB stands for IR BasicBlocks. VPBB stands for VPlan VPBasicBlocks.
  // Pred stands for Predecessor. Prev stands for Previous - last
  // visited/created.
  BasicBlock *PrevBB = CFG.PrevBB;
  BasicBlock *NewBB = BasicBlock::Create(PrevBB->getContext(), getName(),
                                         PrevBB->getParent(), CFG.ExitBB);
  LLVM_DEBUG(dbgs() << "LV: created " << NewBB->getName() << '\n');

  // Hook up the new basic block to its predecessors.
  for (VPBlockBase *PredVPBlock : getHierarchicalPredecessors()) {
    VPBasicBlock *PredVPBB = PredVPBlock->getExitBasicBlock();
    auto &PredVPSuccessors = PredVPBB->getSuccessors();
    BasicBlock *PredBB = CFG.VPBB2IRBB[PredVPBB];

    // In the outer loop vectorization scenario, the predecessor BB may not yet
    // be visited (backedge). Mark the VPBasicBlock for fixup at the end of
    // vectorization. We do not encounter this case in inner loop vectorization
    // as we start out by building a loop skeleton with the vector loop header
    // and latch blocks. As a result, we never enter this function for the
    // header block in the non VPlan-native path.
    if (!PredBB) {
      assert(EnableVPlanNativePath &&
             "Unexpected null predecessor in non VPlan-native path");
      CFG.VPBBsToFix.push_back(PredVPBB);
      continue;
    }

    assert(PredBB && "Predecessor basic-block not found building successor.");
    auto *PredBBTerminator = PredBB->getTerminator();
    LLVM_DEBUG(dbgs() << "LV: draw edge from " << PredBB->getName() << '\n');
    if (isa<UnreachableInst>(PredBBTerminator)) {
      assert(PredVPSuccessors.size() == 1 &&
             "Predecessor ending w/o branch must have single successor.");
      PredBBTerminator->eraseFromParent();
      BranchInst::Create(NewBB, PredBB);
    } else {
      assert(PredVPSuccessors.size() == 2 &&
             "Predecessor ending with branch must have two successors.");
      unsigned idx = PredVPSuccessors.front() == this ? 0 : 1;
      assert(!PredBBTerminator->getSuccessor(idx) &&
             "Trying to reset an existing successor block.");
      PredBBTerminator->setSuccessor(idx, NewBB);
    }
  }
  return NewBB;
}

void VPBasicBlock::execute(VPTransformState *State) {
  bool Replica = State->Instance && !State->Instance->isFirstIteration();
  VPBasicBlock *PrevVPBB = State->CFG.PrevVPBB;
  VPBlockBase *SingleHPred = nullptr;
  BasicBlock *NewBB = State->CFG.PrevBB; // Reuse it if possible.

  // 1. Create an IR basic block, or reuse the last one if possible.
  // The last IR basic block is reused, as an optimization, in three cases:
  // A. the first VPBB reuses the loop header BB - when PrevVPBB is null;
  // B. when the current VPBB has a single (hierarchical) predecessor which
  //    is PrevVPBB and the latter has a single (hierarchical) successor; and
  // C. when the current VPBB is an entry of a region replica - where PrevVPBB
  //    is the exit of this region from a previous instance, or the
  //    predecessor of this region.
  if (PrevVPBB && /* A */
      !((SingleHPred = getSingleHierarchicalPredecessor()) &&
        SingleHPred->getExitBasicBlock() == PrevVPBB &&
        PrevVPBB->getSingleHierarchicalSuccessor()) && /* B */
      !(Replica && getPredecessors().empty())) {       /* C */
    NewBB = createEmptyBasicBlock(State->CFG);
    State->Builder.SetInsertPoint(NewBB);
    // Temporarily terminate with unreachable until CFG is rewired.
    UnreachableInst *Terminator = State->Builder.CreateUnreachable();
    State->Builder.SetInsertPoint(Terminator);
    State->CFG.PrevBB = NewBB;
  }

  if (State->CurrentVectorLoop &&
      !State->CurrentVectorLoop->contains(State->CFG.PrevBB)) {
    // Register NewBB in its loop. In innermost loops it is the same for all
    // BBs.
    State->CurrentVectorLoop->addBasicBlockToLoop(State->CFG.PrevBB,
                                                  *State->LI);
  }

  // 2. Fill the IR basic block with IR instructions.
  LLVM_DEBUG(dbgs() << "LV: vectorizing VPBB:" << getName()
                    << " in BB:" << NewBB->getName() << '\n');

  State->CFG.VPBB2IRBB[this] = NewBB;
  State->CFG.PrevVPBB = this;

  for (VPRecipeBase &Recipe : Recipes)
    Recipe.execute(*State);

  VPValue *CBV;
  if (EnableVPlanNativePath && (CBV = getCondBit())) {
    assert(CBV->getUnderlyingValue() &&
           "Unexpected null underlying value for condition bit");

    // Condition bit value in a VPBasicBlock is used as the branch selector. In
    // the VPlan-native path case, since all branches are uniform we generate a
    // branch instruction using the condition value from vector lane 0 and
    // dummy successors. The successors are fixed later when the successor
    // blocks are visited.
    Value *NewCond = State->get(CBV, {0, 0});

    // Replace the temporary unreachable terminator with the new conditional
    // branch.
    auto *CurrentTerminator = NewBB->getTerminator();
    assert(isa<UnreachableInst>(CurrentTerminator) &&
           "Expected to replace unreachable terminator with conditional "
           "branch.");
    auto *CondBr = BranchInst::Create(NewBB, nullptr, NewCond);
    CondBr->setSuccessor(0, nullptr);
    ReplaceInstWithInst(CurrentTerminator, CondBr);
  }

  LLVM_DEBUG(dbgs() << "LV: filled BB:" << *NewBB);
}

void VPBasicBlock::dropAllReferences(VPValue *NewValue) {
  for (VPRecipeBase &R : Recipes) {
    for (auto *Def : R.definedValues())
      Def->replaceAllUsesWith(NewValue);

    for (unsigned I = 0, E = R.getNumOperands(); I != E; I++)
      R.setOperand(I, NewValue);
  }
}

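// Note: the new block created below also takes over this block's successors.
// For example, splitting a block [R1, R2, R3] at R2 yields [R1] ->
// [R2, R3], with the ".split" block inheriting the original successors.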
VPBasicBlock *VPBasicBlock::splitAt(iterator SplitAt) {
  assert((SplitAt == end() || SplitAt->getParent() == this) &&
         "can only split at a position in the same block");

  SmallVector<VPBlockBase *, 2> Succs(successors());
  // First, disconnect the current block from its successors.
  for (VPBlockBase *Succ : Succs)
    VPBlockUtils::disconnectBlocks(this, Succ);

  // Create new empty block after the block to split.
  auto *SplitBlock = new VPBasicBlock(getName() + ".split");
  VPBlockUtils::insertBlockAfter(SplitBlock, this);

  // Add successors for block to split to new block.
  for (VPBlockBase *Succ : Succs)
    VPBlockUtils::connectBlocks(SplitBlock, Succ);

  // Finally, move the recipes starting at SplitAt to new block.
  for (VPRecipeBase &ToMove :
       make_early_inc_range(make_range(SplitAt, this->end())))
    ToMove.moveBefore(*SplitBlock, SplitBlock->end());

  return SplitBlock;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPBlockBase::printSuccessors(raw_ostream &O, const Twine &Indent) const {
  if (getSuccessors().empty()) {
    O << Indent << "No successors\n";
  } else {
    O << Indent << "Successor(s): ";
    ListSeparator LS;
    for (auto *Succ : getSuccessors())
      O << LS << Succ->getName();
    O << '\n';
  }
}

void VPBasicBlock::print(raw_ostream &O, const Twine &Indent,
                         VPSlotTracker &SlotTracker) const {
  O << Indent << getName() << ":\n";
  if (const VPValue *Pred = getPredicate()) {
    O << Indent << "BlockPredicate:";
    Pred->printAsOperand(O, SlotTracker);
    if (const auto *PredInst = dyn_cast<VPInstruction>(Pred))
      O << " (" << PredInst->getParent()->getName() << ")";
    O << '\n';
  }

  auto RecipeIndent = Indent + " ";
  for (const VPRecipeBase &Recipe : *this) {
    Recipe.print(O, RecipeIndent, SlotTracker);
    O << '\n';
  }

  printSuccessors(O, Indent);

  if (const VPValue *CBV = getCondBit()) {
    O << Indent << "CondBit: ";
    CBV->printAsOperand(O, SlotTracker);
    if (const auto *CBI = dyn_cast<VPInstruction>(CBV))
      O << " (" << CBI->getParent()->getName() << ")";
    O << '\n';
  }
}
#endif

void VPRegionBlock::dropAllReferences(VPValue *NewValue) {
  for (VPBlockBase *Block : depth_first(Entry))
    // Drop all references in VPBasicBlocks and replace all uses with
    // DummyValue.
    Block->dropAllReferences(NewValue);
}

void VPRegionBlock::execute(VPTransformState *State) {
  ReversePostOrderTraversal<VPBlockBase *> RPOT(Entry);

  if (!isReplicator()) {
    // Create and register the new vector loop.
    State->CurrentVectorLoop = State->LI->AllocateLoop();
    Loop *ParentLoop = State->LI->getLoopFor(State->CFG.VectorPreHeader);

    // Insert the new loop into the loop nest and register the new basic blocks
    // before calling any utilities such as SCEV that require valid LoopInfo.
    if (ParentLoop)
      ParentLoop->addChildLoop(State->CurrentVectorLoop);
    else
      State->LI->addTopLevelLoop(State->CurrentVectorLoop);

    // Visit the VPBlocks connected to "this", starting from it.
    for (VPBlockBase *Block : RPOT) {
      if (EnableVPlanNativePath) {
        // The inner loop vectorization path does not represent loop preheader
        // and exit blocks as part of the VPlan. In the VPlan-native path, skip
        // vectorizing loop preheader block. In future, we may replace this
        // check with the check for loop preheader.
        if (Block->getNumPredecessors() == 0)
          continue;

        // Skip vectorizing loop exit block. In future, we may replace this
        // check with the check for loop exit.
        if (Block->getNumSuccessors() == 0)
          continue;
      }

      LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
      Block->execute(State);
    }
    return;
  }

  assert(!State->Instance && "Replicating a Region with non-null instance.");

  // Enter replicating mode.
  State->Instance = VPIteration(0, 0);

  for (unsigned Part = 0, UF = State->UF; Part < UF; ++Part) {
    State->Instance->Part = Part;
    assert(!State->VF.isScalable() && "VF is assumed to be non scalable.");
    for (unsigned Lane = 0, VF = State->VF.getKnownMinValue(); Lane < VF;
         ++Lane) {
      State->Instance->Lane = VPLane(Lane, VPLane::Kind::First);
      // Visit the VPBlocks connected to \p this, starting from it.
      for (VPBlockBase *Block : RPOT) {
        LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
        Block->execute(State);
      }
    }
  }

  // Exit replicating mode.
  State->Instance.reset();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPRegionBlock::print(raw_ostream &O, const Twine &Indent,
                          VPSlotTracker &SlotTracker) const {
  O << Indent << (isReplicator() ? "<xVFxUF> " : "<x1> ") << getName() << ": {";
  auto NewIndent = Indent + " ";
  for (auto *BlockBase : depth_first(Entry)) {
    O << '\n';
    BlockBase->print(O, NewIndent, SlotTracker);
  }
  O << Indent << "}\n";

  printSuccessors(O, Indent);
}
#endif

bool VPRecipeBase::mayWriteToMemory() const {
  switch (getVPDefID()) {
  case VPWidenMemoryInstructionSC: {
    return cast<VPWidenMemoryInstructionRecipe>(this)->isStore();
  }
  case VPReplicateSC:
  case VPWidenCallSC:
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
        ->mayWriteToMemory();
  case VPBranchOnMaskSC:
    return false;
  case VPWidenIntOrFpInductionSC:
  case VPWidenCanonicalIVSC:
  case VPWidenPHISC:
  case VPBlendSC:
  case VPWidenSC:
  case VPWidenGEPSC:
  case VPReductionSC:
  case VPWidenSelectSC: {
    const Instruction *I =
        dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
    (void)I;
    assert((!I || !I->mayWriteToMemory()) &&
           "underlying instruction may write to memory");
    return false;
  }
  default:
    return true;
  }
}

bool VPRecipeBase::mayReadFromMemory() const {
  switch (getVPDefID()) {
  case VPWidenMemoryInstructionSC: {
    return !cast<VPWidenMemoryInstructionRecipe>(this)->isStore();
  }
  case VPReplicateSC:
  case VPWidenCallSC:
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
        ->mayReadFromMemory();
  case VPBranchOnMaskSC:
    return false;
  case VPWidenIntOrFpInductionSC:
  case VPWidenCanonicalIVSC:
  case VPWidenPHISC:
  case VPBlendSC:
  case VPWidenSC:
  case VPWidenGEPSC:
  case VPReductionSC:
  case VPWidenSelectSC: {
    const Instruction *I =
        dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
    (void)I;
    assert((!I || !I->mayReadFromMemory()) &&
           "underlying instruction may read from memory");
    return false;
  }
  default:
    return true;
  }
}

bool VPRecipeBase::mayHaveSideEffects() const {
  switch (getVPDefID()) {
  case VPBranchOnMaskSC:
    return false;
  case VPWidenIntOrFpInductionSC:
  case VPWidenPointerInductionSC:
  case VPWidenCanonicalIVSC:
  case VPWidenPHISC:
  case VPBlendSC:
  case VPWidenSC:
  case VPWidenGEPSC:
  case VPReductionSC:
  case VPWidenSelectSC:
  case VPScalarIVStepsSC: {
    const Instruction *I =
        dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
    (void)I;
    assert((!I || !I->mayHaveSideEffects()) &&
           "underlying instruction has side-effects");
    return false;
  }
  case VPReplicateSC: {
    auto *R = cast<VPReplicateRecipe>(this);
    return R->getUnderlyingInstr()->mayHaveSideEffects();
  }
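  // Any recipe not handled above is conservatively assumed to have
  // side-effects.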
  default:
    return true;
  }
}

void VPRecipeBase::insertBefore(VPRecipeBase *InsertPos) {
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(InsertPos->getParent() &&
         "Insertion position not in any VPBasicBlock");
  Parent = InsertPos->getParent();
  Parent->getRecipeList().insert(InsertPos->getIterator(), this);
}

void VPRecipeBase::insertBefore(VPBasicBlock &BB,
                                iplist<VPRecipeBase>::iterator I) {
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(I == BB.end() || I->getParent() == &BB);
  Parent = &BB;
  BB.getRecipeList().insert(I, this);
}

void VPRecipeBase::insertAfter(VPRecipeBase *InsertPos) {
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(InsertPos->getParent() &&
         "Insertion position not in any VPBasicBlock");
  Parent = InsertPos->getParent();
  Parent->getRecipeList().insertAfter(InsertPos->getIterator(), this);
}

void VPRecipeBase::removeFromParent() {
  assert(getParent() && "Recipe not in any VPBasicBlock");
  getParent()->getRecipeList().remove(getIterator());
  Parent = nullptr;
}

iplist<VPRecipeBase>::iterator VPRecipeBase::eraseFromParent() {
  assert(getParent() && "Recipe not in any VPBasicBlock");
  return getParent()->getRecipeList().erase(getIterator());
}

void VPRecipeBase::moveAfter(VPRecipeBase *InsertPos) {
  removeFromParent();
  insertAfter(InsertPos);
}

void VPRecipeBase::moveBefore(VPBasicBlock &BB,
                              iplist<VPRecipeBase>::iterator I) {
  removeFromParent();
  insertBefore(BB, I);
}

void VPInstruction::generateInstruction(VPTransformState &State,
                                        unsigned Part) {
  IRBuilderBase &Builder = State.Builder;
  Builder.SetCurrentDebugLocation(DL);

  if (Instruction::isBinaryOp(getOpcode())) {
    Value *A = State.get(getOperand(0), Part);
    Value *B = State.get(getOperand(1), Part);
    Value *V = Builder.CreateBinOp((Instruction::BinaryOps)getOpcode(), A, B);
    State.set(this, V, Part);
    return;
  }

  switch (getOpcode()) {
  case VPInstruction::Not: {
    Value *A = State.get(getOperand(0), Part);
    Value *V = Builder.CreateNot(A);
    State.set(this, V, Part);
    break;
  }
  case VPInstruction::ICmpULE: {
    Value *IV = State.get(getOperand(0), Part);
    Value *TC = State.get(getOperand(1), Part);
    Value *V = Builder.CreateICmpULE(IV, TC);
    State.set(this, V, Part);
    break;
  }
  case Instruction::Select: {
    Value *Cond = State.get(getOperand(0), Part);
    Value *Op1 = State.get(getOperand(1), Part);
    Value *Op2 = State.get(getOperand(2), Part);
    Value *V = Builder.CreateSelect(Cond, Op1, Op2);
    State.set(this, V, Part);
    break;
  }
  case VPInstruction::ActiveLaneMask: {
    // Get first lane of vector induction variable.
    Value *VIVElem0 = State.get(getOperand(0), VPIteration(Part, 0));
    // Get the original loop tripcount.
    Value *ScalarTC = State.get(getOperand(1), Part);

    auto *Int1Ty = Type::getInt1Ty(Builder.getContext());
    auto *PredTy = VectorType::get(Int1Ty, State.VF);
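    // The resulting mask sets lane i to (VIVElem0 + i) < ScalarTC (unsigned
    // compare), disabling all lanes at or past the trip count.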
    Instruction *Call = Builder.CreateIntrinsic(
        Intrinsic::get_active_lane_mask, {PredTy, ScalarTC->getType()},
        {VIVElem0, ScalarTC}, nullptr, "active.lane.mask");
    State.set(this, Call, Part);
    break;
  }
  case VPInstruction::FirstOrderRecurrenceSplice: {
    // Generate code to combine the previous and current values in vector v3.
    //
    //   vector.ph:
    //     v_init = vector(..., ..., ..., a[-1])
    //     br vector.body
    //
    //   vector.body
    //     i = phi [0, vector.ph], [i+4, vector.body]
    //     v1 = phi [v_init, vector.ph], [v2, vector.body]
    //     v2 = a[i, i+1, i+2, i+3];
    //     v3 = vector(v1(3), v2(0, 1, 2))

    // For the first part, use the recurrence phi (v1), otherwise v2.
    auto *V1 = State.get(getOperand(0), 0);
    Value *PartMinus1 = Part == 0 ? V1 : State.get(getOperand(1), Part - 1);
    if (!PartMinus1->getType()->isVectorTy()) {
      State.set(this, PartMinus1, Part);
    } else {
      Value *V2 = State.get(getOperand(1), Part);
      State.set(this, Builder.CreateVectorSplice(PartMinus1, V2, -1), Part);
    }
    break;
  }

  case VPInstruction::CanonicalIVIncrement:
  case VPInstruction::CanonicalIVIncrementNUW: {
    Value *Next = nullptr;
    if (Part == 0) {
      bool IsNUW = getOpcode() == VPInstruction::CanonicalIVIncrementNUW;
      auto *Phi = State.get(getOperand(0), 0);
      // The loop step is equal to the vectorization factor (num of SIMD
      // elements) times the unroll factor (num of SIMD instructions).
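      // E.g., for VF = 4 and UF = 2 the canonical IV advances by 8 per
      // iteration of the vector loop (vscale * 4 * 2 for scalable VFs).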
      Value *Step =
          createStepForVF(Builder, Phi->getType(), State.VF, State.UF);
      Next = Builder.CreateAdd(Phi, Step, "index.next", IsNUW, false);
    } else {
      Next = State.get(this, 0);
    }

    State.set(this, Next, Part);
    break;
  }
  case VPInstruction::BranchOnCount: {
    if (Part != 0)
      break;
    // First create the compare.
    Value *IV = State.get(getOperand(0), Part);
    Value *TC = State.get(getOperand(1), Part);
    Value *Cond = Builder.CreateICmpEQ(IV, TC);

    // Now create the branch.
    auto *Plan = getParent()->getPlan();
    VPRegionBlock *TopRegion = Plan->getVectorLoopRegion();
    VPBasicBlock *Header = TopRegion->getEntry()->getEntryBasicBlock();
    if (Header->empty()) {
      assert(EnableVPlanNativePath &&
             "empty entry block only expected in VPlanNativePath");
      Header = cast<VPBasicBlock>(Header->getSingleSuccessor());
    }
    // TODO: Once the exit block is modeled in VPlan, use it instead of going
    // through State.CFG.ExitBB.
    BasicBlock *Exit = State.CFG.ExitBB;

    Builder.CreateCondBr(Cond, Exit, State.CFG.VPBB2IRBB[Header]);
    Builder.GetInsertBlock()->getTerminator()->eraseFromParent();
    break;
  }
  default:
    llvm_unreachable("Unsupported opcode for instruction");
  }
}

void VPInstruction::execute(VPTransformState &State) {
  assert(!State.Instance && "VPInstruction executing an Instance");
  IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
  State.Builder.setFastMathFlags(FMF);
  for (unsigned Part = 0; Part < State.UF; ++Part)
    generateInstruction(State, Part);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPInstruction::dump() const {
  VPSlotTracker SlotTracker(getParent()->getPlan());
  print(dbgs(), "", SlotTracker);
}

void VPInstruction::print(raw_ostream &O, const Twine &Indent,
                          VPSlotTracker &SlotTracker) const {
  O << Indent << "EMIT ";

  if (hasResult()) {
    printAsOperand(O, SlotTracker);
    O << " = ";
  }

  switch (getOpcode()) {
  case VPInstruction::Not:
    O << "not";
    break;
  case VPInstruction::ICmpULE:
    O << "icmp ule";
    break;
  case VPInstruction::SLPLoad:
    O << "combined load";
    break;
  case VPInstruction::SLPStore:
    O << "combined store";
    break;
  case VPInstruction::ActiveLaneMask:
    O << "active lane mask";
    break;
  case VPInstruction::FirstOrderRecurrenceSplice:
    O << "first-order splice";
    break;
  case VPInstruction::CanonicalIVIncrement:
    O << "VF * UF + ";
    break;
  case VPInstruction::CanonicalIVIncrementNUW:
    O << "VF * UF +(nuw) ";
    break;
  case VPInstruction::BranchOnCount:
    O << "branch-on-count ";
    break;
  default:
    O << Instruction::getOpcodeName(getOpcode());
  }

  O << FMF;

  for (const VPValue *Operand : operands()) {
    O << " ";
    Operand->printAsOperand(O, SlotTracker);
  }

  if (DL) {
    O << ", !dbg ";
    DL.print(O);
  }
}
#endif

void VPInstruction::setFastMathFlags(FastMathFlags FMFNew) {
  // Make sure the VPInstruction is a floating-point operation.
  assert((Opcode == Instruction::FAdd || Opcode == Instruction::FMul ||
          Opcode == Instruction::FNeg || Opcode == Instruction::FSub ||
          Opcode == Instruction::FDiv || Opcode == Instruction::FRem ||
          Opcode == Instruction::FCmp) &&
         "this op can't take fast-math flags");
  FMF = FMFNew;
}

void VPlan::prepareToExecute(Value *TripCountV, Value *VectorTripCountV,
                             Value *CanonicalIVStartValue,
                             VPTransformState &State) {
  // Check if the trip count is needed, and if so build it.
  if (TripCount && TripCount->getNumUsers()) {
    for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
      State.set(TripCount, TripCountV, Part);
  }

  // Check if the backedge taken count is needed, and if so build it.
  if (BackedgeTakenCount && BackedgeTakenCount->getNumUsers()) {
    IRBuilder<> Builder(State.CFG.VectorPreHeader->getTerminator());
    auto *TCMO = Builder.CreateSub(TripCountV,
                                   ConstantInt::get(TripCountV->getType(), 1),
                                   "trip.count.minus.1");
    auto VF = State.VF;
    Value *VTCMO =
        VF.isScalar() ? TCMO : Builder.CreateVectorSplat(VF, TCMO, "broadcast");
    for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
      State.set(BackedgeTakenCount, VTCMO, Part);
  }

  for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
    State.set(&VectorTripCount, VectorTripCountV, Part);

  // When vectorizing the epilogue loop, the canonical induction start value
  // needs to be changed from zero to the value after the main vector loop.
  if (CanonicalIVStartValue) {
    VPValue *VPV = new VPValue(CanonicalIVStartValue);
    addExternalDef(VPV);
    auto *IV = getCanonicalIV();
    assert(all_of(IV->users(),
                  [](const VPUser *U) {
                    if (isa<VPScalarIVStepsRecipe>(U))
                      return true;
                    auto *VPI = cast<VPInstruction>(U);
                    return VPI->getOpcode() ==
                               VPInstruction::CanonicalIVIncrement ||
                           VPI->getOpcode() ==
                               VPInstruction::CanonicalIVIncrementNUW;
                  }) &&
           "the canonical IV should only be used by its increments or "
           "ScalarIVSteps when resetting the start value");
    IV->setOperand(0, VPV);
  }
}

/// Generate the code inside the body of the vectorized loop. Assumes a single
/// LoopVectorBody basic-block was created for this. Introduce additional
/// basic-blocks as needed, and fill them all.
void VPlan::execute(VPTransformState *State) {
  // Set the reverse mapping from VPValues to Values for code generation.
  for (auto &Entry : Value2VPValue)
    State->VPValue2Value[Entry.second] = Entry.first;

  // Initialize CFG state.
  State->CFG.PrevVPBB = nullptr;
  BasicBlock *VectorHeaderBB = State->CFG.VectorPreHeader->getSingleSuccessor();
  State->CFG.PrevBB = VectorHeaderBB;
  State->CFG.ExitBB = VectorHeaderBB->getSingleSuccessor();
  State->CurrentVectorLoop = State->LI->getLoopFor(VectorHeaderBB);

  // Remove the edge between Header and Latch to allow other connections.
  // Temporarily terminate with unreachable until CFG is rewired.
  // Note: this asserts the generated code's assumption that
  // getFirstInsertionPt() can be dereferenced into an Instruction.
  VectorHeaderBB->getTerminator()->eraseFromParent();
  State->Builder.SetInsertPoint(VectorHeaderBB);
  UnreachableInst *Terminator = State->Builder.CreateUnreachable();
  State->Builder.SetInsertPoint(Terminator);

  // Generate code in loop body.
  for (VPBlockBase *Block : depth_first(Entry))
    Block->execute(State);

  // Setup branch terminator successors for VPBBs in VPBBsToFix based on
  // VPBB's successors.
  for (auto VPBB : State->CFG.VPBBsToFix) {
    assert(EnableVPlanNativePath &&
           "Unexpected VPBBsToFix in non VPlan-native path");
    BasicBlock *BB = State->CFG.VPBB2IRBB[VPBB];
    assert(BB && "Unexpected null basic block for VPBB");

    unsigned Idx = 0;
    auto *BBTerminator = BB->getTerminator();

    for (VPBlockBase *SuccVPBlock : VPBB->getHierarchicalSuccessors()) {
      VPBasicBlock *SuccVPBB = SuccVPBlock->getEntryBasicBlock();
      BBTerminator->setSuccessor(Idx, State->CFG.VPBB2IRBB[SuccVPBB]);
      ++Idx;
    }
  }

  BasicBlock *VectorLatchBB = State->CFG.PrevBB;

  // Fix the latch value of canonical, reduction and first-order recurrences
  // phis in the vector loop.
  VPBasicBlock *Header = getVectorLoopRegion()->getEntryBasicBlock();
  if (Header->empty()) {
    assert(EnableVPlanNativePath);
    Header = cast<VPBasicBlock>(Header->getSingleSuccessor());
  }
  for (VPRecipeBase &R : Header->phis()) {
    // Skip phi-like recipes that generate their backedge values themselves.
    if (isa<VPWidenPHIRecipe>(&R))
      continue;

    if (isa<VPWidenPointerInductionRecipe>(&R) ||
        isa<VPWidenIntOrFpInductionRecipe>(&R)) {
      PHINode *Phi = nullptr;
      if (isa<VPWidenIntOrFpInductionRecipe>(&R)) {
        Phi = cast<PHINode>(State->get(R.getVPSingleValue(), 0));
      } else {
        auto *WidenPhi = cast<VPWidenPointerInductionRecipe>(&R);
        // TODO: Split off the case that all users of a pointer phi are scalar
        // from the VPWidenPointerInductionRecipe.
        if (all_of(WidenPhi->users(), [WidenPhi](const VPUser *U) {
              return cast<VPRecipeBase>(U)->usesScalars(WidenPhi);
            }))
          continue;

        auto *GEP = cast<GetElementPtrInst>(State->get(WidenPhi, 0));
        Phi = cast<PHINode>(GEP->getPointerOperand());
      }

      Phi->setIncomingBlock(1, VectorLatchBB);

      // Move the last step to the end of the latch block. This ensures
      // consistent placement of all induction updates.
      Instruction *Inc = cast<Instruction>(Phi->getIncomingValue(1));
      Inc->moveBefore(VectorLatchBB->getTerminator()->getPrevNode());
      continue;
    }

    auto *PhiR = cast<VPHeaderPHIRecipe>(&R);
    // For canonical IV, first-order recurrences and in-order reduction phis,
    // only a single part is generated, which provides the last part from the
    // previous iteration. For non-ordered reductions all UF parts are
    // generated.
    bool SinglePartNeeded = isa<VPCanonicalIVPHIRecipe>(PhiR) ||
                            isa<VPFirstOrderRecurrencePHIRecipe>(PhiR) ||
                            cast<VPReductionPHIRecipe>(PhiR)->isOrdered();
    unsigned LastPartForNewPhi = SinglePartNeeded ? 1 : State->UF;

    for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
      Value *Phi = State->get(PhiR, Part);
      Value *Val = State->get(PhiR->getBackedgeValue(),
                              SinglePartNeeded ? State->UF - 1 : Part);
      cast<PHINode>(Phi)->addIncoming(Val, VectorLatchBB);
    }
  }

  // We do not attempt to preserve DT for outer loop vectorization currently.
  if (!EnableVPlanNativePath)
    updateDominatorTree(State->DT, VectorHeaderBB, VectorLatchBB,
                        State->CFG.ExitBB);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD
void VPlan::print(raw_ostream &O) const {
  VPSlotTracker SlotTracker(this);

  O << "VPlan '" << Name << "' {";

  if (VectorTripCount.getNumUsers() > 0) {
    O << "\nLive-in ";
    VectorTripCount.printAsOperand(O, SlotTracker);
    O << " = vector-trip-count\n";
  }

  if (BackedgeTakenCount && BackedgeTakenCount->getNumUsers()) {
    O << "\nLive-in ";
    BackedgeTakenCount->printAsOperand(O, SlotTracker);
    O << " = backedge-taken count\n";
  }

  for (const VPBlockBase *Block : depth_first(getEntry())) {
    O << '\n';
    Block->print(O, "", SlotTracker);
  }
  O << "}\n";
}

LLVM_DUMP_METHOD
void VPlan::printDOT(raw_ostream &O) const {
  VPlanPrinter Printer(O, *this);
  Printer.dump();
}

LLVM_DUMP_METHOD
void VPlan::dump() const { print(dbgs()); }
#endif

void VPlan::updateDominatorTree(DominatorTree *DT, BasicBlock *LoopHeaderBB,
                                BasicBlock *LoopLatchBB,
                                BasicBlock *LoopExitBB) {
  // The vector body may be more than a single basic-block by this point.
  // Update the dominator tree information inside the vector body by
  // propagating it from header to latch, expecting only triangular
  // control-flow, if any.
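  // A "triangle" here means a block BB with successors InterimSucc and
  // PostDomSucc, where InterimSucc's only successor is PostDomSucc; both are
  // then immediately dominated by BB.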
  BasicBlock *PostDomSucc = nullptr;
  for (auto *BB = LoopHeaderBB; BB != LoopLatchBB; BB = PostDomSucc) {
    // Get the list of successors of this block.
    std::vector<BasicBlock *> Succs(succ_begin(BB), succ_end(BB));
    assert(Succs.size() <= 2 &&
           "Basic block in vector loop has more than 2 successors.");
    PostDomSucc = Succs[0];
    if (Succs.size() == 1) {
      assert(PostDomSucc->getSinglePredecessor() &&
             "PostDom successor has more than one predecessor.");
      DT->addNewBlock(PostDomSucc, BB);
      continue;
    }
    BasicBlock *InterimSucc = Succs[1];
    if (PostDomSucc->getSingleSuccessor() == InterimSucc) {
      PostDomSucc = Succs[1];
      InterimSucc = Succs[0];
    }
    assert(InterimSucc->getSingleSuccessor() == PostDomSucc &&
           "One successor of a basic block does not lead to the other.");
    assert(InterimSucc->getSinglePredecessor() &&
           "Interim successor has more than one predecessor.");
    assert(PostDomSucc->hasNPredecessors(2) &&
           "PostDom successor has more than two predecessors.");
    DT->addNewBlock(InterimSucc, BB);
    DT->addNewBlock(PostDomSucc, BB);
  }
  // Latch block is a new dominator for the loop exit.
  DT->changeImmediateDominator(LoopExitBB, LoopLatchBB);
  assert(DT->verify(DominatorTree::VerificationLevel::Fast));
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
Twine VPlanPrinter::getUID(const VPBlockBase *Block) {
  return (isa<VPRegionBlock>(Block) ? "cluster_N" : "N") +
         Twine(getOrCreateBID(Block));
}

Twine VPlanPrinter::getOrCreateName(const VPBlockBase *Block) {
  const std::string &Name = Block->getName();
  if (!Name.empty())
    return Name;
  return "VPB" + Twine(getOrCreateBID(Block));
}

void VPlanPrinter::dump() {
  Depth = 1;
  bumpIndent(0);
  OS << "digraph VPlan {\n";
  OS << "graph [labelloc=t, fontsize=30; label=\"Vectorization Plan";
  if (!Plan.getName().empty())
    OS << "\\n" << DOT::EscapeString(Plan.getName());
  if (Plan.BackedgeTakenCount) {
    OS << ", where:\\n";
    Plan.BackedgeTakenCount->print(OS, SlotTracker);
    OS << " := BackedgeTakenCount";
  }
  OS << "\"]\n";
  OS << "node [shape=rect, fontname=Courier, fontsize=30]\n";
  OS << "edge [fontname=Courier, fontsize=30]\n";
  OS << "compound=true\n";

  for (const VPBlockBase *Block : depth_first(Plan.getEntry()))
    dumpBlock(Block);

  OS << "}\n";
}

void VPlanPrinter::dumpBlock(const VPBlockBase *Block) {
  if (const VPBasicBlock *BasicBlock = dyn_cast<VPBasicBlock>(Block))
    dumpBasicBlock(BasicBlock);
  else if (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    dumpRegion(Region);
  else
    llvm_unreachable("Unsupported kind of VPBlock.");
}

void VPlanPrinter::drawEdge(const VPBlockBase *From, const VPBlockBase *To,
                            bool Hidden, const Twine &Label) {
  // Due to "dot" we print an edge between two regions as an edge between the
  // exit basic block and the entry basic block of the respective regions.
  const VPBlockBase *Tail = From->getExitBasicBlock();
  const VPBlockBase *Head = To->getEntryBasicBlock();
  OS << Indent << getUID(Tail) << " -> " << getUID(Head);
  OS << " [ label=\"" << Label << '\"';
  if (Tail != From)
    OS << " ltail=" << getUID(From);
  if (Head != To)
    OS << " lhead=" << getUID(To);
  if (Hidden)
    OS << "; splines=none";
  OS << "]\n";
}

void VPlanPrinter::dumpEdges(const VPBlockBase *Block) {
  auto &Successors = Block->getSuccessors();
  if (Successors.size() == 1)
    drawEdge(Block, Successors.front(), false, "");
  else if (Successors.size() == 2) {
    drawEdge(Block, Successors.front(), false, "T");
    drawEdge(Block, Successors.back(), false, "F");
  } else {
    unsigned SuccessorNumber = 0;
    for (auto *Successor : Successors)
      drawEdge(Block, Successor, false, Twine(SuccessorNumber++));
  }
}

void VPlanPrinter::dumpBasicBlock(const VPBasicBlock *BasicBlock) {
  // Implement dot-formatted dump by performing plain-text dump into the
  // temporary storage followed by some post-processing.
  OS << Indent << getUID(BasicBlock) << " [label =\n";
  bumpIndent(1);
  std::string Str;
  raw_string_ostream SS(Str);
  // Use no indentation as we need to wrap the lines into quotes ourselves.
  BasicBlock->print(SS, "", SlotTracker);

  // We need to process each line of the output separately, so split
  // single-string plain-text dump.
  SmallVector<StringRef, 0> Lines;
  StringRef(Str).rtrim('\n').split(Lines, "\n");

  auto EmitLine = [&](StringRef Line, StringRef Suffix) {
    OS << Indent << '"' << DOT::EscapeString(Line.str()) << "\\l\"" << Suffix;
  };

  // Don't need the "+" after the last line.
  for (auto Line : make_range(Lines.begin(), Lines.end() - 1))
    EmitLine(Line, " +\n");
  EmitLine(Lines.back(), "\n");

  bumpIndent(-1);
  OS << Indent << "]\n";

  dumpEdges(BasicBlock);
}

void VPlanPrinter::dumpRegion(const VPRegionBlock *Region) {
  OS << Indent << "subgraph " << getUID(Region) << " {\n";
  bumpIndent(1);
  OS << Indent << "fontname=Courier\n"
     << Indent << "label=\""
     << DOT::EscapeString(Region->isReplicator() ? "<xVFxUF> " : "<x1> ")
     << DOT::EscapeString(Region->getName()) << "\"\n";
  // Dump the blocks of the region.
  assert(Region->getEntry() && "Region contains no inner blocks.");
  for (const VPBlockBase *Block : depth_first(Region->getEntry()))
    dumpBlock(Block);
  bumpIndent(-1);
  OS << Indent << "}\n";
  dumpEdges(Region);
}

void VPlanIngredient::print(raw_ostream &O) const {
  if (auto *Inst = dyn_cast<Instruction>(V)) {
    if (!Inst->getType()->isVoidTy()) {
      Inst->printAsOperand(O, false);
      O << " = ";
    }
    O << Inst->getOpcodeName() << " ";
    unsigned E = Inst->getNumOperands();
    if (E > 0) {
      Inst->getOperand(0)->printAsOperand(O, false);
      for (unsigned I = 1; I < E; ++I)
        Inst->getOperand(I)->printAsOperand(O << ", ", false);
    }
  } else // !Inst
    V->printAsOperand(O, false);
}

void VPWidenCallRecipe::print(raw_ostream &O, const Twine &Indent,
                              VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-CALL ";

  auto *CI = cast<CallInst>(getUnderlyingInstr());
  if (CI->getType()->isVoidTy())
    O << "void ";
  else {
    printAsOperand(O, SlotTracker);
    O << " = ";
  }

  O << "call @" << CI->getCalledFunction()->getName() << "(";
  printOperands(O, SlotTracker);
  O << ")";
}

void VPWidenSelectRecipe::print(raw_ostream &O, const Twine &Indent,
                                VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-SELECT ";
  printAsOperand(O, SlotTracker);
  O << " = select ";
  getOperand(0)->printAsOperand(O, SlotTracker);
  O << ", ";
  getOperand(1)->printAsOperand(O, SlotTracker);
  O << ", ";
  getOperand(2)->printAsOperand(O, SlotTracker);
  O << (InvariantCond ? " (condition is loop invariant)" : "");
}

void VPWidenRecipe::print(raw_ostream &O, const Twine &Indent,
                          VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN ";
  printAsOperand(O, SlotTracker);
  O << " = " << getUnderlyingInstr()->getOpcodeName() << " ";
  printOperands(O, SlotTracker);
}

void VPWidenIntOrFpInductionRecipe::print(raw_ostream &O, const Twine &Indent,
                                          VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-INDUCTION";
  if (getTruncInst()) {
    O << "\\l\"";
    O << " +\n" << Indent << "\" " << VPlanIngredient(IV) << "\\l\"";
    O << " +\n" << Indent << "\" ";
    getVPValue(0)->printAsOperand(O, SlotTracker);
  } else
    O << " " << VPlanIngredient(IV);
}

void VPWidenPointerInductionRecipe::print(raw_ostream &O, const Twine &Indent,
                                          VPSlotTracker &SlotTracker) const {
  O << Indent << "EMIT ";
  printAsOperand(O, SlotTracker);
  O << " = WIDEN-POINTER-INDUCTION ";
  getStartValue()->printAsOperand(O, SlotTracker);
  O << ", " << *IndDesc.getStep();
}
#endif

bool VPWidenIntOrFpInductionRecipe::isCanonical() const {
  auto *StartC = dyn_cast<ConstantInt>(getStartValue()->getLiveInIRValue());
  auto *StepC = dyn_cast<SCEVConstant>(getInductionDescriptor().getStep());
  return StartC && StartC->isZero() && StepC && StepC->isOne();
}

VPCanonicalIVPHIRecipe *VPScalarIVStepsRecipe::getCanonicalIV() const {
  return cast<VPCanonicalIVPHIRecipe>(getOperand(0));
}

bool VPScalarIVStepsRecipe::isCanonical() const {
  auto *CanIV = getCanonicalIV();
  // The start value of the steps-recipe must match the start value of the
  // canonical induction and it must step by 1.
  if (CanIV->getStartValue() != getStartValue())
    return false;
  auto *StepVPV = getStepValue();
  if (StepVPV->getDef())
    return false;
  auto *StepC = dyn_cast_or_null<ConstantInt>(StepVPV->getLiveInIRValue());
  return StepC && StepC->isOne();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPScalarIVStepsRecipe::print(raw_ostream &O, const Twine &Indent,
                                  VPSlotTracker &SlotTracker) const {
  O << Indent;
  printAsOperand(O, SlotTracker);
  O << Indent << "= SCALAR-STEPS ";
  printOperands(O, SlotTracker);
}

void VPWidenGEPRecipe::print(raw_ostream &O, const Twine &Indent,
                             VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-GEP ";
  O << (IsPtrLoopInvariant ? "Inv" : "Var");
  size_t IndicesNumber = IsIndexLoopInvariant.size();
  for (size_t I = 0; I < IndicesNumber; ++I)
    O << "[" << (IsIndexLoopInvariant[I] ? "Inv" : "Var") << "]";

  O << " ";
  printAsOperand(O, SlotTracker);
  O << " = getelementptr ";
  printOperands(O, SlotTracker);
}

void VPWidenPHIRecipe::print(raw_ostream &O, const Twine &Indent,
                             VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-PHI ";

  auto *OriginalPhi = cast<PHINode>(getUnderlyingValue());
  // Unless all incoming values are modeled in VPlan, print the original PHI
  // directly.
  // TODO: Remove once all VPWidenPHIRecipe instances keep all relevant
  // incoming values as VPValues.
  if (getNumOperands() != OriginalPhi->getNumOperands()) {
    O << VPlanIngredient(OriginalPhi);
    return;
  }

  printAsOperand(O, SlotTracker);
  O << " = phi ";
  printOperands(O, SlotTracker);
}

void VPBlendRecipe::print(raw_ostream &O, const Twine &Indent,
                          VPSlotTracker &SlotTracker) const {
  O << Indent << "BLEND ";
  Phi->printAsOperand(O, false);
  O << " =";
  if (getNumIncomingValues() == 1) {
    // Not a User of any mask: not really blending, this is a
    // single-predecessor phi.
    O << " ";
    getIncomingValue(0)->printAsOperand(O, SlotTracker);
  } else {
    for (unsigned I = 0, E = getNumIncomingValues(); I < E; ++I) {
      O << " ";
      getIncomingValue(I)->printAsOperand(O, SlotTracker);
      O << "/";
      getMask(I)->printAsOperand(O, SlotTracker);
    }
  }
}

void VPReductionRecipe::print(raw_ostream &O, const Twine &Indent,
                              VPSlotTracker &SlotTracker) const {
  O << Indent << "REDUCE ";
  printAsOperand(O, SlotTracker);
  O << " = ";
  getChainOp()->printAsOperand(O, SlotTracker);
  O << " +";
  if (isa<FPMathOperator>(getUnderlyingInstr()))
    O << getUnderlyingInstr()->getFastMathFlags();
  O << " reduce." << Instruction::getOpcodeName(RdxDesc->getOpcode()) << " (";
  getVecOp()->printAsOperand(O, SlotTracker);
  if (getCondOp()) {
    O << ", ";
    getCondOp()->printAsOperand(O, SlotTracker);
  }
  O << ")";
}

void VPReplicateRecipe::print(raw_ostream &O, const Twine &Indent,
                              VPSlotTracker &SlotTracker) const {
  O << Indent << (IsUniform ? "CLONE " : "REPLICATE ");

  if (!getUnderlyingInstr()->getType()->isVoidTy()) {
    printAsOperand(O, SlotTracker);
    O << " = ";
  }
  O << Instruction::getOpcodeName(getUnderlyingInstr()->getOpcode()) << " ";
  printOperands(O, SlotTracker);

  if (AlsoPack)
    O << " (S->V)";
}

void VPPredInstPHIRecipe::print(raw_ostream &O, const Twine &Indent,
                                VPSlotTracker &SlotTracker) const {
  O << Indent << "PHI-PREDICATED-INSTRUCTION ";
  printAsOperand(O, SlotTracker);
  O << " = ";
  printOperands(O, SlotTracker);
}

void VPWidenMemoryInstructionRecipe::print(raw_ostream &O, const Twine &Indent,
                                           VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN ";

  if (!isStore()) {
    printAsOperand(O, SlotTracker);
    O << " = ";
  }
  O << Instruction::getOpcodeName(Ingredient.getOpcode()) << " ";

  printOperands(O, SlotTracker);
}
#endif

void VPCanonicalIVPHIRecipe::execute(VPTransformState &State) {
  Value *Start = getStartValue()->getLiveInIRValue();
  PHINode *EntryPart = PHINode::Create(
      Start->getType(), 2, "index", &*State.CFG.PrevBB->getFirstInsertionPt());
  EntryPart->addIncoming(Start, State.CFG.VectorPreHeader);
  EntryPart->setDebugLoc(DL);
  for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
    State.set(this, EntryPart, Part);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPCanonicalIVPHIRecipe::print(raw_ostream &O, const Twine &Indent,
                                   VPSlotTracker &SlotTracker) const {
  O << Indent << "EMIT ";
  printAsOperand(O, SlotTracker);
  O << " = CANONICAL-INDUCTION";
}
#endif

void VPExpandSCEVRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "cannot be used in per-lane");
  const DataLayout &DL =
      State.CFG.VectorPreHeader->getModule()->getDataLayout();
  SCEVExpander Exp(SE, DL, "induction");
  Value *Res = Exp.expandCodeFor(Expr, Expr->getType(),
                                 State.CFG.VectorPreHeader->getTerminator());

  for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
    State.set(this, Res, Part);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPExpandSCEVRecipe::print(raw_ostream &O, const Twine &Indent,
                               VPSlotTracker &SlotTracker) const {
  O << Indent << "EMIT ";
  getVPSingleValue()->printAsOperand(O, SlotTracker);
  O << " = EXPAND SCEV " << *Expr;
}
#endif

void VPWidenCanonicalIVRecipe::execute(VPTransformState &State) {
  Value *CanonicalIV = State.get(getOperand(0), 0);
  Type *STy = CanonicalIV->getType();
  IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
  ElementCount VF = State.VF;
  Value *VStart = VF.isScalar()
                      ? CanonicalIV
                      : Builder.CreateVectorSplat(VF, CanonicalIV, "broadcast");
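  // For part P, the widened IV built below is splat(CanonicalIV) + P * VF +
  // <0, 1, ..., VF-1>, i.e. one lane per scalar iteration covered by that
  // part.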
  for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part) {
    Value *VStep = createStepForVF(Builder, STy, VF, Part);
    if (VF.isVector()) {
      VStep = Builder.CreateVectorSplat(VF, VStep);
      VStep =
          Builder.CreateAdd(VStep, Builder.CreateStepVector(VStep->getType()));
    }
    Value *CanonicalVectorIV = Builder.CreateAdd(VStart, VStep, "vec.iv");
    State.set(this, CanonicalVectorIV, Part);
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPWidenCanonicalIVRecipe::print(raw_ostream &O, const Twine &Indent,
                                     VPSlotTracker &SlotTracker) const {
  O << Indent << "EMIT ";
  printAsOperand(O, SlotTracker);
  O << " = WIDEN-CANONICAL-INDUCTION ";
  printOperands(O, SlotTracker);
}
#endif

void VPFirstOrderRecurrencePHIRecipe::execute(VPTransformState &State) {
  auto &Builder = State.Builder;
  // Create a vector from the initial value.
  auto *VectorInit = getStartValue()->getLiveInIRValue();

  Type *VecTy = State.VF.isScalar()
                    ? VectorInit->getType()
                    : VectorType::get(VectorInit->getType(), State.VF);

  if (State.VF.isVector()) {
    auto *IdxTy = Builder.getInt32Ty();
    auto *One = ConstantInt::get(IdxTy, 1);
    IRBuilder<>::InsertPointGuard Guard(Builder);
    Builder.SetInsertPoint(State.CFG.VectorPreHeader->getTerminator());
    auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, State.VF);
    auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
    VectorInit = Builder.CreateInsertElement(
        PoisonValue::get(VecTy), VectorInit, LastIdx, "vector.recur.init");
  }

  // Create a phi node for the new recurrence.
  PHINode *EntryPart = PHINode::Create(
      VecTy, 2, "vector.recur", &*State.CFG.PrevBB->getFirstInsertionPt());
  EntryPart->addIncoming(VectorInit, State.CFG.VectorPreHeader);
  State.set(this, EntryPart, 0);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPFirstOrderRecurrencePHIRecipe::print(raw_ostream &O, const Twine &Indent,
                                            VPSlotTracker &SlotTracker) const {
  O << Indent << "FIRST-ORDER-RECURRENCE-PHI ";
  printAsOperand(O, SlotTracker);
  O << " = phi ";
  printOperands(O, SlotTracker);
}
#endif

void VPReductionPHIRecipe::execute(VPTransformState &State) {
  PHINode *PN = cast<PHINode>(getUnderlyingValue());
  auto &Builder = State.Builder;

  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. This is
  // stage #1: We create a new vector PHI node with no incoming edges. We'll use
  // this value when we vectorize all of the instructions that use the PHI.
  bool ScalarPHI = State.VF.isScalar() || IsInLoop;
  Type *VecTy =
      ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), State.VF);

  BasicBlock *HeaderBB = State.CFG.PrevBB;
  assert(State.CurrentVectorLoop->getHeader() == HeaderBB &&
         "recipe must be in the vector loop header");
  unsigned LastPartForNewPhi = isOrdered() ? 1 : State.UF;
  for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
    Value *EntryPart =
        PHINode::Create(VecTy, 2, "vec.phi", &*HeaderBB->getFirstInsertionPt());
    State.set(this, EntryPart, Part);
  }

  // Reductions do not have to start at zero. They can start with
  // any loop invariant values.
  VPValue *StartVPV = getStartValue();
  Value *StartV = StartVPV->getLiveInIRValue();

  Value *Iden = nullptr;
  RecurKind RK = RdxDesc.getRecurrenceKind();
  if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK) ||
      RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK)) {
    // MinMax reductions have the start value as their identity.
    if (ScalarPHI) {
      Iden = StartV;
    } else {
      IRBuilderBase::InsertPointGuard IPBuilder(Builder);
      Builder.SetInsertPoint(State.CFG.VectorPreHeader->getTerminator());
      StartV = Iden =
          Builder.CreateVectorSplat(State.VF, StartV, "minmax.ident");
    }
  } else {
    Iden = RdxDesc.getRecurrenceIdentity(RK, VecTy->getScalarType(),
                                         RdxDesc.getFastMathFlags());

    if (!ScalarPHI) {
      Iden = Builder.CreateVectorSplat(State.VF, Iden);
      IRBuilderBase::InsertPointGuard IPBuilder(Builder);
      Builder.SetInsertPoint(State.CFG.VectorPreHeader->getTerminator());
      Constant *Zero = Builder.getInt32(0);
      StartV = Builder.CreateInsertElement(Iden, StartV, Zero);
    }
  }

  for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
    Value *EntryPart = State.get(this, Part);
    // Make sure to add the reduction start value only to the
    // first unroll part.
    Value *StartVal = (Part == 0) ? StartV : Iden;
    cast<PHINode>(EntryPart)->addIncoming(StartVal, State.CFG.VectorPreHeader);
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPReductionPHIRecipe::print(raw_ostream &O, const Twine &Indent,
                                 VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-REDUCTION-PHI ";

  printAsOperand(O, SlotTracker);
  O << " = phi ";
  printOperands(O, SlotTracker);
}
#endif

template void DomTreeBuilder::Calculate<VPDominatorTree>(VPDominatorTree &DT);

void VPValue::replaceAllUsesWith(VPValue *New) {
  for (unsigned J = 0; J < getNumUsers();) {
    VPUser *User = Users[J];
    unsigned NumUsers = getNumUsers();
    for (unsigned I = 0, E = User->getNumOperands(); I < E; ++I)
      if (User->getOperand(I) == this)
        User->setOperand(I, New);
    // If a user got removed after updating the current user, the next user to
    // update will be moved to the current position, so we only need to
    // increment the index if the number of users did not change.
    if (NumUsers == getNumUsers())
      J++;
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPValue::printAsOperand(raw_ostream &OS, VPSlotTracker &Tracker) const {
  if (const Value *UV = getUnderlyingValue()) {
    OS << "ir<";
    UV->printAsOperand(OS, false);
    OS << ">";
    return;
  }

  unsigned Slot = Tracker.getSlot(this);
  if (Slot == unsigned(-1))
    OS << "<badref>";
  else
    OS << "vp<%" << Tracker.getSlot(this) << ">";
}

void VPUser::printOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const {
  interleaveComma(operands(), O, [&O, &SlotTracker](VPValue *Op) {
    Op->printAsOperand(O, SlotTracker);
  });
}
#endif

void VPInterleavedAccessInfo::visitRegion(VPRegionBlock *Region,
                                          Old2NewTy &Old2New,
                                          InterleavedAccessInfo &IAI) {
  ReversePostOrderTraversal<VPBlockBase *> RPOT(Region->getEntry());
  for (VPBlockBase *Base : RPOT) {
    visitBlock(Base, Old2New, IAI);
  }
}

void VPInterleavedAccessInfo::visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
                                         InterleavedAccessInfo &IAI) {
  if (VPBasicBlock *VPBB = dyn_cast<VPBasicBlock>(Block)) {
    for (VPRecipeBase &VPI : *VPBB) {
      if (isa<VPHeaderPHIRecipe>(&VPI))
        continue;
      assert(isa<VPInstruction>(&VPI) && "Can only handle VPInstructions");
      auto *VPInst = cast<VPInstruction>(&VPI);
      auto *Inst = cast<Instruction>(VPInst->getUnderlyingValue());
      auto *IG = IAI.getInterleaveGroup(Inst);
      if (!IG)
        continue;

      auto NewIGIter = Old2New.find(IG);
      if (NewIGIter == Old2New.end())
        Old2New[IG] = new InterleaveGroup<VPInstruction>(
            IG->getFactor(), IG->isReverse(), IG->getAlign());

      if (Inst == IG->getInsertPos())
        Old2New[IG]->setInsertPos(VPInst);

      InterleaveGroupMap[VPInst] = Old2New[IG];
      InterleaveGroupMap[VPInst]->insertMember(
          VPInst, IG->getIndex(Inst),
          Align(IG->isReverse() ? (-1) * int(IG->getFactor())
                                : IG->getFactor()));
    }
  } else if (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    visitRegion(Region, Old2New, IAI);
  else
    llvm_unreachable("Unsupported kind of VPBlock.");
}

VPInterleavedAccessInfo::VPInterleavedAccessInfo(VPlan &Plan,
                                                 InterleavedAccessInfo &IAI) {
  Old2NewTy Old2New;
  visitRegion(cast<VPRegionBlock>(Plan.getEntry()), Old2New, IAI);
}

void VPSlotTracker::assignSlot(const VPValue *V) {
  assert(Slots.find(V) == Slots.end() && "VPValue already has a slot!");
  Slots[V] = NextSlot++;
}

void VPSlotTracker::assignSlots(const VPlan &Plan) {

  for (const VPValue *V : Plan.VPExternalDefs)
    assignSlot(V);

  assignSlot(&Plan.VectorTripCount);
  if (Plan.BackedgeTakenCount)
    assignSlot(Plan.BackedgeTakenCount);

  ReversePostOrderTraversal<
      VPBlockRecursiveTraversalWrapper<const VPBlockBase *>>
      RPOT(VPBlockRecursiveTraversalWrapper<const VPBlockBase *>(
          Plan.getEntry()));
  for (const VPBasicBlock *VPBB :
       VPBlockUtils::blocksOnly<const VPBasicBlock>(RPOT))
    for (const VPRecipeBase &Recipe : *VPBB)
      for (VPValue *Def : Recipe.definedValues())
        assignSlot(Def);
}

bool vputils::onlyFirstLaneUsed(VPValue *Def) {
  return all_of(Def->users(), [Def](VPUser *U) {
    return cast<VPRecipeBase>(U)->onlyFirstLaneUsed(Def);
  });
}