//===- VPlan.cpp - Vectorizer Plan ----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This is the LLVM vectorization plan. It represents a candidate for
/// vectorization, allowing the vectorizer to plan and optimize how to
/// vectorize a given loop before generating LLVM-IR.
/// The vectorizer uses vectorization plans to estimate the costs of potential
/// candidates and, if deemed profitable, to execute the chosen plan by
/// generating vector LLVM-IR code.
///
//===----------------------------------------------------------------------===//

#include "VPlan.h"
#include "VPlanDominatorTree.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GenericDomTreeConstruction.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include <cassert>
#include <string>
#include <vector>

using namespace llvm;
extern cl::opt<bool> EnableVPlanNativePath;

#define DEBUG_TYPE "vplan"

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
raw_ostream &llvm::operator<<(raw_ostream &OS, const VPValue &V) {
  const VPInstruction *Instr = dyn_cast<VPInstruction>(&V);
  VPSlotTracker SlotTracker(
      (Instr && Instr->getParent()) ?
Instr->getParent()->getPlan() : nullptr); 58 V.print(OS, SlotTracker); 59 return OS; 60 } 61 #endif 62 63 Value *VPLane::getAsRuntimeExpr(IRBuilderBase &Builder, 64 const ElementCount &VF) const { 65 switch (LaneKind) { 66 case VPLane::Kind::ScalableLast: 67 // Lane = RuntimeVF - VF.getKnownMinValue() + Lane 68 return Builder.CreateSub(getRuntimeVF(Builder, Builder.getInt32Ty(), VF), 69 Builder.getInt32(VF.getKnownMinValue() - Lane)); 70 case VPLane::Kind::First: 71 return Builder.getInt32(Lane); 72 } 73 llvm_unreachable("Unknown lane kind"); 74 } 75 76 VPValue::VPValue(const unsigned char SC, Value *UV, VPDef *Def) 77 : SubclassID(SC), UnderlyingVal(UV), Def(Def) { 78 if (Def) 79 Def->addDefinedValue(this); 80 } 81 82 VPValue::~VPValue() { 83 assert(Users.empty() && "trying to delete a VPValue with remaining users"); 84 if (Def) 85 Def->removeDefinedValue(this); 86 } 87 88 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 89 void VPValue::print(raw_ostream &OS, VPSlotTracker &SlotTracker) const { 90 if (const VPRecipeBase *R = dyn_cast_or_null<VPRecipeBase>(Def)) 91 R->print(OS, "", SlotTracker); 92 else 93 printAsOperand(OS, SlotTracker); 94 } 95 96 void VPValue::dump() const { 97 const VPRecipeBase *Instr = dyn_cast_or_null<VPRecipeBase>(this->Def); 98 VPSlotTracker SlotTracker( 99 (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr); 100 print(dbgs(), SlotTracker); 101 dbgs() << "\n"; 102 } 103 104 void VPDef::dump() const { 105 const VPRecipeBase *Instr = dyn_cast_or_null<VPRecipeBase>(this); 106 VPSlotTracker SlotTracker( 107 (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr); 108 print(dbgs(), "", SlotTracker); 109 dbgs() << "\n"; 110 } 111 #endif 112 113 // Get the top-most entry block of \p Start. This is the entry block of the 114 // containing VPlan. This function is templated to support both const and non-const blocks 115 template <typename T> static T *getPlanEntry(T *Start) { 116 T *Next = Start; 117 T *Current = Start; 118 while ((Next = Next->getParent())) 119 Current = Next; 120 121 SmallSetVector<T *, 8> WorkList; 122 WorkList.insert(Current); 123 124 for (unsigned i = 0; i < WorkList.size(); i++) { 125 T *Current = WorkList[i]; 126 if (Current->getNumPredecessors() == 0) 127 return Current; 128 auto &Predecessors = Current->getPredecessors(); 129 WorkList.insert(Predecessors.begin(), Predecessors.end()); 130 } 131 132 llvm_unreachable("VPlan without any entry node without predecessors"); 133 } 134 135 VPlan *VPBlockBase::getPlan() { return getPlanEntry(this)->Plan; } 136 137 const VPlan *VPBlockBase::getPlan() const { return getPlanEntry(this)->Plan; } 138 139 /// \return the VPBasicBlock that is the entry of Block, possibly indirectly. 140 const VPBasicBlock *VPBlockBase::getEntryBasicBlock() const { 141 const VPBlockBase *Block = this; 142 while (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block)) 143 Block = Region->getEntry(); 144 return cast<VPBasicBlock>(Block); 145 } 146 147 VPBasicBlock *VPBlockBase::getEntryBasicBlock() { 148 VPBlockBase *Block = this; 149 while (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block)) 150 Block = Region->getEntry(); 151 return cast<VPBasicBlock>(Block); 152 } 153 154 void VPBlockBase::setPlan(VPlan *ParentPlan) { 155 assert(ParentPlan->getEntry() == this && 156 "Can only set plan on its entry block."); 157 Plan = ParentPlan; 158 } 159 160 /// \return the VPBasicBlock that is the exit of Block, possibly indirectly. 
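/// If Block is a region, the lookup descends through the exits of nested
/// regions until a plain VPBasicBlock is reached.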
const VPBasicBlock *VPBlockBase::getExitBasicBlock() const {
  const VPBlockBase *Block = this;
  while (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getExit();
  return cast<VPBasicBlock>(Block);
}

VPBasicBlock *VPBlockBase::getExitBasicBlock() {
  VPBlockBase *Block = this;
  while (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getExit();
  return cast<VPBasicBlock>(Block);
}

VPBlockBase *VPBlockBase::getEnclosingBlockWithSuccessors() {
  if (!Successors.empty() || !Parent)
    return this;
  assert(Parent->getExit() == this &&
         "Block w/o successors not the exit of its parent.");
  return Parent->getEnclosingBlockWithSuccessors();
}

VPBlockBase *VPBlockBase::getEnclosingBlockWithPredecessors() {
  if (!Predecessors.empty() || !Parent)
    return this;
  assert(Parent->getEntry() == this &&
         "Block w/o predecessors not the entry of its parent.");
  return Parent->getEnclosingBlockWithPredecessors();
}

VPValue *VPBlockBase::getCondBit() {
  return CondBitUser.getSingleOperandOrNull();
}

const VPValue *VPBlockBase::getCondBit() const {
  return CondBitUser.getSingleOperandOrNull();
}

void VPBlockBase::setCondBit(VPValue *CV) { CondBitUser.resetSingleOpUser(CV); }

VPValue *VPBlockBase::getPredicate() {
  return PredicateUser.getSingleOperandOrNull();
}

const VPValue *VPBlockBase::getPredicate() const {
  return PredicateUser.getSingleOperandOrNull();
}

void VPBlockBase::setPredicate(VPValue *CV) {
  PredicateUser.resetSingleOpUser(CV);
}

void VPBlockBase::deleteCFG(VPBlockBase *Entry) {
  SmallVector<VPBlockBase *, 8> Blocks(depth_first(Entry));

  for (VPBlockBase *Block : Blocks)
    delete Block;
}

VPBasicBlock::iterator VPBasicBlock::getFirstNonPhi() {
  iterator It = begin();
  while (It != end() && It->isPhi())
    It++;
  return It;
}

Value *VPTransformState::get(VPValue *Def, const VPIteration &Instance) {
  if (!Def->getDef())
    return Def->getLiveInIRValue();

  if (hasScalarValue(Def, Instance)) {
    return Data
        .PerPartScalars[Def][Instance.Part][Instance.Lane.mapToCacheIndex(VF)];
  }

  assert(hasVectorValue(Def, Instance.Part));
  auto *VecPart = Data.PerPartOutput[Def][Instance.Part];
  if (!VecPart->getType()->isVectorTy()) {
    assert(Instance.Lane.isFirstLane() && "cannot get lane > 0 for scalar");
    return VecPart;
  }
  // TODO: Cache created scalar values.
  Value *Lane = Instance.Lane.getAsRuntimeExpr(Builder, VF);
  auto *Extract = Builder.CreateExtractElement(VecPart, Lane);
  // set(Def, Extract, Instance);
  return Extract;
}
BasicBlock *VPTransformState::CFGState::getPreheaderBBFor(VPRecipeBase *R) {
  VPRegionBlock *LoopRegion = R->getParent()->getEnclosingLoopRegion();
  return VPBB2IRBB[LoopRegion->getPreheaderVPBB()];
}

BasicBlock *
VPBasicBlock::createEmptyBasicBlock(VPTransformState::CFGState &CFG) {
  // BB stands for IR BasicBlocks. VPBB stands for VPlan VPBasicBlocks.
  // Pred stands for Predecessor. Prev stands for Previous - last
  // visited/created.
  BasicBlock *PrevBB = CFG.PrevBB;
  BasicBlock *NewBB = BasicBlock::Create(PrevBB->getContext(), getName(),
                                         PrevBB->getParent(), CFG.ExitBB);
  LLVM_DEBUG(dbgs() << "LV: created " << NewBB->getName() << '\n');

  // Hook up the new basic block to its predecessors.
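  // Predecessors are looked up hierarchically: for a predecessor that is a
  // region, the edge is drawn from the IR block of its exit VPBasicBlock.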
  for (VPBlockBase *PredVPBlock : getHierarchicalPredecessors()) {
    VPBasicBlock *PredVPBB = PredVPBlock->getExitBasicBlock();
    auto &PredVPSuccessors = PredVPBB->getSuccessors();
    BasicBlock *PredBB = CFG.VPBB2IRBB[PredVPBB];

    // In the outer loop vectorization scenario, the predecessor basic block
    // may not yet have been visited (backedge). Mark the VPBasicBlock for
    // fixup at the end of vectorization. We do not encounter this case in
    // inner loop vectorization as we start out by building a loop skeleton
    // with the vector loop header and latch blocks. As a result, we never
    // enter this function for the header block in the non VPlan-native path.
    if (!PredBB) {
      assert(EnableVPlanNativePath &&
             "Unexpected null predecessor in non VPlan-native path");
      CFG.VPBBsToFix.push_back(PredVPBB);
      continue;
    }

    assert(PredBB && "Predecessor basic-block not found building successor.");
    auto *PredBBTerminator = PredBB->getTerminator();
    LLVM_DEBUG(dbgs() << "LV: draw edge from " << PredBB->getName() << '\n');

    auto *TermBr = dyn_cast<BranchInst>(PredBBTerminator);
    if (isa<UnreachableInst>(PredBBTerminator) ||
        (TermBr && !TermBr->isConditional())) {
      assert(PredVPSuccessors.size() == 1 &&
             "Predecessor ending w/o branch must have single successor.");
      if (TermBr) {
        TermBr->setSuccessor(0, NewBB);
      } else {
        DebugLoc DL = PredBBTerminator->getDebugLoc();
        PredBBTerminator->eraseFromParent();
        auto *Br = BranchInst::Create(NewBB, PredBB);
        Br->setDebugLoc(DL);
      }
    } else {
      if (PredVPSuccessors.size() == 2) {
        unsigned idx = PredVPSuccessors.front() == this ? 0 : 1;
        assert(!PredBBTerminator->getSuccessor(idx) &&
               "Trying to reset an existing successor block.");
        PredBBTerminator->setSuccessor(idx, NewBB);
      } else {
        auto *Reg = dyn_cast<VPRegionBlock>(PredVPBB->getParent());
        assert(Reg && !Reg->isReplicator());
        assert(this == Reg->getSingleSuccessor());
        PredBBTerminator->setSuccessor(0, NewBB);
        PredBBTerminator->setSuccessor(
            1, CFG.VPBB2IRBB[Reg->getEntryBasicBlock()]);
      }
    }
  }
  return NewBB;
}

void VPBasicBlock::execute(VPTransformState *State) {
  bool Replica = State->Instance && !State->Instance->isFirstIteration();
  VPBasicBlock *PrevVPBB = State->CFG.PrevVPBB;
  VPBlockBase *SingleHPred = nullptr;
  BasicBlock *NewBB = State->CFG.PrevBB; // Reuse it if possible.

  auto IsNonReplicateR = [](VPBlockBase *BB) {
    auto *R = dyn_cast<VPRegionBlock>(BB);
    return R && !R->isReplicator();
  };

  // 1. Create an IR basic block, or reuse the last one if possible.
  // The last IR basic block is reused, as an optimization, in three cases:
  // A. the first VPBB reuses the loop pre-header BB - when PrevVPBB is null;
  // B. when the current VPBB has a single (hierarchical) predecessor which
  //    is PrevVPBB and the latter has a single (hierarchical) successor, and
  //    both are in the same non-replicator region; and
  // C. when the current VPBB is an entry of a region replica - where PrevVPBB
  //    is the exit of this region from a previous instance, or the predecessor
  //    of this region.
  if (PrevVPBB && /* A */
      !((SingleHPred = getSingleHierarchicalPredecessor()) &&
        SingleHPred->getExitBasicBlock() == PrevVPBB &&
        PrevVPBB->getSingleHierarchicalSuccessor() &&
        (SingleHPred->getParent() == getEnclosingLoopRegion() &&
         !IsNonReplicateR(SingleHPred))) &&            /* B */
      !(Replica && getPredecessors().empty())) {       /* C */
    NewBB = createEmptyBasicBlock(State->CFG);
    State->Builder.SetInsertPoint(NewBB);
    // Temporarily terminate with unreachable until CFG is rewired.
    UnreachableInst *Terminator = State->Builder.CreateUnreachable();
    // Register NewBB in its loop. In innermost loops it's the same for all
    // BBs.
    if (State->CurrentVectorLoop)
      State->CurrentVectorLoop->addBasicBlockToLoop(NewBB, *State->LI);
    State->Builder.SetInsertPoint(Terminator);
    State->CFG.PrevBB = NewBB;
  }

  // 2. Fill the IR basic block with IR instructions.
  LLVM_DEBUG(dbgs() << "LV: vectorizing VPBB:" << getName()
                    << " in BB:" << NewBB->getName() << '\n');

  State->CFG.VPBB2IRBB[this] = NewBB;
  State->CFG.PrevVPBB = this;

  for (VPRecipeBase &Recipe : Recipes)
    Recipe.execute(*State);

  VPValue *CBV;
  if (EnableVPlanNativePath && (CBV = getCondBit())) {
    assert(CBV->getUnderlyingValue() &&
           "Unexpected null underlying value for condition bit");

    // Condition bit value in a VPBasicBlock is used as the branch selector. In
    // the VPlan-native path case, since all branches are uniform we generate a
    // branch instruction using the condition value from vector lane 0 and dummy
    // successors. The successors are fixed later when the successor blocks are
    // visited.
    Value *NewCond = State->get(CBV, {0, 0});

    // Replace the temporary unreachable terminator with the new conditional
    // branch.
    auto *CurrentTerminator = NewBB->getTerminator();
    assert(isa<UnreachableInst>(CurrentTerminator) &&
           "Expected to replace unreachable terminator with conditional "
           "branch.");
    auto *CondBr = BranchInst::Create(NewBB, nullptr, NewCond);
    CondBr->setSuccessor(0, nullptr);
    ReplaceInstWithInst(CurrentTerminator, CondBr);
  }

  LLVM_DEBUG(dbgs() << "LV: filled BB:" << *NewBB);
}

void VPBasicBlock::dropAllReferences(VPValue *NewValue) {
  for (VPRecipeBase &R : Recipes) {
    for (auto *Def : R.definedValues())
      Def->replaceAllUsesWith(NewValue);

    for (unsigned I = 0, E = R.getNumOperands(); I != E; I++)
      R.setOperand(I, NewValue);
  }
}

VPBasicBlock *VPBasicBlock::splitAt(iterator SplitAt) {
  assert((SplitAt == end() || SplitAt->getParent() == this) &&
         "can only split at a position in the same block");

  SmallVector<VPBlockBase *, 2> Succs(successors());
  // First, disconnect the current block from its successors.
  for (VPBlockBase *Succ : Succs)
    VPBlockUtils::disconnectBlocks(this, Succ);

  // Create new empty block after the block to split.
  auto *SplitBlock = new VPBasicBlock(getName() + ".split");
  VPBlockUtils::insertBlockAfter(SplitBlock, this);

  // Add successors for block to split to new block.
  for (VPBlockBase *Succ : Succs)
    VPBlockUtils::connectBlocks(SplitBlock, Succ);

  // Finally, move the recipes starting at SplitAt to new block.
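  // Use an early-increment range here: moveBefore unlinks each recipe from
  // this block's recipe list while we are still iterating over it.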
419 for (VPRecipeBase &ToMove : 420 make_early_inc_range(make_range(SplitAt, this->end()))) 421 ToMove.moveBefore(*SplitBlock, SplitBlock->end()); 422 423 return SplitBlock; 424 } 425 426 VPRegionBlock *VPBasicBlock::getEnclosingLoopRegion() { 427 VPRegionBlock *P = getParent(); 428 if (P && P->isReplicator()) { 429 P = P->getParent(); 430 assert(!cast<VPRegionBlock>(P)->isReplicator() && 431 "unexpected nested replicate regions"); 432 } 433 return P; 434 } 435 436 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 437 void VPBlockBase::printSuccessors(raw_ostream &O, const Twine &Indent) const { 438 if (getSuccessors().empty()) { 439 O << Indent << "No successors\n"; 440 } else { 441 O << Indent << "Successor(s): "; 442 ListSeparator LS; 443 for (auto *Succ : getSuccessors()) 444 O << LS << Succ->getName(); 445 O << '\n'; 446 } 447 } 448 449 void VPBasicBlock::print(raw_ostream &O, const Twine &Indent, 450 VPSlotTracker &SlotTracker) const { 451 O << Indent << getName() << ":\n"; 452 if (const VPValue *Pred = getPredicate()) { 453 O << Indent << "BlockPredicate:"; 454 Pred->printAsOperand(O, SlotTracker); 455 if (const auto *PredInst = dyn_cast<VPInstruction>(Pred)) 456 O << " (" << PredInst->getParent()->getName() << ")"; 457 O << '\n'; 458 } 459 460 auto RecipeIndent = Indent + " "; 461 for (const VPRecipeBase &Recipe : *this) { 462 Recipe.print(O, RecipeIndent, SlotTracker); 463 O << '\n'; 464 } 465 466 printSuccessors(O, Indent); 467 468 if (const VPValue *CBV = getCondBit()) { 469 O << Indent << "CondBit: "; 470 CBV->printAsOperand(O, SlotTracker); 471 if (const auto *CBI = dyn_cast<VPInstruction>(CBV)) 472 O << " (" << CBI->getParent()->getName() << ")"; 473 O << '\n'; 474 } 475 } 476 #endif 477 478 void VPRegionBlock::dropAllReferences(VPValue *NewValue) { 479 for (VPBlockBase *Block : depth_first(Entry)) 480 // Drop all references in VPBasicBlocks and replace all uses with 481 // DummyValue. 482 Block->dropAllReferences(NewValue); 483 } 484 485 void VPRegionBlock::execute(VPTransformState *State) { 486 ReversePostOrderTraversal<VPBlockBase *> RPOT(Entry); 487 488 if (!isReplicator()) { 489 // Create and register the new vector loop. 490 Loop *PrevLoop = State->CurrentVectorLoop; 491 State->CurrentVectorLoop = State->LI->AllocateLoop(); 492 BasicBlock *VectorPH = State->CFG.VPBB2IRBB[getPreheaderVPBB()]; 493 Loop *ParentLoop = State->LI->getLoopFor(VectorPH); 494 495 // Insert the new loop into the loop nest and register the new basic blocks 496 // before calling any utilities such as SCEV that require valid LoopInfo. 497 if (ParentLoop) 498 ParentLoop->addChildLoop(State->CurrentVectorLoop); 499 else 500 State->LI->addTopLevelLoop(State->CurrentVectorLoop); 501 502 // Visit the VPBlocks connected to "this", starting from it. 503 for (VPBlockBase *Block : RPOT) { 504 LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n'); 505 Block->execute(State); 506 } 507 508 State->CurrentVectorLoop = PrevLoop; 509 return; 510 } 511 512 assert(!State->Instance && "Replicating a Region with non-null instance."); 513 514 // Enter replicating mode. 515 State->Instance = VPIteration(0, 0); 516 517 for (unsigned Part = 0, UF = State->UF; Part < UF; ++Part) { 518 State->Instance->Part = Part; 519 assert(!State->VF.isScalable() && "VF is assumed to be non scalable."); 520 for (unsigned Lane = 0, VF = State->VF.getKnownMinValue(); Lane < VF; 521 ++Lane) { 522 State->Instance->Lane = VPLane(Lane, VPLane::Kind::First); 523 // Visit the VPBlocks connected to \p this, starting from it. 
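      // Each (Part, Lane) instance re-emits all blocks of the replicate
      // region, generating scalar code for a single vector lane.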
524 for (VPBlockBase *Block : RPOT) { 525 LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n'); 526 Block->execute(State); 527 } 528 } 529 } 530 531 // Exit replicating mode. 532 State->Instance.reset(); 533 } 534 535 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 536 void VPRegionBlock::print(raw_ostream &O, const Twine &Indent, 537 VPSlotTracker &SlotTracker) const { 538 O << Indent << (isReplicator() ? "<xVFxUF> " : "<x1> ") << getName() << ": {"; 539 auto NewIndent = Indent + " "; 540 for (auto *BlockBase : depth_first(Entry)) { 541 O << '\n'; 542 BlockBase->print(O, NewIndent, SlotTracker); 543 } 544 O << Indent << "}\n"; 545 546 printSuccessors(O, Indent); 547 } 548 #endif 549 550 bool VPRecipeBase::mayWriteToMemory() const { 551 switch (getVPDefID()) { 552 case VPWidenMemoryInstructionSC: { 553 return cast<VPWidenMemoryInstructionRecipe>(this)->isStore(); 554 } 555 case VPReplicateSC: 556 case VPWidenCallSC: 557 return cast<Instruction>(getVPSingleValue()->getUnderlyingValue()) 558 ->mayWriteToMemory(); 559 case VPBranchOnMaskSC: 560 return false; 561 case VPWidenIntOrFpInductionSC: 562 case VPWidenCanonicalIVSC: 563 case VPWidenPHISC: 564 case VPBlendSC: 565 case VPWidenSC: 566 case VPWidenGEPSC: 567 case VPReductionSC: 568 case VPWidenSelectSC: { 569 const Instruction *I = 570 dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue()); 571 (void)I; 572 assert((!I || !I->mayWriteToMemory()) && 573 "underlying instruction may write to memory"); 574 return false; 575 } 576 default: 577 return true; 578 } 579 } 580 581 bool VPRecipeBase::mayReadFromMemory() const { 582 switch (getVPDefID()) { 583 case VPWidenMemoryInstructionSC: { 584 return !cast<VPWidenMemoryInstructionRecipe>(this)->isStore(); 585 } 586 case VPReplicateSC: 587 case VPWidenCallSC: 588 return cast<Instruction>(getVPSingleValue()->getUnderlyingValue()) 589 ->mayReadFromMemory(); 590 case VPBranchOnMaskSC: 591 return false; 592 case VPWidenIntOrFpInductionSC: 593 case VPWidenCanonicalIVSC: 594 case VPWidenPHISC: 595 case VPBlendSC: 596 case VPWidenSC: 597 case VPWidenGEPSC: 598 case VPReductionSC: 599 case VPWidenSelectSC: { 600 const Instruction *I = 601 dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue()); 602 (void)I; 603 assert((!I || !I->mayReadFromMemory()) && 604 "underlying instruction may read from memory"); 605 return false; 606 } 607 default: 608 return true; 609 } 610 } 611 612 bool VPRecipeBase::mayHaveSideEffects() const { 613 switch (getVPDefID()) { 614 case VPBranchOnMaskSC: 615 return false; 616 case VPWidenIntOrFpInductionSC: 617 case VPWidenPointerInductionSC: 618 case VPWidenCanonicalIVSC: 619 case VPWidenPHISC: 620 case VPBlendSC: 621 case VPWidenSC: 622 case VPWidenGEPSC: 623 case VPReductionSC: 624 case VPWidenSelectSC: 625 case VPScalarIVStepsSC: { 626 const Instruction *I = 627 dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue()); 628 (void)I; 629 assert((!I || !I->mayHaveSideEffects()) && 630 "underlying instruction has side-effects"); 631 return false; 632 } 633 case VPReplicateSC: { 634 auto *R = cast<VPReplicateRecipe>(this); 635 return R->getUnderlyingInstr()->mayHaveSideEffects(); 636 } 637 default: 638 return true; 639 } 640 } 641 642 void VPRecipeBase::insertBefore(VPRecipeBase *InsertPos) { 643 assert(!Parent && "Recipe already in some VPBasicBlock"); 644 assert(InsertPos->getParent() && 645 "Insertion position not in any VPBasicBlock"); 646 Parent = InsertPos->getParent(); 647 
Parent->getRecipeList().insert(InsertPos->getIterator(), this); 648 } 649 650 void VPRecipeBase::insertBefore(VPBasicBlock &BB, 651 iplist<VPRecipeBase>::iterator I) { 652 assert(!Parent && "Recipe already in some VPBasicBlock"); 653 assert(I == BB.end() || I->getParent() == &BB); 654 Parent = &BB; 655 BB.getRecipeList().insert(I, this); 656 } 657 658 void VPRecipeBase::insertAfter(VPRecipeBase *InsertPos) { 659 assert(!Parent && "Recipe already in some VPBasicBlock"); 660 assert(InsertPos->getParent() && 661 "Insertion position not in any VPBasicBlock"); 662 Parent = InsertPos->getParent(); 663 Parent->getRecipeList().insertAfter(InsertPos->getIterator(), this); 664 } 665 666 void VPRecipeBase::removeFromParent() { 667 assert(getParent() && "Recipe not in any VPBasicBlock"); 668 getParent()->getRecipeList().remove(getIterator()); 669 Parent = nullptr; 670 } 671 672 iplist<VPRecipeBase>::iterator VPRecipeBase::eraseFromParent() { 673 assert(getParent() && "Recipe not in any VPBasicBlock"); 674 return getParent()->getRecipeList().erase(getIterator()); 675 } 676 677 void VPRecipeBase::moveAfter(VPRecipeBase *InsertPos) { 678 removeFromParent(); 679 insertAfter(InsertPos); 680 } 681 682 void VPRecipeBase::moveBefore(VPBasicBlock &BB, 683 iplist<VPRecipeBase>::iterator I) { 684 removeFromParent(); 685 insertBefore(BB, I); 686 } 687 688 void VPInstruction::generateInstruction(VPTransformState &State, 689 unsigned Part) { 690 IRBuilderBase &Builder = State.Builder; 691 Builder.SetCurrentDebugLocation(DL); 692 693 if (Instruction::isBinaryOp(getOpcode())) { 694 Value *A = State.get(getOperand(0), Part); 695 Value *B = State.get(getOperand(1), Part); 696 Value *V = Builder.CreateBinOp((Instruction::BinaryOps)getOpcode(), A, B); 697 State.set(this, V, Part); 698 return; 699 } 700 701 switch (getOpcode()) { 702 case VPInstruction::Not: { 703 Value *A = State.get(getOperand(0), Part); 704 Value *V = Builder.CreateNot(A); 705 State.set(this, V, Part); 706 break; 707 } 708 case VPInstruction::ICmpULE: { 709 Value *IV = State.get(getOperand(0), Part); 710 Value *TC = State.get(getOperand(1), Part); 711 Value *V = Builder.CreateICmpULE(IV, TC); 712 State.set(this, V, Part); 713 break; 714 } 715 case Instruction::Select: { 716 Value *Cond = State.get(getOperand(0), Part); 717 Value *Op1 = State.get(getOperand(1), Part); 718 Value *Op2 = State.get(getOperand(2), Part); 719 Value *V = Builder.CreateSelect(Cond, Op1, Op2); 720 State.set(this, V, Part); 721 break; 722 } 723 case VPInstruction::ActiveLaneMask: { 724 // Get first lane of vector induction variable. 725 Value *VIVElem0 = State.get(getOperand(0), VPIteration(Part, 0)); 726 // Get the original loop tripcount. 727 Value *ScalarTC = State.get(getOperand(1), Part); 728 729 auto *Int1Ty = Type::getInt1Ty(Builder.getContext()); 730 auto *PredTy = VectorType::get(Int1Ty, State.VF); 731 Instruction *Call = Builder.CreateIntrinsic( 732 Intrinsic::get_active_lane_mask, {PredTy, ScalarTC->getType()}, 733 {VIVElem0, ScalarTC}, nullptr, "active.lane.mask"); 734 State.set(this, Call, Part); 735 break; 736 } 737 case VPInstruction::FirstOrderRecurrenceSplice: { 738 // Generate code to combine the previous and current values in vector v3. 
739 // 740 // vector.ph: 741 // v_init = vector(..., ..., ..., a[-1]) 742 // br vector.body 743 // 744 // vector.body 745 // i = phi [0, vector.ph], [i+4, vector.body] 746 // v1 = phi [v_init, vector.ph], [v2, vector.body] 747 // v2 = a[i, i+1, i+2, i+3]; 748 // v3 = vector(v1(3), v2(0, 1, 2)) 749 750 // For the first part, use the recurrence phi (v1), otherwise v2. 751 auto *V1 = State.get(getOperand(0), 0); 752 Value *PartMinus1 = Part == 0 ? V1 : State.get(getOperand(1), Part - 1); 753 if (!PartMinus1->getType()->isVectorTy()) { 754 State.set(this, PartMinus1, Part); 755 } else { 756 Value *V2 = State.get(getOperand(1), Part); 757 State.set(this, Builder.CreateVectorSplice(PartMinus1, V2, -1), Part); 758 } 759 break; 760 } 761 762 case VPInstruction::CanonicalIVIncrement: 763 case VPInstruction::CanonicalIVIncrementNUW: { 764 Value *Next = nullptr; 765 if (Part == 0) { 766 bool IsNUW = getOpcode() == VPInstruction::CanonicalIVIncrementNUW; 767 auto *Phi = State.get(getOperand(0), 0); 768 // The loop step is equal to the vectorization factor (num of SIMD 769 // elements) times the unroll factor (num of SIMD instructions). 770 Value *Step = 771 createStepForVF(Builder, Phi->getType(), State.VF, State.UF); 772 Next = Builder.CreateAdd(Phi, Step, "index.next", IsNUW, false); 773 } else { 774 Next = State.get(this, 0); 775 } 776 777 State.set(this, Next, Part); 778 break; 779 } 780 case VPInstruction::BranchOnCount: { 781 if (Part != 0) 782 break; 783 // First create the compare. 784 Value *IV = State.get(getOperand(0), Part); 785 Value *TC = State.get(getOperand(1), Part); 786 Value *Cond = Builder.CreateICmpEQ(IV, TC); 787 788 // Now create the branch. 789 auto *Plan = getParent()->getPlan(); 790 VPRegionBlock *TopRegion = Plan->getVectorLoopRegion(); 791 VPBasicBlock *Header = TopRegion->getEntry()->getEntryBasicBlock(); 792 if (Header->empty()) { 793 assert(EnableVPlanNativePath && 794 "empty entry block only expected in VPlanNativePath"); 795 Header = cast<VPBasicBlock>(Header->getSingleSuccessor()); 796 } 797 // TODO: Once the exit block is modeled in VPlan, use it instead of going 798 // through State.CFG.ExitBB. 
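    // Emit the branch before the block's temporary terminator; that terminator
    // is then erased so the new conditional branch terminates the latch.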
799 BasicBlock *Exit = State.CFG.ExitBB; 800 801 Builder.CreateCondBr(Cond, Exit, State.CFG.VPBB2IRBB[Header]); 802 Builder.GetInsertBlock()->getTerminator()->eraseFromParent(); 803 break; 804 } 805 default: 806 llvm_unreachable("Unsupported opcode for instruction"); 807 } 808 } 809 810 void VPInstruction::execute(VPTransformState &State) { 811 assert(!State.Instance && "VPInstruction executing an Instance"); 812 IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder); 813 State.Builder.setFastMathFlags(FMF); 814 for (unsigned Part = 0; Part < State.UF; ++Part) 815 generateInstruction(State, Part); 816 } 817 818 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 819 void VPInstruction::dump() const { 820 VPSlotTracker SlotTracker(getParent()->getPlan()); 821 print(dbgs(), "", SlotTracker); 822 } 823 824 void VPInstruction::print(raw_ostream &O, const Twine &Indent, 825 VPSlotTracker &SlotTracker) const { 826 O << Indent << "EMIT "; 827 828 if (hasResult()) { 829 printAsOperand(O, SlotTracker); 830 O << " = "; 831 } 832 833 switch (getOpcode()) { 834 case VPInstruction::Not: 835 O << "not"; 836 break; 837 case VPInstruction::ICmpULE: 838 O << "icmp ule"; 839 break; 840 case VPInstruction::SLPLoad: 841 O << "combined load"; 842 break; 843 case VPInstruction::SLPStore: 844 O << "combined store"; 845 break; 846 case VPInstruction::ActiveLaneMask: 847 O << "active lane mask"; 848 break; 849 case VPInstruction::FirstOrderRecurrenceSplice: 850 O << "first-order splice"; 851 break; 852 case VPInstruction::CanonicalIVIncrement: 853 O << "VF * UF + "; 854 break; 855 case VPInstruction::CanonicalIVIncrementNUW: 856 O << "VF * UF +(nuw) "; 857 break; 858 case VPInstruction::BranchOnCount: 859 O << "branch-on-count "; 860 break; 861 default: 862 O << Instruction::getOpcodeName(getOpcode()); 863 } 864 865 O << FMF; 866 867 for (const VPValue *Operand : operands()) { 868 O << " "; 869 Operand->printAsOperand(O, SlotTracker); 870 } 871 872 if (DL) { 873 O << ", !dbg "; 874 DL.print(O); 875 } 876 } 877 #endif 878 879 void VPInstruction::setFastMathFlags(FastMathFlags FMFNew) { 880 // Make sure the VPInstruction is a floating-point operation. 881 assert((Opcode == Instruction::FAdd || Opcode == Instruction::FMul || 882 Opcode == Instruction::FNeg || Opcode == Instruction::FSub || 883 Opcode == Instruction::FDiv || Opcode == Instruction::FRem || 884 Opcode == Instruction::FCmp) && 885 "this op can't take fast-math flags"); 886 FMF = FMFNew; 887 } 888 889 void VPlan::prepareToExecute(Value *TripCountV, Value *VectorTripCountV, 890 Value *CanonicalIVStartValue, 891 VPTransformState &State) { 892 // Check if the trip count is needed, and if so build it. 893 if (TripCount && TripCount->getNumUsers()) { 894 for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part) 895 State.set(TripCount, TripCountV, Part); 896 } 897 898 // Check if the backedge taken count is needed, and if so build it. 899 if (BackedgeTakenCount && BackedgeTakenCount->getNumUsers()) { 900 IRBuilder<> Builder(State.CFG.PrevBB->getTerminator()); 901 auto *TCMO = Builder.CreateSub(TripCountV, 902 ConstantInt::get(TripCountV->getType(), 1), 903 "trip.count.minus.1"); 904 auto VF = State.VF; 905 Value *VTCMO = 906 VF.isScalar() ? 
TCMO : Builder.CreateVectorSplat(VF, TCMO, "broadcast");
    for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
      State.set(BackedgeTakenCount, VTCMO, Part);
  }

  for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
    State.set(&VectorTripCount, VectorTripCountV, Part);

  // When vectorizing the epilogue loop, the canonical induction start value
  // needs to be changed from zero to the value after the main vector loop.
  if (CanonicalIVStartValue) {
    VPValue *VPV = new VPValue(CanonicalIVStartValue);
    addExternalDef(VPV);
    auto *IV = getCanonicalIV();
    assert(all_of(IV->users(),
                  [](const VPUser *U) {
                    if (isa<VPScalarIVStepsRecipe>(U))
                      return true;
                    auto *VPI = cast<VPInstruction>(U);
                    return VPI->getOpcode() ==
                               VPInstruction::CanonicalIVIncrement ||
                           VPI->getOpcode() ==
                               VPInstruction::CanonicalIVIncrementNUW;
                  }) &&
           "the canonical IV should only be used by its increments or "
           "ScalarIVSteps when "
           "resetting the start value");
    IV->setOperand(0, VPV);
  }
}

/// Generate the code inside the preheader and body of the vectorized loop.
/// Assumes a single pre-header basic-block was created for this. Introduce
/// additional basic-blocks as needed, and fill them all.
void VPlan::execute(VPTransformState *State) {
  // Set the reverse mapping from VPValues to Values for code generation.
  for (auto &Entry : Value2VPValue)
    State->VPValue2Value[Entry.second] = Entry.first;

  // Initialize CFG state.
  State->CFG.PrevVPBB = nullptr;
  State->CFG.ExitBB = State->CFG.PrevBB->getSingleSuccessor();
  BasicBlock *VectorPreHeader = State->CFG.PrevBB;
  State->Builder.SetInsertPoint(VectorPreHeader->getTerminator());

  // Generate code in the loop pre-header and body.
  for (VPBlockBase *Block : depth_first(Entry))
    Block->execute(State);

  // Set up branch terminator successors for VPBBs in VPBBsToFix based on
  // VPBB's successors.
  for (auto VPBB : State->CFG.VPBBsToFix) {
    assert(EnableVPlanNativePath &&
           "Unexpected VPBBsToFix in non VPlan-native path");
    BasicBlock *BB = State->CFG.VPBB2IRBB[VPBB];
    assert(BB && "Unexpected null basic block for VPBB");

    unsigned Idx = 0;
    auto *BBTerminator = BB->getTerminator();

    for (VPBlockBase *SuccVPBlock : VPBB->getHierarchicalSuccessors()) {
      VPBasicBlock *SuccVPBB = SuccVPBlock->getEntryBasicBlock();
      BBTerminator->setSuccessor(Idx, State->CFG.VPBB2IRBB[SuccVPBB]);
      ++Idx;
    }
  }

  BasicBlock *VectorLatchBB = State->CFG.PrevBB;

  // Fix the latch value of canonical, reduction and first-order recurrence
  // phis in the vector loop.
  VPBasicBlock *Header = getVectorLoopRegion()->getEntryBasicBlock();
  for (VPRecipeBase &R : Header->phis()) {
    // Skip phi-like recipes that generate their backedge values themselves.
    if (isa<VPWidenPHIRecipe>(&R))
      continue;

    if (isa<VPWidenPointerInductionRecipe>(&R) ||
        isa<VPWidenIntOrFpInductionRecipe>(&R)) {
      PHINode *Phi = nullptr;
      if (isa<VPWidenIntOrFpInductionRecipe>(&R)) {
        Phi = cast<PHINode>(State->get(R.getVPSingleValue(), 0));
      } else {
        auto *WidenPhi = cast<VPWidenPointerInductionRecipe>(&R);
        // TODO: Split off the case that all users of a pointer phi are scalar
        // from the VPWidenPointerInductionRecipe.
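        // If every user of the pointer phi only demands scalar values, no
        // widened phi was generated for it and there is nothing to fix up.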
992 if (all_of(WidenPhi->users(), [WidenPhi](const VPUser *U) { 993 return cast<VPRecipeBase>(U)->usesScalars(WidenPhi); 994 })) 995 continue; 996 997 auto *GEP = cast<GetElementPtrInst>(State->get(WidenPhi, 0)); 998 Phi = cast<PHINode>(GEP->getPointerOperand()); 999 } 1000 1001 Phi->setIncomingBlock(1, VectorLatchBB); 1002 1003 // Move the last step to the end of the latch block. This ensures 1004 // consistent placement of all induction updates. 1005 Instruction *Inc = cast<Instruction>(Phi->getIncomingValue(1)); 1006 Inc->moveBefore(VectorLatchBB->getTerminator()->getPrevNode()); 1007 continue; 1008 } 1009 1010 auto *PhiR = cast<VPHeaderPHIRecipe>(&R); 1011 // For canonical IV, first-order recurrences and in-order reduction phis, 1012 // only a single part is generated, which provides the last part from the 1013 // previous iteration. For non-ordered reductions all UF parts are 1014 // generated. 1015 bool SinglePartNeeded = isa<VPCanonicalIVPHIRecipe>(PhiR) || 1016 isa<VPFirstOrderRecurrencePHIRecipe>(PhiR) || 1017 cast<VPReductionPHIRecipe>(PhiR)->isOrdered(); 1018 unsigned LastPartForNewPhi = SinglePartNeeded ? 1 : State->UF; 1019 1020 for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) { 1021 Value *Phi = State->get(PhiR, Part); 1022 Value *Val = State->get(PhiR->getBackedgeValue(), 1023 SinglePartNeeded ? State->UF - 1 : Part); 1024 cast<PHINode>(Phi)->addIncoming(Val, VectorLatchBB); 1025 } 1026 } 1027 1028 // We do not attempt to preserve DT for outer loop vectorization currently. 1029 if (!EnableVPlanNativePath) { 1030 BasicBlock *VectorHeaderBB = State->CFG.VPBB2IRBB[Header]; 1031 State->DT->addNewBlock(VectorHeaderBB, VectorPreHeader); 1032 updateDominatorTree(State->DT, VectorHeaderBB, VectorLatchBB, 1033 State->CFG.ExitBB); 1034 } 1035 } 1036 1037 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1038 LLVM_DUMP_METHOD 1039 void VPlan::print(raw_ostream &O) const { 1040 VPSlotTracker SlotTracker(this); 1041 1042 O << "VPlan '" << Name << "' {"; 1043 1044 if (VectorTripCount.getNumUsers() > 0) { 1045 O << "\nLive-in "; 1046 VectorTripCount.printAsOperand(O, SlotTracker); 1047 O << " = vector-trip-count\n"; 1048 } 1049 1050 if (BackedgeTakenCount && BackedgeTakenCount->getNumUsers()) { 1051 O << "\nLive-in "; 1052 BackedgeTakenCount->printAsOperand(O, SlotTracker); 1053 O << " = backedge-taken count\n"; 1054 } 1055 1056 for (const VPBlockBase *Block : depth_first(getEntry())) { 1057 O << '\n'; 1058 Block->print(O, "", SlotTracker); 1059 } 1060 O << "}\n"; 1061 } 1062 1063 LLVM_DUMP_METHOD 1064 void VPlan::printDOT(raw_ostream &O) const { 1065 VPlanPrinter Printer(O, *this); 1066 Printer.dump(); 1067 } 1068 1069 LLVM_DUMP_METHOD 1070 void VPlan::dump() const { print(dbgs()); } 1071 #endif 1072 1073 void VPlan::updateDominatorTree(DominatorTree *DT, BasicBlock *LoopHeaderBB, 1074 BasicBlock *LoopLatchBB, 1075 BasicBlock *LoopExitBB) { 1076 // The vector body may be more than a single basic-block by this point. 1077 // Update the dominator tree information inside the vector body by propagating 1078 // it from header to latch, expecting only triangular control-flow, if any. 1079 BasicBlock *PostDomSucc = nullptr; 1080 for (auto *BB = LoopHeaderBB; BB != LoopLatchBB; BB = PostDomSucc) { 1081 // Get the list of successors of this block. 
    std::vector<BasicBlock *> Succs(succ_begin(BB), succ_end(BB));
    assert(Succs.size() <= 2 &&
           "Basic block in vector loop has more than 2 successors.");
    PostDomSucc = Succs[0];
    if (Succs.size() == 1) {
      assert(PostDomSucc->getSinglePredecessor() &&
             "PostDom successor has more than one predecessor.");
      DT->addNewBlock(PostDomSucc, BB);
      continue;
    }
    BasicBlock *InterimSucc = Succs[1];
    if (PostDomSucc->getSingleSuccessor() == InterimSucc) {
      PostDomSucc = Succs[1];
      InterimSucc = Succs[0];
    }
    assert(InterimSucc->getSingleSuccessor() == PostDomSucc &&
           "One successor of a basic block does not lead to the other.");
    assert(InterimSucc->getSinglePredecessor() &&
           "Interim successor has more than one predecessor.");
    assert(PostDomSucc->hasNPredecessors(2) &&
           "PostDom successor has more than two predecessors.");
    DT->addNewBlock(InterimSucc, BB);
    DT->addNewBlock(PostDomSucc, BB);
  }
  // Latch block is a new dominator for the loop exit.
  DT->changeImmediateDominator(LoopExitBB, LoopLatchBB);
  assert(DT->verify(DominatorTree::VerificationLevel::Fast));
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
Twine VPlanPrinter::getUID(const VPBlockBase *Block) {
  return (isa<VPRegionBlock>(Block) ? "cluster_N" : "N") +
         Twine(getOrCreateBID(Block));
}

Twine VPlanPrinter::getOrCreateName(const VPBlockBase *Block) {
  const std::string &Name = Block->getName();
  if (!Name.empty())
    return Name;
  return "VPB" + Twine(getOrCreateBID(Block));
}

void VPlanPrinter::dump() {
  Depth = 1;
  bumpIndent(0);
  OS << "digraph VPlan {\n";
  OS << "graph [labelloc=t, fontsize=30; label=\"Vectorization Plan";
  if (!Plan.getName().empty())
    OS << "\\n" << DOT::EscapeString(Plan.getName());
  if (Plan.BackedgeTakenCount) {
    OS << ", where:\\n";
    Plan.BackedgeTakenCount->print(OS, SlotTracker);
    OS << " := BackedgeTakenCount";
  }
  OS << "\"]\n";
  OS << "node [shape=rect, fontname=Courier, fontsize=30]\n";
  OS << "edge [fontname=Courier, fontsize=30]\n";
  OS << "compound=true\n";

  for (const VPBlockBase *Block : depth_first(Plan.getEntry()))
    dumpBlock(Block);

  OS << "}\n";
}

void VPlanPrinter::dumpBlock(const VPBlockBase *Block) {
  if (const VPBasicBlock *BasicBlock = dyn_cast<VPBasicBlock>(Block))
    dumpBasicBlock(BasicBlock);
  else if (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    dumpRegion(Region);
  else
    llvm_unreachable("Unsupported kind of VPBlock.");
}

void VPlanPrinter::drawEdge(const VPBlockBase *From, const VPBlockBase *To,
                            bool Hidden, const Twine &Label) {
  // Due to "dot" we print an edge between two regions as an edge between the
  // exit basic block and the entry basic block of the respective regions.
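  // The ltail/lhead attributes set below make dot render such edges as if
  // they started or ended at the enclosing region clusters.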
1160 const VPBlockBase *Tail = From->getExitBasicBlock(); 1161 const VPBlockBase *Head = To->getEntryBasicBlock(); 1162 OS << Indent << getUID(Tail) << " -> " << getUID(Head); 1163 OS << " [ label=\"" << Label << '\"'; 1164 if (Tail != From) 1165 OS << " ltail=" << getUID(From); 1166 if (Head != To) 1167 OS << " lhead=" << getUID(To); 1168 if (Hidden) 1169 OS << "; splines=none"; 1170 OS << "]\n"; 1171 } 1172 1173 void VPlanPrinter::dumpEdges(const VPBlockBase *Block) { 1174 auto &Successors = Block->getSuccessors(); 1175 if (Successors.size() == 1) 1176 drawEdge(Block, Successors.front(), false, ""); 1177 else if (Successors.size() == 2) { 1178 drawEdge(Block, Successors.front(), false, "T"); 1179 drawEdge(Block, Successors.back(), false, "F"); 1180 } else { 1181 unsigned SuccessorNumber = 0; 1182 for (auto *Successor : Successors) 1183 drawEdge(Block, Successor, false, Twine(SuccessorNumber++)); 1184 } 1185 } 1186 1187 void VPlanPrinter::dumpBasicBlock(const VPBasicBlock *BasicBlock) { 1188 // Implement dot-formatted dump by performing plain-text dump into the 1189 // temporary storage followed by some post-processing. 1190 OS << Indent << getUID(BasicBlock) << " [label =\n"; 1191 bumpIndent(1); 1192 std::string Str; 1193 raw_string_ostream SS(Str); 1194 // Use no indentation as we need to wrap the lines into quotes ourselves. 1195 BasicBlock->print(SS, "", SlotTracker); 1196 1197 // We need to process each line of the output separately, so split 1198 // single-string plain-text dump. 1199 SmallVector<StringRef, 0> Lines; 1200 StringRef(Str).rtrim('\n').split(Lines, "\n"); 1201 1202 auto EmitLine = [&](StringRef Line, StringRef Suffix) { 1203 OS << Indent << '"' << DOT::EscapeString(Line.str()) << "\\l\"" << Suffix; 1204 }; 1205 1206 // Don't need the "+" after the last line. 1207 for (auto Line : make_range(Lines.begin(), Lines.end() - 1)) 1208 EmitLine(Line, " +\n"); 1209 EmitLine(Lines.back(), "\n"); 1210 1211 bumpIndent(-1); 1212 OS << Indent << "]\n"; 1213 1214 dumpEdges(BasicBlock); 1215 } 1216 1217 void VPlanPrinter::dumpRegion(const VPRegionBlock *Region) { 1218 OS << Indent << "subgraph " << getUID(Region) << " {\n"; 1219 bumpIndent(1); 1220 OS << Indent << "fontname=Courier\n" 1221 << Indent << "label=\"" 1222 << DOT::EscapeString(Region->isReplicator() ? "<xVFxUF> " : "<x1> ") 1223 << DOT::EscapeString(Region->getName()) << "\"\n"; 1224 // Dump the blocks of the region. 
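  // The depth-first traversal stays inside the region; edges that leave the
  // region are emitted separately by the dumpEdges call below.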
1225 assert(Region->getEntry() && "Region contains no inner blocks."); 1226 for (const VPBlockBase *Block : depth_first(Region->getEntry())) 1227 dumpBlock(Block); 1228 bumpIndent(-1); 1229 OS << Indent << "}\n"; 1230 dumpEdges(Region); 1231 } 1232 1233 void VPlanIngredient::print(raw_ostream &O) const { 1234 if (auto *Inst = dyn_cast<Instruction>(V)) { 1235 if (!Inst->getType()->isVoidTy()) { 1236 Inst->printAsOperand(O, false); 1237 O << " = "; 1238 } 1239 O << Inst->getOpcodeName() << " "; 1240 unsigned E = Inst->getNumOperands(); 1241 if (E > 0) { 1242 Inst->getOperand(0)->printAsOperand(O, false); 1243 for (unsigned I = 1; I < E; ++I) 1244 Inst->getOperand(I)->printAsOperand(O << ", ", false); 1245 } 1246 } else // !Inst 1247 V->printAsOperand(O, false); 1248 } 1249 1250 void VPWidenCallRecipe::print(raw_ostream &O, const Twine &Indent, 1251 VPSlotTracker &SlotTracker) const { 1252 O << Indent << "WIDEN-CALL "; 1253 1254 auto *CI = cast<CallInst>(getUnderlyingInstr()); 1255 if (CI->getType()->isVoidTy()) 1256 O << "void "; 1257 else { 1258 printAsOperand(O, SlotTracker); 1259 O << " = "; 1260 } 1261 1262 O << "call @" << CI->getCalledFunction()->getName() << "("; 1263 printOperands(O, SlotTracker); 1264 O << ")"; 1265 } 1266 1267 void VPWidenSelectRecipe::print(raw_ostream &O, const Twine &Indent, 1268 VPSlotTracker &SlotTracker) const { 1269 O << Indent << "WIDEN-SELECT "; 1270 printAsOperand(O, SlotTracker); 1271 O << " = select "; 1272 getOperand(0)->printAsOperand(O, SlotTracker); 1273 O << ", "; 1274 getOperand(1)->printAsOperand(O, SlotTracker); 1275 O << ", "; 1276 getOperand(2)->printAsOperand(O, SlotTracker); 1277 O << (InvariantCond ? " (condition is loop invariant)" : ""); 1278 } 1279 1280 void VPWidenRecipe::print(raw_ostream &O, const Twine &Indent, 1281 VPSlotTracker &SlotTracker) const { 1282 O << Indent << "WIDEN "; 1283 printAsOperand(O, SlotTracker); 1284 O << " = " << getUnderlyingInstr()->getOpcodeName() << " "; 1285 printOperands(O, SlotTracker); 1286 } 1287 1288 void VPWidenIntOrFpInductionRecipe::print(raw_ostream &O, const Twine &Indent, 1289 VPSlotTracker &SlotTracker) const { 1290 O << Indent << "WIDEN-INDUCTION"; 1291 if (getTruncInst()) { 1292 O << "\\l\""; 1293 O << " +\n" << Indent << "\" " << VPlanIngredient(IV) << "\\l\""; 1294 O << " +\n" << Indent << "\" "; 1295 getVPValue(0)->printAsOperand(O, SlotTracker); 1296 } else 1297 O << " " << VPlanIngredient(IV); 1298 } 1299 1300 void VPWidenPointerInductionRecipe::print(raw_ostream &O, const Twine &Indent, 1301 VPSlotTracker &SlotTracker) const { 1302 O << Indent << "EMIT "; 1303 printAsOperand(O, SlotTracker); 1304 O << " = WIDEN-POINTER-INDUCTION "; 1305 getStartValue()->printAsOperand(O, SlotTracker); 1306 O << ", " << *IndDesc.getStep(); 1307 } 1308 1309 #endif 1310 1311 bool VPWidenIntOrFpInductionRecipe::isCanonical() const { 1312 auto *StartC = dyn_cast<ConstantInt>(getStartValue()->getLiveInIRValue()); 1313 auto *StepC = dyn_cast<SCEVConstant>(getInductionDescriptor().getStep()); 1314 return StartC && StartC->isZero() && StepC && StepC->isOne(); 1315 } 1316 1317 VPCanonicalIVPHIRecipe *VPScalarIVStepsRecipe::getCanonicalIV() const { 1318 return cast<VPCanonicalIVPHIRecipe>(getOperand(0)); 1319 } 1320 1321 bool VPScalarIVStepsRecipe::isCanonical() const { 1322 auto *CanIV = getCanonicalIV(); 1323 // The start value of the steps-recipe must match the start value of the 1324 // canonical induction and it must step by 1. 
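  // Only a live-in step is considered here; a step defined by a recipe in the
  // plan is conservatively treated as non-canonical.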
1325 if (CanIV->getStartValue() != getStartValue()) 1326 return false; 1327 auto *StepVPV = getStepValue(); 1328 if (StepVPV->getDef()) 1329 return false; 1330 auto *StepC = dyn_cast_or_null<ConstantInt>(StepVPV->getLiveInIRValue()); 1331 return StepC && StepC->isOne(); 1332 } 1333 1334 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1335 void VPScalarIVStepsRecipe::print(raw_ostream &O, const Twine &Indent, 1336 VPSlotTracker &SlotTracker) const { 1337 O << Indent; 1338 printAsOperand(O, SlotTracker); 1339 O << Indent << "= SCALAR-STEPS "; 1340 printOperands(O, SlotTracker); 1341 } 1342 1343 void VPWidenGEPRecipe::print(raw_ostream &O, const Twine &Indent, 1344 VPSlotTracker &SlotTracker) const { 1345 O << Indent << "WIDEN-GEP "; 1346 O << (IsPtrLoopInvariant ? "Inv" : "Var"); 1347 size_t IndicesNumber = IsIndexLoopInvariant.size(); 1348 for (size_t I = 0; I < IndicesNumber; ++I) 1349 O << "[" << (IsIndexLoopInvariant[I] ? "Inv" : "Var") << "]"; 1350 1351 O << " "; 1352 printAsOperand(O, SlotTracker); 1353 O << " = getelementptr "; 1354 printOperands(O, SlotTracker); 1355 } 1356 1357 void VPWidenPHIRecipe::print(raw_ostream &O, const Twine &Indent, 1358 VPSlotTracker &SlotTracker) const { 1359 O << Indent << "WIDEN-PHI "; 1360 1361 auto *OriginalPhi = cast<PHINode>(getUnderlyingValue()); 1362 // Unless all incoming values are modeled in VPlan print the original PHI 1363 // directly. 1364 // TODO: Remove once all VPWidenPHIRecipe instances keep all relevant incoming 1365 // values as VPValues. 1366 if (getNumOperands() != OriginalPhi->getNumOperands()) { 1367 O << VPlanIngredient(OriginalPhi); 1368 return; 1369 } 1370 1371 printAsOperand(O, SlotTracker); 1372 O << " = phi "; 1373 printOperands(O, SlotTracker); 1374 } 1375 1376 void VPBlendRecipe::print(raw_ostream &O, const Twine &Indent, 1377 VPSlotTracker &SlotTracker) const { 1378 O << Indent << "BLEND "; 1379 Phi->printAsOperand(O, false); 1380 O << " ="; 1381 if (getNumIncomingValues() == 1) { 1382 // Not a User of any mask: not really blending, this is a 1383 // single-predecessor phi. 1384 O << " "; 1385 getIncomingValue(0)->printAsOperand(O, SlotTracker); 1386 } else { 1387 for (unsigned I = 0, E = getNumIncomingValues(); I < E; ++I) { 1388 O << " "; 1389 getIncomingValue(I)->printAsOperand(O, SlotTracker); 1390 O << "/"; 1391 getMask(I)->printAsOperand(O, SlotTracker); 1392 } 1393 } 1394 } 1395 1396 void VPReductionRecipe::print(raw_ostream &O, const Twine &Indent, 1397 VPSlotTracker &SlotTracker) const { 1398 O << Indent << "REDUCE "; 1399 printAsOperand(O, SlotTracker); 1400 O << " = "; 1401 getChainOp()->printAsOperand(O, SlotTracker); 1402 O << " +"; 1403 if (isa<FPMathOperator>(getUnderlyingInstr())) 1404 O << getUnderlyingInstr()->getFastMathFlags(); 1405 O << " reduce." << Instruction::getOpcodeName(RdxDesc->getOpcode()) << " ("; 1406 getVecOp()->printAsOperand(O, SlotTracker); 1407 if (getCondOp()) { 1408 O << ", "; 1409 getCondOp()->printAsOperand(O, SlotTracker); 1410 } 1411 O << ")"; 1412 } 1413 1414 void VPReplicateRecipe::print(raw_ostream &O, const Twine &Indent, 1415 VPSlotTracker &SlotTracker) const { 1416 O << Indent << (IsUniform ? 
"CLONE " : "REPLICATE "); 1417 1418 if (!getUnderlyingInstr()->getType()->isVoidTy()) { 1419 printAsOperand(O, SlotTracker); 1420 O << " = "; 1421 } 1422 O << Instruction::getOpcodeName(getUnderlyingInstr()->getOpcode()) << " "; 1423 printOperands(O, SlotTracker); 1424 1425 if (AlsoPack) 1426 O << " (S->V)"; 1427 } 1428 1429 void VPPredInstPHIRecipe::print(raw_ostream &O, const Twine &Indent, 1430 VPSlotTracker &SlotTracker) const { 1431 O << Indent << "PHI-PREDICATED-INSTRUCTION "; 1432 printAsOperand(O, SlotTracker); 1433 O << " = "; 1434 printOperands(O, SlotTracker); 1435 } 1436 1437 void VPWidenMemoryInstructionRecipe::print(raw_ostream &O, const Twine &Indent, 1438 VPSlotTracker &SlotTracker) const { 1439 O << Indent << "WIDEN "; 1440 1441 if (!isStore()) { 1442 printAsOperand(O, SlotTracker); 1443 O << " = "; 1444 } 1445 O << Instruction::getOpcodeName(Ingredient.getOpcode()) << " "; 1446 1447 printOperands(O, SlotTracker); 1448 } 1449 #endif 1450 1451 void VPCanonicalIVPHIRecipe::execute(VPTransformState &State) { 1452 Value *Start = getStartValue()->getLiveInIRValue(); 1453 PHINode *EntryPart = PHINode::Create( 1454 Start->getType(), 2, "index", &*State.CFG.PrevBB->getFirstInsertionPt()); 1455 1456 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this); 1457 EntryPart->addIncoming(Start, VectorPH); 1458 EntryPart->setDebugLoc(DL); 1459 for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part) 1460 State.set(this, EntryPart, Part); 1461 } 1462 1463 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1464 void VPCanonicalIVPHIRecipe::print(raw_ostream &O, const Twine &Indent, 1465 VPSlotTracker &SlotTracker) const { 1466 O << Indent << "EMIT "; 1467 printAsOperand(O, SlotTracker); 1468 O << " = CANONICAL-INDUCTION"; 1469 } 1470 #endif 1471 1472 void VPExpandSCEVRecipe::execute(VPTransformState &State) { 1473 assert(!State.Instance && "cannot be used in per-lane"); 1474 const DataLayout &DL = State.CFG.PrevBB->getModule()->getDataLayout(); 1475 SCEVExpander Exp(SE, DL, "induction"); 1476 1477 Value *Res = Exp.expandCodeFor(Expr, Expr->getType(), 1478 &*State.Builder.GetInsertPoint()); 1479 1480 for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part) 1481 State.set(this, Res, Part); 1482 } 1483 1484 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1485 void VPExpandSCEVRecipe::print(raw_ostream &O, const Twine &Indent, 1486 VPSlotTracker &SlotTracker) const { 1487 O << Indent << "EMIT "; 1488 getVPSingleValue()->printAsOperand(O, SlotTracker); 1489 O << " = EXPAND SCEV " << *Expr; 1490 } 1491 #endif 1492 1493 void VPWidenCanonicalIVRecipe::execute(VPTransformState &State) { 1494 Value *CanonicalIV = State.get(getOperand(0), 0); 1495 Type *STy = CanonicalIV->getType(); 1496 IRBuilder<> Builder(State.CFG.PrevBB->getTerminator()); 1497 ElementCount VF = State.VF; 1498 Value *VStart = VF.isScalar() 1499 ? 
CanonicalIV 1500 : Builder.CreateVectorSplat(VF, CanonicalIV, "broadcast"); 1501 for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part) { 1502 Value *VStep = createStepForVF(Builder, STy, VF, Part); 1503 if (VF.isVector()) { 1504 VStep = Builder.CreateVectorSplat(VF, VStep); 1505 VStep = Builder.CreateAdd(VStep, Builder.CreateStepVector(VStep->getType())); 1506 } 1507 Value *CanonicalVectorIV = Builder.CreateAdd(VStart, VStep, "vec.iv"); 1508 State.set(this, CanonicalVectorIV, Part); 1509 } 1510 } 1511 1512 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1513 void VPWidenCanonicalIVRecipe::print(raw_ostream &O, const Twine &Indent, 1514 VPSlotTracker &SlotTracker) const { 1515 O << Indent << "EMIT "; 1516 printAsOperand(O, SlotTracker); 1517 O << " = WIDEN-CANONICAL-INDUCTION "; 1518 printOperands(O, SlotTracker); 1519 } 1520 #endif 1521 1522 void VPFirstOrderRecurrencePHIRecipe::execute(VPTransformState &State) { 1523 auto &Builder = State.Builder; 1524 // Create a vector from the initial value. 1525 auto *VectorInit = getStartValue()->getLiveInIRValue(); 1526 1527 Type *VecTy = State.VF.isScalar() 1528 ? VectorInit->getType() 1529 : VectorType::get(VectorInit->getType(), State.VF); 1530 1531 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this); 1532 if (State.VF.isVector()) { 1533 auto *IdxTy = Builder.getInt32Ty(); 1534 auto *One = ConstantInt::get(IdxTy, 1); 1535 IRBuilder<>::InsertPointGuard Guard(Builder); 1536 Builder.SetInsertPoint(VectorPH->getTerminator()); 1537 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, State.VF); 1538 auto *LastIdx = Builder.CreateSub(RuntimeVF, One); 1539 VectorInit = Builder.CreateInsertElement( 1540 PoisonValue::get(VecTy), VectorInit, LastIdx, "vector.recur.init"); 1541 } 1542 1543 // Create a phi node for the new recurrence. 1544 PHINode *EntryPart = PHINode::Create( 1545 VecTy, 2, "vector.recur", &*State.CFG.PrevBB->getFirstInsertionPt()); 1546 EntryPart->addIncoming(VectorInit, VectorPH); 1547 State.set(this, EntryPart, 0); 1548 } 1549 1550 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1551 void VPFirstOrderRecurrencePHIRecipe::print(raw_ostream &O, const Twine &Indent, 1552 VPSlotTracker &SlotTracker) const { 1553 O << Indent << "FIRST-ORDER-RECURRENCE-PHI "; 1554 printAsOperand(O, SlotTracker); 1555 O << " = phi "; 1556 printOperands(O, SlotTracker); 1557 } 1558 #endif 1559 1560 void VPReductionPHIRecipe::execute(VPTransformState &State) { 1561 PHINode *PN = cast<PHINode>(getUnderlyingValue()); 1562 auto &Builder = State.Builder; 1563 1564 // In order to support recurrences we need to be able to vectorize Phi nodes. 1565 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 1566 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 1567 // this value when we vectorize all of the instructions that use the PHI. 1568 bool ScalarPHI = State.VF.isScalar() || IsInLoop; 1569 Type *VecTy = 1570 ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), State.VF); 1571 1572 BasicBlock *HeaderBB = State.CFG.PrevBB; 1573 assert(State.CurrentVectorLoop->getHeader() == HeaderBB && 1574 "recipe must be in the vector loop header"); 1575 unsigned LastPartForNewPhi = isOrdered() ? 
1 : State.UF;
  for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
    Value *EntryPart =
        PHINode::Create(VecTy, 2, "vec.phi", &*HeaderBB->getFirstInsertionPt());
    State.set(this, EntryPart, Part);
  }

  BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);

  // Reductions do not have to start at zero. They can start with
  // any loop invariant values.
  VPValue *StartVPV = getStartValue();
  Value *StartV = StartVPV->getLiveInIRValue();

  Value *Iden = nullptr;
  RecurKind RK = RdxDesc.getRecurrenceKind();
  if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK) ||
      RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK)) {
    // MinMax reductions have the start value as their identity.
    if (ScalarPHI) {
      Iden = StartV;
    } else {
      IRBuilderBase::InsertPointGuard IPBuilder(Builder);
      Builder.SetInsertPoint(VectorPH->getTerminator());
      StartV = Iden =
          Builder.CreateVectorSplat(State.VF, StartV, "minmax.ident");
    }
  } else {
    Iden = RdxDesc.getRecurrenceIdentity(RK, VecTy->getScalarType(),
                                         RdxDesc.getFastMathFlags());

    if (!ScalarPHI) {
      Iden = Builder.CreateVectorSplat(State.VF, Iden);
      IRBuilderBase::InsertPointGuard IPBuilder(Builder);
      Builder.SetInsertPoint(VectorPH->getTerminator());
      Constant *Zero = Builder.getInt32(0);
      StartV = Builder.CreateInsertElement(Iden, StartV, Zero);
    }
  }

  for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
    Value *EntryPart = State.get(this, Part);
    // Make sure to add the reduction start value only to the
    // first unroll part.
    Value *StartVal = (Part == 0) ? StartV : Iden;
    cast<PHINode>(EntryPart)->addIncoming(StartVal, VectorPH);
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPReductionPHIRecipe::print(raw_ostream &O, const Twine &Indent,
                                 VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-REDUCTION-PHI ";

  printAsOperand(O, SlotTracker);
  O << " = phi ";
  printOperands(O, SlotTracker);
}
#endif

template void DomTreeBuilder::Calculate<VPDominatorTree>(VPDominatorTree &DT);

void VPValue::replaceAllUsesWith(VPValue *New) {
  for (unsigned J = 0; J < getNumUsers();) {
    VPUser *User = Users[J];
    unsigned NumUsers = getNumUsers();
    for (unsigned I = 0, E = User->getNumOperands(); I < E; ++I)
      if (User->getOperand(I) == this)
        User->setOperand(I, New);
    // If a user got removed after updating the current user, the next user to
    // update will be moved to the current position, so we only need to
    // increment the index if the number of users did not change.
1647 if (NumUsers == getNumUsers()) 1648 J++; 1649 } 1650 } 1651 1652 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1653 void VPValue::printAsOperand(raw_ostream &OS, VPSlotTracker &Tracker) const { 1654 if (const Value *UV = getUnderlyingValue()) { 1655 OS << "ir<"; 1656 UV->printAsOperand(OS, false); 1657 OS << ">"; 1658 return; 1659 } 1660 1661 unsigned Slot = Tracker.getSlot(this); 1662 if (Slot == unsigned(-1)) 1663 OS << "<badref>"; 1664 else 1665 OS << "vp<%" << Tracker.getSlot(this) << ">"; 1666 } 1667 1668 void VPUser::printOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const { 1669 interleaveComma(operands(), O, [&O, &SlotTracker](VPValue *Op) { 1670 Op->printAsOperand(O, SlotTracker); 1671 }); 1672 } 1673 #endif 1674 1675 void VPInterleavedAccessInfo::visitRegion(VPRegionBlock *Region, 1676 Old2NewTy &Old2New, 1677 InterleavedAccessInfo &IAI) { 1678 ReversePostOrderTraversal<VPBlockBase *> RPOT(Region->getEntry()); 1679 for (VPBlockBase *Base : RPOT) { 1680 visitBlock(Base, Old2New, IAI); 1681 } 1682 } 1683 1684 void VPInterleavedAccessInfo::visitBlock(VPBlockBase *Block, Old2NewTy &Old2New, 1685 InterleavedAccessInfo &IAI) { 1686 if (VPBasicBlock *VPBB = dyn_cast<VPBasicBlock>(Block)) { 1687 for (VPRecipeBase &VPI : *VPBB) { 1688 if (isa<VPHeaderPHIRecipe>(&VPI)) 1689 continue; 1690 assert(isa<VPInstruction>(&VPI) && "Can only handle VPInstructions"); 1691 auto *VPInst = cast<VPInstruction>(&VPI); 1692 auto *Inst = cast<Instruction>(VPInst->getUnderlyingValue()); 1693 auto *IG = IAI.getInterleaveGroup(Inst); 1694 if (!IG) 1695 continue; 1696 1697 auto NewIGIter = Old2New.find(IG); 1698 if (NewIGIter == Old2New.end()) 1699 Old2New[IG] = new InterleaveGroup<VPInstruction>( 1700 IG->getFactor(), IG->isReverse(), IG->getAlign()); 1701 1702 if (Inst == IG->getInsertPos()) 1703 Old2New[IG]->setInsertPos(VPInst); 1704 1705 InterleaveGroupMap[VPInst] = Old2New[IG]; 1706 InterleaveGroupMap[VPInst]->insertMember( 1707 VPInst, IG->getIndex(Inst), 1708 Align(IG->isReverse() ? (-1) * int(IG->getFactor()) 1709 : IG->getFactor())); 1710 } 1711 } else if (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block)) 1712 visitRegion(Region, Old2New, IAI); 1713 else 1714 llvm_unreachable("Unsupported kind of VPBlock."); 1715 } 1716 1717 VPInterleavedAccessInfo::VPInterleavedAccessInfo(VPlan &Plan, 1718 InterleavedAccessInfo &IAI) { 1719 Old2NewTy Old2New; 1720 visitRegion(Plan.getVectorLoopRegion(), Old2New, IAI); 1721 } 1722 1723 void VPSlotTracker::assignSlot(const VPValue *V) { 1724 assert(Slots.find(V) == Slots.end() && "VPValue already has a slot!"); 1725 Slots[V] = NextSlot++; 1726 } 1727 1728 void VPSlotTracker::assignSlots(const VPlan &Plan) { 1729 1730 for (const VPValue *V : Plan.VPExternalDefs) 1731 assignSlot(V); 1732 1733 assignSlot(&Plan.VectorTripCount); 1734 if (Plan.BackedgeTakenCount) 1735 assignSlot(Plan.BackedgeTakenCount); 1736 1737 ReversePostOrderTraversal< 1738 VPBlockRecursiveTraversalWrapper<const VPBlockBase *>> 1739 RPOT(VPBlockRecursiveTraversalWrapper<const VPBlockBase *>( 1740 Plan.getEntry())); 1741 for (const VPBasicBlock *VPBB : 1742 VPBlockUtils::blocksOnly<const VPBasicBlock>(RPOT)) 1743 for (const VPRecipeBase &Recipe : *VPBB) 1744 for (VPValue *Def : Recipe.definedValues()) 1745 assignSlot(Def); 1746 } 1747 1748 bool vputils::onlyFirstLaneUsed(VPValue *Def) { 1749 return all_of(Def->users(), [Def](VPUser *U) { 1750 return cast<VPRecipeBase>(U)->onlyFirstLaneUsed(Def); 1751 }); 1752 } 1753