//===- VPlan.cpp - Vectorizer Plan ----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This is the LLVM vectorization plan. It represents a candidate for
/// vectorization, making it possible to plan and optimize how to vectorize a
/// given loop before generating LLVM-IR.
/// The vectorizer uses vectorization plans to estimate the costs of potential
/// candidates and, if profitable, executes the desired plan, generating vector
/// LLVM-IR code.
///
//===----------------------------------------------------------------------===//

#include "VPlan.h"
#include "VPlanDominatorTree.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GenericDomTreeConstruction.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include <cassert>
#include <string>
#include <vector>

using namespace llvm;
extern cl::opt<bool> EnableVPlanNativePath;

#define DEBUG_TYPE "vplan"

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
raw_ostream &llvm::operator<<(raw_ostream &OS, const VPValue &V) {
  const VPInstruction *Instr = dyn_cast<VPInstruction>(&V);
  VPSlotTracker SlotTracker(
      (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
  V.print(OS, SlotTracker);
  return OS;
}
#endif

Value *VPLane::getAsRuntimeExpr(IRBuilderBase &Builder,
                                const ElementCount &VF) const {
  switch (LaneKind) {
  case VPLane::Kind::ScalableLast:
    // Lane = RuntimeVF - VF.getKnownMinValue() + Lane
    return Builder.CreateSub(getRuntimeVF(Builder, Builder.getInt32Ty(), VF),
                             Builder.getInt32(VF.getKnownMinValue() - Lane));
  case VPLane::Kind::First:
    return Builder.getInt32(Lane);
  }
  llvm_unreachable("Unknown lane kind");
}

VPValue::VPValue(const unsigned char SC, Value *UV, VPDef *Def)
    : SubclassID(SC), UnderlyingVal(UV), Def(Def) {
  if (Def)
    Def->addDefinedValue(this);
}

VPValue::~VPValue() {
  assert(Users.empty() && "trying to delete a VPValue with remaining users");
  if (Def)
    Def->removeDefinedValue(this);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPValue::print(raw_ostream &OS, VPSlotTracker &SlotTracker) const {
  if (const VPRecipeBase *R = dyn_cast_or_null<VPRecipeBase>(Def))
    R->print(OS, "", SlotTracker);
  else
    printAsOperand(OS, SlotTracker);
}

void VPValue::dump() const {
  const VPRecipeBase *Instr = dyn_cast_or_null<VPRecipeBase>(this->Def);
  VPSlotTracker SlotTracker(
      (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
  print(dbgs(), SlotTracker);
  dbgs() << "\n";
}

void VPDef::dump() const {
  const VPRecipeBase *Instr = dyn_cast_or_null<VPRecipeBase>(this);
  VPSlotTracker SlotTracker(
      (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
  print(dbgs(), "", SlotTracker);
  dbgs() << "\n";
}
#endif

// Get the top-most entry block of \p Start. This is the entry block of the
// containing VPlan. This function is templated to support both const and
// non-const blocks.
template <typename T> static T *getPlanEntry(T *Start) {
  T *Next = Start;
  T *Current = Start;
  while ((Next = Next->getParent()))
    Current = Next;

  SmallSetVector<T *, 8> WorkList;
  WorkList.insert(Current);

  for (unsigned i = 0; i < WorkList.size(); i++) {
    T *Current = WorkList[i];
    if (Current->getNumPredecessors() == 0)
      return Current;
    auto &Predecessors = Current->getPredecessors();
    WorkList.insert(Predecessors.begin(), Predecessors.end());
  }

  llvm_unreachable("VPlan without any entry node without predecessors");
}

VPlan *VPBlockBase::getPlan() { return getPlanEntry(this)->Plan; }

const VPlan *VPBlockBase::getPlan() const { return getPlanEntry(this)->Plan; }

/// \return the VPBasicBlock that is the entry of Block, possibly indirectly.
const VPBasicBlock *VPBlockBase::getEntryBasicBlock() const {
  const VPBlockBase *Block = this;
  while (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getEntry();
  return cast<VPBasicBlock>(Block);
}

VPBasicBlock *VPBlockBase::getEntryBasicBlock() {
  VPBlockBase *Block = this;
  while (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getEntry();
  return cast<VPBasicBlock>(Block);
}

void VPBlockBase::setPlan(VPlan *ParentPlan) {
  assert(ParentPlan->getEntry() == this &&
         "Can only set plan on its entry block.");
  Plan = ParentPlan;
}

/// \return the VPBasicBlock that is the exit of Block, possibly indirectly.
const VPBasicBlock *VPBlockBase::getExitBasicBlock() const {
  const VPBlockBase *Block = this;
  while (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getExit();
  return cast<VPBasicBlock>(Block);
}

VPBasicBlock *VPBlockBase::getExitBasicBlock() {
  VPBlockBase *Block = this;
  while (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getExit();
  return cast<VPBasicBlock>(Block);
}

VPBlockBase *VPBlockBase::getEnclosingBlockWithSuccessors() {
  if (!Successors.empty() || !Parent)
    return this;
  assert(Parent->getExit() == this &&
         "Block w/o successors not the exit of its parent.");
  return Parent->getEnclosingBlockWithSuccessors();
}

VPBlockBase *VPBlockBase::getEnclosingBlockWithPredecessors() {
  if (!Predecessors.empty() || !Parent)
    return this;
  assert(Parent->getEntry() == this &&
         "Block w/o predecessors not the entry of its parent.");
  return Parent->getEnclosingBlockWithPredecessors();
}

VPValue *VPBlockBase::getCondBit() {
  return CondBitUser.getSingleOperandOrNull();
}

const VPValue *VPBlockBase::getCondBit() const {
  return CondBitUser.getSingleOperandOrNull();
}

void VPBlockBase::setCondBit(VPValue *CV) { CondBitUser.resetSingleOpUser(CV); }

VPValue *VPBlockBase::getPredicate() {
  return PredicateUser.getSingleOperandOrNull();
}

const VPValue *VPBlockBase::getPredicate() const {
  return PredicateUser.getSingleOperandOrNull();
}

void VPBlockBase::setPredicate(VPValue *CV) {
  PredicateUser.resetSingleOpUser(CV);
}

void VPBlockBase::deleteCFG(VPBlockBase *Entry) {
  SmallVector<VPBlockBase *, 8> Blocks(depth_first(Entry));

  for (VPBlockBase *Block : Blocks)
    delete Block;
}

VPBasicBlock::iterator VPBasicBlock::getFirstNonPhi() {
  iterator It = begin();
  while (It != end() && It->isPhi())
    It++;
  return It;
}

Value *VPTransformState::get(VPValue *Def, const VPIteration &Instance) {
  if (!Def->getDef())
    return Def->getLiveInIRValue();

  if (hasScalarValue(Def, Instance)) {
    return Data
        .PerPartScalars[Def][Instance.Part][Instance.Lane.mapToCacheIndex(VF)];
  }

  assert(hasVectorValue(Def, Instance.Part));
  auto *VecPart = Data.PerPartOutput[Def][Instance.Part];
  if (!VecPart->getType()->isVectorTy()) {
    assert(Instance.Lane.isFirstLane() && "cannot get lane > 0 for scalar");
    return VecPart;
  }
  // TODO: Cache created scalar values.
  Value *Lane = Instance.Lane.getAsRuntimeExpr(Builder, VF);
  auto *Extract = Builder.CreateExtractElement(VecPart, Lane);
  // set(Def, Extract, Instance);
  return Extract;
}

BasicBlock *VPTransformState::CFGState::getPreheaderBBFor(VPRecipeBase *R) {
  VPRegionBlock *LoopRegion = R->getParent()->getEnclosingLoopRegion();
  return VPBB2IRBB[LoopRegion->getPreheaderVPBB()];
}

BasicBlock *
VPBasicBlock::createEmptyBasicBlock(VPTransformState::CFGState &CFG) {
  // BB stands for IR BasicBlocks. VPBB stands for VPlan VPBasicBlocks.
  // Pred stands for Predecessor. Prev stands for Previous - last
  // visited/created.
  BasicBlock *PrevBB = CFG.PrevBB;
  BasicBlock *NewBB = BasicBlock::Create(PrevBB->getContext(), getName(),
                                         PrevBB->getParent(), CFG.ExitBB);
  LLVM_DEBUG(dbgs() << "LV: created " << NewBB->getName() << '\n');

  // Hook up the new basic block to its predecessors.
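  // Each hierarchical predecessor already has (or, in the VPlan-native path,
  // will later get) an IR basic block; the loop below patches that block's
  // terminator so it reaches NewBB. In rough terms there are three cases: an
  // unreachable or unconditional terminator is simply retargeted to NewBB; a
  // conditional terminator of a predecessor with two VPlan successors gets
  // its still-unset successor slot filled in; and a predecessor that is the
  // exiting block of a non-replicating region gets NewBB as its exit
  // successor and the region's header as its backedge successor.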
  for (VPBlockBase *PredVPBlock : getHierarchicalPredecessors()) {
    VPBasicBlock *PredVPBB = PredVPBlock->getExitBasicBlock();
    auto &PredVPSuccessors = PredVPBB->getSuccessors();
    BasicBlock *PredBB = CFG.VPBB2IRBB[PredVPBB];

    // In the outer loop vectorization scenario, the predecessor block may not
    // have been visited yet (backedge). Mark the VPBasicBlock for fixup at the
    // end of vectorization. We do not encounter this case in inner loop
    // vectorization as we start out by building a loop skeleton with the
    // vector loop header and latch blocks. As a result, we never enter this
    // function for the header block in the non VPlan-native path.
    if (!PredBB) {
      assert(EnableVPlanNativePath &&
             "Unexpected null predecessor in non VPlan-native path");
      CFG.VPBBsToFix.push_back(PredVPBB);
      continue;
    }

    assert(PredBB && "Predecessor basic-block not found building successor.");
    auto *PredBBTerminator = PredBB->getTerminator();
    LLVM_DEBUG(dbgs() << "LV: draw edge from " << PredBB->getName() << '\n');

    auto *TermBr = dyn_cast<BranchInst>(PredBBTerminator);
    if (isa<UnreachableInst>(PredBBTerminator) ||
        (TermBr && !TermBr->isConditional())) {
      assert(PredVPSuccessors.size() == 1 &&
             "Predecessor ending w/o branch must have single successor.");
      if (TermBr) {
        TermBr->setSuccessor(0, NewBB);
      } else {
        DebugLoc DL = PredBBTerminator->getDebugLoc();
        PredBBTerminator->eraseFromParent();
        auto *Br = BranchInst::Create(NewBB, PredBB);
        Br->setDebugLoc(DL);
      }
    } else {
      if (PredVPSuccessors.size() == 2) {
        unsigned idx = PredVPSuccessors.front() == this ? 0 : 1;
        assert(!PredBBTerminator->getSuccessor(idx) &&
               "Trying to reset an existing successor block.");
        PredBBTerminator->setSuccessor(idx, NewBB);
      } else {
        auto *Reg = dyn_cast<VPRegionBlock>(PredVPBB->getParent());
        assert(Reg && !Reg->isReplicator());
        assert(this == Reg->getSingleSuccessor());
        PredBBTerminator->setSuccessor(0, NewBB);
        PredBBTerminator->setSuccessor(
            1, CFG.VPBB2IRBB[Reg->getEntryBasicBlock()]);
      }
    }
  }
  return NewBB;
}

void VPBasicBlock::execute(VPTransformState *State) {
  bool Replica = State->Instance && !State->Instance->isFirstIteration();
  VPBasicBlock *PrevVPBB = State->CFG.PrevVPBB;
  VPBlockBase *SingleHPred = nullptr;
  BasicBlock *NewBB = State->CFG.PrevBB; // Reuse it if possible.

  auto IsNonReplicateR = [](VPBlockBase *BB) {
    auto *R = dyn_cast<VPRegionBlock>(BB);
    return R && !R->isReplicator();
  };

  // 1. Create an IR basic block, or reuse the last one if possible.
  // The last IR basic block is reused, as an optimization, in three cases:
  // A. the first VPBB reuses the loop pre-header BB - when PrevVPBB is null;
  // B. when the current VPBB has a single (hierarchical) predecessor which is
  //    PrevVPBB and the latter has a single (hierarchical) successor, both in
  //    the same non-replicator region; and
  // C. when the current VPBB is an entry of a region replica - where PrevVPBB
  //    is the exit of this region from a previous instance, or the predecessor
  //    of this region.
  if (PrevVPBB && /* A */
      !((SingleHPred = getSingleHierarchicalPredecessor()) &&
        SingleHPred->getExitBasicBlock() == PrevVPBB &&
        PrevVPBB->getSingleHierarchicalSuccessor() &&
        (SingleHPred->getParent() == getEnclosingLoopRegion() &&
         !IsNonReplicateR(SingleHPred))) && /* B */
      !(Replica && getPredecessors().empty())) { /* C */
    NewBB = createEmptyBasicBlock(State->CFG);
    State->Builder.SetInsertPoint(NewBB);
    // Temporarily terminate with unreachable until CFG is rewired.
    UnreachableInst *Terminator = State->Builder.CreateUnreachable();
    // Register NewBB in its loop. In innermost loops it's the same for all
    // BBs.
    if (State->CurrentVectorLoop)
      State->CurrentVectorLoop->addBasicBlockToLoop(NewBB, *State->LI);
    State->Builder.SetInsertPoint(Terminator);
    State->CFG.PrevBB = NewBB;
  }

  // 2. Fill the IR basic block with IR instructions.
  LLVM_DEBUG(dbgs() << "LV: vectorizing VPBB:" << getName()
                    << " in BB:" << NewBB->getName() << '\n');

  State->CFG.VPBB2IRBB[this] = NewBB;
  State->CFG.PrevVPBB = this;

  for (VPRecipeBase &Recipe : Recipes)
    Recipe.execute(*State);

  VPValue *CBV;
  if (EnableVPlanNativePath && (CBV = getCondBit())) {
    assert(CBV->getUnderlyingValue() &&
           "Unexpected null underlying value for condition bit");

    // Condition bit value in a VPBasicBlock is used as the branch selector. In
    // the VPlan-native path case, since all branches are uniform we generate a
    // branch instruction using the condition value from vector lane 0 and
    // dummy successors. The successors are fixed later when the successor
    // blocks are visited.
    Value *NewCond = State->get(CBV, {0, 0});

    // Replace the temporary unreachable terminator with the new conditional
    // branch.
    auto *CurrentTerminator = NewBB->getTerminator();
    assert(isa<UnreachableInst>(CurrentTerminator) &&
           "Expected to replace unreachable terminator with conditional "
           "branch.");
    auto *CondBr = BranchInst::Create(NewBB, nullptr, NewCond);
    CondBr->setSuccessor(0, nullptr);
    ReplaceInstWithInst(CurrentTerminator, CondBr);
  }

  LLVM_DEBUG(dbgs() << "LV: filled BB:" << *NewBB);
}

void VPBasicBlock::dropAllReferences(VPValue *NewValue) {
  for (VPRecipeBase &R : Recipes) {
    for (auto *Def : R.definedValues())
      Def->replaceAllUsesWith(NewValue);

    for (unsigned I = 0, E = R.getNumOperands(); I != E; I++)
      R.setOperand(I, NewValue);
  }
}

VPBasicBlock *VPBasicBlock::splitAt(iterator SplitAt) {
  assert((SplitAt == end() || SplitAt->getParent() == this) &&
         "can only split at a position in the same block");

  SmallVector<VPBlockBase *, 2> Succs(successors());
  // First, disconnect the current block from its successors.
  for (VPBlockBase *Succ : Succs)
    VPBlockUtils::disconnectBlocks(this, Succ);

  // Create new empty block after the block to split.
  auto *SplitBlock = new VPBasicBlock(getName() + ".split");
  VPBlockUtils::insertBlockAfter(SplitBlock, this);

  // Add successors for block to split to new block.
  for (VPBlockBase *Succ : Succs)
    VPBlockUtils::connectBlocks(SplitBlock, Succ);

  // Finally, move the recipes starting at SplitAt to new block.
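  // Note: moveBefore unlinks each recipe from this block's recipe list, which
  // would invalidate a plain iterator, so an early-increment range is used to
  // step past each recipe before it is moved.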
  for (VPRecipeBase &ToMove :
       make_early_inc_range(make_range(SplitAt, this->end())))
    ToMove.moveBefore(*SplitBlock, SplitBlock->end());

  return SplitBlock;
}

VPRegionBlock *VPBasicBlock::getEnclosingLoopRegion() {
  VPRegionBlock *P = getParent();
  if (P && P->isReplicator()) {
    P = P->getParent();
    assert(!cast<VPRegionBlock>(P)->isReplicator() &&
           "unexpected nested replicate regions");
  }
  return P;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPBlockBase::printSuccessors(raw_ostream &O, const Twine &Indent) const {
  if (getSuccessors().empty()) {
    O << Indent << "No successors\n";
  } else {
    O << Indent << "Successor(s): ";
    ListSeparator LS;
    for (auto *Succ : getSuccessors())
      O << LS << Succ->getName();
    O << '\n';
  }
}

void VPBasicBlock::print(raw_ostream &O, const Twine &Indent,
                         VPSlotTracker &SlotTracker) const {
  O << Indent << getName() << ":\n";
  if (const VPValue *Pred = getPredicate()) {
    O << Indent << "BlockPredicate:";
    Pred->printAsOperand(O, SlotTracker);
    if (const auto *PredInst = dyn_cast<VPInstruction>(Pred))
      O << " (" << PredInst->getParent()->getName() << ")";
    O << '\n';
  }

  auto RecipeIndent = Indent + "  ";
  for (const VPRecipeBase &Recipe : *this) {
    Recipe.print(O, RecipeIndent, SlotTracker);
    O << '\n';
  }

  printSuccessors(O, Indent);

  if (const VPValue *CBV = getCondBit()) {
    O << Indent << "CondBit: ";
    CBV->printAsOperand(O, SlotTracker);
    if (const auto *CBI = dyn_cast<VPInstruction>(CBV))
      O << " (" << CBI->getParent()->getName() << ")";
    O << '\n';
  }
}
#endif

void VPRegionBlock::dropAllReferences(VPValue *NewValue) {
  for (VPBlockBase *Block : depth_first(Entry))
    // Drop all references in VPBasicBlocks and replace all uses with
    // DummyValue.
    Block->dropAllReferences(NewValue);
}

void VPRegionBlock::execute(VPTransformState *State) {
  ReversePostOrderTraversal<VPBlockBase *> RPOT(Entry);

  if (!isReplicator()) {
    // Create and register the new vector loop.
    Loop *PrevLoop = State->CurrentVectorLoop;
    State->CurrentVectorLoop = State->LI->AllocateLoop();
    BasicBlock *VectorPH = State->CFG.VPBB2IRBB[getPreheaderVPBB()];
    Loop *ParentLoop = State->LI->getLoopFor(VectorPH);

    // Insert the new loop into the loop nest and register the new basic blocks
    // before calling any utilities such as SCEV that require valid LoopInfo.
    if (ParentLoop)
      ParentLoop->addChildLoop(State->CurrentVectorLoop);
    else
      State->LI->addTopLevelLoop(State->CurrentVectorLoop);

    // Visit the VPBlocks connected to "this", starting from it.
    for (VPBlockBase *Block : RPOT) {
      LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
      Block->execute(State);
    }

    State->CurrentVectorLoop = PrevLoop;
    return;
  }

  assert(!State->Instance && "Replicating a Region with non-null instance.");

  // Enter replicating mode.
  State->Instance = VPIteration(0, 0);

  for (unsigned Part = 0, UF = State->UF; Part < UF; ++Part) {
    State->Instance->Part = Part;
    assert(!State->VF.isScalable() && "VF is assumed to be non scalable.");
    for (unsigned Lane = 0, VF = State->VF.getKnownMinValue(); Lane < VF;
         ++Lane) {
      State->Instance->Lane = VPLane(Lane, VPLane::Kind::First);
      // Visit the VPBlocks connected to \p this, starting from it.
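      // Each (Part, Lane) pair executes the region body once, so the blocks
      // below are emitted UF * VF times in total; State->Instance tells the
      // recipes which scalar instance to generate.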
      for (VPBlockBase *Block : RPOT) {
        LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
        Block->execute(State);
      }
    }
  }

  // Exit replicating mode.
  State->Instance.reset();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPRegionBlock::print(raw_ostream &O, const Twine &Indent,
                          VPSlotTracker &SlotTracker) const {
  O << Indent << (isReplicator() ? "<xVFxUF> " : "<x1> ") << getName() << ": {";
  auto NewIndent = Indent + "  ";
  for (auto *BlockBase : depth_first(Entry)) {
    O << '\n';
    BlockBase->print(O, NewIndent, SlotTracker);
  }
  O << Indent << "}\n";

  printSuccessors(O, Indent);
}
#endif

bool VPRecipeBase::mayWriteToMemory() const {
  switch (getVPDefID()) {
  case VPWidenMemoryInstructionSC: {
    return cast<VPWidenMemoryInstructionRecipe>(this)->isStore();
  }
  case VPReplicateSC:
  case VPWidenCallSC:
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
        ->mayWriteToMemory();
  case VPBranchOnMaskSC:
    return false;
  case VPWidenIntOrFpInductionSC:
  case VPWidenCanonicalIVSC:
  case VPWidenPHISC:
  case VPBlendSC:
  case VPWidenSC:
  case VPWidenGEPSC:
  case VPReductionSC:
  case VPWidenSelectSC: {
    const Instruction *I =
        dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
    (void)I;
    assert((!I || !I->mayWriteToMemory()) &&
           "underlying instruction may write to memory");
    return false;
  }
  default:
    return true;
  }
}

bool VPRecipeBase::mayReadFromMemory() const {
  switch (getVPDefID()) {
  case VPWidenMemoryInstructionSC: {
    return !cast<VPWidenMemoryInstructionRecipe>(this)->isStore();
  }
  case VPReplicateSC:
  case VPWidenCallSC:
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
        ->mayReadFromMemory();
  case VPBranchOnMaskSC:
    return false;
  case VPWidenIntOrFpInductionSC:
  case VPWidenCanonicalIVSC:
  case VPWidenPHISC:
  case VPBlendSC:
  case VPWidenSC:
  case VPWidenGEPSC:
  case VPReductionSC:
  case VPWidenSelectSC: {
    const Instruction *I =
        dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
    (void)I;
    assert((!I || !I->mayReadFromMemory()) &&
           "underlying instruction may read from memory");
    return false;
  }
  default:
    return true;
  }
}

bool VPRecipeBase::mayHaveSideEffects() const {
  switch (getVPDefID()) {
  case VPBranchOnMaskSC:
    return false;
  case VPWidenIntOrFpInductionSC:
  case VPWidenPointerInductionSC:
  case VPWidenCanonicalIVSC:
  case VPWidenPHISC:
  case VPBlendSC:
  case VPWidenSC:
  case VPWidenGEPSC:
  case VPReductionSC:
  case VPWidenSelectSC:
  case VPScalarIVStepsSC: {
    const Instruction *I =
        dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
    (void)I;
    assert((!I || !I->mayHaveSideEffects()) &&
           "underlying instruction has side-effects");
    return false;
  }
  case VPReplicateSC: {
    auto *R = cast<VPReplicateRecipe>(this);
    return R->getUnderlyingInstr()->mayHaveSideEffects();
  }
  default:
    return true;
  }
}

void VPRecipeBase::insertBefore(VPRecipeBase *InsertPos) {
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(InsertPos->getParent() &&
         "Insertion position not in any VPBasicBlock");
  Parent = InsertPos->getParent();
  Parent->getRecipeList().insert(InsertPos->getIterator(), this);
}

void VPRecipeBase::insertBefore(VPBasicBlock &BB,
                                iplist<VPRecipeBase>::iterator I) {
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(I == BB.end() || I->getParent() == &BB);
  Parent = &BB;
  BB.getRecipeList().insert(I, this);
}

void VPRecipeBase::insertAfter(VPRecipeBase *InsertPos) {
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(InsertPos->getParent() &&
         "Insertion position not in any VPBasicBlock");
  Parent = InsertPos->getParent();
  Parent->getRecipeList().insertAfter(InsertPos->getIterator(), this);
}

void VPRecipeBase::removeFromParent() {
  assert(getParent() && "Recipe not in any VPBasicBlock");
  getParent()->getRecipeList().remove(getIterator());
  Parent = nullptr;
}

iplist<VPRecipeBase>::iterator VPRecipeBase::eraseFromParent() {
  assert(getParent() && "Recipe not in any VPBasicBlock");
  return getParent()->getRecipeList().erase(getIterator());
}

void VPRecipeBase::moveAfter(VPRecipeBase *InsertPos) {
  removeFromParent();
  insertAfter(InsertPos);
}

void VPRecipeBase::moveBefore(VPBasicBlock &BB,
                              iplist<VPRecipeBase>::iterator I) {
  removeFromParent();
  insertBefore(BB, I);
}

void VPInstruction::generateInstruction(VPTransformState &State,
                                        unsigned Part) {
  IRBuilderBase &Builder = State.Builder;
  Builder.SetCurrentDebugLocation(DL);

  if (Instruction::isBinaryOp(getOpcode())) {
    Value *A = State.get(getOperand(0), Part);
    Value *B = State.get(getOperand(1), Part);
    Value *V = Builder.CreateBinOp((Instruction::BinaryOps)getOpcode(), A, B);
    State.set(this, V, Part);
    return;
  }

  switch (getOpcode()) {
  case VPInstruction::Not: {
    Value *A = State.get(getOperand(0), Part);
    Value *V = Builder.CreateNot(A);
    State.set(this, V, Part);
    break;
  }
  case VPInstruction::ICmpULE: {
    Value *IV = State.get(getOperand(0), Part);
    Value *TC = State.get(getOperand(1), Part);
    Value *V = Builder.CreateICmpULE(IV, TC);
    State.set(this, V, Part);
    break;
  }
  case Instruction::Select: {
    Value *Cond = State.get(getOperand(0), Part);
    Value *Op1 = State.get(getOperand(1), Part);
    Value *Op2 = State.get(getOperand(2), Part);
    Value *V = Builder.CreateSelect(Cond, Op1, Op2);
    State.set(this, V, Part);
    break;
  }
  case VPInstruction::ActiveLaneMask: {
    // Get first lane of vector induction variable.
    Value *VIVElem0 = State.get(getOperand(0), VPIteration(Part, 0));
    // Get the original loop tripcount.
    Value *ScalarTC = State.get(getOperand(1), Part);

    auto *Int1Ty = Type::getInt1Ty(Builder.getContext());
    auto *PredTy = VectorType::get(Int1Ty, State.VF);
    Instruction *Call = Builder.CreateIntrinsic(
        Intrinsic::get_active_lane_mask, {PredTy, ScalarTC->getType()},
        {VIVElem0, ScalarTC}, nullptr, "active.lane.mask");
    State.set(this, Call, Part);
    break;
  }
  case VPInstruction::FirstOrderRecurrenceSplice: {
    // Generate code to combine the previous and current values in vector v3.
    //
    //   vector.ph:
    //     v_init = vector(..., ..., ..., a[-1])
    //     br vector.body
    //
    //   vector.body
    //     i = phi [0, vector.ph], [i+4, vector.body]
    //     v1 = phi [v_init, vector.ph], [v2, vector.body]
    //     v2 = a[i, i+1, i+2, i+3];
    //     v3 = vector(v1(3), v2(0, 1, 2))

    // For the first part, use the recurrence phi (v1), otherwise v2.
    auto *V1 = State.get(getOperand(0), 0);
    Value *PartMinus1 = Part == 0 ? V1 : State.get(getOperand(1), Part - 1);
    if (!PartMinus1->getType()->isVectorTy()) {
      State.set(this, PartMinus1, Part);
    } else {
      Value *V2 = State.get(getOperand(1), Part);
      State.set(this, Builder.CreateVectorSplice(PartMinus1, V2, -1), Part);
    }
    break;
  }

  case VPInstruction::CanonicalIVIncrement:
  case VPInstruction::CanonicalIVIncrementNUW: {
    Value *Next = nullptr;
    if (Part == 0) {
      bool IsNUW = getOpcode() == VPInstruction::CanonicalIVIncrementNUW;
      auto *Phi = State.get(getOperand(0), 0);
      // The loop step is equal to the vectorization factor (num of SIMD
      // elements) times the unroll factor (num of SIMD instructions).
      Value *Step =
          createStepForVF(Builder, Phi->getType(), State.VF, State.UF);
      Next = Builder.CreateAdd(Phi, Step, "index.next", IsNUW, false);
    } else {
      Next = State.get(this, 0);
    }

    State.set(this, Next, Part);
    break;
  }
  case VPInstruction::BranchOnCount: {
    if (Part != 0)
      break;
    // First create the compare.
    Value *IV = State.get(getOperand(0), Part);
    Value *TC = State.get(getOperand(1), Part);
    Value *Cond = Builder.CreateICmpEQ(IV, TC);

    // Now create the branch.
    auto *Plan = getParent()->getPlan();
    VPRegionBlock *TopRegion = Plan->getVectorLoopRegion();
    VPBasicBlock *Header = TopRegion->getEntry()->getEntryBasicBlock();
    if (Header->empty()) {
      assert(EnableVPlanNativePath &&
             "empty entry block only expected in VPlanNativePath");
      Header = cast<VPBasicBlock>(Header->getSingleSuccessor());
    }
    // TODO: Once the exit block is modeled in VPlan, use it instead of going
    // through State.CFG.ExitBB.
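    // The conditional branch is emitted at the current insert point, i.e.
    // before the temporary unreachable terminator this block was created
    // with; that placeholder terminator is erased immediately afterwards.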
    BasicBlock *Exit = State.CFG.ExitBB;

    Builder.CreateCondBr(Cond, Exit, State.CFG.VPBB2IRBB[Header]);
    Builder.GetInsertBlock()->getTerminator()->eraseFromParent();
    break;
  }
  default:
    llvm_unreachable("Unsupported opcode for instruction");
  }
}

void VPInstruction::execute(VPTransformState &State) {
  assert(!State.Instance && "VPInstruction executing an Instance");
  IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
  State.Builder.setFastMathFlags(FMF);
  for (unsigned Part = 0; Part < State.UF; ++Part)
    generateInstruction(State, Part);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPInstruction::dump() const {
  VPSlotTracker SlotTracker(getParent()->getPlan());
  print(dbgs(), "", SlotTracker);
}

void VPInstruction::print(raw_ostream &O, const Twine &Indent,
                          VPSlotTracker &SlotTracker) const {
  O << Indent << "EMIT ";

  if (hasResult()) {
    printAsOperand(O, SlotTracker);
    O << " = ";
  }

  switch (getOpcode()) {
  case VPInstruction::Not:
    O << "not";
    break;
  case VPInstruction::ICmpULE:
    O << "icmp ule";
    break;
  case VPInstruction::SLPLoad:
    O << "combined load";
    break;
  case VPInstruction::SLPStore:
    O << "combined store";
    break;
  case VPInstruction::ActiveLaneMask:
    O << "active lane mask";
    break;
  case VPInstruction::FirstOrderRecurrenceSplice:
    O << "first-order splice";
    break;
  case VPInstruction::CanonicalIVIncrement:
    O << "VF * UF + ";
    break;
  case VPInstruction::CanonicalIVIncrementNUW:
    O << "VF * UF +(nuw) ";
    break;
  case VPInstruction::BranchOnCount:
    O << "branch-on-count ";
    break;
  default:
    O << Instruction::getOpcodeName(getOpcode());
  }

  O << FMF;

  for (const VPValue *Operand : operands()) {
    O << " ";
    Operand->printAsOperand(O, SlotTracker);
  }

  if (DL) {
    O << ", !dbg ";
    DL.print(O);
  }
}
#endif

void VPInstruction::setFastMathFlags(FastMathFlags FMFNew) {
  // Make sure the VPInstruction is a floating-point operation.
  assert((Opcode == Instruction::FAdd || Opcode == Instruction::FMul ||
          Opcode == Instruction::FNeg || Opcode == Instruction::FSub ||
          Opcode == Instruction::FDiv || Opcode == Instruction::FRem ||
          Opcode == Instruction::FCmp) &&
         "this op can't take fast-math flags");
  FMF = FMFNew;
}

void VPlan::prepareToExecute(Value *TripCountV, Value *VectorTripCountV,
                             Value *CanonicalIVStartValue,
                             VPTransformState &State) {
  // Check if the trip count is needed, and if so build it.
  if (TripCount && TripCount->getNumUsers()) {
    for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
      State.set(TripCount, TripCountV, Part);
  }

  // Check if the backedge taken count is needed, and if so build it.
  if (BackedgeTakenCount && BackedgeTakenCount->getNumUsers()) {
    IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
    auto *TCMO = Builder.CreateSub(TripCountV,
                                   ConstantInt::get(TripCountV->getType(), 1),
                                   "trip.count.minus.1");
    auto VF = State.VF;
    Value *VTCMO =
        VF.isScalar() ? TCMO
                      : Builder.CreateVectorSplat(VF, TCMO, "broadcast");
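    // For vector VFs the backedge-taken count is broadcast so it can be
    // compared lane-wise against a widened induction, e.g. when a header
    // mask is formed.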
    for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
      State.set(BackedgeTakenCount, VTCMO, Part);
  }

  for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
    State.set(&VectorTripCount, VectorTripCountV, Part);

  // When vectorizing the epilogue loop, the canonical induction start value
  // needs to be changed from zero to the value after the main vector loop.
  if (CanonicalIVStartValue) {
    VPValue *VPV = getOrAddExternalDef(CanonicalIVStartValue);
    auto *IV = getCanonicalIV();
    assert(all_of(IV->users(),
                  [](const VPUser *U) {
                    if (isa<VPScalarIVStepsRecipe>(U))
                      return true;
                    auto *VPI = cast<VPInstruction>(U);
                    return VPI->getOpcode() ==
                               VPInstruction::CanonicalIVIncrement ||
                           VPI->getOpcode() ==
                               VPInstruction::CanonicalIVIncrementNUW;
                  }) &&
           "the canonical IV should only be used by its increments or "
           "ScalarIVSteps when resetting the start value");
    IV->setOperand(0, VPV);
  }
}

/// Generate the code inside the preheader and body of the vectorized loop.
/// Assumes a single pre-header basic-block was created for this. Introduce
/// additional basic-blocks as needed, and fill them all.
void VPlan::execute(VPTransformState *State) {
  // Set the reverse mapping from VPValues to Values for code generation.
  for (auto &Entry : Value2VPValue)
    State->VPValue2Value[Entry.second] = Entry.first;

  // Initialize CFG state.
  State->CFG.PrevVPBB = nullptr;
  State->CFG.ExitBB = State->CFG.PrevBB->getSingleSuccessor();
  BasicBlock *VectorPreHeader = State->CFG.PrevBB;
  State->Builder.SetInsertPoint(VectorPreHeader->getTerminator());

  // Generate code in the loop pre-header and body.
  for (VPBlockBase *Block : depth_first(Entry))
    Block->execute(State);

  // Setup branch terminator successors for VPBBs in VPBBsToFix based on
  // VPBB's successors.
  for (auto VPBB : State->CFG.VPBBsToFix) {
    assert(EnableVPlanNativePath &&
           "Unexpected VPBBsToFix in non VPlan-native path");
    BasicBlock *BB = State->CFG.VPBB2IRBB[VPBB];
    assert(BB && "Unexpected null basic block for VPBB");

    unsigned Idx = 0;
    auto *BBTerminator = BB->getTerminator();

    for (VPBlockBase *SuccVPBlock : VPBB->getHierarchicalSuccessors()) {
      VPBasicBlock *SuccVPBB = SuccVPBlock->getEntryBasicBlock();
      BBTerminator->setSuccessor(Idx, State->CFG.VPBB2IRBB[SuccVPBB]);
      ++Idx;
    }
  }

  BasicBlock *VectorLatchBB = State->CFG.PrevBB;

  // Fix the latch value of canonical, reduction and first-order recurrence
  // phis in the vector loop.
  VPBasicBlock *Header = getVectorLoopRegion()->getEntryBasicBlock();
  for (VPRecipeBase &R : Header->phis()) {
    // Skip phi-like recipes that generate their backedge values themselves.
    if (isa<VPWidenPHIRecipe>(&R))
      continue;

    if (isa<VPWidenPointerInductionRecipe>(&R) ||
        isa<VPWidenIntOrFpInductionRecipe>(&R)) {
      PHINode *Phi = nullptr;
      if (isa<VPWidenIntOrFpInductionRecipe>(&R)) {
        Phi = cast<PHINode>(State->get(R.getVPSingleValue(), 0));
      } else {
        auto *WidenPhi = cast<VPWidenPointerInductionRecipe>(&R);
        // TODO: Split off the case that all users of a pointer phi are scalar
        // from the VPWidenPointerInductionRecipe.
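        // If every user of the pointer induction only uses its scalar values,
        // no per-part vector GEP was generated for it and there is nothing to
        // fix up here.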
        if (all_of(WidenPhi->users(), [WidenPhi](const VPUser *U) {
              return cast<VPRecipeBase>(U)->usesScalars(WidenPhi);
            }))
          continue;

        auto *GEP = cast<GetElementPtrInst>(State->get(WidenPhi, 0));
        Phi = cast<PHINode>(GEP->getPointerOperand());
      }

      Phi->setIncomingBlock(1, VectorLatchBB);

      // Move the last step to the end of the latch block. This ensures
      // consistent placement of all induction updates.
      Instruction *Inc = cast<Instruction>(Phi->getIncomingValue(1));
      Inc->moveBefore(VectorLatchBB->getTerminator()->getPrevNode());
      continue;
    }

    auto *PhiR = cast<VPHeaderPHIRecipe>(&R);
    // For canonical IV, first-order recurrences and in-order reduction phis,
    // only a single part is generated, which provides the last part from the
    // previous iteration. For non-ordered reductions all UF parts are
    // generated.
    bool SinglePartNeeded = isa<VPCanonicalIVPHIRecipe>(PhiR) ||
                            isa<VPFirstOrderRecurrencePHIRecipe>(PhiR) ||
                            cast<VPReductionPHIRecipe>(PhiR)->isOrdered();
    unsigned LastPartForNewPhi = SinglePartNeeded ? 1 : State->UF;

    for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
      Value *Phi = State->get(PhiR, Part);
      Value *Val = State->get(PhiR->getBackedgeValue(),
                              SinglePartNeeded ? State->UF - 1 : Part);
      cast<PHINode>(Phi)->addIncoming(Val, VectorLatchBB);
    }
  }

  // We do not attempt to preserve DT for outer loop vectorization currently.
  if (!EnableVPlanNativePath) {
    BasicBlock *VectorHeaderBB = State->CFG.VPBB2IRBB[Header];
    State->DT->addNewBlock(VectorHeaderBB, VectorPreHeader);
    updateDominatorTree(State->DT, VectorHeaderBB, VectorLatchBB,
                        State->CFG.ExitBB);
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD
void VPlan::print(raw_ostream &O) const {
  VPSlotTracker SlotTracker(this);

  O << "VPlan '" << Name << "' {";

  if (VectorTripCount.getNumUsers() > 0) {
    O << "\nLive-in ";
    VectorTripCount.printAsOperand(O, SlotTracker);
    O << " = vector-trip-count\n";
  }

  if (BackedgeTakenCount && BackedgeTakenCount->getNumUsers()) {
    O << "\nLive-in ";
    BackedgeTakenCount->printAsOperand(O, SlotTracker);
    O << " = backedge-taken count\n";
  }

  for (const VPBlockBase *Block : depth_first(getEntry())) {
    O << '\n';
    Block->print(O, "", SlotTracker);
  }
  O << "}\n";
}

LLVM_DUMP_METHOD
void VPlan::printDOT(raw_ostream &O) const {
  VPlanPrinter Printer(O, *this);
  Printer.dump();
}

LLVM_DUMP_METHOD
void VPlan::dump() const { print(dbgs()); }
#endif

void VPlan::updateDominatorTree(DominatorTree *DT, BasicBlock *LoopHeaderBB,
                                BasicBlock *LoopLatchBB,
                                BasicBlock *LoopExitBB) {
  // The vector body may be more than a single basic-block by this point.
  // Update the dominator tree information inside the vector body by
  // propagating it from header to latch, expecting only triangular
  // control-flow, if any.
  BasicBlock *PostDomSucc = nullptr;
  for (auto *BB = LoopHeaderBB; BB != LoopLatchBB; BB = PostDomSucc) {
    // Get the list of successors of this block.
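    // Each block is expected to have either a single successor, which it then
    // immediately dominates, or two successors forming a triangle: the
    // "interim" successor branches straight to the "post-dominating"
    // successor, and both are dominated by BB.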
    std::vector<BasicBlock *> Succs(succ_begin(BB), succ_end(BB));
    assert(Succs.size() <= 2 &&
           "Basic block in vector loop has more than 2 successors.");
    PostDomSucc = Succs[0];
    if (Succs.size() == 1) {
      assert(PostDomSucc->getSinglePredecessor() &&
             "PostDom successor has more than one predecessor.");
      DT->addNewBlock(PostDomSucc, BB);
      continue;
    }
    BasicBlock *InterimSucc = Succs[1];
    if (PostDomSucc->getSingleSuccessor() == InterimSucc) {
      PostDomSucc = Succs[1];
      InterimSucc = Succs[0];
    }
    assert(InterimSucc->getSingleSuccessor() == PostDomSucc &&
           "One successor of a basic block does not lead to the other.");
    assert(InterimSucc->getSinglePredecessor() &&
           "Interim successor has more than one predecessor.");
    assert(PostDomSucc->hasNPredecessors(2) &&
           "PostDom successor has more than two predecessors.");
    DT->addNewBlock(InterimSucc, BB);
    DT->addNewBlock(PostDomSucc, BB);
  }
  // Latch block is a new dominator for the loop exit.
  DT->changeImmediateDominator(LoopExitBB, LoopLatchBB);
  assert(DT->verify(DominatorTree::VerificationLevel::Fast));
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
Twine VPlanPrinter::getUID(const VPBlockBase *Block) {
  return (isa<VPRegionBlock>(Block) ? "cluster_N" : "N") +
         Twine(getOrCreateBID(Block));
}

Twine VPlanPrinter::getOrCreateName(const VPBlockBase *Block) {
  const std::string &Name = Block->getName();
  if (!Name.empty())
    return Name;
  return "VPB" + Twine(getOrCreateBID(Block));
}

void VPlanPrinter::dump() {
  Depth = 1;
  bumpIndent(0);
  OS << "digraph VPlan {\n";
  OS << "graph [labelloc=t, fontsize=30; label=\"Vectorization Plan";
  if (!Plan.getName().empty())
    OS << "\\n" << DOT::EscapeString(Plan.getName());
  if (Plan.BackedgeTakenCount) {
    OS << ", where:\\n";
    Plan.BackedgeTakenCount->print(OS, SlotTracker);
    OS << " := BackedgeTakenCount";
  }
  OS << "\"]\n";
  OS << "node [shape=rect, fontname=Courier, fontsize=30]\n";
  OS << "edge [fontname=Courier, fontsize=30]\n";
  OS << "compound=true\n";

  for (const VPBlockBase *Block : depth_first(Plan.getEntry()))
    dumpBlock(Block);

  OS << "}\n";
}

void VPlanPrinter::dumpBlock(const VPBlockBase *Block) {
  if (const VPBasicBlock *BasicBlock = dyn_cast<VPBasicBlock>(Block))
    dumpBasicBlock(BasicBlock);
  else if (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    dumpRegion(Region);
  else
    llvm_unreachable("Unsupported kind of VPBlock.");
}

void VPlanPrinter::drawEdge(const VPBlockBase *From, const VPBlockBase *To,
                            bool Hidden, const Twine &Label) {
  // Due to "dot" we print an edge between two regions as an edge between the
  // exit basic block and the entry basic block of the respective regions.
  const VPBlockBase *Tail = From->getExitBasicBlock();
  const VPBlockBase *Head = To->getEntryBasicBlock();
  OS << Indent << getUID(Tail) << " -> " << getUID(Head);
  OS << " [ label=\"" << Label << '\"';
  if (Tail != From)
    OS << " ltail=" << getUID(From);
  if (Head != To)
    OS << " lhead=" << getUID(To);
  if (Hidden)
    OS << "; splines=none";
  OS << "]\n";
}

void VPlanPrinter::dumpEdges(const VPBlockBase *Block) {
  auto &Successors = Block->getSuccessors();
  if (Successors.size() == 1)
    drawEdge(Block, Successors.front(), false, "");
  else if (Successors.size() == 2) {
    drawEdge(Block, Successors.front(), false, "T");
    drawEdge(Block, Successors.back(), false, "F");
  } else {
    unsigned SuccessorNumber = 0;
    for (auto *Successor : Successors)
      drawEdge(Block, Successor, false, Twine(SuccessorNumber++));
  }
}

void VPlanPrinter::dumpBasicBlock(const VPBasicBlock *BasicBlock) {
  // Implement dot-formatted dump by performing plain-text dump into the
  // temporary storage followed by some post-processing.
  OS << Indent << getUID(BasicBlock) << " [label =\n";
  bumpIndent(1);
  std::string Str;
  raw_string_ostream SS(Str);
  // Use no indentation as we need to wrap the lines into quotes ourselves.
  BasicBlock->print(SS, "", SlotTracker);

  // We need to process each line of the output separately, so split
  // single-string plain-text dump.
  SmallVector<StringRef, 0> Lines;
  StringRef(Str).rtrim('\n').split(Lines, "\n");

  auto EmitLine = [&](StringRef Line, StringRef Suffix) {
    OS << Indent << '"' << DOT::EscapeString(Line.str()) << "\\l\"" << Suffix;
  };

  // Don't need the "+" after the last line.
  for (auto Line : make_range(Lines.begin(), Lines.end() - 1))
    EmitLine(Line, " +\n");
  EmitLine(Lines.back(), "\n");

  bumpIndent(-1);
  OS << Indent << "]\n";

  dumpEdges(BasicBlock);
}

void VPlanPrinter::dumpRegion(const VPRegionBlock *Region) {
  OS << Indent << "subgraph " << getUID(Region) << " {\n";
  bumpIndent(1);
  OS << Indent << "fontname=Courier\n"
     << Indent << "label=\""
     << DOT::EscapeString(Region->isReplicator() ? "<xVFxUF> " : "<x1> ")
     << DOT::EscapeString(Region->getName()) << "\"\n";
  // Dump the blocks of the region.
  assert(Region->getEntry() && "Region contains no inner blocks.");
  for (const VPBlockBase *Block : depth_first(Region->getEntry()))
    dumpBlock(Block);
  bumpIndent(-1);
  OS << Indent << "}\n";
  dumpEdges(Region);
}

void VPlanIngredient::print(raw_ostream &O) const {
  if (auto *Inst = dyn_cast<Instruction>(V)) {
    if (!Inst->getType()->isVoidTy()) {
      Inst->printAsOperand(O, false);
      O << " = ";
    }
    O << Inst->getOpcodeName() << " ";
    unsigned E = Inst->getNumOperands();
    if (E > 0) {
      Inst->getOperand(0)->printAsOperand(O, false);
      for (unsigned I = 1; I < E; ++I)
        Inst->getOperand(I)->printAsOperand(O << ", ", false);
    }
  } else // !Inst
    V->printAsOperand(O, false);
}

void VPWidenCallRecipe::print(raw_ostream &O, const Twine &Indent,
                              VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-CALL ";

  auto *CI = cast<CallInst>(getUnderlyingInstr());
  if (CI->getType()->isVoidTy())
    O << "void ";
  else {
    printAsOperand(O, SlotTracker);
    O << " = ";
  }

  O << "call @" << CI->getCalledFunction()->getName() << "(";
  printOperands(O, SlotTracker);
  O << ")";
}

void VPWidenSelectRecipe::print(raw_ostream &O, const Twine &Indent,
                                VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-SELECT ";
  printAsOperand(O, SlotTracker);
  O << " = select ";
  getOperand(0)->printAsOperand(O, SlotTracker);
  O << ", ";
  getOperand(1)->printAsOperand(O, SlotTracker);
  O << ", ";
  getOperand(2)->printAsOperand(O, SlotTracker);
  O << (InvariantCond ? " (condition is loop invariant)" : "");
}

void VPWidenRecipe::print(raw_ostream &O, const Twine &Indent,
                          VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN ";
  printAsOperand(O, SlotTracker);
  O << " = " << getUnderlyingInstr()->getOpcodeName() << " ";
  printOperands(O, SlotTracker);
}

void VPWidenIntOrFpInductionRecipe::print(raw_ostream &O, const Twine &Indent,
                                          VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-INDUCTION";
  if (getTruncInst()) {
    O << "\\l\"";
    O << " +\n" << Indent << "\" " << VPlanIngredient(IV) << "\\l\"";
    O << " +\n" << Indent << "\" ";
    getVPValue(0)->printAsOperand(O, SlotTracker);
  } else
    O << " " << VPlanIngredient(IV);

  O << ", ";
  getStepValue()->printAsOperand(O, SlotTracker);
}

void VPWidenPointerInductionRecipe::print(raw_ostream &O, const Twine &Indent,
                                          VPSlotTracker &SlotTracker) const {
  O << Indent << "EMIT ";
  printAsOperand(O, SlotTracker);
  O << " = WIDEN-POINTER-INDUCTION ";
  getStartValue()->printAsOperand(O, SlotTracker);
  O << ", " << *IndDesc.getStep();
}

#endif

bool VPWidenIntOrFpInductionRecipe::isCanonical() const {
  auto *StartC = dyn_cast<ConstantInt>(getStartValue()->getLiveInIRValue());
  auto *StepC = dyn_cast<SCEVConstant>(getInductionDescriptor().getStep());
  return StartC && StartC->isZero() && StepC && StepC->isOne();
}

VPCanonicalIVPHIRecipe *VPScalarIVStepsRecipe::getCanonicalIV() const {
  return cast<VPCanonicalIVPHIRecipe>(getOperand(0));
}

bool VPScalarIVStepsRecipe::isCanonical() const {
  auto *CanIV = getCanonicalIV();
  // The start value of the steps-recipe must match the start value of the
  // canonical induction and it must step by 1.
  if (CanIV->getStartValue() != getStartValue())
    return false;
  auto *StepVPV = getStepValue();
  if (StepVPV->getDef())
    return false;
  auto *StepC = dyn_cast_or_null<ConstantInt>(StepVPV->getLiveInIRValue());
  return StepC && StepC->isOne();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPScalarIVStepsRecipe::print(raw_ostream &O, const Twine &Indent,
                                  VPSlotTracker &SlotTracker) const {
  O << Indent;
  printAsOperand(O, SlotTracker);
  O << Indent << "= SCALAR-STEPS ";
  printOperands(O, SlotTracker);
}

void VPWidenGEPRecipe::print(raw_ostream &O, const Twine &Indent,
                             VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-GEP ";
  O << (IsPtrLoopInvariant ? "Inv" : "Var");
  size_t IndicesNumber = IsIndexLoopInvariant.size();
  for (size_t I = 0; I < IndicesNumber; ++I)
    O << "[" << (IsIndexLoopInvariant[I] ? "Inv" : "Var") << "]";

  O << " ";
  printAsOperand(O, SlotTracker);
  O << " = getelementptr ";
  printOperands(O, SlotTracker);
}

void VPWidenPHIRecipe::print(raw_ostream &O, const Twine &Indent,
                             VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-PHI ";

  auto *OriginalPhi = cast<PHINode>(getUnderlyingValue());
  // Unless all incoming values are modeled in VPlan print the original PHI
  // directly.
  // TODO: Remove once all VPWidenPHIRecipe instances keep all relevant
  // incoming values as VPValues.
  if (getNumOperands() != OriginalPhi->getNumOperands()) {
    O << VPlanIngredient(OriginalPhi);
    return;
  }

  printAsOperand(O, SlotTracker);
  O << " = phi ";
  printOperands(O, SlotTracker);
}

void VPBlendRecipe::print(raw_ostream &O, const Twine &Indent,
                          VPSlotTracker &SlotTracker) const {
  O << Indent << "BLEND ";
  Phi->printAsOperand(O, false);
  O << " =";
  if (getNumIncomingValues() == 1) {
    // Not a User of any mask: not really blending, this is a
    // single-predecessor phi.
    O << " ";
    getIncomingValue(0)->printAsOperand(O, SlotTracker);
  } else {
    for (unsigned I = 0, E = getNumIncomingValues(); I < E; ++I) {
      O << " ";
      getIncomingValue(I)->printAsOperand(O, SlotTracker);
      O << "/";
      getMask(I)->printAsOperand(O, SlotTracker);
    }
  }
}

void VPReductionRecipe::print(raw_ostream &O, const Twine &Indent,
                              VPSlotTracker &SlotTracker) const {
  O << Indent << "REDUCE ";
  printAsOperand(O, SlotTracker);
  O << " = ";
  getChainOp()->printAsOperand(O, SlotTracker);
  O << " +";
  if (isa<FPMathOperator>(getUnderlyingInstr()))
    O << getUnderlyingInstr()->getFastMathFlags();
  O << " reduce." << Instruction::getOpcodeName(RdxDesc->getOpcode()) << " (";
  getVecOp()->printAsOperand(O, SlotTracker);
  if (getCondOp()) {
    O << ", ";
    getCondOp()->printAsOperand(O, SlotTracker);
  }
  O << ")";
}

void VPReplicateRecipe::print(raw_ostream &O, const Twine &Indent,
                              VPSlotTracker &SlotTracker) const {
  O << Indent << (IsUniform ? "CLONE " : "REPLICATE ");

  if (!getUnderlyingInstr()->getType()->isVoidTy()) {
    printAsOperand(O, SlotTracker);
    O << " = ";
  }
  O << Instruction::getOpcodeName(getUnderlyingInstr()->getOpcode()) << " ";
  printOperands(O, SlotTracker);

  if (AlsoPack)
    O << " (S->V)";
}

void VPPredInstPHIRecipe::print(raw_ostream &O, const Twine &Indent,
                                VPSlotTracker &SlotTracker) const {
  O << Indent << "PHI-PREDICATED-INSTRUCTION ";
  printAsOperand(O, SlotTracker);
  O << " = ";
  printOperands(O, SlotTracker);
}

void VPWidenMemoryInstructionRecipe::print(raw_ostream &O, const Twine &Indent,
                                           VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN ";

  if (!isStore()) {
    printAsOperand(O, SlotTracker);
    O << " = ";
  }
  O << Instruction::getOpcodeName(Ingredient.getOpcode()) << " ";

  printOperands(O, SlotTracker);
}
#endif

void VPCanonicalIVPHIRecipe::execute(VPTransformState &State) {
  Value *Start = getStartValue()->getLiveInIRValue();
  PHINode *EntryPart = PHINode::Create(
      Start->getType(), 2, "index", &*State.CFG.PrevBB->getFirstInsertionPt());

  BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
  EntryPart->addIncoming(Start, VectorPH);
  EntryPart->setDebugLoc(DL);
  for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
    State.set(this, EntryPart, Part);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPCanonicalIVPHIRecipe::print(raw_ostream &O, const Twine &Indent,
                                   VPSlotTracker &SlotTracker) const {
  O << Indent << "EMIT ";
  printAsOperand(O, SlotTracker);
  O << " = CANONICAL-INDUCTION";
}
#endif

void VPExpandSCEVRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "cannot be used in per-lane");
  const DataLayout &DL = State.CFG.PrevBB->getModule()->getDataLayout();
  SCEVExpander Exp(SE, DL, "induction");

  Value *Res = Exp.expandCodeFor(Expr, Expr->getType(),
                                 &*State.Builder.GetInsertPoint());

  for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
    State.set(this, Res, Part);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPExpandSCEVRecipe::print(raw_ostream &O, const Twine &Indent,
                               VPSlotTracker &SlotTracker) const {
  O << Indent << "EMIT ";
  getVPSingleValue()->printAsOperand(O, SlotTracker);
  O << " = EXPAND SCEV " << *Expr;
}
#endif

void VPWidenCanonicalIVRecipe::execute(VPTransformState &State) {
  Value *CanonicalIV = State.get(getOperand(0), 0);
  Type *STy = CanonicalIV->getType();
  IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
  ElementCount VF = State.VF;
  Value *VStart = VF.isScalar()
                      ? CanonicalIV
                      : Builder.CreateVectorSplat(VF, CanonicalIV, "broadcast");
  for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part) {
    Value *VStep = createStepForVF(Builder, STy, VF, Part);
    if (VF.isVector()) {
      VStep = Builder.CreateVectorSplat(VF, VStep);
      VStep =
          Builder.CreateAdd(VStep, Builder.CreateStepVector(VStep->getType()));
    }
    Value *CanonicalVectorIV = Builder.CreateAdd(VStart, VStep, "vec.iv");
    State.set(this, CanonicalVectorIV, Part);
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPWidenCanonicalIVRecipe::print(raw_ostream &O, const Twine &Indent,
                                     VPSlotTracker &SlotTracker) const {
  O << Indent << "EMIT ";
  printAsOperand(O, SlotTracker);
  O << " = WIDEN-CANONICAL-INDUCTION ";
  printOperands(O, SlotTracker);
}
#endif

void VPFirstOrderRecurrencePHIRecipe::execute(VPTransformState &State) {
  auto &Builder = State.Builder;
  // Create a vector from the initial value.
  auto *VectorInit = getStartValue()->getLiveInIRValue();

  Type *VecTy = State.VF.isScalar()
                    ? VectorInit->getType()
                    : VectorType::get(VectorInit->getType(), State.VF);

  BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
  if (State.VF.isVector()) {
    auto *IdxTy = Builder.getInt32Ty();
    auto *One = ConstantInt::get(IdxTy, 1);
    IRBuilder<>::InsertPointGuard Guard(Builder);
    Builder.SetInsertPoint(VectorPH->getTerminator());
    auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, State.VF);
    auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
    VectorInit = Builder.CreateInsertElement(
        PoisonValue::get(VecTy), VectorInit, LastIdx, "vector.recur.init");
  }

  // Create a phi node for the new recurrence.
  PHINode *EntryPart = PHINode::Create(
      VecTy, 2, "vector.recur", &*State.CFG.PrevBB->getFirstInsertionPt());
  EntryPart->addIncoming(VectorInit, VectorPH);
  State.set(this, EntryPart, 0);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPFirstOrderRecurrencePHIRecipe::print(raw_ostream &O, const Twine &Indent,
                                            VPSlotTracker &SlotTracker) const {
  O << Indent << "FIRST-ORDER-RECURRENCE-PHI ";
  printAsOperand(O, SlotTracker);
  O << " = phi ";
  printOperands(O, SlotTracker);
}
#endif

void VPReductionPHIRecipe::execute(VPTransformState &State) {
  PHINode *PN = cast<PHINode>(getUnderlyingValue());
  auto &Builder = State.Builder;

  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. This is
  // stage #1: We create a new vector PHI node with no incoming edges. We'll use
  // this value when we vectorize all of the instructions that use the PHI.
  bool ScalarPHI = State.VF.isScalar() || IsInLoop;
  Type *VecTy =
      ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), State.VF);

  BasicBlock *HeaderBB = State.CFG.PrevBB;
  assert(State.CurrentVectorLoop->getHeader() == HeaderBB &&
         "recipe must be in the vector loop header");
  unsigned LastPartForNewPhi = isOrdered() ? 1 : State.UF;
  for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
    Value *EntryPart =
        PHINode::Create(VecTy, 2, "vec.phi", &*HeaderBB->getFirstInsertionPt());
    State.set(this, EntryPart, Part);
  }

  BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);

  // Reductions do not have to start at zero. They can start with
  // any loop invariant values.
  VPValue *StartVPV = getStartValue();
  Value *StartV = StartVPV->getLiveInIRValue();

  Value *Iden = nullptr;
  RecurKind RK = RdxDesc.getRecurrenceKind();
  if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK) ||
      RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK)) {
    // MinMax reductions have the start value as their identity.
    if (ScalarPHI) {
      Iden = StartV;
    } else {
      IRBuilderBase::InsertPointGuard IPBuilder(Builder);
      Builder.SetInsertPoint(VectorPH->getTerminator());
      StartV = Iden =
          Builder.CreateVectorSplat(State.VF, StartV, "minmax.ident");
    }
  } else {
    Iden = RdxDesc.getRecurrenceIdentity(RK, VecTy->getScalarType(),
                                         RdxDesc.getFastMathFlags());

    if (!ScalarPHI) {
      Iden = Builder.CreateVectorSplat(State.VF, Iden);
      IRBuilderBase::InsertPointGuard IPBuilder(Builder);
      Builder.SetInsertPoint(VectorPH->getTerminator());
      Constant *Zero = Builder.getInt32(0);
      StartV = Builder.CreateInsertElement(Iden, StartV, Zero);
    }
  }

  for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
    Value *EntryPart = State.get(this, Part);
    // Make sure to add the reduction start value only to the
    // first unroll part.
    Value *StartVal = (Part == 0) ? StartV : Iden;
    cast<PHINode>(EntryPart)->addIncoming(StartVal, VectorPH);
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPReductionPHIRecipe::print(raw_ostream &O, const Twine &Indent,
                                 VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-REDUCTION-PHI ";

  printAsOperand(O, SlotTracker);
  O << " = phi ";
  printOperands(O, SlotTracker);
}
#endif

template void DomTreeBuilder::Calculate<VPDominatorTree>(VPDominatorTree &DT);

void VPValue::replaceAllUsesWith(VPValue *New) {
  for (unsigned J = 0; J < getNumUsers();) {
    VPUser *User = Users[J];
    unsigned NumUsers = getNumUsers();
    for (unsigned I = 0, E = User->getNumOperands(); I < E; ++I)
      if (User->getOperand(I) == this)
        User->setOperand(I, New);
    // If a user got removed after updating the current user, the next user to
    // update will be moved to the current position, so we only need to
    // increment the index if the number of users did not change.
    if (NumUsers == getNumUsers())
      J++;
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPValue::printAsOperand(raw_ostream &OS, VPSlotTracker &Tracker) const {
  if (const Value *UV = getUnderlyingValue()) {
    OS << "ir<";
    UV->printAsOperand(OS, false);
    OS << ">";
    return;
  }

  unsigned Slot = Tracker.getSlot(this);
  if (Slot == unsigned(-1))
    OS << "<badref>";
  else
    OS << "vp<%" << Tracker.getSlot(this) << ">";
}

void VPUser::printOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const {
  interleaveComma(operands(), O, [&O, &SlotTracker](VPValue *Op) {
    Op->printAsOperand(O, SlotTracker);
  });
}
#endif

void VPInterleavedAccessInfo::visitRegion(VPRegionBlock *Region,
                                          Old2NewTy &Old2New,
                                          InterleavedAccessInfo &IAI) {
  ReversePostOrderTraversal<VPBlockBase *> RPOT(Region->getEntry());
  for (VPBlockBase *Base : RPOT) {
    visitBlock(Base, Old2New, IAI);
  }
}

void VPInterleavedAccessInfo::visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
                                         InterleavedAccessInfo &IAI) {
  if (VPBasicBlock *VPBB = dyn_cast<VPBasicBlock>(Block)) {
    for (VPRecipeBase &VPI : *VPBB) {
      if (isa<VPHeaderPHIRecipe>(&VPI))
        continue;
      assert(isa<VPInstruction>(&VPI) && "Can only handle VPInstructions");
      auto *VPInst = cast<VPInstruction>(&VPI);
      auto *Inst = cast<Instruction>(VPInst->getUnderlyingValue());
      auto *IG = IAI.getInterleaveGroup(Inst);
      if (!IG)
        continue;

      auto NewIGIter = Old2New.find(IG);
      if (NewIGIter == Old2New.end())
        Old2New[IG] = new InterleaveGroup<VPInstruction>(
            IG->getFactor(), IG->isReverse(), IG->getAlign());

      if (Inst == IG->getInsertPos())
        Old2New[IG]->setInsertPos(VPInst);

      InterleaveGroupMap[VPInst] = Old2New[IG];
      InterleaveGroupMap[VPInst]->insertMember(
          VPInst, IG->getIndex(Inst),
          Align(IG->isReverse() ? (-1) * int(IG->getFactor())
                                : IG->getFactor()));
    }
  } else if (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    visitRegion(Region, Old2New, IAI);
  else
    llvm_unreachable("Unsupported kind of VPBlock.");
}

VPInterleavedAccessInfo::VPInterleavedAccessInfo(VPlan &Plan,
                                                 InterleavedAccessInfo &IAI) {
  Old2NewTy Old2New;
  visitRegion(Plan.getVectorLoopRegion(), Old2New, IAI);
}

void VPSlotTracker::assignSlot(const VPValue *V) {
  assert(Slots.find(V) == Slots.end() && "VPValue already has a slot!");
  Slots[V] = NextSlot++;
}

void VPSlotTracker::assignSlots(const VPlan &Plan) {

  for (const auto &P : Plan.VPExternalDefs)
    assignSlot(P.second);

  assignSlot(&Plan.VectorTripCount);
  if (Plan.BackedgeTakenCount)
    assignSlot(Plan.BackedgeTakenCount);

  ReversePostOrderTraversal<
      VPBlockRecursiveTraversalWrapper<const VPBlockBase *>>
      RPOT(VPBlockRecursiveTraversalWrapper<const VPBlockBase *>(
          Plan.getEntry()));
  for (const VPBasicBlock *VPBB :
       VPBlockUtils::blocksOnly<const VPBasicBlock>(RPOT))
    for (const VPRecipeBase &Recipe : *VPBB)
      for (VPValue *Def : Recipe.definedValues())
        assignSlot(Def);
}

bool vputils::onlyFirstLaneUsed(VPValue *Def) {
  return all_of(Def->users(), [Def](VPUser *U) {
    return cast<VPRecipeBase>(U)->onlyFirstLaneUsed(Def);
  });
}

VPValue *vputils::getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr,
                                                ScalarEvolution &SE) {
  if (auto *E = dyn_cast<SCEVConstant>(Expr))
    return Plan.getOrAddExternalDef(E->getValue());
  if (auto *E = dyn_cast<SCEVUnknown>(Expr))
    return Plan.getOrAddExternalDef(E->getValue());

  VPBasicBlock *Preheader = Plan.getEntry()->getEntryBasicBlock();
  VPValue *Step = new VPExpandSCEVRecipe(Expr, SE);
  Preheader->appendRecipe(cast<VPRecipeBase>(Step->getDef()));
  return Step;
}