//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
// "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#ifdef EXPENSIVE_CHECKS
#include "llvm/IR/Verifier.h"
#endif
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
"llvm/Transforms/Utils/InjectTLIMappings.h" 88 #include "llvm/Transforms/Utils/Local.h" 89 #include "llvm/Transforms/Utils/LoopUtils.h" 90 #include <algorithm> 91 #include <cassert> 92 #include <cstdint> 93 #include <iterator> 94 #include <memory> 95 #include <optional> 96 #include <set> 97 #include <string> 98 #include <tuple> 99 #include <utility> 100 101 using namespace llvm; 102 using namespace llvm::PatternMatch; 103 using namespace slpvectorizer; 104 105 #define SV_NAME "slp-vectorizer" 106 #define DEBUG_TYPE "SLP" 107 108 STATISTIC(NumVectorInstructions, "Number of vector instructions generated"); 109 110 static cl::opt<bool> 111 RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden, 112 cl::desc("Run the SLP vectorization passes")); 113 114 static cl::opt<int> 115 SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden, 116 cl::desc("Only vectorize if you gain more than this " 117 "number ")); 118 119 static cl::opt<bool> 120 ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden, 121 cl::desc("Attempt to vectorize horizontal reductions")); 122 123 static cl::opt<bool> ShouldStartVectorizeHorAtStore( 124 "slp-vectorize-hor-store", cl::init(false), cl::Hidden, 125 cl::desc( 126 "Attempt to vectorize horizontal reductions feeding into a store")); 127 128 // NOTE: If AllowHorRdxIdenityOptimization is true, the optimization will run 129 // even if we match a reduction but do not vectorize in the end. 130 static cl::opt<bool> AllowHorRdxIdenityOptimization( 131 "slp-optimize-identity-hor-reduction-ops", cl::init(true), cl::Hidden, 132 cl::desc("Allow optimization of original scalar identity operations on " 133 "matched horizontal reductions.")); 134 135 static cl::opt<int> 136 MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden, 137 cl::desc("Attempt to vectorize for this register size in bits")); 138 139 static cl::opt<unsigned> 140 MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden, 141 cl::desc("Maximum SLP vectorization factor (0=unlimited)")); 142 143 /// Limits the size of scheduling regions in a block. 144 /// It avoid long compile times for _very_ large blocks where vector 145 /// instructions are spread over a wide range. 146 /// This limit is way higher than needed by real-world functions. 147 static cl::opt<int> 148 ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden, 149 cl::desc("Limit the size of the SLP scheduling region per block")); 150 151 static cl::opt<int> MinVectorRegSizeOption( 152 "slp-min-reg-size", cl::init(128), cl::Hidden, 153 cl::desc("Attempt to vectorize for this register size in bits")); 154 155 static cl::opt<unsigned> RecursionMaxDepth( 156 "slp-recursion-max-depth", cl::init(12), cl::Hidden, 157 cl::desc("Limit the recursion depth when building a vectorizable tree")); 158 159 static cl::opt<unsigned> MinTreeSize( 160 "slp-min-tree-size", cl::init(3), cl::Hidden, 161 cl::desc("Only vectorize small trees if they are fully vectorizable")); 162 163 // The maximum depth that the look-ahead score heuristic will explore. 164 // The higher this value, the higher the compilation time overhead. 165 static cl::opt<int> LookAheadMaxDepth( 166 "slp-max-look-ahead-depth", cl::init(2), cl::Hidden, 167 cl::desc("The maximum look-ahead depth for operand reordering scores")); 168 169 // The maximum depth that the look-ahead score heuristic will explore 170 // when it probing among candidates for vectorization tree roots. 
// The higher this value, the higher the compilation time overhead, but unlike
// the similar limit for operands ordering this is less frequently used, hence
// the impact of a higher value is less noticeable.
static cl::opt<int> RootLookAheadMaxDepth(
    "slp-max-root-look-ahead-depth", cl::init(2), cl::Hidden,
    cl::desc("The maximum look-ahead depth for searching best rooting option"));

static cl::opt<bool>
    ViewSLPTree("view-slp-tree", cl::Hidden,
                cl::desc("Display the SLP trees with Graphviz"));

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// Predicate for the element types that the SLP vectorizer supports.
///
/// The most important things to filter here are types which are invalid in
/// LLVM vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns True if the value is a constant (but not globals/constant
/// expressions).
static bool isConstant(Value *V) {
  return isa<Constant>(V) && !isa<ConstantExpr, GlobalValue>(V);
}

/// Checks if \p V is one of vector-like instructions, i.e. undef,
/// insertelement/extractelement with constant indices for fixed vector type or
/// extractvalue instruction.
static bool isVectorLikeInstWithConstOps(Value *V) {
  if (!isa<InsertElementInst, ExtractElementInst>(V) &&
      !isa<ExtractValueInst, UndefValue>(V))
    return false;
  auto *I = dyn_cast<Instruction>(V);
  if (!I || isa<ExtractValueInst>(I))
    return true;
  if (!isa<FixedVectorType>(I->getOperand(0)->getType()))
    return false;
  if (isa<ExtractElementInst>(I))
    return isConstant(I->getOperand(1));
  assert(isa<InsertElementInst>(V) && "Expected only insertelement.");
  return isConstant(I->getOperand(2));
}

#if !defined(NDEBUG)
/// Print a short descriptor of the instruction bundle suitable for debug
/// output.
static std::string shortBundleName(ArrayRef<Value *> VL) {
  std::string Result;
  raw_string_ostream OS(Result);
  OS << "n=" << VL.size() << " [" << *VL.front() << ", ..]";
  OS.flush();
  return Result;
}
#endif

/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  if (all_of(VL, isVectorLikeInstWithConstOps))
    return true;

  BasicBlock *BB = I0->getParent();
  for (int I = 1, E = VL.size(); I < E; I++) {
    auto *II = dyn_cast<Instruction>(VL[I]);
    if (!II)
      return false;

    if (BB != II->getParent())
      return false;
  }
  return true;
}

/// \returns True if all of the values in \p VL are constants (but not
/// globals/constant expressions).
static bool allConstant(ArrayRef<Value *> VL) {
  // Constant expressions and globals can't be vectorized like normal
  // integer/FP constants.
  return all_of(VL, isConstant);
}

/// \returns True if all of the values in \p VL are identical or some of them
/// are UndefValue.
static bool isSplat(ArrayRef<Value *> VL) {
  Value *FirstNonUndef = nullptr;
  for (Value *V : VL) {
    if (isa<UndefValue>(V))
      continue;
    if (!FirstNonUndef) {
      FirstNonUndef = V;
      continue;
    }
    if (V != FirstNonUndef)
      return false;
  }
  return FirstNonUndef != nullptr;
}
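
// For illustration: isSplat({%x, undef, %x, %x}) returns true, while
// isSplat({undef, undef}) returns false because there is no non-undef value
// to splat.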

/// \returns True if \p I is commutative, handles CmpInst and BinaryOperator.
static bool isCommutative(Instruction *I) {
  if (auto *Cmp = dyn_cast<CmpInst>(I))
    return Cmp->isCommutative();
  if (auto *BO = dyn_cast<BinaryOperator>(I))
    return BO->isCommutative();
  // TODO: This should check for generic Instruction::isCommutative(), but
  // we need to confirm that the caller code correctly handles Intrinsics
  // for example (does not have 2 operands).
  return false;
}

/// \returns inserting index of InsertElement or InsertValue instruction,
/// using Offset as base offset for index.
static std::optional<unsigned> getInsertIndex(const Value *InsertInst,
                                              unsigned Offset = 0) {
  int Index = Offset;
  if (const auto *IE = dyn_cast<InsertElementInst>(InsertInst)) {
    const auto *VT = dyn_cast<FixedVectorType>(IE->getType());
    if (!VT)
      return std::nullopt;
    const auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2));
    if (!CI)
      return std::nullopt;
    if (CI->getValue().uge(VT->getNumElements()))
      return std::nullopt;
    Index *= VT->getNumElements();
    Index += CI->getZExtValue();
    return Index;
  }

  const auto *IV = cast<InsertValueInst>(InsertInst);
  Type *CurrentType = IV->getType();
  for (unsigned I : IV->indices()) {
    if (const auto *ST = dyn_cast<StructType>(CurrentType)) {
      Index *= ST->getNumElements();
      CurrentType = ST->getElementType(I);
    } else if (const auto *AT = dyn_cast<ArrayType>(CurrentType)) {
      Index *= AT->getNumElements();
      CurrentType = AT->getElementType();
    } else {
      return std::nullopt;
    }
    Index += I;
  }
  return Index;
}
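
// Worked example (for illustration): for a nested aggregate insert such as
//   %r = insertvalue {[2 x i32], [2 x i32]} %agg, i32 %v, 1, 0
// getInsertIndex computes ((0 * 2 + 1) * 2) + 0 == 2, i.e. the row-major
// position of the inserted scalar within the flattened aggregate.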

namespace {
/// Specifies the way the mask should be analyzed for undefs/poisonous elements
/// in the shuffle mask.
enum class UseMask {
  FirstArg,    ///< The mask is expected to be for permutation of 1-2 vectors,
               ///< check for the mask elements for the first argument (mask
               ///< indices are in range [0:VF)).
  SecondArg,   ///< The mask is expected to be for permutation of 2 vectors,
               ///< check for the mask elements for the second argument (mask
               ///< indices are in range [VF:2*VF))
  UndefsAsMask ///< Consider undef mask elements (-1) as placeholders for
               ///< future shuffle elements and mark them as ones as being used
               ///< in future. Non-undef elements are considered as unused since
               ///< they're already marked as used in the mask.
};
} // namespace

/// Prepares a use bitset for the given mask either for the first argument or
/// for the second.
static SmallBitVector buildUseMask(int VF, ArrayRef<int> Mask,
                                   UseMask MaskArg) {
  SmallBitVector UseMask(VF, true);
  for (auto [Idx, Value] : enumerate(Mask)) {
    if (Value == PoisonMaskElem) {
      if (MaskArg == UseMask::UndefsAsMask)
        UseMask.reset(Idx);
      continue;
    }
    if (MaskArg == UseMask::FirstArg && Value < VF)
      UseMask.reset(Value);
    else if (MaskArg == UseMask::SecondArg && Value >= VF)
      UseMask.reset(Value - VF);
  }
  return UseMask;
}
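
// For illustration: buildUseMask(/*VF=*/4, {0, 5, PoisonMaskElem, 3},
// UseMask::FirstArg) starts with all four bits set and clears bits 0 and 3,
// the first-argument lanes referenced by the mask; bits 1 and 2 remain set.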

/// Checks if the given value is actually an undefined constant vector.
/// Also, if the \p UseMask is not empty, tries to check if the non-masked
/// elements actually mask the insertelement buildvector, if any.
template <bool IsPoisonOnly = false>
static SmallBitVector isUndefVector(const Value *V,
                                    const SmallBitVector &UseMask = {}) {
  SmallBitVector Res(UseMask.empty() ? 1 : UseMask.size(), true);
  using T = std::conditional_t<IsPoisonOnly, PoisonValue, UndefValue>;
  if (isa<T>(V))
    return Res;
  auto *VecTy = dyn_cast<FixedVectorType>(V->getType());
  if (!VecTy)
    return Res.reset();
  auto *C = dyn_cast<Constant>(V);
  if (!C) {
    if (!UseMask.empty()) {
      const Value *Base = V;
      while (auto *II = dyn_cast<InsertElementInst>(Base)) {
        Base = II->getOperand(0);
        if (isa<T>(II->getOperand(1)))
          continue;
        std::optional<unsigned> Idx = getInsertIndex(II);
        if (!Idx) {
          Res.reset();
          return Res;
        }
        if (*Idx < UseMask.size() && !UseMask.test(*Idx))
          Res.reset(*Idx);
      }
      // TODO: Add analysis for shuffles here too.
      if (V == Base) {
        Res.reset();
      } else {
        SmallBitVector SubMask(UseMask.size(), false);
        Res &= isUndefVector<IsPoisonOnly>(Base, SubMask);
      }
    } else {
      Res.reset();
    }
    return Res;
  }
  for (unsigned I = 0, E = VecTy->getNumElements(); I != E; ++I) {
    if (Constant *Elem = C->getAggregateElement(I))
      if (!isa<T>(Elem) &&
          (UseMask.empty() || (I < UseMask.size() && !UseMask.test(I))))
        Res.reset(I);
  }
  return Res;
}

/// Checks if the vector of instructions can be represented as a shuffle, like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %x0x0 = mul i8 %x0, %x0
/// %x3x3 = mul i8 %x3, %x3
/// %y1y1 = mul i8 %y1, %y1
/// %y2y2 = mul i8 %y2, %y2
/// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0
/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
/// ret <4 x i8> %ins4
/// can be transformed into:
/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
///                                                         i32 6>
/// %2 = mul <4 x i8> %1, %1
/// ret <4 x i8> %2
/// Mask will return the Shuffle Mask equivalent to the extracted elements.
/// TODO: Can we split off and reuse the shuffle mask detection from
/// ShuffleVectorInst/getShuffleCost?
static std::optional<TargetTransformInfo::ShuffleKind>
isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
  const auto *It =
      find_if(VL, [](Value *V) { return isa<ExtractElementInst>(V); });
  if (It == VL.end())
    return std::nullopt;
  auto *EI0 = cast<ExtractElementInst>(*It);
  if (isa<ScalableVectorType>(EI0->getVectorOperandType()))
    return std::nullopt;
  unsigned Size =
      cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements();
  Value *Vec1 = nullptr;
  Value *Vec2 = nullptr;
  enum ShuffleMode { Unknown, Select, Permute };
  ShuffleMode CommonShuffleMode = Unknown;
  Mask.assign(VL.size(), PoisonMaskElem);
  for (unsigned I = 0, E = VL.size(); I < E; ++I) {
    // Undef can be represented as an undef element in a vector.
    if (isa<UndefValue>(VL[I]))
      continue;
    auto *EI = cast<ExtractElementInst>(VL[I]);
    if (isa<ScalableVectorType>(EI->getVectorOperandType()))
      return std::nullopt;
    auto *Vec = EI->getVectorOperand();
    // We can extractelement from undef or poison vector.
    if (isUndefVector(Vec).all())
      continue;
    // All vector operands must have the same number of vector elements.
    if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size)
      return std::nullopt;
    if (isa<UndefValue>(EI->getIndexOperand()))
      continue;
    auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
    if (!Idx)
      return std::nullopt;
    // Undefined behavior if Idx is negative or >= Size.
    if (Idx->getValue().uge(Size))
      continue;
    unsigned IntIdx = Idx->getValue().getZExtValue();
    Mask[I] = IntIdx;
    // For correct shuffling we have to have at most 2 different vector operands
    // in all extractelement instructions.
    if (!Vec1 || Vec1 == Vec) {
      Vec1 = Vec;
    } else if (!Vec2 || Vec2 == Vec) {
      Vec2 = Vec;
      Mask[I] += Size;
    } else {
      return std::nullopt;
    }
    if (CommonShuffleMode == Permute)
      continue;
    // If the extract index is not the same as the operation number, it is a
    // permutation.
    if (IntIdx != I) {
      CommonShuffleMode = Permute;
      continue;
    }
    CommonShuffleMode = Select;
  }
  // If we're not crossing lanes in different vectors, consider it as blending.
  if (CommonShuffleMode == Select && Vec2)
    return TargetTransformInfo::SK_Select;
  // If Vec2 was never used, we have a permutation of a single vector, otherwise
  // we have permutation of 2 vectors.
  return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
              : TargetTransformInfo::SK_PermuteSingleSrc;
}

/// \returns True if Extract{Value,Element} instruction extracts element Idx.
static std::optional<unsigned> getExtractIndex(Instruction *E) {
  unsigned Opcode = E->getOpcode();
  assert((Opcode == Instruction::ExtractElement ||
          Opcode == Instruction::ExtractValue) &&
         "Expected extractelement or extractvalue instruction.");
  if (Opcode == Instruction::ExtractElement) {
    auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    if (!CI)
      return std::nullopt;
    return CI->getZExtValue();
  }
  auto *EI = cast<ExtractValueInst>(E);
  if (EI->getNumIndices() != 1)
    return std::nullopt;
  return *EI->idx_begin();
}

namespace {

/// Main data required for vectorization of instructions.
struct InstructionsState {
  /// The very first instruction in the list with the main opcode.
  Value *OpValue = nullptr;

  /// The main/alternate instruction.
  Instruction *MainOp = nullptr;
  Instruction *AltOp = nullptr;

  /// The main/alternate opcodes for the list of instructions.
  unsigned getOpcode() const {
    return MainOp ? MainOp->getOpcode() : 0;
  }

  unsigned getAltOpcode() const {
    return AltOp ? AltOp->getOpcode() : 0;
  }

  /// Some of the instructions in the list have alternate opcodes.
  bool isAltShuffle() const { return AltOp != MainOp; }

  bool isOpcodeOrAlt(Instruction *I) const {
    unsigned CheckedOpcode = I->getOpcode();
    return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
  }

  InstructionsState() = delete;
  InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
      : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
};

} // end anonymous namespace

/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
/// OpValue.
static Value *isOneOf(const InstructionsState &S, Value *Op) {
  auto *I = dyn_cast<Instruction>(Op);
  if (I && S.isOpcodeOrAlt(I))
    return Op;
  return S.OpValue;
}

/// \returns true if \p Opcode is allowed as part of the main/alternate
/// instruction for SLP vectorization.
///
/// Example of unsupported opcode is SDIV that can potentially cause UB if the
/// "shuffled out" lane would result in division by zero.
static bool isValidForAlternation(unsigned Opcode) {
  if (Instruction::isIntDivRem(Opcode))
    return false;

  return true;
}

static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       const TargetLibraryInfo &TLI,
                                       unsigned BaseIndex = 0);

/// Checks if the provided operands of 2 cmp instructions are compatible, i.e.
/// compatible instructions or constants, or just some other regular values.
static bool areCompatibleCmpOps(Value *BaseOp0, Value *BaseOp1, Value *Op0,
                                Value *Op1, const TargetLibraryInfo &TLI) {
  return (isConstant(BaseOp0) && isConstant(Op0)) ||
         (isConstant(BaseOp1) && isConstant(Op1)) ||
         (!isa<Instruction>(BaseOp0) && !isa<Instruction>(Op0) &&
          !isa<Instruction>(BaseOp1) && !isa<Instruction>(Op1)) ||
         BaseOp0 == Op0 || BaseOp1 == Op1 ||
         getSameOpcode({BaseOp0, Op0}, TLI).getOpcode() ||
         getSameOpcode({BaseOp1, Op1}, TLI).getOpcode();
}

/// \returns true if a compare instruction \p CI has similar "look" and
/// same predicate as \p BaseCI, "as is" or with its operands and predicate
/// swapped, false otherwise.
static bool isCmpSameOrSwapped(const CmpInst *BaseCI, const CmpInst *CI,
                               const TargetLibraryInfo &TLI) {
  assert(BaseCI->getOperand(0)->getType() == CI->getOperand(0)->getType() &&
         "Assessing comparisons of different types?");
  CmpInst::Predicate BasePred = BaseCI->getPredicate();
  CmpInst::Predicate Pred = CI->getPredicate();
  CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(Pred);

  Value *BaseOp0 = BaseCI->getOperand(0);
  Value *BaseOp1 = BaseCI->getOperand(1);
  Value *Op0 = CI->getOperand(0);
  Value *Op1 = CI->getOperand(1);

  return (BasePred == Pred &&
          areCompatibleCmpOps(BaseOp0, BaseOp1, Op0, Op1, TLI)) ||
         (BasePred == SwappedPred &&
          areCompatibleCmpOps(BaseOp0, BaseOp1, Op1, Op0, TLI));
}

/// \returns analysis of the Instructions in \p VL described in
/// InstructionsState: the opcode with which we suppose the whole list
/// could be vectorized, even if its structure is diverse.
static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       const TargetLibraryInfo &TLI,
                                       unsigned BaseIndex) {
  // Make sure these are all Instructions.
  if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);

  bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
  bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
  bool IsCmpOp = isa<CmpInst>(VL[BaseIndex]);
  CmpInst::Predicate BasePred =
      IsCmpOp ? cast<CmpInst>(VL[BaseIndex])->getPredicate()
              : CmpInst::BAD_ICMP_PREDICATE;
  unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
  unsigned AltOpcode = Opcode;
  unsigned AltIndex = BaseIndex;

  // Check for one alternate opcode from another BinaryOperator.
  // TODO - generalize to support all operators (types, calls etc.).
  auto *IBase = cast<Instruction>(VL[BaseIndex]);
  Intrinsic::ID BaseID = 0;
  SmallVector<VFInfo> BaseMappings;
  if (auto *CallBase = dyn_cast<CallInst>(IBase)) {
    BaseID = getVectorIntrinsicIDForCall(CallBase, &TLI);
    BaseMappings = VFDatabase(*CallBase).getMappings(*CallBase);
    if (!isTriviallyVectorizable(BaseID) && BaseMappings.empty())
      return InstructionsState(VL[BaseIndex], nullptr, nullptr);
  }
  for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
    auto *I = cast<Instruction>(VL[Cnt]);
    unsigned InstOpcode = I->getOpcode();
    if (IsBinOp && isa<BinaryOperator>(I)) {
      if (InstOpcode == Opcode || InstOpcode == AltOpcode)
        continue;
      if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) &&
          isValidForAlternation(Opcode)) {
        AltOpcode = InstOpcode;
        AltIndex = Cnt;
        continue;
      }
    } else if (IsCastOp && isa<CastInst>(I)) {
      Value *Op0 = IBase->getOperand(0);
      Type *Ty0 = Op0->getType();
      Value *Op1 = I->getOperand(0);
      Type *Ty1 = Op1->getType();
      if (Ty0 == Ty1) {
        if (InstOpcode == Opcode || InstOpcode == AltOpcode)
          continue;
        if (Opcode == AltOpcode) {
          assert(isValidForAlternation(Opcode) &&
                 isValidForAlternation(InstOpcode) &&
                 "Cast isn't safe for alternation, logic needs to be updated!");
          AltOpcode = InstOpcode;
          AltIndex = Cnt;
          continue;
        }
      }
    } else if (auto *Inst = dyn_cast<CmpInst>(VL[Cnt]); Inst && IsCmpOp) {
      auto *BaseInst = cast<CmpInst>(VL[BaseIndex]);
      Type *Ty0 = BaseInst->getOperand(0)->getType();
      Type *Ty1 = Inst->getOperand(0)->getType();
      if (Ty0 == Ty1) {
        assert(InstOpcode == Opcode && "Expected same CmpInst opcode.");
        // Check for compatible operands. If the corresponding operands are not
        // compatible - need to perform alternate vectorization.
        CmpInst::Predicate CurrentPred = Inst->getPredicate();
        CmpInst::Predicate SwappedCurrentPred =
            CmpInst::getSwappedPredicate(CurrentPred);

        if (E == 2 &&
            (BasePred == CurrentPred || BasePred == SwappedCurrentPred))
          continue;

        if (isCmpSameOrSwapped(BaseInst, Inst, TLI))
          continue;
        auto *AltInst = cast<CmpInst>(VL[AltIndex]);
        if (AltIndex != BaseIndex) {
          if (isCmpSameOrSwapped(AltInst, Inst, TLI))
            continue;
        } else if (BasePred != CurrentPred) {
          assert(
              isValidForAlternation(InstOpcode) &&
              "CmpInst isn't safe for alternation, logic needs to be updated!");
          AltIndex = Cnt;
          continue;
        }
        CmpInst::Predicate AltPred = AltInst->getPredicate();
        if (BasePred == CurrentPred || BasePred == SwappedCurrentPred ||
            AltPred == CurrentPred || AltPred == SwappedCurrentPred)
          continue;
      }
    } else if (InstOpcode == Opcode || InstOpcode == AltOpcode) {
      if (auto *Gep = dyn_cast<GetElementPtrInst>(I)) {
        if (Gep->getNumOperands() != 2 ||
            Gep->getOperand(0)->getType() != IBase->getOperand(0)->getType())
          return InstructionsState(VL[BaseIndex], nullptr, nullptr);
      } else if (auto *EI = dyn_cast<ExtractElementInst>(I)) {
        if (!isVectorLikeInstWithConstOps(EI))
          return InstructionsState(VL[BaseIndex], nullptr, nullptr);
      } else if (auto *LI = dyn_cast<LoadInst>(I)) {
        auto *BaseLI = cast<LoadInst>(IBase);
        if (!LI->isSimple() || !BaseLI->isSimple())
          return InstructionsState(VL[BaseIndex], nullptr, nullptr);
      } else if (auto *Call = dyn_cast<CallInst>(I)) {
        auto *CallBase = cast<CallInst>(IBase);
        if (Call->getCalledFunction() != CallBase->getCalledFunction())
          return InstructionsState(VL[BaseIndex], nullptr, nullptr);
        if (Call->hasOperandBundles() &&
            !std::equal(Call->op_begin() + Call->getBundleOperandsStartIndex(),
                        Call->op_begin() + Call->getBundleOperandsEndIndex(),
                        CallBase->op_begin() +
                            CallBase->getBundleOperandsStartIndex()))
          return InstructionsState(VL[BaseIndex], nullptr, nullptr);
        Intrinsic::ID ID = getVectorIntrinsicIDForCall(Call, &TLI);
        if (ID != BaseID)
          return InstructionsState(VL[BaseIndex], nullptr, nullptr);
        if (!ID) {
          SmallVector<VFInfo> Mappings = VFDatabase(*Call).getMappings(*Call);
          if (Mappings.size() != BaseMappings.size() ||
              Mappings.front().ISA != BaseMappings.front().ISA ||
              Mappings.front().ScalarName != BaseMappings.front().ScalarName ||
              Mappings.front().VectorName != BaseMappings.front().VectorName ||
              Mappings.front().Shape.VF != BaseMappings.front().Shape.VF ||
              Mappings.front().Shape.Parameters !=
                  BaseMappings.front().Shape.Parameters)
            return InstructionsState(VL[BaseIndex], nullptr, nullptr);
        }
      }
      continue;
    }
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);
  }

  return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
                           cast<Instruction>(VL[AltIndex]));
}
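
// For illustration: getSameOpcode({add, sub, add, sub}, TLI) produces a state
// with MainOp set to the first add, AltOp set to the first sub and
// isAltShuffle() == true, whereas {add, sub, sdiv, sub} produces a null state
// (MainOp == nullptr) because sdiv introduces a third opcode and is not valid
// for alternation.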

/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL.front()->getType();
  return all_of(VL.drop_front(), [&](Value *V) { return V->getType() == Ty; });
}

/// \returns True if in-tree use also needs extract. This refers to
/// possible scalar operand in vectorized instruction.
static bool doesInTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                        TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    return any_of(enumerate(CI->args()), [&](auto &&Arg) {
      return isVectorIntrinsicWithScalarOpAtArg(ID, Arg.index()) &&
             Arg.value().get() == Scalar;
    });
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

/// Shuffles \p Mask in accordance with the given \p SubMask.
/// \param ExtendingManyInputs Supports reshuffling of the mask with not only
/// one but two input vectors.
static void addMask(SmallVectorImpl<int> &Mask, ArrayRef<int> SubMask,
                    bool ExtendingManyInputs = false) {
  if (SubMask.empty())
    return;
  assert(
      (!ExtendingManyInputs || SubMask.size() > Mask.size() ||
       // Check if input scalars were extended to match the size of other node.
       (SubMask.size() == Mask.size() &&
        std::all_of(std::next(Mask.begin(), Mask.size() / 2), Mask.end(),
                    [](int Idx) { return Idx == PoisonMaskElem; }))) &&
      "SubMask with many inputs support must be larger than the mask.");
  if (Mask.empty()) {
    Mask.append(SubMask.begin(), SubMask.end());
    return;
  }
  SmallVector<int> NewMask(SubMask.size(), PoisonMaskElem);
  int TermValue = std::min(Mask.size(), SubMask.size());
  for (int I = 0, E = SubMask.size(); I < E; ++I) {
    if (SubMask[I] == PoisonMaskElem ||
        (!ExtendingManyInputs &&
         (SubMask[I] >= TermValue || Mask[SubMask[I]] >= TermValue)))
      continue;
    NewMask[I] = Mask[SubMask[I]];
  }
  Mask.swap(NewMask);
}
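
// For illustration: with Mask = {1, 0, 3, 2} and SubMask = {3, 2, 1, 0},
// addMask composes the two permutations, NewMask[I] = Mask[SubMask[I]], so
// Mask becomes {2, 3, 0, 1}.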

/// Order may have elements assigned special value (size) which is out of
/// bounds. Such indices only appear on places which correspond to undef values
/// (see canReuseExtract for details) and are used in order to avoid undef
/// values having an effect on operand ordering.
/// The first loop below simply finds all unused indices and then the next loop
/// nest assigns these indices for undef values positions.
/// As an example below Order has two undef positions and they have assigned
/// values 3 and 7 respectively:
/// before:  6 9 5 4 9 2 1 0
/// after:   6 3 5 4 7 2 1 0
static void fixupOrderingIndices(SmallVectorImpl<unsigned> &Order) {
  const unsigned Sz = Order.size();
  SmallBitVector UnusedIndices(Sz, /*t=*/true);
  SmallBitVector MaskedIndices(Sz);
  for (unsigned I = 0; I < Sz; ++I) {
    if (Order[I] < Sz)
      UnusedIndices.reset(Order[I]);
    else
      MaskedIndices.set(I);
  }
  if (MaskedIndices.none())
    return;
  assert(UnusedIndices.count() == MaskedIndices.count() &&
         "Non-synced masked/available indices.");
  int Idx = UnusedIndices.find_first();
  int MIdx = MaskedIndices.find_first();
  while (MIdx >= 0) {
    assert(Idx >= 0 && "Indices must be synced.");
    Order[MIdx] = Idx;
    Idx = UnusedIndices.find_next(Idx);
    MIdx = MaskedIndices.find_next(MIdx);
  }
}

namespace llvm {

static void inversePermutation(ArrayRef<unsigned> Indices,
                               SmallVectorImpl<int> &Mask) {
  Mask.clear();
  const unsigned E = Indices.size();
  Mask.resize(E, PoisonMaskElem);
  for (unsigned I = 0; I < E; ++I)
    Mask[Indices[I]] = I;
}

/// Reorders the list of scalars in accordance with the given \p Mask.
static void reorderScalars(SmallVectorImpl<Value *> &Scalars,
                           ArrayRef<int> Mask) {
  assert(!Mask.empty() && "Expected non-empty mask.");
  SmallVector<Value *> Prev(Scalars.size(),
                            UndefValue::get(Scalars.front()->getType()));
  Prev.swap(Scalars);
  for (unsigned I = 0, E = Prev.size(); I < E; ++I)
    if (Mask[I] != PoisonMaskElem)
      Scalars[Mask[I]] = Prev[I];
}
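
// For illustration: inversePermutation({2, 0, 1}, Mask) yields
// Mask = {1, 2, 0}, and reorderScalars({a, b, c}, {1, 2, 0}) then places each
// old element at its masked position, producing {c, a, b}.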

/// Checks if the provided value does not require scheduling. It does not
/// require scheduling if this is not an instruction or it is an instruction
/// that does not read/write memory and all operands are either not
/// instructions, or phi nodes, or instructions from different blocks.
static bool areAllOperandsNonInsts(Value *V) {
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return true;
  return !mayHaveNonDefUseDependency(*I) &&
         all_of(I->operands(), [I](Value *V) {
           auto *IO = dyn_cast<Instruction>(V);
           if (!IO)
             return true;
           return isa<PHINode>(IO) || IO->getParent() != I->getParent();
         });
}

/// Checks if the provided value does not require scheduling. It does not
/// require scheduling if this is not an instruction or it is an instruction
/// that does not read/write memory and all users are phi nodes or instructions
/// from different blocks.
static bool isUsedOutsideBlock(Value *V) {
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return true;
  // Limits the number of uses to save compile time.
  constexpr int UsesLimit = 8;
  return !I->mayReadOrWriteMemory() && !I->hasNUsesOrMore(UsesLimit) &&
         all_of(I->users(), [I](User *U) {
           auto *IU = dyn_cast<Instruction>(U);
           if (!IU)
             return true;
           return IU->getParent() != I->getParent() || isa<PHINode>(IU);
         });
}

/// Checks if the specified value does not require scheduling. It does not
/// require scheduling if all operands and all users do not need to be
/// scheduled in the current basic block.
static bool doesNotNeedToBeScheduled(Value *V) {
  return areAllOperandsNonInsts(V) && isUsedOutsideBlock(V);
}

/// Checks if the specified array of instructions does not require scheduling.
/// This is the case if either all instructions have operands that do not
/// require scheduling, or all their users do not require scheduling since they
/// are phis or in other basic blocks.
static bool doesNotNeedToSchedule(ArrayRef<Value *> VL) {
  return !VL.empty() &&
         (all_of(VL, isUsedOutsideBlock) || all_of(VL, areAllOperandsNonInsts));
}

namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
  struct TreeEntry;
  struct ScheduleData;
  class ShuffleCostEstimator;
  class ShuffleInstructionBuilder;

public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;
  using OrdersType = SmallVector<unsigned, 4>;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : BatchAA(*Aa), F(Func), SE(Se), TTI(Tti), TLI(TLi), LI(Li),
        DT(Dt), AC(AC), DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    // data type rather than just register size. For example, x86 AVX has
    // 256-bit registers, but it does not support integer operations
    // at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize =
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedValue();

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractvalue instructions.
  /// \param ReplacedExternals contains the list of replaced external values
  /// {scalar, replace} after emitting extractelement for external uses.
  Value *
  vectorizeTree(const ExtraValueToDebugLocsMap &ExternallyUsedValues,
                SmallVectorImpl<std::pair<Value *, Value *>> &ReplacedExternals,
                Instruction *ReductionRoot = nullptr);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  InstructionCost getSpillCost() const;

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  InstructionCost getTreeCost(ArrayRef<Value *> VectorizedVals = std::nullopt);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 const SmallDenseSet<Value *> &UserIgnoreLst);

  /// Construct a vectorizable tree that starts at \p Roots.
  void buildTree(ArrayRef<Value *> Roots);

  /// Returns whether the root node has in-tree uses.
  bool doesRootHaveInTreeUses() const {
    return !VectorizableTree.empty() &&
           !VectorizableTree.front()->UserTreeIndices.empty();
  }

  /// Return the scalars of the root node.
  ArrayRef<Value *> getRootNodeScalars() const {
    assert(!VectorizableTree.empty() && "No graph to get the first node from");
    return VectorizableTree.front()->Scalars;
  }

  /// Builds external uses of the vectorized scalars, i.e. the list of
  /// vectorized scalars to be extracted, their lanes and their scalar users. \p
  /// ExternallyUsedValues contains additional list of external uses to handle
  /// vectorization of reductions.
  void
  buildExternalUses(const ExtraValueToDebugLocsMap &ExternallyUsedValues = {});

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MultiNodeScalars.clear();
    MustGather.clear();
    EntryToLastInstruction.clear();
    ExternalUses.clear();
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
    InstrElementSize.clear();
    UserIgnoreList = nullptr;
    PostponedGathers.clear();
    ValueToGatherNodes.clear();
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }

  /// Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// Checks if the specified gather tree entry \p TE can be represented as a
  /// shuffled vector entry + (possibly) permutation with other gathers. It
  /// implements the checks only for possibly ordered scalars (Loads,
  /// ExtractElement, ExtractValue), which can be part of the graph.
  std::optional<OrdersType> findReusedOrderedScalars(const TreeEntry &TE);

  /// Sort loads into increasing pointers offsets to allow greater clustering.
  std::optional<OrdersType> findPartiallyOrderedLoads(const TreeEntry &TE);

  /// Gets reordering data for the given tree entry. If the entry is vectorized
  /// - just return ReorderIndices, otherwise check if the scalars can be
  /// reordered and return the most optimal order.
  /// \return std::nullopt if ordering is not important, empty order, if
  /// identity order is important, or the actual order.
  /// \param TopToBottom If true, include the order of vectorized stores and
  /// insertelement nodes, otherwise skip them.
  std::optional<OrdersType> getReorderingData(const TreeEntry &TE,
                                              bool TopToBottom);

  /// Reorders the current graph to the most profitable order starting from the
  /// root node to the leaf nodes. The best order is chosen only from the nodes
  /// of the same size (vectorization factor). Smaller nodes are considered
  /// parts of a subgraph with a smaller VF and they are reordered
  /// independently. We can do this because we still need to extend smaller
  /// nodes to the wider VF and we can merge the reordering shuffles with the
  /// widening shuffles.
  void reorderTopToBottom();

  /// Reorders the current graph to the most profitable order starting from
  /// leaves to the root. It allows rotating small subgraphs and reducing the
  /// number of reshuffles if the leaf nodes use the same order. In this case
  /// we can merge the orders and just shuffle the user node instead of
  /// shuffling its operands. Plus, even if the leaf nodes have different
  /// orders, it allows sinking the reordering in the graph closer to the root
  /// node and merging it later during analysis.
  void reorderBottomToTop(bool IgnoreReorder = false);

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by
  // cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  unsigned getMinVF(unsigned Sz) const {
    return std::max(2U, getMinVecRegSize() / Sz);
  }

  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
    unsigned MaxVF = MaxVFOption.getNumOccurrences() ?
      MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode);
    return MaxVF ? MaxVF : UINT_MAX;
  }
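
  // For illustration: with the default minimum register size of 128 bits,
  // getMinVF(32) returns max(2, 128 / 32) == 4, and getMaximumVF() falls back
  // to UINT_MAX when neither the -slp-max-vf option nor TTI imposes a limit.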

  /// Check if homogeneous aggregate is isomorphic to some VectorType.
  /// Accepts homogeneous multidimensional aggregate of scalars/vectors like
  /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> },
  /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable(bool ForReduction = false) const;

  /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  /// may not be necessary.
  bool isLoadCombineReductionCandidate(RecurKind RdxKind) const;

  /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  /// may not be necessary.
  bool isLoadCombineCandidate() const;

  OptimizationRemarkEmitter *getORE() { return ORE; }

  /// This structure holds any data we need about the edges being traversed
  /// during buildTree_rec(). We keep track of:
  /// (i) the user TreeEntry index, and
  /// (ii) the index of the edge.
  struct EdgeInfo {
    EdgeInfo() = default;
    EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
        : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
    /// The user TreeEntry.
    TreeEntry *UserTE = nullptr;
    /// The operand index of the use.
    unsigned EdgeIdx = UINT_MAX;
#ifndef NDEBUG
    friend inline raw_ostream &operator<<(raw_ostream &OS,
                                          const BoUpSLP::EdgeInfo &EI) {
      EI.dump(OS);
      return OS;
    }
    /// Debug print.
    void dump(raw_ostream &OS) const {
      OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
         << " EdgeIdx:" << EdgeIdx << "}";
    }
    LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
#endif
    bool operator == (const EdgeInfo &Other) const {
      return UserTE == Other.UserTE && EdgeIdx == Other.EdgeIdx;
    }
  };

  /// A helper class used for scoring candidates for two consecutive lanes.
  class LookAheadHeuristics {
    const TargetLibraryInfo &TLI;
    const DataLayout &DL;
    ScalarEvolution &SE;
    const BoUpSLP &R;
    int NumLanes; // Total number of lanes (aka vectorization factor).
    int MaxLevel; // The maximum recursion depth for accumulating score.

  public:
    LookAheadHeuristics(const TargetLibraryInfo &TLI, const DataLayout &DL,
                        ScalarEvolution &SE, const BoUpSLP &R, int NumLanes,
                        int MaxLevel)
        : TLI(TLI), DL(DL), SE(SE), R(R), NumLanes(NumLanes),
          MaxLevel(MaxLevel) {}

    // The hard-coded scores listed here are not very important, though they
    // should be higher for better matches to improve the resulting cost. When
    // computing the scores of matching one sub-tree with another, we are
    // basically counting the number of values that are matching. So even if
    // all scores are set to 1, we would still get a decent matching result.
    // However, sometimes we have to break ties. For example we may have to
    // choose between matching loads vs matching opcodes. This is what these
    // scores are helping us with: they provide the order of preference. Also,
    // this is important if the scalar is externally used or used in another
    // tree entry node in a different lane.

    /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
    static const int ScoreConsecutiveLoads = 4;
    /// The same load multiple times. This should have a better score than
    /// `ScoreSplat` because on x86, for a 2-lane vector, we can represent it
    /// with `movddup (%reg), xmm0`, which has a throughput of 0.5, versus 0.5
    /// for a vector load plus 1.0 for a broadcast.
    static const int ScoreSplatLoads = 3;
    /// Loads from reversed memory addresses, e.g. load(A[i+1]), load(A[i]).
    static const int ScoreReversedLoads = 3;
    /// A load candidate for masked gather.
    static const int ScoreMaskedGatherCandidate = 1;
    /// ExtractElementInst from same vector and consecutive indexes.
    static const int ScoreConsecutiveExtracts = 4;
    /// ExtractElementInst from same vector and reversed indices.
    static const int ScoreReversedExtracts = 3;
    /// Constants.
    static const int ScoreConstants = 2;
    /// Instructions with the same opcode.
    static const int ScoreSameOpcode = 2;
    /// Instructions with alt opcodes (e.g., add + sub).
    static const int ScoreAltOpcodes = 1;
    /// Identical instructions (a.k.a. splat or broadcast).
    static const int ScoreSplat = 1;
    /// Matching with an undef is preferable to failing.
    static const int ScoreUndef = 1;
    /// Score for failing to find a decent match.
    static const int ScoreFail = 0;
    /// Score if all users are vectorized.
    static const int ScoreAllUserVectorized = 1;

    /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
    /// \p U1 and \p U2 are the users of \p V1 and \p V2.
    /// Also, checks if \p V1 and \p V2 are compatible with instructions in \p
    /// MainAltOps.
    int getShallowScore(Value *V1, Value *V2, Instruction *U1, Instruction *U2,
                        ArrayRef<Value *> MainAltOps) const {
      if (!isValidElementType(V1->getType()) ||
          !isValidElementType(V2->getType()))
        return LookAheadHeuristics::ScoreFail;

      if (V1 == V2) {
        if (isa<LoadInst>(V1)) {
          // Returns true if the users of V1 and V2 won't need to be extracted.
          auto AllUsersAreInternal = [U1, U2, this](Value *V1, Value *V2) {
            // Bail out if we have too many uses to save compilation time.
            static constexpr unsigned Limit = 8;
            if (V1->hasNUsesOrMore(Limit) || V2->hasNUsesOrMore(Limit))
              return false;

            auto AllUsersVectorized = [U1, U2, this](Value *V) {
              return llvm::all_of(V->users(), [U1, U2, this](Value *U) {
                return U == U1 || U == U2 || R.getTreeEntry(U) != nullptr;
              });
            };
            return AllUsersVectorized(V1) && AllUsersVectorized(V2);
          };
          // A broadcast of a load can be cheaper on some targets.
          if (R.TTI->isLegalBroadcastLoad(V1->getType(),
                                          ElementCount::getFixed(NumLanes)) &&
              ((int)V1->getNumUses() == NumLanes ||
               AllUsersAreInternal(V1, V2)))
            return LookAheadHeuristics::ScoreSplatLoads;
        }
        return LookAheadHeuristics::ScoreSplat;
      }

      auto *LI1 = dyn_cast<LoadInst>(V1);
      auto *LI2 = dyn_cast<LoadInst>(V2);
      if (LI1 && LI2) {
        if (LI1->getParent() != LI2->getParent() || !LI1->isSimple() ||
            !LI2->isSimple())
          return LookAheadHeuristics::ScoreFail;

        std::optional<int> Dist = getPointersDiff(
            LI1->getType(), LI1->getPointerOperand(), LI2->getType(),
            LI2->getPointerOperand(), DL, SE, /*StrictCheck=*/true);
        if (!Dist || *Dist == 0) {
          if (getUnderlyingObject(LI1->getPointerOperand()) ==
                  getUnderlyingObject(LI2->getPointerOperand()) &&
              R.TTI->isLegalMaskedGather(
                  FixedVectorType::get(LI1->getType(), NumLanes),
                  LI1->getAlign()))
            return LookAheadHeuristics::ScoreMaskedGatherCandidate;
          return LookAheadHeuristics::ScoreFail;
        }
        // The distance is too large - still may be profitable to use masked
        // loads/gathers.
        if (std::abs(*Dist) > NumLanes / 2)
          return LookAheadHeuristics::ScoreMaskedGatherCandidate;
        // This still will detect consecutive loads, but we might have "holes"
        // in some cases. It is ok for non-power-2 vectorization and may produce
        // better results. It should not affect current vectorization.
        return (*Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveLoads
                           : LookAheadHeuristics::ScoreReversedLoads;
      }

      auto *C1 = dyn_cast<Constant>(V1);
      auto *C2 = dyn_cast<Constant>(V2);
      if (C1 && C2)
        return LookAheadHeuristics::ScoreConstants;

      // Extracts from consecutive indexes of the same vector score better, as
      // the extracts could be optimized away.
      Value *EV1;
      ConstantInt *Ex1Idx;
      if (match(V1, m_ExtractElt(m_Value(EV1), m_ConstantInt(Ex1Idx)))) {
        // Undefs are always profitable for extractelements.
        // Compiler can easily combine poison and extractelement <non-poison> or
        // undef and extractelement <poison>. But combining undef +
        // extractelement <non-poison-but-may-produce-poison> requires some
        // extra operations.
        if (isa<UndefValue>(V2))
          return (isa<PoisonValue>(V2) || isUndefVector(EV1).all())
                     ? LookAheadHeuristics::ScoreConsecutiveExtracts
                     : LookAheadHeuristics::ScoreSameOpcode;
        Value *EV2 = nullptr;
        ConstantInt *Ex2Idx = nullptr;
        if (match(V2,
                  m_ExtractElt(m_Value(EV2), m_CombineOr(m_ConstantInt(Ex2Idx),
                                                         m_Undef())))) {
          // Undefs are always profitable for extractelements.
          if (!Ex2Idx)
            return LookAheadHeuristics::ScoreConsecutiveExtracts;
          if (isUndefVector(EV2).all() && EV2->getType() == EV1->getType())
            return LookAheadHeuristics::ScoreConsecutiveExtracts;
          if (EV2 == EV1) {
            int Idx1 = Ex1Idx->getZExtValue();
            int Idx2 = Ex2Idx->getZExtValue();
            int Dist = Idx2 - Idx1;
            // The distance is too large - still may be profitable to use
            // shuffles.
            if (std::abs(Dist) == 0)
              return LookAheadHeuristics::ScoreSplat;
            if (std::abs(Dist) > NumLanes / 2)
              return LookAheadHeuristics::ScoreSameOpcode;
            return (Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveExtracts
                              : LookAheadHeuristics::ScoreReversedExtracts;
          }
          return LookAheadHeuristics::ScoreAltOpcodes;
        }
        return LookAheadHeuristics::ScoreFail;
      }

      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (I1 && I2) {
        if (I1->getParent() != I2->getParent())
          return LookAheadHeuristics::ScoreFail;
        SmallVector<Value *, 4> Ops(MainAltOps.begin(), MainAltOps.end());
        Ops.push_back(I1);
        Ops.push_back(I2);
        InstructionsState S = getSameOpcode(Ops, TLI);
        // Note: Only consider instructions with <= 2 operands to avoid
        // complexity explosion.
        if (S.getOpcode() &&
            (S.MainOp->getNumOperands() <= 2 || !MainAltOps.empty() ||
             !S.isAltShuffle()) &&
            all_of(Ops, [&S](Value *V) {
              return cast<Instruction>(V)->getNumOperands() ==
                     S.MainOp->getNumOperands();
            }))
          return S.isAltShuffle() ? LookAheadHeuristics::ScoreAltOpcodes
                                  : LookAheadHeuristics::ScoreSameOpcode;
      }

      if (isa<UndefValue>(V2))
        return LookAheadHeuristics::ScoreUndef;

      return LookAheadHeuristics::ScoreFail;
    }

    /// Go through the operands of \p LHS and \p RHS recursively until
    /// MaxLevel, and return the cumulative score. \p U1 and \p U2 are
    /// the users of \p LHS and \p RHS (that is \p LHS and \p RHS are operands
    /// of \p U1 and \p U2), except at the beginning of the recursion where
    /// these are set to nullptr.
1409 /// 1410 /// For example: 1411 /// \verbatim 1412 /// A[0] B[0] A[1] B[1] C[0] D[0] B[1] A[1] 1413 /// \ / \ / \ / \ / 1414 /// + + + + 1415 /// G1 G2 G3 G4 1416 /// \endverbatim 1417 /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at 1418 /// each level recursively, accumulating the score. It starts from matching 1419 /// the additions at level 0, then moves on to the loads (level 1). The 1420 /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and 1421 /// {B[0],B[1]} match with LookAheadHeuristics::ScoreConsecutiveLoads, while 1422 /// {A[0],C[0]} has a score of LookAheadHeuristics::ScoreFail. 1423 /// Please note that the order of the operands does not matter, as we 1424 /// evaluate the score of all profitable combinations of operands. In 1425 /// other words the score of G1 and G4 is the same as G1 and G2. This 1426 /// heuristic is based on ideas described in: 1427 /// Look-ahead SLP: Auto-vectorization in the presence of commutative 1428 /// operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha, 1429 /// Luís F. W. Góes 1430 int getScoreAtLevelRec(Value *LHS, Value *RHS, Instruction *U1, 1431 Instruction *U2, int CurrLevel, 1432 ArrayRef<Value *> MainAltOps) const { 1433 1434 // Get the shallow score of V1 and V2. 1435 int ShallowScoreAtThisLevel = 1436 getShallowScore(LHS, RHS, U1, U2, MainAltOps); 1437 1438 // If reached MaxLevel, 1439 // or if V1 and V2 are not instructions, 1440 // or if they are SPLAT, 1441 // or if they are not consecutive, 1442 // or if profitable to vectorize loads or extractelements, early return 1443 // the current cost. 1444 auto *I1 = dyn_cast<Instruction>(LHS); 1445 auto *I2 = dyn_cast<Instruction>(RHS); 1446 if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 || 1447 ShallowScoreAtThisLevel == LookAheadHeuristics::ScoreFail || 1448 (((isa<LoadInst>(I1) && isa<LoadInst>(I2)) || 1449 (I1->getNumOperands() > 2 && I2->getNumOperands() > 2) || 1450 (isa<ExtractElementInst>(I1) && isa<ExtractElementInst>(I2))) && 1451 ShallowScoreAtThisLevel)) 1452 return ShallowScoreAtThisLevel; 1453 assert(I1 && I2 && "Should have early exited."); 1454 1455 // Contains the I2 operand indexes that got matched with I1 operands. 1456 SmallSet<unsigned, 4> Op2Used; 1457 1458 // Recursion towards the operands of I1 and I2. We are trying all possible 1459 // operand pairs, and keeping track of the best score. 1460 for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands(); 1461 OpIdx1 != NumOperands1; ++OpIdx1) { 1462 // Try to pair op1I with the best operand of I2. 1463 int MaxTmpScore = 0; 1464 unsigned MaxOpIdx2 = 0; 1465 bool FoundBest = false; 1466 // If I2 is commutative try all combinations. 1467 unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1; 1468 unsigned ToIdx = isCommutative(I2) 1469 ? I2->getNumOperands() 1470 : std::min(I2->getNumOperands(), OpIdx1 + 1); 1471 assert(FromIdx <= ToIdx && "Bad index"); 1472 for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) { 1473 // Skip operands already paired with OpIdx1. 1474 if (Op2Used.count(OpIdx2)) 1475 continue; 1476 // Recursively calculate the cost at each level 1477 int TmpScore = 1478 getScoreAtLevelRec(I1->getOperand(OpIdx1), I2->getOperand(OpIdx2), 1479 I1, I2, CurrLevel + 1, std::nullopt); 1480 // Look for the best score. 
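// For illustration only (hypothetical operands, not taken from the code
// above): with I1 = A[0] + B[0] and I2 = B[1] + A[1], operand A[0]
// (OpIdx1 == 0) scores ScoreFail against B[1] but ScoreConsecutiveLoads
// against A[1], so MaxOpIdx2 becomes 1; on the next iteration B[0] is
// paired with the remaining B[1], again as consecutive loads.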
1481 if (TmpScore > LookAheadHeuristics::ScoreFail && 1482 TmpScore > MaxTmpScore) { 1483 MaxTmpScore = TmpScore; 1484 MaxOpIdx2 = OpIdx2; 1485 FoundBest = true; 1486 } 1487 } 1488 if (FoundBest) { 1489 // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it. 1490 Op2Used.insert(MaxOpIdx2); 1491 ShallowScoreAtThisLevel += MaxTmpScore; 1492 } 1493 } 1494 return ShallowScoreAtThisLevel; 1495 } 1496 }; 1497 /// A helper data structure to hold the operands of a vector of instructions. 1498 /// This supports a fixed vector length for all operand vectors. 1499 class VLOperands { 1500 /// For each operand we need (i) the value, and (ii) the opcode that it 1501 /// would be attached to if the expression was in a left-linearized form. 1502 /// This is required to avoid illegal operand reordering. 1503 /// For example: 1504 /// \verbatim 1505 /// 0 Op1 1506 /// |/ 1507 /// Op1 Op2 Linearized + Op2 1508 /// \ / ----------> |/ 1509 /// - - 1510 /// 1511 /// Op1 - Op2 (0 + Op1) - Op2 1512 /// \endverbatim 1513 /// 1514 /// Value Op1 is attached to a '+' operation, and Op2 to a '-'. 1515 /// 1516 /// Another way to think of this is to track all the operations across the 1517 /// path from the operand all the way to the root of the tree and to 1518 /// calculate the operation that corresponds to this path. For example, the 1519 /// path from Op2 to the root crosses the RHS of the '-', therefore the 1520 /// corresponding operation is a '-' (which matches the one in the 1521 /// linearized tree, as shown above). 1522 /// 1523 /// For lack of a better term, we refer to this operation as Accumulated 1524 /// Path Operation (APO). 1525 struct OperandData { 1526 OperandData() = default; 1527 OperandData(Value *V, bool APO, bool IsUsed) 1528 : V(V), APO(APO), IsUsed(IsUsed) {} 1529 /// The operand value. 1530 Value *V = nullptr; 1531 /// TreeEntries only allow a single opcode, or an alternate sequence of 1532 /// them (e.g, +, -). Therefore, we can safely use a boolean value for the 1533 /// APO. It is set to 'true' if 'V' is attached to an inverse operation 1534 /// in the left-linearized form (e.g., Sub/Div), and 'false' otherwise 1535 /// (e.g., Add/Mul) 1536 bool APO = false; 1537 /// Helper data for the reordering function. 1538 bool IsUsed = false; 1539 }; 1540 1541 /// During operand reordering, we are trying to select the operand at lane 1542 /// that matches best with the operand at the neighboring lane. Our 1543 /// selection is based on the type of value we are looking for. For example, 1544 /// if the neighboring lane has a load, we need to look for a load that is 1545 /// accessing a consecutive address. These strategies are summarized in the 1546 /// 'ReorderingMode' enumerator. 1547 enum class ReorderingMode { 1548 Load, ///< Matching loads to consecutive memory addresses 1549 Opcode, ///< Matching instructions based on opcode (same or alternate) 1550 Constant, ///< Matching constants 1551 Splat, ///< Matching the same instruction multiple times (broadcast) 1552 Failed, ///< We failed to create a vectorizable group 1553 }; 1554 1555 using OperandDataVec = SmallVector<OperandData, 2>; 1556 1557 /// A vector of operand vectors. 1558 SmallVector<OperandDataVec, 4> OpsVec; 1559 1560 const TargetLibraryInfo &TLI; 1561 const DataLayout &DL; 1562 ScalarEvolution &SE; 1563 const BoUpSLP &R; 1564 1565 /// \returns the operand data at \p OpIdx and \p Lane. 
1566 OperandData &getData(unsigned OpIdx, unsigned Lane) { 1567 return OpsVec[OpIdx][Lane]; 1568 } 1569 1570 /// \returns the operand data at \p OpIdx and \p Lane. Const version. 1571 const OperandData &getData(unsigned OpIdx, unsigned Lane) const { 1572 return OpsVec[OpIdx][Lane]; 1573 } 1574 1575 /// Clears the used flag for all entries. 1576 void clearUsed() { 1577 for (unsigned OpIdx = 0, NumOperands = getNumOperands(); 1578 OpIdx != NumOperands; ++OpIdx) 1579 for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes; 1580 ++Lane) 1581 OpsVec[OpIdx][Lane].IsUsed = false; 1582 } 1583 1584 /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2. 1585 void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) { 1586 std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]); 1587 } 1588 1589 /// \param Lane lane of the operands under analysis. 1590 /// \param OpIdx operand index in \p Lane lane we're looking the best 1591 /// candidate for. 1592 /// \param Idx operand index of the current candidate value. 1593 /// \returns The additional score due to possible broadcasting of the 1594 /// elements in the lane. It is more profitable to have power-of-2 unique 1595 /// elements in the lane, it will be vectorized with higher probability 1596 /// after removing duplicates. Currently the SLP vectorizer supports only 1597 /// vectorization of the power-of-2 number of unique scalars. 1598 int getSplatScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const { 1599 Value *IdxLaneV = getData(Idx, Lane).V; 1600 if (!isa<Instruction>(IdxLaneV) || IdxLaneV == getData(OpIdx, Lane).V) 1601 return 0; 1602 SmallPtrSet<Value *, 4> Uniques; 1603 for (unsigned Ln = 0, E = getNumLanes(); Ln < E; ++Ln) { 1604 if (Ln == Lane) 1605 continue; 1606 Value *OpIdxLnV = getData(OpIdx, Ln).V; 1607 if (!isa<Instruction>(OpIdxLnV)) 1608 return 0; 1609 Uniques.insert(OpIdxLnV); 1610 } 1611 int UniquesCount = Uniques.size(); 1612 int UniquesCntWithIdxLaneV = 1613 Uniques.contains(IdxLaneV) ? UniquesCount : UniquesCount + 1; 1614 Value *OpIdxLaneV = getData(OpIdx, Lane).V; 1615 int UniquesCntWithOpIdxLaneV = 1616 Uniques.contains(OpIdxLaneV) ? UniquesCount : UniquesCount + 1; 1617 if (UniquesCntWithIdxLaneV == UniquesCntWithOpIdxLaneV) 1618 return 0; 1619 return (PowerOf2Ceil(UniquesCntWithOpIdxLaneV) - 1620 UniquesCntWithOpIdxLaneV) - 1621 (PowerOf2Ceil(UniquesCntWithIdxLaneV) - UniquesCntWithIdxLaneV); 1622 } 1623 1624 /// \param Lane lane of the operands under analysis. 1625 /// \param OpIdx operand index in \p Lane lane we're looking the best 1626 /// candidate for. 1627 /// \param Idx operand index of the current candidate value. 1628 /// \returns The additional score for the scalar which users are all 1629 /// vectorized. 1630 int getExternalUseScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const { 1631 Value *IdxLaneV = getData(Idx, Lane).V; 1632 Value *OpIdxLaneV = getData(OpIdx, Lane).V; 1633 // Do not care about number of uses for vector-like instructions 1634 // (extractelement/extractvalue with constant indices), they are extracts 1635 // themselves and already externally used. Vectorization of such 1636 // instructions does not add extra extractelement instruction, just may 1637 // remove it. 
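// For example (hypothetical IR): if both IdxLaneV and OpIdxLaneV are
// extracts with constant indices such as
//   %e = extractelement <4 x i32> %vec, i32 1
// they are vector-like and already act as extracts themselves, so both are
// treated below as if all of their users were vectorized.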
1638 if (isVectorLikeInstWithConstOps(IdxLaneV) && 1639 isVectorLikeInstWithConstOps(OpIdxLaneV)) 1640 return LookAheadHeuristics::ScoreAllUserVectorized; 1641 auto *IdxLaneI = dyn_cast<Instruction>(IdxLaneV); 1642 if (!IdxLaneI || !isa<Instruction>(OpIdxLaneV)) 1643 return 0; 1644 return R.areAllUsersVectorized(IdxLaneI) 1645 ? LookAheadHeuristics::ScoreAllUserVectorized 1646 : 0; 1647 } 1648 1649 /// Score scaling factor for fully compatible instructions but with 1650 /// different number of external uses. Allows better selection of the 1651 /// instructions with less external uses. 1652 static const int ScoreScaleFactor = 10; 1653 1654 /// \Returns the look-ahead score, which tells us how much the sub-trees 1655 /// rooted at \p LHS and \p RHS match, the more they match the higher the 1656 /// score. This helps break ties in an informed way when we cannot decide on 1657 /// the order of the operands by just considering the immediate 1658 /// predecessors. 1659 int getLookAheadScore(Value *LHS, Value *RHS, ArrayRef<Value *> MainAltOps, 1660 int Lane, unsigned OpIdx, unsigned Idx, 1661 bool &IsUsed) { 1662 LookAheadHeuristics LookAhead(TLI, DL, SE, R, getNumLanes(), 1663 LookAheadMaxDepth); 1664 // Keep track of the instruction stack as we recurse into the operands 1665 // during the look-ahead score exploration. 1666 int Score = 1667 LookAhead.getScoreAtLevelRec(LHS, RHS, /*U1=*/nullptr, /*U2=*/nullptr, 1668 /*CurrLevel=*/1, MainAltOps); 1669 if (Score) { 1670 int SplatScore = getSplatScore(Lane, OpIdx, Idx); 1671 if (Score <= -SplatScore) { 1672 // Set the minimum score for splat-like sequence to avoid setting 1673 // failed state. 1674 Score = 1; 1675 } else { 1676 Score += SplatScore; 1677 // Scale score to see the difference between different operands 1678 // and similar operands but all vectorized/not all vectorized 1679 // uses. It does not affect actual selection of the best 1680 // compatible operand in general, just allows to select the 1681 // operand with all vectorized uses. 1682 Score *= ScoreScaleFactor; 1683 Score += getExternalUseScore(Lane, OpIdx, Idx); 1684 IsUsed = true; 1685 } 1686 } 1687 return Score; 1688 } 1689 1690 /// Best defined scores per lanes between the passes. Used to choose the 1691 /// best operand (with the highest score) between the passes. 1692 /// The key - {Operand Index, Lane}. 1693 /// The value - the best score between the passes for the lane and the 1694 /// operand. 1695 SmallDenseMap<std::pair<unsigned, unsigned>, unsigned, 8> 1696 BestScoresPerLanes; 1697 1698 // Search all operands in Ops[*][Lane] for the one that matches best 1699 // Ops[OpIdx][LastLane] and return its opreand index. 1700 // If no good match can be found, return std::nullopt. 1701 std::optional<unsigned> 1702 getBestOperand(unsigned OpIdx, int Lane, int LastLane, 1703 ArrayRef<ReorderingMode> ReorderingModes, 1704 ArrayRef<Value *> MainAltOps) { 1705 unsigned NumOperands = getNumOperands(); 1706 1707 // The operand of the previous lane at OpIdx. 1708 Value *OpLastLane = getData(OpIdx, LastLane).V; 1709 1710 // Our strategy mode for OpIdx. 1711 ReorderingMode RMode = ReorderingModes[OpIdx]; 1712 if (RMode == ReorderingMode::Failed) 1713 return std::nullopt; 1714 1715 // The linearized opcode of the operand at OpIdx, Lane. 1716 bool OpIdxAPO = getData(OpIdx, Lane).APO; 1717 1718 // The best operand index and its score. 1719 // Sometimes we have more than one option (e.g., Opcode and Undefs), so we 1720 // are using the score to differentiate between the two. 
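// As a hypothetical example of that differentiation: two candidates may
// both match OpLastLane with a base look-ahead score of 2; assuming no
// splat adjustment, both are scaled to 20 by ScoreScaleFactor, and the
// candidate whose users are all vectorized gets +1 from
// getExternalUseScore() and wins with 21.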
1721 struct BestOpData { 1722 std::optional<unsigned> Idx; 1723 unsigned Score = 0; 1724 } BestOp; 1725 BestOp.Score = 1726 BestScoresPerLanes.try_emplace(std::make_pair(OpIdx, Lane), 0) 1727 .first->second; 1728 1729 // Track if the operand must be marked as used. If the operand is set to 1730 // Score 1 explicitly (because of non power-of-2 unique scalars, we may 1731 // want to reestimate the operands again on the following iterations). 1732 bool IsUsed = 1733 RMode == ReorderingMode::Splat || RMode == ReorderingMode::Constant; 1734 // Iterate through all unused operands and look for the best. 1735 for (unsigned Idx = 0; Idx != NumOperands; ++Idx) { 1736 // Get the operand at Idx and Lane. 1737 OperandData &OpData = getData(Idx, Lane); 1738 Value *Op = OpData.V; 1739 bool OpAPO = OpData.APO; 1740 1741 // Skip already selected operands. 1742 if (OpData.IsUsed) 1743 continue; 1744 1745 // Skip if we are trying to move the operand to a position with a 1746 // different opcode in the linearized tree form. This would break the 1747 // semantics. 1748 if (OpAPO != OpIdxAPO) 1749 continue; 1750 1751 // Look for an operand that matches the current mode. 1752 switch (RMode) { 1753 case ReorderingMode::Load: 1754 case ReorderingMode::Constant: 1755 case ReorderingMode::Opcode: { 1756 bool LeftToRight = Lane > LastLane; 1757 Value *OpLeft = (LeftToRight) ? OpLastLane : Op; 1758 Value *OpRight = (LeftToRight) ? Op : OpLastLane; 1759 int Score = getLookAheadScore(OpLeft, OpRight, MainAltOps, Lane, 1760 OpIdx, Idx, IsUsed); 1761 if (Score > static_cast<int>(BestOp.Score)) { 1762 BestOp.Idx = Idx; 1763 BestOp.Score = Score; 1764 BestScoresPerLanes[std::make_pair(OpIdx, Lane)] = Score; 1765 } 1766 break; 1767 } 1768 case ReorderingMode::Splat: 1769 if (Op == OpLastLane) 1770 BestOp.Idx = Idx; 1771 break; 1772 case ReorderingMode::Failed: 1773 llvm_unreachable("Not expected Failed reordering mode."); 1774 } 1775 } 1776 1777 if (BestOp.Idx) { 1778 getData(*BestOp.Idx, Lane).IsUsed = IsUsed; 1779 return BestOp.Idx; 1780 } 1781 // If we could not find a good match return std::nullopt. 1782 return std::nullopt; 1783 } 1784 1785 /// Helper for reorderOperandVecs. 1786 /// \returns the lane that we should start reordering from. This is the one 1787 /// which has the least number of operands that can freely move about or 1788 /// less profitable because it already has the most optimal set of operands. 1789 unsigned getBestLaneToStartReordering() const { 1790 unsigned Min = UINT_MAX; 1791 unsigned SameOpNumber = 0; 1792 // std::pair<unsigned, unsigned> is used to implement a simple voting 1793 // algorithm and choose the lane with the least number of operands that 1794 // can freely move about or less profitable because it already has the 1795 // most optimal set of operands. The first unsigned is a counter for 1796 // voting, the second unsigned is the counter of lanes with instructions 1797 // with same/alternate opcodes and same parent basic block. 1798 MapVector<unsigned, std::pair<unsigned, unsigned>> HashMap; 1799 // Try to be closer to the original results, if we have multiple lanes 1800 // with same cost. If 2 lanes have the same cost, use the one with the 1801 // lowest index. 1802 for (int I = getNumLanes(); I > 0; --I) { 1803 unsigned Lane = I - 1; 1804 OperandsOrderData NumFreeOpsHash = 1805 getMaxNumOperandsThatCanBeReordered(Lane); 1806 // Compare the number of operands that can move and choose the one with 1807 // the least number. 
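// Worked example (hypothetical lanes, mirroring the add/sub example in
// reorder() below): in an 'add' lane both operands have APO == false, so
// NumOfAPOs = max(0, 2) = 2; in a 'sub' lane the APOs are {false, true},
// so NumOfAPOs = max(1, 1) = 1. The 'sub' lane has the smaller value and
// is therefore preferred as the starting lane.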
1808 if (NumFreeOpsHash.NumOfAPOs < Min) { 1809 Min = NumFreeOpsHash.NumOfAPOs; 1810 SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent; 1811 HashMap.clear(); 1812 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane); 1813 } else if (NumFreeOpsHash.NumOfAPOs == Min && 1814 NumFreeOpsHash.NumOpsWithSameOpcodeParent < SameOpNumber) { 1815 // Select the most optimal lane in terms of number of operands that 1816 // should be moved around. 1817 SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent; 1818 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane); 1819 } else if (NumFreeOpsHash.NumOfAPOs == Min && 1820 NumFreeOpsHash.NumOpsWithSameOpcodeParent == SameOpNumber) { 1821 auto *It = HashMap.find(NumFreeOpsHash.Hash); 1822 if (It == HashMap.end()) 1823 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane); 1824 else 1825 ++It->second.first; 1826 } 1827 } 1828 // Select the lane with the minimum counter. 1829 unsigned BestLane = 0; 1830 unsigned CntMin = UINT_MAX; 1831 for (const auto &Data : reverse(HashMap)) { 1832 if (Data.second.first < CntMin) { 1833 CntMin = Data.second.first; 1834 BestLane = Data.second.second; 1835 } 1836 } 1837 return BestLane; 1838 } 1839 1840 /// Data structure that helps to reorder operands. 1841 struct OperandsOrderData { 1842 /// The best number of operands with the same APOs, which can be 1843 /// reordered. 1844 unsigned NumOfAPOs = UINT_MAX; 1845 /// Number of operands with the same/alternate instruction opcode and 1846 /// parent. 1847 unsigned NumOpsWithSameOpcodeParent = 0; 1848 /// Hash for the actual operands ordering. 1849 /// Used to count operands, actually their position id and opcode 1850 /// value. It is used in the voting mechanism to find the lane with the 1851 /// least number of operands that can freely move about or less profitable 1852 /// because it already has the most optimal set of operands. Can be 1853 /// replaced with SmallVector<unsigned> instead but hash code is faster 1854 /// and requires less memory. 1855 unsigned Hash = 0; 1856 }; 1857 /// \returns the maximum number of operands that are allowed to be reordered 1858 /// for \p Lane and the number of compatible instructions(with the same 1859 /// parent/opcode). This is used as a heuristic for selecting the first lane 1860 /// to start operand reordering. 1861 OperandsOrderData getMaxNumOperandsThatCanBeReordered(unsigned Lane) const { 1862 unsigned CntTrue = 0; 1863 unsigned NumOperands = getNumOperands(); 1864 // Operands with the same APO can be reordered. We therefore need to count 1865 // how many of them we have for each APO, like this: Cnt[APO] = x. 1866 // Since we only have two APOs, namely true and false, we can avoid using 1867 // a map. Instead we can simply count the number of operands that 1868 // correspond to one of them (in this case the 'true' APO), and calculate 1869 // the other by subtracting it from the total number of operands. 1870 // Operands with the same instruction opcode and parent are more 1871 // profitable since we don't need to move them in many cases, with a high 1872 // probability such lane already can be vectorized effectively. 
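// For illustration (hypothetical lane with operands {mul, mul, load, mul}
// from one block): the majority-vote counter below goes 1, 2, 1, 2, so
// NumOpsWithSameOpcodeParent ends up as 2 for the dominant 'mul' opcode,
// while a lane of four 'mul's from the same block would end at 4.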
1873 bool AllUndefs = true; 1874 unsigned NumOpsWithSameOpcodeParent = 0; 1875 Instruction *OpcodeI = nullptr; 1876 BasicBlock *Parent = nullptr; 1877 unsigned Hash = 0; 1878 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 1879 const OperandData &OpData = getData(OpIdx, Lane); 1880 if (OpData.APO) 1881 ++CntTrue; 1882 // Use Boyer-Moore majority voting for finding the majority opcode and 1883 // the number of times it occurs. 1884 if (auto *I = dyn_cast<Instruction>(OpData.V)) { 1885 if (!OpcodeI || !getSameOpcode({OpcodeI, I}, TLI).getOpcode() || 1886 I->getParent() != Parent) { 1887 if (NumOpsWithSameOpcodeParent == 0) { 1888 NumOpsWithSameOpcodeParent = 1; 1889 OpcodeI = I; 1890 Parent = I->getParent(); 1891 } else { 1892 --NumOpsWithSameOpcodeParent; 1893 } 1894 } else { 1895 ++NumOpsWithSameOpcodeParent; 1896 } 1897 } 1898 Hash = hash_combine( 1899 Hash, hash_value((OpIdx + 1) * (OpData.V->getValueID() + 1))); 1900 AllUndefs = AllUndefs && isa<UndefValue>(OpData.V); 1901 } 1902 if (AllUndefs) 1903 return {}; 1904 OperandsOrderData Data; 1905 Data.NumOfAPOs = std::max(CntTrue, NumOperands - CntTrue); 1906 Data.NumOpsWithSameOpcodeParent = NumOpsWithSameOpcodeParent; 1907 Data.Hash = Hash; 1908 return Data; 1909 } 1910 1911 /// Go through the instructions in VL and append their operands. 1912 void appendOperandsOfVL(ArrayRef<Value *> VL) { 1913 assert(!VL.empty() && "Bad VL"); 1914 assert((empty() || VL.size() == getNumLanes()) && 1915 "Expected same number of lanes"); 1916 assert(isa<Instruction>(VL[0]) && "Expected instruction"); 1917 unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands(); 1918 OpsVec.resize(NumOperands); 1919 unsigned NumLanes = VL.size(); 1920 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 1921 OpsVec[OpIdx].resize(NumLanes); 1922 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) { 1923 assert(isa<Instruction>(VL[Lane]) && "Expected instruction"); 1924 // Our tree has just 3 nodes: the root and two operands. 1925 // It is therefore trivial to get the APO. We only need to check the 1926 // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or 1927 // RHS operand. The LHS operand of both add and sub is never attached 1928 // to an inversese operation in the linearized form, therefore its APO 1929 // is false. The RHS is true only if VL[Lane] is an inverse operation. 1930 1931 // Since operand reordering is performed on groups of commutative 1932 // operations or alternating sequences (e.g., +, -), we can safely 1933 // tell the inverse operations by checking commutativity. 1934 bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane])); 1935 bool APO = (OpIdx == 0) ? false : IsInverseOperation; 1936 OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx), 1937 APO, false}; 1938 } 1939 } 1940 } 1941 1942 /// \returns the number of operands. 1943 unsigned getNumOperands() const { return OpsVec.size(); } 1944 1945 /// \returns the number of lanes. 1946 unsigned getNumLanes() const { return OpsVec[0].size(); } 1947 1948 /// \returns the operand value at \p OpIdx and \p Lane. 1949 Value *getValue(unsigned OpIdx, unsigned Lane) const { 1950 return getData(OpIdx, Lane).V; 1951 } 1952 1953 /// \returns true if the data structure is empty. 1954 bool empty() const { return OpsVec.empty(); } 1955 1956 /// Clears the data. 1957 void clear() { OpsVec.clear(); } 1958 1959 /// \Returns true if there are enough operands identical to \p Op to fill 1960 /// the whole vector. 
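/// For example (hypothetical operands, assuming they all share the same
/// APO): with two operands per lane and lanes holding (x, a), (b, x) and
/// (x, c), the value x can be found in every lane, so broadcasting x is
/// possible; if the last lane held (b, c) instead, it would not be.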
1961 /// Note: This modifies the 'IsUsed' flag, so a clearUsed() must follow.
1962 bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
1963 bool OpAPO = getData(OpIdx, Lane).APO;
1964 for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
1965 if (Ln == Lane)
1966 continue;
1967 // This is set to true if we found a candidate for broadcast in lane Ln.
1968 bool FoundCandidate = false;
1969 for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
1970 OperandData &Data = getData(OpI, Ln);
1971 if (Data.APO != OpAPO || Data.IsUsed)
1972 continue;
1973 if (Data.V == Op) {
1974 FoundCandidate = true;
1975 Data.IsUsed = true;
1976 break;
1977 }
1978 }
1979 if (!FoundCandidate)
1980 return false;
1981 }
1982 return true;
1983 }
1984
1985 public:
1986 /// Initialize with all the operands of the instruction vector \p RootVL.
1987 VLOperands(ArrayRef<Value *> RootVL, const TargetLibraryInfo &TLI,
1988 const DataLayout &DL, ScalarEvolution &SE, const BoUpSLP &R)
1989 : TLI(TLI), DL(DL), SE(SE), R(R) {
1990 // Append all the operands of RootVL.
1991 appendOperandsOfVL(RootVL);
1992 }
1993
1994 /// \returns a value vector with the operands across all lanes for the
1995 /// operand at \p OpIdx.
1996 ValueList getVL(unsigned OpIdx) const {
1997 ValueList OpVL(OpsVec[OpIdx].size());
1998 assert(OpsVec[OpIdx].size() == getNumLanes() &&
1999 "Expected same num of lanes across all operands");
2000 for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
2001 OpVL[Lane] = OpsVec[OpIdx][Lane].V;
2002 return OpVL;
2003 }
2004
2005 // Performs operand reordering for 2 or more operands.
2006 // The operands are reordered in place, within OpsVec[OpIdx][Lane], so that
2007 // the operand at each index matches across lanes as well as possible.
2008 void reorder() {
2009 unsigned NumOperands = getNumOperands();
2010 unsigned NumLanes = getNumLanes();
2011 // Each operand has its own mode. We are using this mode to help us select
2012 // the instructions for each lane, so that they match best with the ones
2013 // we have selected so far.
2014 SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);
2015
2016 // This is a greedy single-pass algorithm. We are going over each lane
2017 // once and deciding on the best order right away with no back-tracking.
2018 // However, in order to increase its effectiveness, we start with the lane
2019 // that has operands that can move the least. For example, given the
2020 // following lanes:
2021 // Lane 0 : A[0] = B[0] + C[0] // Visited 3rd
2022 // Lane 1 : A[1] = C[1] - B[1] // Visited 1st
2023 // Lane 2 : A[2] = B[2] + C[2] // Visited 2nd
2024 // Lane 3 : A[3] = C[3] - B[3] // Visited 4th
2025 // we will start at Lane 1, since the operands of the subtraction cannot
2026 // be reordered. Then we will visit the rest of the lanes in a circular
2027 // fashion. That is, Lane 2, then Lane 0, and finally Lane 3.
2028
2029 // Find the first lane that we will start our search from.
2030 unsigned FirstLane = getBestLaneToStartReordering();
2031
2032 // Initialize the modes.
2033 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
2034 Value *OpLane0 = getValue(OpIdx, FirstLane);
2035 // Keep track if we have instructions with all the same opcode on one
2036 // side.
2037 if (isa<LoadInst>(OpLane0))
2038 ReorderingModes[OpIdx] = ReorderingMode::Load;
2039 else if (isa<Instruction>(OpLane0)) {
2040 // Check if OpLane0 should be broadcast.
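// E.g. (hypothetical lanes x+a, x+b, x+c, x+d): the value x on its own can
// fill the whole operand vector, so Splat mode is chosen and that operand
// may later be emitted as a single broadcast of x.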
2041 if (shouldBroadcast(OpLane0, OpIdx, FirstLane))
2042 ReorderingModes[OpIdx] = ReorderingMode::Splat;
2043 else
2044 ReorderingModes[OpIdx] = ReorderingMode::Opcode;
2045 }
2046 else if (isa<Constant>(OpLane0))
2047 ReorderingModes[OpIdx] = ReorderingMode::Constant;
2048 else if (isa<Argument>(OpLane0))
2049 // Our best hope is a Splat. It may save some cost in some cases.
2050 ReorderingModes[OpIdx] = ReorderingMode::Splat;
2051 else
2052 // NOTE: This should be unreachable.
2053 ReorderingModes[OpIdx] = ReorderingMode::Failed;
2054 }
2055
2056 // Check that we don't have the same operands. There is no need to reorder
2057 // if the operands are just a perfect or shuffled diamond match. For now, do
2058 // not skip this reordering for possible broadcasts or for a non-power-of-2
2059 // number of scalars.
2060 auto &&SkipReordering = [this]() {
2061 SmallPtrSet<Value *, 4> UniqueValues;
2062 ArrayRef<OperandData> Op0 = OpsVec.front();
2063 for (const OperandData &Data : Op0)
2064 UniqueValues.insert(Data.V);
2065 for (ArrayRef<OperandData> Op : drop_begin(OpsVec, 1)) {
2066 if (any_of(Op, [&UniqueValues](const OperandData &Data) {
2067 return !UniqueValues.contains(Data.V);
2068 }))
2069 return false;
2070 }
2071 // TODO: Check if we can remove a check for non-power-2 number of
2072 // scalars after full support of non-power-2 vectorization.
2073 return UniqueValues.size() != 2 && isPowerOf2_32(UniqueValues.size());
2074 };
2075
2076 // If the initial strategy fails for any of the operand indexes, then we
2077 // perform reordering again in a second pass. This helps avoid assigning
2078 // high priority to the failed strategy, and should improve reordering for
2079 // the non-failed operand indexes.
2080 for (int Pass = 0; Pass != 2; ++Pass) {
2081 // Check if there is no need to reorder the operands because they form a
2082 // perfect or shuffled diamond match.
2083 // We need to do this to avoid counting extra external-use cost for
2084 // shuffled matches, which may cause regressions.
2085 if (SkipReordering())
2086 break;
2087 // Skip the second pass if the first pass did not fail.
2088 bool StrategyFailed = false;
2089 // Mark all operand data as free to use.
2090 clearUsed();
2091 // We keep the original operand order for the FirstLane, so reorder the
2092 // rest of the lanes. We are visiting the nodes in a circular fashion,
2093 // using FirstLane as the center point and increasing the radius
2094 // distance.
2095 SmallVector<SmallVector<Value *, 2>> MainAltOps(NumOperands);
2096 for (unsigned I = 0; I < NumOperands; ++I)
2097 MainAltOps[I].push_back(getData(I, FirstLane).V);
2098
2099 for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
2100 // Visit the lane on the right and then the lane on the left.
2101 for (int Direction : {+1, -1}) {
2102 int Lane = FirstLane + Direction * Distance;
2103 if (Lane < 0 || Lane >= (int)NumLanes)
2104 continue;
2105 int LastLane = Lane - Direction;
2106 assert(LastLane >= 0 && LastLane < (int)NumLanes &&
2107 "Out of bounds");
2108 // Look for a good match for each operand.
2109 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
2110 // Search for the operand that best matches the one already selected for this OpIdx in the neighboring lane (OpsVec[OpIdx][LastLane]).
2111 std::optional<unsigned> BestIdx = getBestOperand(
2112 OpIdx, Lane, LastLane, ReorderingModes, MainAltOps[OpIdx]);
2113 // By not selecting a value, we allow the operands that follow to
2114 // select a better matching value. We will get a non-null value in
2115 // the next run of getBestOperand().
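// As a hypothetical end-to-end illustration of this loop: starting from
//   Lane 0: (B[0], C[0])   Lane 1: (C[1], B[1])
// with FirstLane == 0, getBestOperand() for Lane 1 picks B[1] for operand
// 0, the swap below exchanges it with C[1], and the final operand vectors
// become {B[0], B[1]} and {C[0], C[1]}, i.e. two runs of consecutive loads.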
2116 if (BestIdx) { 2117 // Swap the current operand with the one returned by 2118 // getBestOperand(). 2119 swap(OpIdx, *BestIdx, Lane); 2120 } else { 2121 // We failed to find a best operand, set mode to 'Failed'. 2122 ReorderingModes[OpIdx] = ReorderingMode::Failed; 2123 // Enable the second pass. 2124 StrategyFailed = true; 2125 } 2126 // Try to get the alternate opcode and follow it during analysis. 2127 if (MainAltOps[OpIdx].size() != 2) { 2128 OperandData &AltOp = getData(OpIdx, Lane); 2129 InstructionsState OpS = 2130 getSameOpcode({MainAltOps[OpIdx].front(), AltOp.V}, TLI); 2131 if (OpS.getOpcode() && OpS.isAltShuffle()) 2132 MainAltOps[OpIdx].push_back(AltOp.V); 2133 } 2134 } 2135 } 2136 } 2137 // Skip second pass if the strategy did not fail. 2138 if (!StrategyFailed) 2139 break; 2140 } 2141 } 2142 2143 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2144 LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) { 2145 switch (RMode) { 2146 case ReorderingMode::Load: 2147 return "Load"; 2148 case ReorderingMode::Opcode: 2149 return "Opcode"; 2150 case ReorderingMode::Constant: 2151 return "Constant"; 2152 case ReorderingMode::Splat: 2153 return "Splat"; 2154 case ReorderingMode::Failed: 2155 return "Failed"; 2156 } 2157 llvm_unreachable("Unimplemented Reordering Type"); 2158 } 2159 2160 LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode, 2161 raw_ostream &OS) { 2162 return OS << getModeStr(RMode); 2163 } 2164 2165 /// Debug print. 2166 LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) { 2167 printMode(RMode, dbgs()); 2168 } 2169 2170 friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) { 2171 return printMode(RMode, OS); 2172 } 2173 2174 LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const { 2175 const unsigned Indent = 2; 2176 unsigned Cnt = 0; 2177 for (const OperandDataVec &OpDataVec : OpsVec) { 2178 OS << "Operand " << Cnt++ << "\n"; 2179 for (const OperandData &OpData : OpDataVec) { 2180 OS.indent(Indent) << "{"; 2181 if (Value *V = OpData.V) 2182 OS << *V; 2183 else 2184 OS << "null"; 2185 OS << ", APO:" << OpData.APO << "}\n"; 2186 } 2187 OS << "\n"; 2188 } 2189 return OS; 2190 } 2191 2192 /// Debug print. 2193 LLVM_DUMP_METHOD void dump() const { print(dbgs()); } 2194 #endif 2195 }; 2196 2197 /// Evaluate each pair in \p Candidates and return index into \p Candidates 2198 /// for a pair which have highest score deemed to have best chance to form 2199 /// root of profitable tree to vectorize. Return std::nullopt if no candidate 2200 /// scored above the LookAheadHeuristics::ScoreFail. \param Limit Lower limit 2201 /// of the cost, considered to be good enough score. 2202 std::optional<int> 2203 findBestRootPair(ArrayRef<std::pair<Value *, Value *>> Candidates, 2204 int Limit = LookAheadHeuristics::ScoreFail) { 2205 LookAheadHeuristics LookAhead(*TLI, *DL, *SE, *this, /*NumLanes=*/2, 2206 RootLookAheadMaxDepth); 2207 int BestScore = Limit; 2208 std::optional<int> Index; 2209 for (int I : seq<int>(0, Candidates.size())) { 2210 int Score = LookAhead.getScoreAtLevelRec(Candidates[I].first, 2211 Candidates[I].second, 2212 /*U1=*/nullptr, /*U2=*/nullptr, 2213 /*Level=*/1, std::nullopt); 2214 if (Score > BestScore) { 2215 BestScore = Score; 2216 Index = I; 2217 } 2218 } 2219 return Index; 2220 } 2221 2222 /// Checks if the instruction is marked for deletion. 2223 bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); } 2224 2225 /// Removes an instruction from its block and eventually deletes it. 
2226 /// It's like Instruction::eraseFromParent() except that the actual deletion
2227 /// is delayed until BoUpSLP is destructed.
2228 void eraseInstruction(Instruction *I) {
2229 DeletedInstructions.insert(I);
2230 }
2231
2232 /// Checks if the instruction was already analyzed for being possible
2233 /// reduction root.
2234 bool isAnalyzedReductionRoot(Instruction *I) const {
2235 return AnalyzedReductionsRoots.count(I);
2236 }
2237 /// Register given instruction as already analyzed for being possible
2238 /// reduction root.
2239 void analyzedReductionRoot(Instruction *I) {
2240 AnalyzedReductionsRoots.insert(I);
2241 }
2242 /// Checks if the provided list of reduced values was checked already for
2243 /// vectorization.
2244 bool areAnalyzedReductionVals(ArrayRef<Value *> VL) const {
2245 return AnalyzedReductionVals.contains(hash_value(VL));
2246 }
2247 /// Adds the list of reduced values to the list of values already checked for
2248 /// vectorization.
2249 void analyzedReductionVals(ArrayRef<Value *> VL) {
2250 AnalyzedReductionVals.insert(hash_value(VL));
2251 }
2252 /// Clear the list of the analyzed reduction root instructions.
2253 void clearReductionData() {
2254 AnalyzedReductionsRoots.clear();
2255 AnalyzedReductionVals.clear();
2256 }
2257 /// Checks if the given value is gathered in one of the nodes.
2258 bool isAnyGathered(const SmallDenseSet<Value *> &Vals) const {
2259 return any_of(MustGather, [&](Value *V) { return Vals.contains(V); });
2260 }
2261
2262 /// Check if the value is vectorized in the tree.
2263 bool isVectorized(Value *V) const { return getTreeEntry(V); }
2264
2265 ~BoUpSLP();
2266
2267 private:
2268 /// Determine if a vectorized value \p V can be demoted to
2269 /// a smaller type with a truncation. We collect the values that will be
2270 /// demoted in ToDemote and additional roots that require investigating in
2271 /// Roots.
2272 /// \param DemotedConsts list of Instruction/OperandIndex pairs that are
2273 /// constant and to be demoted. Required to correctly identify constant nodes
2274 /// to be demoted.
2275 bool collectValuesToDemote(
2276 Value *V, SmallVectorImpl<Value *> &ToDemote,
2277 DenseMap<Instruction *, SmallVector<unsigned>> &DemotedConsts,
2278 SmallVectorImpl<Value *> &Roots, DenseSet<Value *> &Visited) const;
2279
2280 /// Check if the operands on the edges \p Edges of the \p UserTE allow
2281 /// reordering (i.e. the operands can be reordered because they have only one
2282 /// user and are reorderable).
2283 /// \param ReorderableGathers List of all gather nodes that require reordering
2284 /// (e.g., gather of extractelements or partially vectorizable loads).
2285 /// \param GatherOps List of gather operand nodes for \p UserTE that require
2286 /// reordering, subset of \p NonVectorized.
2287 bool
2288 canReorderOperands(TreeEntry *UserTE,
2289 SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges,
2290 ArrayRef<TreeEntry *> ReorderableGathers,
2291 SmallVectorImpl<TreeEntry *> &GatherOps);
2292
2293 /// Checks if the given \p TE is a gather node with clustered reused scalars
2294 /// and reorders it per given \p Mask.
2295 void reorderNodeWithReuses(TreeEntry &TE, ArrayRef<int> Mask) const;
2296
2297 /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
2298 /// if any. If it is not vectorized (gather node), returns nullptr.
2299 TreeEntry *getVectorizedOperand(TreeEntry *UserTE, unsigned OpIdx) { 2300 ArrayRef<Value *> VL = UserTE->getOperand(OpIdx); 2301 TreeEntry *TE = nullptr; 2302 const auto *It = find_if(VL, [&](Value *V) { 2303 TE = getTreeEntry(V); 2304 if (TE && is_contained(TE->UserTreeIndices, EdgeInfo(UserTE, OpIdx))) 2305 return true; 2306 auto It = MultiNodeScalars.find(V); 2307 if (It != MultiNodeScalars.end()) { 2308 for (TreeEntry *E : It->second) { 2309 if (is_contained(E->UserTreeIndices, EdgeInfo(UserTE, OpIdx))) { 2310 TE = E; 2311 return true; 2312 } 2313 } 2314 } 2315 return false; 2316 }); 2317 if (It != VL.end()) { 2318 assert(TE->isSame(VL) && "Expected same scalars."); 2319 return TE; 2320 } 2321 return nullptr; 2322 } 2323 2324 /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph, 2325 /// if any. If it is not vectorized (gather node), returns nullptr. 2326 const TreeEntry *getVectorizedOperand(const TreeEntry *UserTE, 2327 unsigned OpIdx) const { 2328 return const_cast<BoUpSLP *>(this)->getVectorizedOperand( 2329 const_cast<TreeEntry *>(UserTE), OpIdx); 2330 } 2331 2332 /// Checks if all users of \p I are the part of the vectorization tree. 2333 bool areAllUsersVectorized( 2334 Instruction *I, 2335 const SmallDenseSet<Value *> *VectorizedVals = nullptr) const; 2336 2337 /// Return information about the vector formed for the specified index 2338 /// of a vector of (the same) instruction. 2339 TargetTransformInfo::OperandValueInfo getOperandInfo(ArrayRef<Value *> Ops); 2340 2341 /// \ returns the graph entry for the \p Idx operand of the \p E entry. 2342 const TreeEntry *getOperandEntry(const TreeEntry *E, unsigned Idx) const; 2343 2344 /// \returns the cost of the vectorizable entry. 2345 InstructionCost getEntryCost(const TreeEntry *E, 2346 ArrayRef<Value *> VectorizedVals, 2347 SmallPtrSetImpl<Value *> &CheckedExtracts); 2348 2349 /// This is the recursive part of buildTree. 2350 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth, 2351 const EdgeInfo &EI); 2352 2353 /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can 2354 /// be vectorized to use the original vector (or aggregate "bitcast" to a 2355 /// vector) and sets \p CurrentOrder to the identity permutation; otherwise 2356 /// returns false, setting \p CurrentOrder to either an empty vector or a 2357 /// non-identity permutation that allows to reuse extract instructions. 2358 /// \param ResizeAllowed indicates whether it is allowed to handle subvector 2359 /// extract order. 2360 bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 2361 SmallVectorImpl<unsigned> &CurrentOrder, 2362 bool ResizeAllowed = false) const; 2363 2364 /// Vectorize a single entry in the tree. 2365 /// \param PostponedPHIs true, if need to postpone emission of phi nodes to 2366 /// avoid issues with def-use order. 2367 Value *vectorizeTree(TreeEntry *E, bool PostponedPHIs); 2368 2369 /// Vectorize a single entry in the tree, the \p Idx-th operand of the entry 2370 /// \p E. 2371 /// \param PostponedPHIs true, if need to postpone emission of phi nodes to 2372 /// avoid issues with def-use order. 2373 Value *vectorizeOperand(TreeEntry *E, unsigned NodeIdx, bool PostponedPHIs); 2374 2375 /// Create a new vector from a list of scalar values. Produces a sequence 2376 /// which exploits values reused across lanes, and arranges the inserts 2377 /// for ease of later optimization. 2378 template <typename BVTy, typename ResTy, typename... 
Args> 2379 ResTy processBuildVector(const TreeEntry *E, Args &...Params); 2380 2381 /// Create a new vector from a list of scalar values. Produces a sequence 2382 /// which exploits values reused across lanes, and arranges the inserts 2383 /// for ease of later optimization. 2384 Value *createBuildVector(const TreeEntry *E); 2385 2386 /// Returns the instruction in the bundle, which can be used as a base point 2387 /// for scheduling. Usually it is the last instruction in the bundle, except 2388 /// for the case when all operands are external (in this case, it is the first 2389 /// instruction in the list). 2390 Instruction &getLastInstructionInBundle(const TreeEntry *E); 2391 2392 /// Tries to find extractelement instructions with constant indices from fixed 2393 /// vector type and gather such instructions into a bunch, which highly likely 2394 /// might be detected as a shuffle of 1 or 2 input vectors. If this attempt 2395 /// was successful, the matched scalars are replaced by poison values in \p VL 2396 /// for future analysis. 2397 std::optional<TargetTransformInfo::ShuffleKind> 2398 tryToGatherSingleRegisterExtractElements(MutableArrayRef<Value *> VL, 2399 SmallVectorImpl<int> &Mask) const; 2400 2401 /// Tries to find extractelement instructions with constant indices from fixed 2402 /// vector type and gather such instructions into a bunch, which highly likely 2403 /// might be detected as a shuffle of 1 or 2 input vectors. If this attempt 2404 /// was successful, the matched scalars are replaced by poison values in \p VL 2405 /// for future analysis. 2406 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> 2407 tryToGatherExtractElements(SmallVectorImpl<Value *> &VL, 2408 SmallVectorImpl<int> &Mask, 2409 unsigned NumParts) const; 2410 2411 /// Checks if the gathered \p VL can be represented as a single register 2412 /// shuffle(s) of previous tree entries. 2413 /// \param TE Tree entry checked for permutation. 2414 /// \param VL List of scalars (a subset of the TE scalar), checked for 2415 /// permutations. Must form single-register vector. 2416 /// \returns ShuffleKind, if gathered values can be represented as shuffles of 2417 /// previous tree entries. \p Part of \p Mask is filled with the shuffle mask. 2418 std::optional<TargetTransformInfo::ShuffleKind> 2419 isGatherShuffledSingleRegisterEntry( 2420 const TreeEntry *TE, ArrayRef<Value *> VL, MutableArrayRef<int> Mask, 2421 SmallVectorImpl<const TreeEntry *> &Entries, unsigned Part); 2422 2423 /// Checks if the gathered \p VL can be represented as multi-register 2424 /// shuffle(s) of previous tree entries. 2425 /// \param TE Tree entry checked for permutation. 2426 /// \param VL List of scalars (a subset of the TE scalar), checked for 2427 /// permutations. 2428 /// \returns per-register series of ShuffleKind, if gathered values can be 2429 /// represented as shuffles of previous tree entries. \p Mask is filled with 2430 /// the shuffle mask (also on per-register base). 2431 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> 2432 isGatherShuffledEntry( 2433 const TreeEntry *TE, ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask, 2434 SmallVectorImpl<SmallVector<const TreeEntry *>> &Entries, 2435 unsigned NumParts); 2436 2437 /// \returns the scalarization cost for this list of values. Assuming that 2438 /// this subtree gets vectorized, we may need to extract the values from the 2439 /// roots. This method calculates the cost of extracting the values. 
2440 /// \param ForPoisonSrc true if initial vector is poison, false otherwise. 2441 InstructionCost getGatherCost(ArrayRef<Value *> VL, bool ForPoisonSrc) const; 2442 2443 /// Set the Builder insert point to one after the last instruction in 2444 /// the bundle 2445 void setInsertPointAfterBundle(const TreeEntry *E); 2446 2447 /// \returns a vector from a collection of scalars in \p VL. if \p Root is not 2448 /// specified, the starting vector value is poison. 2449 Value *gather(ArrayRef<Value *> VL, Value *Root); 2450 2451 /// \returns whether the VectorizableTree is fully vectorizable and will 2452 /// be beneficial even the tree height is tiny. 2453 bool isFullyVectorizableTinyTree(bool ForReduction) const; 2454 2455 /// Reorder commutative or alt operands to get better probability of 2456 /// generating vectorized code. 2457 static void reorderInputsAccordingToOpcode( 2458 ArrayRef<Value *> VL, SmallVectorImpl<Value *> &Left, 2459 SmallVectorImpl<Value *> &Right, const TargetLibraryInfo &TLI, 2460 const DataLayout &DL, ScalarEvolution &SE, const BoUpSLP &R); 2461 2462 /// Helper for `findExternalStoreUsersReorderIndices()`. It iterates over the 2463 /// users of \p TE and collects the stores. It returns the map from the store 2464 /// pointers to the collected stores. 2465 DenseMap<Value *, SmallVector<StoreInst *>> 2466 collectUserStores(const BoUpSLP::TreeEntry *TE) const; 2467 2468 /// Helper for `findExternalStoreUsersReorderIndices()`. It checks if the 2469 /// stores in \p StoresVec can form a vector instruction. If so it returns 2470 /// true and populates \p ReorderIndices with the shuffle indices of the 2471 /// stores when compared to the sorted vector. 2472 bool canFormVector(ArrayRef<StoreInst *> StoresVec, 2473 OrdersType &ReorderIndices) const; 2474 2475 /// Iterates through the users of \p TE, looking for scalar stores that can be 2476 /// potentially vectorized in a future SLP-tree. If found, it keeps track of 2477 /// their order and builds an order index vector for each store bundle. It 2478 /// returns all these order vectors found. 2479 /// We run this after the tree has formed, otherwise we may come across user 2480 /// instructions that are not yet in the tree. 2481 SmallVector<OrdersType, 1> 2482 findExternalStoreUsersReorderIndices(TreeEntry *TE) const; 2483 2484 struct TreeEntry { 2485 using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>; 2486 TreeEntry(VecTreeTy &Container) : Container(Container) {} 2487 2488 /// \returns Common mask for reorder indices and reused scalars. 2489 SmallVector<int> getCommonMask() const { 2490 SmallVector<int> Mask; 2491 inversePermutation(ReorderIndices, Mask); 2492 ::addMask(Mask, ReuseShuffleIndices); 2493 return Mask; 2494 } 2495 2496 /// \returns true if the scalars in VL are equal to this entry. 2497 bool isSame(ArrayRef<Value *> VL) const { 2498 auto &&IsSame = [VL](ArrayRef<Value *> Scalars, ArrayRef<int> Mask) { 2499 if (Mask.size() != VL.size() && VL.size() == Scalars.size()) 2500 return std::equal(VL.begin(), VL.end(), Scalars.begin()); 2501 return VL.size() == Mask.size() && 2502 std::equal(VL.begin(), VL.end(), Mask.begin(), 2503 [Scalars](Value *V, int Idx) { 2504 return (isa<UndefValue>(V) && 2505 Idx == PoisonMaskElem) || 2506 (Idx != PoisonMaskElem && V == Scalars[Idx]); 2507 }); 2508 }; 2509 if (!ReorderIndices.empty()) { 2510 // TODO: implement matching if the nodes are just reordered, still can 2511 // treat the vector as the same if the list of scalars matches VL 2512 // directly, without reordering. 
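// Worked example (hypothetical scalars): with Scalars == {a, b, c, d} and
// ReorderIndices == {1, 0, 3, 2}, the inverse permutation below yields
// Mask == {1, 0, 3, 2}, so a VL of {b, a, d, c} is accepted because each
// VL[I] equals Scalars[Mask[I]].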
2513 SmallVector<int> Mask; 2514 inversePermutation(ReorderIndices, Mask); 2515 if (VL.size() == Scalars.size()) 2516 return IsSame(Scalars, Mask); 2517 if (VL.size() == ReuseShuffleIndices.size()) { 2518 ::addMask(Mask, ReuseShuffleIndices); 2519 return IsSame(Scalars, Mask); 2520 } 2521 return false; 2522 } 2523 return IsSame(Scalars, ReuseShuffleIndices); 2524 } 2525 2526 bool isOperandGatherNode(const EdgeInfo &UserEI) const { 2527 return State == TreeEntry::NeedToGather && 2528 UserTreeIndices.front().EdgeIdx == UserEI.EdgeIdx && 2529 UserTreeIndices.front().UserTE == UserEI.UserTE; 2530 } 2531 2532 /// \returns true if current entry has same operands as \p TE. 2533 bool hasEqualOperands(const TreeEntry &TE) const { 2534 if (TE.getNumOperands() != getNumOperands()) 2535 return false; 2536 SmallBitVector Used(getNumOperands()); 2537 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) { 2538 unsigned PrevCount = Used.count(); 2539 for (unsigned K = 0; K < E; ++K) { 2540 if (Used.test(K)) 2541 continue; 2542 if (getOperand(K) == TE.getOperand(I)) { 2543 Used.set(K); 2544 break; 2545 } 2546 } 2547 // Check if we actually found the matching operand. 2548 if (PrevCount == Used.count()) 2549 return false; 2550 } 2551 return true; 2552 } 2553 2554 /// \return Final vectorization factor for the node. Defined by the total 2555 /// number of vectorized scalars, including those, used several times in the 2556 /// entry and counted in the \a ReuseShuffleIndices, if any. 2557 unsigned getVectorFactor() const { 2558 if (!ReuseShuffleIndices.empty()) 2559 return ReuseShuffleIndices.size(); 2560 return Scalars.size(); 2561 }; 2562 2563 /// A vector of scalars. 2564 ValueList Scalars; 2565 2566 /// The Scalars are vectorized into this value. It is initialized to Null. 2567 WeakTrackingVH VectorizedValue = nullptr; 2568 2569 /// New vector phi instructions emitted for the vectorized phi nodes. 2570 PHINode *PHI = nullptr; 2571 2572 /// Do we need to gather this sequence or vectorize it 2573 /// (either with vector instruction or with scatter/gather 2574 /// intrinsics for store/load)? 2575 enum EntryState { 2576 Vectorize, 2577 ScatterVectorize, 2578 PossibleStridedVectorize, 2579 NeedToGather 2580 }; 2581 EntryState State; 2582 2583 /// Does this sequence require some shuffling? 2584 SmallVector<int, 4> ReuseShuffleIndices; 2585 2586 /// Does this entry require reordering? 2587 SmallVector<unsigned, 4> ReorderIndices; 2588 2589 /// Points back to the VectorizableTree. 2590 /// 2591 /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has 2592 /// to be a pointer and needs to be able to initialize the child iterator. 2593 /// Thus we need a reference back to the container to translate the indices 2594 /// to entries. 2595 VecTreeTy &Container; 2596 2597 /// The TreeEntry index containing the user of this entry. We can actually 2598 /// have multiple users so the data structure is not truly a tree. 2599 SmallVector<EdgeInfo, 1> UserTreeIndices; 2600 2601 /// The index of this treeEntry in VectorizableTree. 2602 int Idx = -1; 2603 2604 private: 2605 /// The operands of each instruction in each lane Operands[op_index][lane]. 2606 /// Note: This helps avoid the replication of the code that performs the 2607 /// reordering of operands during buildTree_rec() and vectorizeTree(). 2608 SmallVector<ValueList, 2> Operands; 2609 2610 /// The main/alternate instruction. 
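/// For an alternate-opcode bundle such as {fadd, fsub, fadd, fsub}
/// (illustrative), MainOp and AltOp hold one 'fadd' and one 'fsub' and
/// isAltShuffle() returns true; for a uniform bundle both point to the
/// same instruction and isAltShuffle() returns false.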
2611 Instruction *MainOp = nullptr; 2612 Instruction *AltOp = nullptr; 2613 2614 public: 2615 /// Set this bundle's \p OpIdx'th operand to \p OpVL. 2616 void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) { 2617 if (Operands.size() < OpIdx + 1) 2618 Operands.resize(OpIdx + 1); 2619 assert(Operands[OpIdx].empty() && "Already resized?"); 2620 assert(OpVL.size() <= Scalars.size() && 2621 "Number of operands is greater than the number of scalars."); 2622 Operands[OpIdx].resize(OpVL.size()); 2623 copy(OpVL, Operands[OpIdx].begin()); 2624 } 2625 2626 /// Set the operands of this bundle in their original order. 2627 void setOperandsInOrder() { 2628 assert(Operands.empty() && "Already initialized?"); 2629 auto *I0 = cast<Instruction>(Scalars[0]); 2630 Operands.resize(I0->getNumOperands()); 2631 unsigned NumLanes = Scalars.size(); 2632 for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands(); 2633 OpIdx != NumOperands; ++OpIdx) { 2634 Operands[OpIdx].resize(NumLanes); 2635 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) { 2636 auto *I = cast<Instruction>(Scalars[Lane]); 2637 assert(I->getNumOperands() == NumOperands && 2638 "Expected same number of operands"); 2639 Operands[OpIdx][Lane] = I->getOperand(OpIdx); 2640 } 2641 } 2642 } 2643 2644 /// Reorders operands of the node to the given mask \p Mask. 2645 void reorderOperands(ArrayRef<int> Mask) { 2646 for (ValueList &Operand : Operands) 2647 reorderScalars(Operand, Mask); 2648 } 2649 2650 /// \returns the \p OpIdx operand of this TreeEntry. 2651 ValueList &getOperand(unsigned OpIdx) { 2652 assert(OpIdx < Operands.size() && "Off bounds"); 2653 return Operands[OpIdx]; 2654 } 2655 2656 /// \returns the \p OpIdx operand of this TreeEntry. 2657 ArrayRef<Value *> getOperand(unsigned OpIdx) const { 2658 assert(OpIdx < Operands.size() && "Off bounds"); 2659 return Operands[OpIdx]; 2660 } 2661 2662 /// \returns the number of operands. 2663 unsigned getNumOperands() const { return Operands.size(); } 2664 2665 /// \return the single \p OpIdx operand. 2666 Value *getSingleOperand(unsigned OpIdx) const { 2667 assert(OpIdx < Operands.size() && "Off bounds"); 2668 assert(!Operands[OpIdx].empty() && "No operand available"); 2669 return Operands[OpIdx][0]; 2670 } 2671 2672 /// Some of the instructions in the list have alternate opcodes. 2673 bool isAltShuffle() const { return MainOp != AltOp; } 2674 2675 bool isOpcodeOrAlt(Instruction *I) const { 2676 unsigned CheckedOpcode = I->getOpcode(); 2677 return (getOpcode() == CheckedOpcode || 2678 getAltOpcode() == CheckedOpcode); 2679 } 2680 2681 /// Chooses the correct key for scheduling data. If \p Op has the same (or 2682 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is 2683 /// \p OpValue. 2684 Value *isOneOf(Value *Op) const { 2685 auto *I = dyn_cast<Instruction>(Op); 2686 if (I && isOpcodeOrAlt(I)) 2687 return Op; 2688 return MainOp; 2689 } 2690 2691 void setOperations(const InstructionsState &S) { 2692 MainOp = S.MainOp; 2693 AltOp = S.AltOp; 2694 } 2695 2696 Instruction *getMainOp() const { 2697 return MainOp; 2698 } 2699 2700 Instruction *getAltOp() const { 2701 return AltOp; 2702 } 2703 2704 /// The main/alternate opcodes for the list of instructions. 2705 unsigned getOpcode() const { 2706 return MainOp ? MainOp->getOpcode() : 0; 2707 } 2708 2709 unsigned getAltOpcode() const { 2710 return AltOp ? AltOp->getOpcode() : 0; 2711 } 2712 2713 /// When ReuseReorderShuffleIndices is empty it just returns position of \p 2714 /// V within vector of Scalars. 
Otherwise, try to remap on its reuse index. 2715 int findLaneForValue(Value *V) const { 2716 unsigned FoundLane = std::distance(Scalars.begin(), find(Scalars, V)); 2717 assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); 2718 if (!ReorderIndices.empty()) 2719 FoundLane = ReorderIndices[FoundLane]; 2720 assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); 2721 if (!ReuseShuffleIndices.empty()) { 2722 FoundLane = std::distance(ReuseShuffleIndices.begin(), 2723 find(ReuseShuffleIndices, FoundLane)); 2724 } 2725 return FoundLane; 2726 } 2727 2728 /// Build a shuffle mask for graph entry which represents a merge of main 2729 /// and alternate operations. 2730 void 2731 buildAltOpShuffleMask(const function_ref<bool(Instruction *)> IsAltOp, 2732 SmallVectorImpl<int> &Mask, 2733 SmallVectorImpl<Value *> *OpScalars = nullptr, 2734 SmallVectorImpl<Value *> *AltScalars = nullptr) const; 2735 2736 #ifndef NDEBUG 2737 /// Debug printer. 2738 LLVM_DUMP_METHOD void dump() const { 2739 dbgs() << Idx << ".\n"; 2740 for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) { 2741 dbgs() << "Operand " << OpI << ":\n"; 2742 for (const Value *V : Operands[OpI]) 2743 dbgs().indent(2) << *V << "\n"; 2744 } 2745 dbgs() << "Scalars: \n"; 2746 for (Value *V : Scalars) 2747 dbgs().indent(2) << *V << "\n"; 2748 dbgs() << "State: "; 2749 switch (State) { 2750 case Vectorize: 2751 dbgs() << "Vectorize\n"; 2752 break; 2753 case ScatterVectorize: 2754 dbgs() << "ScatterVectorize\n"; 2755 break; 2756 case PossibleStridedVectorize: 2757 dbgs() << "PossibleStridedVectorize\n"; 2758 break; 2759 case NeedToGather: 2760 dbgs() << "NeedToGather\n"; 2761 break; 2762 } 2763 dbgs() << "MainOp: "; 2764 if (MainOp) 2765 dbgs() << *MainOp << "\n"; 2766 else 2767 dbgs() << "NULL\n"; 2768 dbgs() << "AltOp: "; 2769 if (AltOp) 2770 dbgs() << *AltOp << "\n"; 2771 else 2772 dbgs() << "NULL\n"; 2773 dbgs() << "VectorizedValue: "; 2774 if (VectorizedValue) 2775 dbgs() << *VectorizedValue << "\n"; 2776 else 2777 dbgs() << "NULL\n"; 2778 dbgs() << "ReuseShuffleIndices: "; 2779 if (ReuseShuffleIndices.empty()) 2780 dbgs() << "Empty"; 2781 else 2782 for (int ReuseIdx : ReuseShuffleIndices) 2783 dbgs() << ReuseIdx << ", "; 2784 dbgs() << "\n"; 2785 dbgs() << "ReorderIndices: "; 2786 for (unsigned ReorderIdx : ReorderIndices) 2787 dbgs() << ReorderIdx << ", "; 2788 dbgs() << "\n"; 2789 dbgs() << "UserTreeIndices: "; 2790 for (const auto &EInfo : UserTreeIndices) 2791 dbgs() << EInfo << ", "; 2792 dbgs() << "\n"; 2793 } 2794 #endif 2795 }; 2796 2797 #ifndef NDEBUG 2798 void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost, 2799 InstructionCost VecCost, InstructionCost ScalarCost, 2800 StringRef Banner) const { 2801 dbgs() << "SLP: " << Banner << ":\n"; 2802 E->dump(); 2803 dbgs() << "SLP: Costs:\n"; 2804 dbgs() << "SLP: ReuseShuffleCost = " << ReuseShuffleCost << "\n"; 2805 dbgs() << "SLP: VectorCost = " << VecCost << "\n"; 2806 dbgs() << "SLP: ScalarCost = " << ScalarCost << "\n"; 2807 dbgs() << "SLP: ReuseShuffleCost + VecCost - ScalarCost = " 2808 << ReuseShuffleCost + VecCost - ScalarCost << "\n"; 2809 } 2810 #endif 2811 2812 /// Create a new VectorizableTree entry. 2813 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, 2814 std::optional<ScheduleData *> Bundle, 2815 const InstructionsState &S, 2816 const EdgeInfo &UserTreeIdx, 2817 ArrayRef<int> ReuseShuffleIndices = std::nullopt, 2818 ArrayRef<unsigned> ReorderIndices = std::nullopt) { 2819 TreeEntry::EntryState EntryState = 2820 Bundle ? 
TreeEntry::Vectorize : TreeEntry::NeedToGather; 2821 return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx, 2822 ReuseShuffleIndices, ReorderIndices); 2823 } 2824 2825 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, 2826 TreeEntry::EntryState EntryState, 2827 std::optional<ScheduleData *> Bundle, 2828 const InstructionsState &S, 2829 const EdgeInfo &UserTreeIdx, 2830 ArrayRef<int> ReuseShuffleIndices = std::nullopt, 2831 ArrayRef<unsigned> ReorderIndices = std::nullopt) { 2832 assert(((!Bundle && EntryState == TreeEntry::NeedToGather) || 2833 (Bundle && EntryState != TreeEntry::NeedToGather)) && 2834 "Need to vectorize gather entry?"); 2835 VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree)); 2836 TreeEntry *Last = VectorizableTree.back().get(); 2837 Last->Idx = VectorizableTree.size() - 1; 2838 Last->State = EntryState; 2839 Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(), 2840 ReuseShuffleIndices.end()); 2841 if (ReorderIndices.empty()) { 2842 Last->Scalars.assign(VL.begin(), VL.end()); 2843 Last->setOperations(S); 2844 } else { 2845 // Reorder scalars and build final mask. 2846 Last->Scalars.assign(VL.size(), nullptr); 2847 transform(ReorderIndices, Last->Scalars.begin(), 2848 [VL](unsigned Idx) -> Value * { 2849 if (Idx >= VL.size()) 2850 return UndefValue::get(VL.front()->getType()); 2851 return VL[Idx]; 2852 }); 2853 InstructionsState S = getSameOpcode(Last->Scalars, *TLI); 2854 Last->setOperations(S); 2855 Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end()); 2856 } 2857 if (Last->State != TreeEntry::NeedToGather) { 2858 for (Value *V : VL) { 2859 const TreeEntry *TE = getTreeEntry(V); 2860 assert((!TE || TE == Last || doesNotNeedToBeScheduled(V)) && 2861 "Scalar already in tree!"); 2862 if (TE) { 2863 if (TE != Last) 2864 MultiNodeScalars.try_emplace(V).first->getSecond().push_back(Last); 2865 continue; 2866 } 2867 ScalarToTreeEntry[V] = Last; 2868 } 2869 // Update the scheduler bundle to point to this TreeEntry. 2870 ScheduleData *BundleMember = *Bundle; 2871 assert((BundleMember || isa<PHINode>(S.MainOp) || 2872 isVectorLikeInstWithConstOps(S.MainOp) || 2873 doesNotNeedToSchedule(VL)) && 2874 "Bundle and VL out of sync"); 2875 if (BundleMember) { 2876 for (Value *V : VL) { 2877 if (doesNotNeedToBeScheduled(V)) 2878 continue; 2879 if (!BundleMember) 2880 continue; 2881 BundleMember->TE = Last; 2882 BundleMember = BundleMember->NextInBundle; 2883 } 2884 } 2885 assert(!BundleMember && "Bundle and VL out of sync"); 2886 } else { 2887 MustGather.insert(VL.begin(), VL.end()); 2888 // Build a map for gathered scalars to the nodes where they are used. 2889 for (Value *V : VL) 2890 if (!isConstant(V)) 2891 ValueToGatherNodes.try_emplace(V).first->getSecond().insert(Last); 2892 } 2893 2894 if (UserTreeIdx.UserTE) 2895 Last->UserTreeIndices.push_back(UserTreeIdx); 2896 2897 return Last; 2898 } 2899 2900 /// -- Vectorization State -- 2901 /// Holds all of the tree entries. 2902 TreeEntry::VecTreeTy VectorizableTree; 2903 2904 #ifndef NDEBUG 2905 /// Debug printer. 
2906 LLVM_DUMP_METHOD void dumpVectorizableTree() const { 2907 for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) { 2908 VectorizableTree[Id]->dump(); 2909 dbgs() << "\n"; 2910 } 2911 } 2912 #endif 2913 2914 TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); } 2915 2916 const TreeEntry *getTreeEntry(Value *V) const { 2917 return ScalarToTreeEntry.lookup(V); 2918 } 2919 2920 /// Checks if the specified list of the instructions/values can be vectorized 2921 /// and fills required data before actual scheduling of the instructions. 2922 TreeEntry::EntryState getScalarsVectorizationState( 2923 InstructionsState &S, ArrayRef<Value *> VL, bool IsScatterVectorizeUserTE, 2924 OrdersType &CurrentOrder, SmallVectorImpl<Value *> &PointerOps) const; 2925 2926 /// Maps a specific scalar to its tree entry. 2927 SmallDenseMap<Value *, TreeEntry *> ScalarToTreeEntry; 2928 2929 /// List of scalars, used in several vectorize nodes, and the list of the 2930 /// nodes. 2931 SmallDenseMap<Value *, SmallVector<TreeEntry *>> MultiNodeScalars; 2932 2933 /// Maps a value to the proposed vectorizable size. 2934 SmallDenseMap<Value *, unsigned> InstrElementSize; 2935 2936 /// A list of scalars that we found that we need to keep as scalars. 2937 ValueSet MustGather; 2938 2939 /// A map between the vectorized entries and the last instructions in the 2940 /// bundles. The bundles are built in use order, not in the def order of the 2941 /// instructions. So, we cannot rely directly on the last instruction in the 2942 /// bundle being the last instruction in the program order during 2943 /// vectorization process since the basic blocks are affected, need to 2944 /// pre-gather them before. 2945 DenseMap<const TreeEntry *, Instruction *> EntryToLastInstruction; 2946 2947 /// List of gather nodes, depending on other gather/vector nodes, which should 2948 /// be emitted after the vector instruction emission process to correctly 2949 /// handle order of the vector instructions and shuffles. 2950 SetVector<const TreeEntry *> PostponedGathers; 2951 2952 using ValueToGatherNodesMap = 2953 DenseMap<Value *, SmallPtrSet<const TreeEntry *, 4>>; 2954 ValueToGatherNodesMap ValueToGatherNodes; 2955 2956 /// This POD struct describes one external user in the vectorized tree. 2957 struct ExternalUser { 2958 ExternalUser(Value *S, llvm::User *U, int L) 2959 : Scalar(S), User(U), Lane(L) {} 2960 2961 // Which scalar in our function. 2962 Value *Scalar; 2963 2964 // Which user that uses the scalar. 2965 llvm::User *User; 2966 2967 // Which lane does the scalar belong to. 2968 int Lane; 2969 }; 2970 using UserList = SmallVector<ExternalUser, 16>; 2971 2972 /// Checks if two instructions may access the same memory. 2973 /// 2974 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it 2975 /// is invariant in the calling loop. 2976 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1, 2977 Instruction *Inst2) { 2978 if (!Loc1.Ptr || !isSimple(Inst1) || !isSimple(Inst2)) 2979 return true; 2980 // First check if the result is already in the cache. 2981 AliasCacheKey Key = std::make_pair(Inst1, Inst2); 2982 auto It = AliasCache.find(Key); 2983 if (It != AliasCache.end()) 2984 return It->second; 2985 bool Aliased = isModOrRefSet(BatchAA.getModRefInfo(Inst2, Loc1)); 2986 // Store the result in the cache. 
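// Cache the result under both (Inst1, Inst2) and (Inst2, Inst1) so that the
// query with the operands swapped also hits the cache.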
2987 AliasCache.try_emplace(Key, Aliased);
2988 AliasCache.try_emplace(std::make_pair(Inst2, Inst1), Aliased);
2989 return Aliased;
2990 }
2991
2992 using AliasCacheKey = std::pair<Instruction *, Instruction *>;
2993
2994 /// Cache for alias results.
2995 /// TODO: consider moving this to the AliasAnalysis itself.
2996 DenseMap<AliasCacheKey, bool> AliasCache;
2997
2998 // Cache for pointerMayBeCaptured calls inside AA. This is preserved
2999 // globally through SLP because we don't perform any action which
3000 // invalidates capture results.
3001 BatchAAResults BatchAA;
3002
3003 /// Temporary store for deleted instructions. Instructions will be deleted
3004 /// eventually when the BoUpSLP is destructed. The deferral is required to
3005 /// ensure that there are no incorrect collisions in the AliasCache, which
3006 /// can happen if a new instruction is allocated at the same address as a
3007 /// previously deleted instruction.
3008 DenseSet<Instruction *> DeletedInstructions;
3009
3010 /// Set of the instructions already analyzed for reductions.
3011 SmallPtrSet<Instruction *, 16> AnalyzedReductionsRoots;
3012
3013 /// Set of hashes for the list of reduction values already being analyzed.
3014 DenseSet<size_t> AnalyzedReductionVals;
3015
3016 /// A list of values that need to be extracted out of the tree.
3017 /// This list holds pairs of (Internal Scalar : External User). External User
3018 /// can be nullptr, which means that this Internal Scalar will be used later,
3019 /// after vectorization.
3020 UserList ExternalUses;
3021
3022 /// Values used only by @llvm.assume calls.
3023 SmallPtrSet<const Value *, 32> EphValues;
3024
3025 /// Holds all of the instructions that we gathered: shuffle instructions and
3026 /// extractelements.
3027 SetVector<Instruction *> GatherShuffleExtractSeq;
3028
3029 /// A list of blocks that we are going to CSE.
3030 DenseSet<BasicBlock *> CSEBlocks;
3031
3032 /// Contains all scheduling relevant data for an instruction.
3033 /// A ScheduleData either represents a single instruction or a member of an
3034 /// instruction bundle (= a group of instructions which is combined into a
3035 /// vector instruction).
3036 struct ScheduleData {
3037 // The initial value for the dependency counters. It means that the
3038 // dependencies are not calculated yet.
3039 enum { InvalidDeps = -1 };
3040
3041 ScheduleData() = default;
3042
3043 void init(int BlockSchedulingRegionID, Value *OpVal) {
3044 FirstInBundle = this;
3045 NextInBundle = nullptr;
3046 NextLoadStore = nullptr;
3047 IsScheduled = false;
3048 SchedulingRegionID = BlockSchedulingRegionID;
3049 clearDependencies();
3050 OpValue = OpVal;
3051 TE = nullptr;
3052 }
3053
3054 /// Verify basic self-consistency properties.
3055 void verify() {
3056 if (hasValidDependencies()) {
3057 assert(UnscheduledDeps <= Dependencies && "invariant");
3058 } else {
3059 assert(UnscheduledDeps == Dependencies && "invariant");
3060 }
3061
3062 if (IsScheduled) {
3063 assert(isSchedulingEntity() &&
3064 "unexpected scheduled state");
3065 for (const ScheduleData *BundleMember = this; BundleMember;
3066 BundleMember = BundleMember->NextInBundle) {
3067 assert(BundleMember->hasValidDependencies() &&
3068 BundleMember->UnscheduledDeps == 0 &&
3069 "unexpected scheduled state");
3070 assert((BundleMember == this || !BundleMember->IsScheduled) &&
3071 "only bundle is marked scheduled");
3072 }
3073 }
3074
3075 assert(Inst->getParent() == FirstInBundle->Inst->getParent() &&
3076 "all bundle members must be in same basic block");
3077 }
3078
3079 /// Returns true if the dependency information has been calculated.
3080 /// Note that dependency validity can vary between instructions within
3081 /// a single bundle.
3082 bool hasValidDependencies() const { return Dependencies != InvalidDeps; }
3083
3084 /// Returns true for single instructions and for bundle representatives
3085 /// (= the head of a bundle).
3086 bool isSchedulingEntity() const { return FirstInBundle == this; }
3087
3088 /// Returns true if it represents an instruction bundle and not only a
3089 /// single instruction.
3090 bool isPartOfBundle() const {
3091 return NextInBundle != nullptr || FirstInBundle != this || TE;
3092 }
3093
3094 /// Returns true if it is ready for scheduling, i.e. it has no more
3095 /// unscheduled dependent instructions/bundles.
3096 bool isReady() const {
3097 assert(isSchedulingEntity() &&
3098 "can't consider non-scheduling entity for ready list");
3099 return unscheduledDepsInBundle() == 0 && !IsScheduled;
3100 }
3101
3102 /// Modifies the number of unscheduled dependencies for this instruction,
3103 /// and returns the number of remaining dependencies for the containing
3104 /// bundle.
3105 int incrementUnscheduledDeps(int Incr) {
3106 assert(hasValidDependencies() &&
3107 "increment of unscheduled deps would be meaningless");
3108 UnscheduledDeps += Incr;
3109 return FirstInBundle->unscheduledDepsInBundle();
3110 }
3111
3112 /// Sets the number of unscheduled dependencies to the number of
3113 /// dependencies.
3114 void resetUnscheduledDeps() {
3115 UnscheduledDeps = Dependencies;
3116 }
3117
3118 /// Clears all dependency information.
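/// Resets the counters to InvalidDeps and drops the cached memory and
/// control dependency lists so that they can be recomputed for the next
/// scheduling region.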
3119 void clearDependencies() { 3120 Dependencies = InvalidDeps; 3121 resetUnscheduledDeps(); 3122 MemoryDependencies.clear(); 3123 ControlDependencies.clear(); 3124 } 3125 3126 int unscheduledDepsInBundle() const { 3127 assert(isSchedulingEntity() && "only meaningful on the bundle"); 3128 int Sum = 0; 3129 for (const ScheduleData *BundleMember = this; BundleMember; 3130 BundleMember = BundleMember->NextInBundle) { 3131 if (BundleMember->UnscheduledDeps == InvalidDeps) 3132 return InvalidDeps; 3133 Sum += BundleMember->UnscheduledDeps; 3134 } 3135 return Sum; 3136 } 3137 3138 void dump(raw_ostream &os) const { 3139 if (!isSchedulingEntity()) { 3140 os << "/ " << *Inst; 3141 } else if (NextInBundle) { 3142 os << '[' << *Inst; 3143 ScheduleData *SD = NextInBundle; 3144 while (SD) { 3145 os << ';' << *SD->Inst; 3146 SD = SD->NextInBundle; 3147 } 3148 os << ']'; 3149 } else { 3150 os << *Inst; 3151 } 3152 } 3153 3154 Instruction *Inst = nullptr; 3155 3156 /// Opcode of the current instruction in the schedule data. 3157 Value *OpValue = nullptr; 3158 3159 /// The TreeEntry that this instruction corresponds to. 3160 TreeEntry *TE = nullptr; 3161 3162 /// Points to the head in an instruction bundle (and always to this for 3163 /// single instructions). 3164 ScheduleData *FirstInBundle = nullptr; 3165 3166 /// Single linked list of all instructions in a bundle. Null if it is a 3167 /// single instruction. 3168 ScheduleData *NextInBundle = nullptr; 3169 3170 /// Single linked list of all memory instructions (e.g. load, store, call) 3171 /// in the block - until the end of the scheduling region. 3172 ScheduleData *NextLoadStore = nullptr; 3173 3174 /// The dependent memory instructions. 3175 /// This list is derived on demand in calculateDependencies(). 3176 SmallVector<ScheduleData *, 4> MemoryDependencies; 3177 3178 /// List of instructions which this instruction could be control dependent 3179 /// on. Allowing such nodes to be scheduled below this one could introduce 3180 /// a runtime fault which didn't exist in the original program. 3181 /// ex: this is a load or udiv following a readonly call which inf loops 3182 SmallVector<ScheduleData *, 4> ControlDependencies; 3183 3184 /// This ScheduleData is in the current scheduling region if this matches 3185 /// the current SchedulingRegionID of BlockScheduling. 3186 int SchedulingRegionID = 0; 3187 3188 /// Used for getting a "good" final ordering of instructions. 3189 int SchedulingPriority = 0; 3190 3191 /// The number of dependencies. Constitutes of the number of users of the 3192 /// instruction plus the number of dependent memory instructions (if any). 3193 /// This value is calculated on demand. 3194 /// If InvalidDeps, the number of dependencies is not calculated yet. 3195 int Dependencies = InvalidDeps; 3196 3197 /// The number of dependencies minus the number of dependencies of scheduled 3198 /// instructions. As soon as this is zero, the instruction/bundle gets ready 3199 /// for scheduling. 3200 /// Note that this is negative as long as Dependencies is not calculated. 3201 int UnscheduledDeps = InvalidDeps; 3202 3203 /// True if this instruction is scheduled (or considered as scheduled in the 3204 /// dry-run). 
3205 bool IsScheduled = false; 3206 }; 3207 3208 #ifndef NDEBUG 3209 friend inline raw_ostream &operator<<(raw_ostream &os, 3210 const BoUpSLP::ScheduleData &SD) { 3211 SD.dump(os); 3212 return os; 3213 } 3214 #endif 3215 3216 friend struct GraphTraits<BoUpSLP *>; 3217 friend struct DOTGraphTraits<BoUpSLP *>; 3218 3219 /// Contains all scheduling data for a basic block. 3220 /// It does not schedules instructions, which are not memory read/write 3221 /// instructions and their operands are either constants, or arguments, or 3222 /// phis, or instructions from others blocks, or their users are phis or from 3223 /// the other blocks. The resulting vector instructions can be placed at the 3224 /// beginning of the basic block without scheduling (if operands does not need 3225 /// to be scheduled) or at the end of the block (if users are outside of the 3226 /// block). It allows to save some compile time and memory used by the 3227 /// compiler. 3228 /// ScheduleData is assigned for each instruction in between the boundaries of 3229 /// the tree entry, even for those, which are not part of the graph. It is 3230 /// required to correctly follow the dependencies between the instructions and 3231 /// their correct scheduling. The ScheduleData is not allocated for the 3232 /// instructions, which do not require scheduling, like phis, nodes with 3233 /// extractelements/insertelements only or nodes with instructions, with 3234 /// uses/operands outside of the block. 3235 struct BlockScheduling { 3236 BlockScheduling(BasicBlock *BB) 3237 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {} 3238 3239 void clear() { 3240 ReadyInsts.clear(); 3241 ScheduleStart = nullptr; 3242 ScheduleEnd = nullptr; 3243 FirstLoadStoreInRegion = nullptr; 3244 LastLoadStoreInRegion = nullptr; 3245 RegionHasStackSave = false; 3246 3247 // Reduce the maximum schedule region size by the size of the 3248 // previous scheduling run. 3249 ScheduleRegionSizeLimit -= ScheduleRegionSize; 3250 if (ScheduleRegionSizeLimit < MinScheduleRegionSize) 3251 ScheduleRegionSizeLimit = MinScheduleRegionSize; 3252 ScheduleRegionSize = 0; 3253 3254 // Make a new scheduling region, i.e. all existing ScheduleData is not 3255 // in the new region yet. 3256 ++SchedulingRegionID; 3257 } 3258 3259 ScheduleData *getScheduleData(Instruction *I) { 3260 if (BB != I->getParent()) 3261 // Avoid lookup if can't possibly be in map. 3262 return nullptr; 3263 ScheduleData *SD = ScheduleDataMap.lookup(I); 3264 if (SD && isInSchedulingRegion(SD)) 3265 return SD; 3266 return nullptr; 3267 } 3268 3269 ScheduleData *getScheduleData(Value *V) { 3270 if (auto *I = dyn_cast<Instruction>(V)) 3271 return getScheduleData(I); 3272 return nullptr; 3273 } 3274 3275 ScheduleData *getScheduleData(Value *V, Value *Key) { 3276 if (V == Key) 3277 return getScheduleData(V); 3278 auto I = ExtraScheduleDataMap.find(V); 3279 if (I != ExtraScheduleDataMap.end()) { 3280 ScheduleData *SD = I->second.lookup(Key); 3281 if (SD && isInSchedulingRegion(SD)) 3282 return SD; 3283 } 3284 return nullptr; 3285 } 3286 3287 bool isInSchedulingRegion(ScheduleData *SD) const { 3288 return SD->SchedulingRegionID == SchedulingRegionID; 3289 } 3290 3291 /// Marks an instruction as scheduled and puts all dependent ready 3292 /// instructions into the ready-list. 
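/// For every member of the scheduled bundle this decrements the unscheduled
/// dependency counters of its def-use, memory, and control dependencies; any
/// dependent bundle whose counter reaches zero is inserted into \p ReadyList.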
3293 template <typename ReadyListType> 3294 void schedule(ScheduleData *SD, ReadyListType &ReadyList) { 3295 SD->IsScheduled = true; 3296 LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n"); 3297 3298 for (ScheduleData *BundleMember = SD; BundleMember; 3299 BundleMember = BundleMember->NextInBundle) { 3300 if (BundleMember->Inst != BundleMember->OpValue) 3301 continue; 3302 3303 // Handle the def-use chain dependencies. 3304 3305 // Decrement the unscheduled counter and insert to ready list if ready. 3306 auto &&DecrUnsched = [this, &ReadyList](Instruction *I) { 3307 doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) { 3308 if (OpDef && OpDef->hasValidDependencies() && 3309 OpDef->incrementUnscheduledDeps(-1) == 0) { 3310 // There are no more unscheduled dependencies after 3311 // decrementing, so we can put the dependent instruction 3312 // into the ready list. 3313 ScheduleData *DepBundle = OpDef->FirstInBundle; 3314 assert(!DepBundle->IsScheduled && 3315 "already scheduled bundle gets ready"); 3316 ReadyList.insert(DepBundle); 3317 LLVM_DEBUG(dbgs() 3318 << "SLP: gets ready (def): " << *DepBundle << "\n"); 3319 } 3320 }); 3321 }; 3322 3323 // If BundleMember is a vector bundle, its operands may have been 3324 // reordered during buildTree(). We therefore need to get its operands 3325 // through the TreeEntry. 3326 if (TreeEntry *TE = BundleMember->TE) { 3327 // Need to search for the lane since the tree entry can be reordered. 3328 int Lane = std::distance(TE->Scalars.begin(), 3329 find(TE->Scalars, BundleMember->Inst)); 3330 assert(Lane >= 0 && "Lane not set"); 3331 3332 // Since vectorization tree is being built recursively this assertion 3333 // ensures that the tree entry has all operands set before reaching 3334 // this code. Couple of exceptions known at the moment are extracts 3335 // where their second (immediate) operand is not added. Since 3336 // immediates do not affect scheduler behavior this is considered 3337 // okay. 3338 auto *In = BundleMember->Inst; 3339 assert(In && 3340 (isa<ExtractValueInst, ExtractElementInst>(In) || 3341 In->getNumOperands() == TE->getNumOperands()) && 3342 "Missed TreeEntry operands?"); 3343 (void)In; // fake use to avoid build failure when assertions disabled 3344 3345 for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands(); 3346 OpIdx != NumOperands; ++OpIdx) 3347 if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane])) 3348 DecrUnsched(I); 3349 } else { 3350 // If BundleMember is a stand-alone instruction, no operand reordering 3351 // has taken place, so we directly access its operands. 3352 for (Use &U : BundleMember->Inst->operands()) 3353 if (auto *I = dyn_cast<Instruction>(U.get())) 3354 DecrUnsched(I); 3355 } 3356 // Handle the memory dependencies. 3357 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) { 3358 if (MemoryDepSD->hasValidDependencies() && 3359 MemoryDepSD->incrementUnscheduledDeps(-1) == 0) { 3360 // There are no more unscheduled dependencies after decrementing, 3361 // so we can put the dependent instruction into the ready list. 3362 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle; 3363 assert(!DepBundle->IsScheduled && 3364 "already scheduled bundle gets ready"); 3365 ReadyList.insert(DepBundle); 3366 LLVM_DEBUG(dbgs() 3367 << "SLP: gets ready (mem): " << *DepBundle << "\n"); 3368 } 3369 } 3370 // Handle the control dependencies. 
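// Control dependencies are released in the same way as memory dependencies:
// once this member is scheduled, any bundle that was waiting only on it
// becomes ready.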
3371 for (ScheduleData *DepSD : BundleMember->ControlDependencies) { 3372 if (DepSD->incrementUnscheduledDeps(-1) == 0) { 3373 // There are no more unscheduled dependencies after decrementing, 3374 // so we can put the dependent instruction into the ready list. 3375 ScheduleData *DepBundle = DepSD->FirstInBundle; 3376 assert(!DepBundle->IsScheduled && 3377 "already scheduled bundle gets ready"); 3378 ReadyList.insert(DepBundle); 3379 LLVM_DEBUG(dbgs() 3380 << "SLP: gets ready (ctl): " << *DepBundle << "\n"); 3381 } 3382 } 3383 } 3384 } 3385 3386 /// Verify basic self consistency properties of the data structure. 3387 void verify() { 3388 if (!ScheduleStart) 3389 return; 3390 3391 assert(ScheduleStart->getParent() == ScheduleEnd->getParent() && 3392 ScheduleStart->comesBefore(ScheduleEnd) && 3393 "Not a valid scheduling region?"); 3394 3395 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 3396 auto *SD = getScheduleData(I); 3397 if (!SD) 3398 continue; 3399 assert(isInSchedulingRegion(SD) && 3400 "primary schedule data not in window?"); 3401 assert(isInSchedulingRegion(SD->FirstInBundle) && 3402 "entire bundle in window!"); 3403 (void)SD; 3404 doForAllOpcodes(I, [](ScheduleData *SD) { SD->verify(); }); 3405 } 3406 3407 for (auto *SD : ReadyInsts) { 3408 assert(SD->isSchedulingEntity() && SD->isReady() && 3409 "item in ready list not ready?"); 3410 (void)SD; 3411 } 3412 } 3413 3414 void doForAllOpcodes(Value *V, 3415 function_ref<void(ScheduleData *SD)> Action) { 3416 if (ScheduleData *SD = getScheduleData(V)) 3417 Action(SD); 3418 auto I = ExtraScheduleDataMap.find(V); 3419 if (I != ExtraScheduleDataMap.end()) 3420 for (auto &P : I->second) 3421 if (isInSchedulingRegion(P.second)) 3422 Action(P.second); 3423 } 3424 3425 /// Put all instructions into the ReadyList which are ready for scheduling. 3426 template <typename ReadyListType> 3427 void initialFillReadyList(ReadyListType &ReadyList) { 3428 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 3429 doForAllOpcodes(I, [&](ScheduleData *SD) { 3430 if (SD->isSchedulingEntity() && SD->hasValidDependencies() && 3431 SD->isReady()) { 3432 ReadyList.insert(SD); 3433 LLVM_DEBUG(dbgs() 3434 << "SLP: initially in ready list: " << *SD << "\n"); 3435 } 3436 }); 3437 } 3438 } 3439 3440 /// Build a bundle from the ScheduleData nodes corresponding to the 3441 /// scalar instruction for each lane. 3442 ScheduleData *buildBundle(ArrayRef<Value *> VL); 3443 3444 /// Checks if a bundle of instructions can be scheduled, i.e. has no 3445 /// cyclic dependencies. This is only a dry-run, no instructions are 3446 /// actually moved at this stage. 3447 /// \returns the scheduling bundle. The returned Optional value is not 3448 /// std::nullopt if \p VL is allowed to be scheduled. 3449 std::optional<ScheduleData *> 3450 tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 3451 const InstructionsState &S); 3452 3453 /// Un-bundles a group of instructions. 3454 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue); 3455 3456 /// Allocates schedule data chunk. 3457 ScheduleData *allocateScheduleDataChunks(); 3458 3459 /// Extends the scheduling region so that V is inside the region. 3460 /// \returns true if the region size is within the limit. 3461 bool extendSchedulingRegion(Value *V, const InstructionsState &S); 3462 3463 /// Initialize the ScheduleData structures for new instructions in the 3464 /// scheduling region. 
3465 void initScheduleData(Instruction *FromI, Instruction *ToI, 3466 ScheduleData *PrevLoadStore, 3467 ScheduleData *NextLoadStore); 3468 3469 /// Updates the dependency information of a bundle and of all instructions/ 3470 /// bundles which depend on the original bundle. 3471 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList, 3472 BoUpSLP *SLP); 3473 3474 /// Sets all instruction in the scheduling region to un-scheduled. 3475 void resetSchedule(); 3476 3477 BasicBlock *BB; 3478 3479 /// Simple memory allocation for ScheduleData. 3480 SmallVector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks; 3481 3482 /// The size of a ScheduleData array in ScheduleDataChunks. 3483 int ChunkSize; 3484 3485 /// The allocator position in the current chunk, which is the last entry 3486 /// of ScheduleDataChunks. 3487 int ChunkPos; 3488 3489 /// Attaches ScheduleData to Instruction. 3490 /// Note that the mapping survives during all vectorization iterations, i.e. 3491 /// ScheduleData structures are recycled. 3492 DenseMap<Instruction *, ScheduleData *> ScheduleDataMap; 3493 3494 /// Attaches ScheduleData to Instruction with the leading key. 3495 DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>> 3496 ExtraScheduleDataMap; 3497 3498 /// The ready-list for scheduling (only used for the dry-run). 3499 SetVector<ScheduleData *> ReadyInsts; 3500 3501 /// The first instruction of the scheduling region. 3502 Instruction *ScheduleStart = nullptr; 3503 3504 /// The first instruction _after_ the scheduling region. 3505 Instruction *ScheduleEnd = nullptr; 3506 3507 /// The first memory accessing instruction in the scheduling region 3508 /// (can be null). 3509 ScheduleData *FirstLoadStoreInRegion = nullptr; 3510 3511 /// The last memory accessing instruction in the scheduling region 3512 /// (can be null). 3513 ScheduleData *LastLoadStoreInRegion = nullptr; 3514 3515 /// Is there an llvm.stacksave or llvm.stackrestore in the scheduling 3516 /// region? Used to optimize the dependence calculation for the 3517 /// common case where there isn't. 3518 bool RegionHasStackSave = false; 3519 3520 /// The current size of the scheduling region. 3521 int ScheduleRegionSize = 0; 3522 3523 /// The maximum size allowed for the scheduling region. 3524 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget; 3525 3526 /// The ID of the scheduling region. For a new vectorization iteration this 3527 /// is incremented which "removes" all ScheduleData from the region. 3528 /// Make sure that the initial SchedulingRegionID is greater than the 3529 /// initial SchedulingRegionID in ScheduleData (which is 0). 3530 int SchedulingRegionID = 1; 3531 }; 3532 3533 /// Attaches the BlockScheduling structures to basic blocks. 3534 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules; 3535 3536 /// Performs the "real" scheduling. Done before vectorization is actually 3537 /// performed in a basic block. 3538 void scheduleBlock(BlockScheduling *BS); 3539 3540 /// List of users to ignore during scheduling and that don't need extracting. 3541 const SmallDenseSet<Value *> *UserIgnoreList = nullptr; 3542 3543 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of 3544 /// sorted SmallVectors of unsigned. 
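/// For example, this allows an order such as {1, 0, 3, 2} to be used directly
/// as a DenseMap key when counting how often each order occurs (see the
/// OrdersUses map in reorderTopToBottom).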
3545 struct OrdersTypeDenseMapInfo { 3546 static OrdersType getEmptyKey() { 3547 OrdersType V; 3548 V.push_back(~1U); 3549 return V; 3550 } 3551 3552 static OrdersType getTombstoneKey() { 3553 OrdersType V; 3554 V.push_back(~2U); 3555 return V; 3556 } 3557 3558 static unsigned getHashValue(const OrdersType &V) { 3559 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end())); 3560 } 3561 3562 static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) { 3563 return LHS == RHS; 3564 } 3565 }; 3566 3567 // Analysis and block reference. 3568 Function *F; 3569 ScalarEvolution *SE; 3570 TargetTransformInfo *TTI; 3571 TargetLibraryInfo *TLI; 3572 LoopInfo *LI; 3573 DominatorTree *DT; 3574 AssumptionCache *AC; 3575 DemandedBits *DB; 3576 const DataLayout *DL; 3577 OptimizationRemarkEmitter *ORE; 3578 3579 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt. 3580 unsigned MinVecRegSize; // Set by cl::opt (default: 128). 3581 3582 /// Instruction builder to construct the vectorized tree. 3583 IRBuilder<> Builder; 3584 3585 /// A map of scalar integer values to the smallest bit width with which they 3586 /// can legally be represented. The values map to (width, signed) pairs, 3587 /// where "width" indicates the minimum bit width and "signed" is True if the 3588 /// value must be signed-extended, rather than zero-extended, back to its 3589 /// original width. 3590 DenseMap<const TreeEntry *, std::pair<uint64_t, bool>> MinBWs; 3591 }; 3592 3593 } // end namespace slpvectorizer 3594 3595 template <> struct GraphTraits<BoUpSLP *> { 3596 using TreeEntry = BoUpSLP::TreeEntry; 3597 3598 /// NodeRef has to be a pointer per the GraphWriter. 3599 using NodeRef = TreeEntry *; 3600 3601 using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy; 3602 3603 /// Add the VectorizableTree to the index iterator to be able to return 3604 /// TreeEntry pointers. 3605 struct ChildIteratorType 3606 : public iterator_adaptor_base< 3607 ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> { 3608 ContainerTy &VectorizableTree; 3609 3610 ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W, 3611 ContainerTy &VT) 3612 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {} 3613 3614 NodeRef operator*() { return I->UserTE; } 3615 }; 3616 3617 static NodeRef getEntryNode(BoUpSLP &R) { 3618 return R.VectorizableTree[0].get(); 3619 } 3620 3621 static ChildIteratorType child_begin(NodeRef N) { 3622 return {N->UserTreeIndices.begin(), N->Container}; 3623 } 3624 3625 static ChildIteratorType child_end(NodeRef N) { 3626 return {N->UserTreeIndices.end(), N->Container}; 3627 } 3628 3629 /// For the node iterator we just need to turn the TreeEntry iterator into a 3630 /// TreeEntry* iterator so that it dereferences to NodeRef. 
3631 class nodes_iterator { 3632 using ItTy = ContainerTy::iterator; 3633 ItTy It; 3634 3635 public: 3636 nodes_iterator(const ItTy &It2) : It(It2) {} 3637 NodeRef operator*() { return It->get(); } 3638 nodes_iterator operator++() { 3639 ++It; 3640 return *this; 3641 } 3642 bool operator!=(const nodes_iterator &N2) const { return N2.It != It; } 3643 }; 3644 3645 static nodes_iterator nodes_begin(BoUpSLP *R) { 3646 return nodes_iterator(R->VectorizableTree.begin()); 3647 } 3648 3649 static nodes_iterator nodes_end(BoUpSLP *R) { 3650 return nodes_iterator(R->VectorizableTree.end()); 3651 } 3652 3653 static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); } 3654 }; 3655 3656 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits { 3657 using TreeEntry = BoUpSLP::TreeEntry; 3658 3659 DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {} 3660 3661 std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) { 3662 std::string Str; 3663 raw_string_ostream OS(Str); 3664 OS << Entry->Idx << ".\n"; 3665 if (isSplat(Entry->Scalars)) 3666 OS << "<splat> "; 3667 for (auto *V : Entry->Scalars) { 3668 OS << *V; 3669 if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) { 3670 return EU.Scalar == V; 3671 })) 3672 OS << " <extract>"; 3673 OS << "\n"; 3674 } 3675 return Str; 3676 } 3677 3678 static std::string getNodeAttributes(const TreeEntry *Entry, 3679 const BoUpSLP *) { 3680 if (Entry->State == TreeEntry::NeedToGather) 3681 return "color=red"; 3682 if (Entry->State == TreeEntry::ScatterVectorize || 3683 Entry->State == TreeEntry::PossibleStridedVectorize) 3684 return "color=blue"; 3685 return ""; 3686 } 3687 }; 3688 3689 } // end namespace llvm 3690 3691 BoUpSLP::~BoUpSLP() { 3692 SmallVector<WeakTrackingVH> DeadInsts; 3693 for (auto *I : DeletedInstructions) { 3694 for (Use &U : I->operands()) { 3695 auto *Op = dyn_cast<Instruction>(U.get()); 3696 if (Op && !DeletedInstructions.count(Op) && Op->hasOneUser() && 3697 wouldInstructionBeTriviallyDead(Op, TLI)) 3698 DeadInsts.emplace_back(Op); 3699 } 3700 I->dropAllReferences(); 3701 } 3702 for (auto *I : DeletedInstructions) { 3703 assert(I->use_empty() && 3704 "trying to erase instruction with users."); 3705 I->eraseFromParent(); 3706 } 3707 3708 // Cleanup any dead scalar code feeding the vectorized instructions 3709 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI); 3710 3711 #ifdef EXPENSIVE_CHECKS 3712 // If we could guarantee that this call is not extremely slow, we could 3713 // remove the ifdef limitation (see PR47712). 3714 assert(!verifyFunction(*F, &dbgs())); 3715 #endif 3716 } 3717 3718 /// Reorders the given \p Reuses mask according to the given \p Mask. \p Reuses 3719 /// contains original mask for the scalars reused in the node. Procedure 3720 /// transform this mask in accordance with the given \p Mask. 3721 static void reorderReuses(SmallVectorImpl<int> &Reuses, ArrayRef<int> Mask) { 3722 assert(!Mask.empty() && Reuses.size() == Mask.size() && 3723 "Expected non-empty mask."); 3724 SmallVector<int> Prev(Reuses.begin(), Reuses.end()); 3725 Prev.swap(Reuses); 3726 for (unsigned I = 0, E = Prev.size(); I < E; ++I) 3727 if (Mask[I] != PoisonMaskElem) 3728 Reuses[Mask[I]] = Prev[I]; 3729 } 3730 3731 /// Reorders the given \p Order according to the given \p Mask. \p Order - is 3732 /// the original order of the scalars. Procedure transforms the provided order 3733 /// in accordance with the given \p Mask. 
If the resulting \p Order is just an
3734 /// identity order, \p Order is cleared.
3735 static void reorderOrder(SmallVectorImpl<unsigned> &Order, ArrayRef<int> Mask) {
3736 assert(!Mask.empty() && "Expected non-empty mask.");
3737 SmallVector<int> MaskOrder;
3738 if (Order.empty()) {
3739 MaskOrder.resize(Mask.size());
3740 std::iota(MaskOrder.begin(), MaskOrder.end(), 0);
3741 } else {
3742 inversePermutation(Order, MaskOrder);
3743 }
3744 reorderReuses(MaskOrder, Mask);
3745 if (ShuffleVectorInst::isIdentityMask(MaskOrder, MaskOrder.size())) {
3746 Order.clear();
3747 return;
3748 }
3749 Order.assign(Mask.size(), Mask.size());
3750 for (unsigned I = 0, E = Mask.size(); I < E; ++I)
3751 if (MaskOrder[I] != PoisonMaskElem)
3752 Order[MaskOrder[I]] = I;
3753 fixupOrderingIndices(Order);
3754 }
3755
3756 std::optional<BoUpSLP::OrdersType>
3757 BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) {
3758 assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only.");
3759 unsigned NumScalars = TE.Scalars.size();
3760 OrdersType CurrentOrder(NumScalars, NumScalars);
3761 SmallVector<int> Positions;
3762 SmallBitVector UsedPositions(NumScalars);
3763 const TreeEntry *STE = nullptr;
3764 // Try to find all gathered scalars that are also vectorized in another
3765 // vectorize node. There can be only one such vector node in order to
3766 // correctly identify the order of the gathered scalars.
3767 for (unsigned I = 0; I < NumScalars; ++I) {
3768 Value *V = TE.Scalars[I];
3769 if (!isa<LoadInst, ExtractElementInst, ExtractValueInst>(V))
3770 continue;
3771 if (const auto *LocalSTE = getTreeEntry(V)) {
3772 if (!STE)
3773 STE = LocalSTE;
3774 else if (STE != LocalSTE)
3775 // Take the order only from the single vector node.
3776 return std::nullopt;
3777 unsigned Lane =
3778 std::distance(STE->Scalars.begin(), find(STE->Scalars, V));
3779 if (Lane >= NumScalars)
3780 return std::nullopt;
3781 if (CurrentOrder[Lane] != NumScalars) {
3782 if (Lane != I)
3783 continue;
3784 UsedPositions.reset(CurrentOrder[Lane]);
3785 }
3786 // The partial identity (where only some elements of the gather node are
3787 // in the identity order) is good.
3788 CurrentOrder[Lane] = I;
3789 UsedPositions.set(I);
3790 }
3791 }
3792 // Need to keep the order if we have a vector entry and at least 2 scalars or
3793 // the vectorized entry has just 2 scalars.
3794 if (STE && (UsedPositions.count() > 1 || STE->Scalars.size() == 2)) {
3795 auto &&IsIdentityOrder = [NumScalars](ArrayRef<unsigned> CurrentOrder) {
3796 for (unsigned I = 0; I < NumScalars; ++I)
3797 if (CurrentOrder[I] != I && CurrentOrder[I] != NumScalars)
3798 return false;
3799 return true;
3800 };
3801 if (IsIdentityOrder(CurrentOrder))
3802 return OrdersType();
3803 auto *It = CurrentOrder.begin();
3804 for (unsigned I = 0; I < NumScalars;) {
3805 if (UsedPositions.test(I)) {
3806 ++I;
3807 continue;
3808 }
3809 if (*It == NumScalars) {
3810 *It = I;
3811 ++I;
3812 }
3813 ++It;
3814 }
3815 return std::move(CurrentOrder);
3816 }
3817 return std::nullopt;
3818 }
3819
3820 namespace {
3821 /// Tracks how the given sequence of loads can be represented.
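/// Gather means the loads must be gathered element by element; Vectorize
/// means the pointers are consecutive and a single wide load can be used;
/// ScatterVectorize means the pointers are not consecutive but a masked
/// gather is legal; PossibleStridedVectorize means the pointers appear to
/// form a constant stride, so a strided access may be possible.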
3822 enum class LoadsState { 3823 Gather, 3824 Vectorize, 3825 ScatterVectorize, 3826 PossibleStridedVectorize 3827 }; 3828 } // anonymous namespace 3829 3830 static bool arePointersCompatible(Value *Ptr1, Value *Ptr2, 3831 const TargetLibraryInfo &TLI, 3832 bool CompareOpcodes = true) { 3833 if (getUnderlyingObject(Ptr1) != getUnderlyingObject(Ptr2)) 3834 return false; 3835 auto *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1); 3836 if (!GEP1) 3837 return false; 3838 auto *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2); 3839 if (!GEP2) 3840 return false; 3841 return GEP1->getNumOperands() == 2 && GEP2->getNumOperands() == 2 && 3842 ((isConstant(GEP1->getOperand(1)) && 3843 isConstant(GEP2->getOperand(1))) || 3844 !CompareOpcodes || 3845 getSameOpcode({GEP1->getOperand(1), GEP2->getOperand(1)}, TLI) 3846 .getOpcode()); 3847 } 3848 3849 /// Checks if the given array of loads can be represented as a vectorized, 3850 /// scatter or just simple gather. 3851 static LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0, 3852 const TargetTransformInfo &TTI, 3853 const DataLayout &DL, ScalarEvolution &SE, 3854 LoopInfo &LI, const TargetLibraryInfo &TLI, 3855 SmallVectorImpl<unsigned> &Order, 3856 SmallVectorImpl<Value *> &PointerOps) { 3857 // Check that a vectorized load would load the same memory as a scalar 3858 // load. For example, we don't want to vectorize loads that are smaller 3859 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 3860 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 3861 // from such a struct, we read/write packed bits disagreeing with the 3862 // unvectorized version. 3863 Type *ScalarTy = VL0->getType(); 3864 3865 if (DL.getTypeSizeInBits(ScalarTy) != DL.getTypeAllocSizeInBits(ScalarTy)) 3866 return LoadsState::Gather; 3867 3868 // Make sure all loads in the bundle are simple - we can't vectorize 3869 // atomic or volatile loads. 3870 PointerOps.clear(); 3871 PointerOps.resize(VL.size()); 3872 auto *POIter = PointerOps.begin(); 3873 for (Value *V : VL) { 3874 auto *L = cast<LoadInst>(V); 3875 if (!L->isSimple()) 3876 return LoadsState::Gather; 3877 *POIter = L->getPointerOperand(); 3878 ++POIter; 3879 } 3880 3881 Order.clear(); 3882 // Check the order of pointer operands or that all pointers are the same. 3883 bool IsSorted = sortPtrAccesses(PointerOps, ScalarTy, DL, SE, Order); 3884 if (IsSorted || all_of(PointerOps, [&](Value *P) { 3885 return arePointersCompatible(P, PointerOps.front(), TLI); 3886 })) { 3887 bool IsPossibleStrided = false; 3888 if (IsSorted) { 3889 Value *Ptr0; 3890 Value *PtrN; 3891 if (Order.empty()) { 3892 Ptr0 = PointerOps.front(); 3893 PtrN = PointerOps.back(); 3894 } else { 3895 Ptr0 = PointerOps[Order.front()]; 3896 PtrN = PointerOps[Order.back()]; 3897 } 3898 std::optional<int> Diff = 3899 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, DL, SE); 3900 // Check that the sorted loads are consecutive. 3901 if (static_cast<unsigned>(*Diff) == VL.size() - 1) 3902 return LoadsState::Vectorize; 3903 // Simple check if not a strided access - clear order. 3904 IsPossibleStrided = *Diff % (VL.size() - 1) == 0; 3905 } 3906 // TODO: need to improve analysis of the pointers, if not all of them are 3907 // GEPs or have > 2 operands, we end up with a gather node, which just 3908 // increases the cost. 
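// Heuristic: gathering the pointers into a vector is only treated as
// profitable when there are more than two loads and at most half of the
// pointers are loop-invariant.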
3909 Loop *L = LI.getLoopFor(cast<LoadInst>(VL0)->getParent()); 3910 bool ProfitableGatherPointers = 3911 static_cast<unsigned>(count_if(PointerOps, [L](Value *V) { 3912 return L && L->isLoopInvariant(V); 3913 })) <= VL.size() / 2 && VL.size() > 2; 3914 if (ProfitableGatherPointers || all_of(PointerOps, [IsSorted](Value *P) { 3915 auto *GEP = dyn_cast<GetElementPtrInst>(P); 3916 return (IsSorted && !GEP && doesNotNeedToBeScheduled(P)) || 3917 (GEP && GEP->getNumOperands() == 2); 3918 })) { 3919 Align CommonAlignment = cast<LoadInst>(VL0)->getAlign(); 3920 for (Value *V : VL) 3921 CommonAlignment = 3922 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign()); 3923 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 3924 if (TTI.isLegalMaskedGather(VecTy, CommonAlignment) && 3925 !TTI.forceScalarizeMaskedGather(VecTy, CommonAlignment)) 3926 return IsPossibleStrided ? LoadsState::PossibleStridedVectorize 3927 : LoadsState::ScatterVectorize; 3928 } 3929 } 3930 3931 return LoadsState::Gather; 3932 } 3933 3934 static bool clusterSortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy, 3935 const DataLayout &DL, ScalarEvolution &SE, 3936 SmallVectorImpl<unsigned> &SortedIndices) { 3937 assert(llvm::all_of( 3938 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) && 3939 "Expected list of pointer operands."); 3940 // Map from bases to a vector of (Ptr, Offset, OrigIdx), which we insert each 3941 // Ptr into, sort and return the sorted indices with values next to one 3942 // another. 3943 MapVector<Value *, SmallVector<std::tuple<Value *, int, unsigned>>> Bases; 3944 Bases[VL[0]].push_back(std::make_tuple(VL[0], 0U, 0U)); 3945 3946 unsigned Cnt = 1; 3947 for (Value *Ptr : VL.drop_front()) { 3948 bool Found = any_of(Bases, [&](auto &Base) { 3949 std::optional<int> Diff = 3950 getPointersDiff(ElemTy, Base.first, ElemTy, Ptr, DL, SE, 3951 /*StrictCheck=*/true); 3952 if (!Diff) 3953 return false; 3954 3955 Base.second.emplace_back(Ptr, *Diff, Cnt++); 3956 return true; 3957 }); 3958 3959 if (!Found) { 3960 // If we haven't found enough to usefully cluster, return early. 3961 if (Bases.size() > VL.size() / 2 - 1) 3962 return false; 3963 3964 // Not found already - add a new Base 3965 Bases[Ptr].emplace_back(Ptr, 0, Cnt++); 3966 } 3967 } 3968 3969 // For each of the bases sort the pointers by Offset and check if any of the 3970 // base become consecutively allocated. 3971 bool AnyConsecutive = false; 3972 for (auto &Base : Bases) { 3973 auto &Vec = Base.second; 3974 if (Vec.size() > 1) { 3975 llvm::stable_sort(Vec, [](const std::tuple<Value *, int, unsigned> &X, 3976 const std::tuple<Value *, int, unsigned> &Y) { 3977 return std::get<1>(X) < std::get<1>(Y); 3978 }); 3979 int InitialOffset = std::get<1>(Vec[0]); 3980 AnyConsecutive |= all_of(enumerate(Vec), [InitialOffset](const auto &P) { 3981 return std::get<1>(P.value()) == int(P.index()) + InitialOffset; 3982 }); 3983 } 3984 } 3985 3986 // Fill SortedIndices array only if it looks worth-while to sort the ptrs. 
3987 SortedIndices.clear(); 3988 if (!AnyConsecutive) 3989 return false; 3990 3991 for (auto &Base : Bases) { 3992 for (auto &T : Base.second) 3993 SortedIndices.push_back(std::get<2>(T)); 3994 } 3995 3996 assert(SortedIndices.size() == VL.size() && 3997 "Expected SortedIndices to be the size of VL"); 3998 return true; 3999 } 4000 4001 std::optional<BoUpSLP::OrdersType> 4002 BoUpSLP::findPartiallyOrderedLoads(const BoUpSLP::TreeEntry &TE) { 4003 assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only."); 4004 Type *ScalarTy = TE.Scalars[0]->getType(); 4005 4006 SmallVector<Value *> Ptrs; 4007 Ptrs.reserve(TE.Scalars.size()); 4008 for (Value *V : TE.Scalars) { 4009 auto *L = dyn_cast<LoadInst>(V); 4010 if (!L || !L->isSimple()) 4011 return std::nullopt; 4012 Ptrs.push_back(L->getPointerOperand()); 4013 } 4014 4015 BoUpSLP::OrdersType Order; 4016 if (clusterSortPtrAccesses(Ptrs, ScalarTy, *DL, *SE, Order)) 4017 return std::move(Order); 4018 return std::nullopt; 4019 } 4020 4021 /// Check if two insertelement instructions are from the same buildvector. 4022 static bool areTwoInsertFromSameBuildVector( 4023 InsertElementInst *VU, InsertElementInst *V, 4024 function_ref<Value *(InsertElementInst *)> GetBaseOperand) { 4025 // Instructions must be from the same basic blocks. 4026 if (VU->getParent() != V->getParent()) 4027 return false; 4028 // Checks if 2 insertelements are from the same buildvector. 4029 if (VU->getType() != V->getType()) 4030 return false; 4031 // Multiple used inserts are separate nodes. 4032 if (!VU->hasOneUse() && !V->hasOneUse()) 4033 return false; 4034 auto *IE1 = VU; 4035 auto *IE2 = V; 4036 std::optional<unsigned> Idx1 = getInsertIndex(IE1); 4037 std::optional<unsigned> Idx2 = getInsertIndex(IE2); 4038 if (Idx1 == std::nullopt || Idx2 == std::nullopt) 4039 return false; 4040 // Go through the vector operand of insertelement instructions trying to find 4041 // either VU as the original vector for IE2 or V as the original vector for 4042 // IE1. 4043 SmallBitVector ReusedIdx( 4044 cast<VectorType>(VU->getType())->getElementCount().getKnownMinValue()); 4045 bool IsReusedIdx = false; 4046 do { 4047 if (IE2 == VU && !IE1) 4048 return VU->hasOneUse(); 4049 if (IE1 == V && !IE2) 4050 return V->hasOneUse(); 4051 if (IE1 && IE1 != V) { 4052 unsigned Idx1 = getInsertIndex(IE1).value_or(*Idx2); 4053 IsReusedIdx |= ReusedIdx.test(Idx1); 4054 ReusedIdx.set(Idx1); 4055 if ((IE1 != VU && !IE1->hasOneUse()) || IsReusedIdx) 4056 IE1 = nullptr; 4057 else 4058 IE1 = dyn_cast_or_null<InsertElementInst>(GetBaseOperand(IE1)); 4059 } 4060 if (IE2 && IE2 != VU) { 4061 unsigned Idx2 = getInsertIndex(IE2).value_or(*Idx1); 4062 IsReusedIdx |= ReusedIdx.test(Idx2); 4063 ReusedIdx.set(Idx2); 4064 if ((IE2 != V && !IE2->hasOneUse()) || IsReusedIdx) 4065 IE2 = nullptr; 4066 else 4067 IE2 = dyn_cast_or_null<InsertElementInst>(GetBaseOperand(IE2)); 4068 } 4069 } while (!IsReusedIdx && (IE1 || IE2)); 4070 return false; 4071 } 4072 4073 std::optional<BoUpSLP::OrdersType> 4074 BoUpSLP::getReorderingData(const TreeEntry &TE, bool TopToBottom) { 4075 // No need to reorder if need to shuffle reuses, still need to shuffle the 4076 // node. 4077 if (!TE.ReuseShuffleIndices.empty()) { 4078 // Check if reuse shuffle indices can be improved by reordering. 4079 // For this, check that reuse mask is "clustered", i.e. each scalar values 4080 // is used once in each submask of size <number_of_scalars>. 4081 // Example: 4 scalar values. 
4082 // ReuseShuffleIndices mask: 0, 1, 2, 3, 3, 2, 0, 1 - clustered. 4083 // 0, 1, 2, 3, 3, 3, 1, 0 - not clustered, because 4084 // element 3 is used twice in the second submask. 4085 unsigned Sz = TE.Scalars.size(); 4086 if (!ShuffleVectorInst::isOneUseSingleSourceMask(TE.ReuseShuffleIndices, 4087 Sz)) 4088 return std::nullopt; 4089 unsigned VF = TE.getVectorFactor(); 4090 // Try build correct order for extractelement instructions. 4091 SmallVector<int> ReusedMask(TE.ReuseShuffleIndices.begin(), 4092 TE.ReuseShuffleIndices.end()); 4093 if (TE.getOpcode() == Instruction::ExtractElement && !TE.isAltShuffle() && 4094 all_of(TE.Scalars, [Sz](Value *V) { 4095 std::optional<unsigned> Idx = getExtractIndex(cast<Instruction>(V)); 4096 return Idx && *Idx < Sz; 4097 })) { 4098 SmallVector<int> ReorderMask(Sz, PoisonMaskElem); 4099 if (TE.ReorderIndices.empty()) 4100 std::iota(ReorderMask.begin(), ReorderMask.end(), 0); 4101 else 4102 inversePermutation(TE.ReorderIndices, ReorderMask); 4103 for (unsigned I = 0; I < VF; ++I) { 4104 int &Idx = ReusedMask[I]; 4105 if (Idx == PoisonMaskElem) 4106 continue; 4107 Value *V = TE.Scalars[ReorderMask[Idx]]; 4108 std::optional<unsigned> EI = getExtractIndex(cast<Instruction>(V)); 4109 Idx = std::distance(ReorderMask.begin(), find(ReorderMask, *EI)); 4110 } 4111 } 4112 // Build the order of the VF size, need to reorder reuses shuffles, they are 4113 // always of VF size. 4114 OrdersType ResOrder(VF); 4115 std::iota(ResOrder.begin(), ResOrder.end(), 0); 4116 auto *It = ResOrder.begin(); 4117 for (unsigned K = 0; K < VF; K += Sz) { 4118 OrdersType CurrentOrder(TE.ReorderIndices); 4119 SmallVector<int> SubMask{ArrayRef(ReusedMask).slice(K, Sz)}; 4120 if (SubMask.front() == PoisonMaskElem) 4121 std::iota(SubMask.begin(), SubMask.end(), 0); 4122 reorderOrder(CurrentOrder, SubMask); 4123 transform(CurrentOrder, It, [K](unsigned Pos) { return Pos + K; }); 4124 std::advance(It, Sz); 4125 } 4126 if (all_of(enumerate(ResOrder), 4127 [](const auto &Data) { return Data.index() == Data.value(); })) 4128 return std::nullopt; // No need to reorder. 
4129 return std::move(ResOrder); 4130 } 4131 if ((TE.State == TreeEntry::Vectorize || 4132 TE.State == TreeEntry::PossibleStridedVectorize) && 4133 (isa<LoadInst, ExtractElementInst, ExtractValueInst>(TE.getMainOp()) || 4134 (TopToBottom && isa<StoreInst, InsertElementInst>(TE.getMainOp()))) && 4135 !TE.isAltShuffle()) 4136 return TE.ReorderIndices; 4137 if (TE.State == TreeEntry::Vectorize && TE.getOpcode() == Instruction::PHI) { 4138 auto PHICompare = [&](unsigned I1, unsigned I2) { 4139 Value *V1 = TE.Scalars[I1]; 4140 Value *V2 = TE.Scalars[I2]; 4141 if (V1 == V2) 4142 return false; 4143 if (!V1->hasOneUse() || !V2->hasOneUse()) 4144 return false; 4145 auto *FirstUserOfPhi1 = cast<Instruction>(*V1->user_begin()); 4146 auto *FirstUserOfPhi2 = cast<Instruction>(*V2->user_begin()); 4147 if (auto *IE1 = dyn_cast<InsertElementInst>(FirstUserOfPhi1)) 4148 if (auto *IE2 = dyn_cast<InsertElementInst>(FirstUserOfPhi2)) { 4149 if (!areTwoInsertFromSameBuildVector( 4150 IE1, IE2, 4151 [](InsertElementInst *II) { return II->getOperand(0); })) 4152 return false; 4153 std::optional<unsigned> Idx1 = getInsertIndex(IE1); 4154 std::optional<unsigned> Idx2 = getInsertIndex(IE2); 4155 if (Idx1 == std::nullopt || Idx2 == std::nullopt) 4156 return false; 4157 return *Idx1 < *Idx2; 4158 } 4159 if (auto *EE1 = dyn_cast<ExtractElementInst>(FirstUserOfPhi1)) 4160 if (auto *EE2 = dyn_cast<ExtractElementInst>(FirstUserOfPhi2)) { 4161 if (EE1->getOperand(0) != EE2->getOperand(0)) 4162 return false; 4163 std::optional<unsigned> Idx1 = getExtractIndex(EE1); 4164 std::optional<unsigned> Idx2 = getExtractIndex(EE2); 4165 if (Idx1 == std::nullopt || Idx2 == std::nullopt) 4166 return false; 4167 return *Idx1 < *Idx2; 4168 } 4169 return false; 4170 }; 4171 auto IsIdentityOrder = [](const OrdersType &Order) { 4172 for (unsigned Idx : seq<unsigned>(0, Order.size())) 4173 if (Idx != Order[Idx]) 4174 return false; 4175 return true; 4176 }; 4177 if (!TE.ReorderIndices.empty()) 4178 return TE.ReorderIndices; 4179 DenseMap<unsigned, unsigned> PhiToId; 4180 SmallVector<unsigned> Phis(TE.Scalars.size()); 4181 std::iota(Phis.begin(), Phis.end(), 0); 4182 OrdersType ResOrder(TE.Scalars.size()); 4183 for (unsigned Id = 0, Sz = TE.Scalars.size(); Id < Sz; ++Id) 4184 PhiToId[Id] = Id; 4185 stable_sort(Phis, PHICompare); 4186 for (unsigned Id = 0, Sz = Phis.size(); Id < Sz; ++Id) 4187 ResOrder[Id] = PhiToId[Phis[Id]]; 4188 if (IsIdentityOrder(ResOrder)) 4189 return std::nullopt; // No need to reorder. 4190 return std::move(ResOrder); 4191 } 4192 if (TE.State == TreeEntry::NeedToGather) { 4193 // TODO: add analysis of other gather nodes with extractelement 4194 // instructions and other values/instructions, not only undefs. 4195 if (((TE.getOpcode() == Instruction::ExtractElement && 4196 !TE.isAltShuffle()) || 4197 (all_of(TE.Scalars, 4198 [](Value *V) { 4199 return isa<UndefValue, ExtractElementInst>(V); 4200 }) && 4201 any_of(TE.Scalars, 4202 [](Value *V) { return isa<ExtractElementInst>(V); }))) && 4203 all_of(TE.Scalars, 4204 [](Value *V) { 4205 auto *EE = dyn_cast<ExtractElementInst>(V); 4206 return !EE || isa<FixedVectorType>(EE->getVectorOperandType()); 4207 }) && 4208 allSameType(TE.Scalars)) { 4209 // Check that gather of extractelements can be represented as 4210 // just a shuffle of a single vector. 
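// E.g. extracts of lanes 2, 0, 3, 1 from a single source vector %v can be
// emitted as one shufflevector of %v with mask <2, 0, 3, 1>.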
4211 OrdersType CurrentOrder; 4212 bool Reuse = canReuseExtract(TE.Scalars, TE.getMainOp(), CurrentOrder, 4213 /*ResizeAllowed=*/true); 4214 if (Reuse || !CurrentOrder.empty()) { 4215 if (!CurrentOrder.empty()) 4216 fixupOrderingIndices(CurrentOrder); 4217 return std::move(CurrentOrder); 4218 } 4219 } 4220 // If the gather node is <undef, v, .., poison> and 4221 // insertelement poison, v, 0 [+ permute] 4222 // is cheaper than 4223 // insertelement poison, v, n - try to reorder. 4224 // If rotating the whole graph, exclude the permute cost, the whole graph 4225 // might be transformed. 4226 int Sz = TE.Scalars.size(); 4227 if (isSplat(TE.Scalars) && !allConstant(TE.Scalars) && 4228 count_if(TE.Scalars, UndefValue::classof) == Sz - 1) { 4229 const auto *It = 4230 find_if(TE.Scalars, [](Value *V) { return !isConstant(V); }); 4231 if (It == TE.Scalars.begin()) 4232 return OrdersType(); 4233 auto *Ty = FixedVectorType::get(TE.Scalars.front()->getType(), Sz); 4234 if (It != TE.Scalars.end()) { 4235 OrdersType Order(Sz, Sz); 4236 unsigned Idx = std::distance(TE.Scalars.begin(), It); 4237 Order[Idx] = 0; 4238 fixupOrderingIndices(Order); 4239 SmallVector<int> Mask; 4240 inversePermutation(Order, Mask); 4241 InstructionCost PermuteCost = 4242 TopToBottom 4243 ? 0 4244 : TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty, Mask); 4245 InstructionCost InsertFirstCost = TTI->getVectorInstrCost( 4246 Instruction::InsertElement, Ty, TTI::TCK_RecipThroughput, 0, 4247 PoisonValue::get(Ty), *It); 4248 InstructionCost InsertIdxCost = TTI->getVectorInstrCost( 4249 Instruction::InsertElement, Ty, TTI::TCK_RecipThroughput, Idx, 4250 PoisonValue::get(Ty), *It); 4251 if (InsertFirstCost + PermuteCost < InsertIdxCost) 4252 return std::move(Order); 4253 } 4254 } 4255 if (std::optional<OrdersType> CurrentOrder = findReusedOrderedScalars(TE)) 4256 return CurrentOrder; 4257 if (TE.Scalars.size() >= 4) 4258 if (std::optional<OrdersType> Order = findPartiallyOrderedLoads(TE)) 4259 return Order; 4260 } 4261 return std::nullopt; 4262 } 4263 4264 /// Checks if the given mask is a "clustered" mask with the same clusters of 4265 /// size \p Sz, which are not identity submasks. 4266 static bool isRepeatedNonIdentityClusteredMask(ArrayRef<int> Mask, 4267 unsigned Sz) { 4268 ArrayRef<int> FirstCluster = Mask.slice(0, Sz); 4269 if (ShuffleVectorInst::isIdentityMask(FirstCluster, Sz)) 4270 return false; 4271 for (unsigned I = Sz, E = Mask.size(); I < E; I += Sz) { 4272 ArrayRef<int> Cluster = Mask.slice(I, Sz); 4273 if (Cluster != FirstCluster) 4274 return false; 4275 } 4276 return true; 4277 } 4278 4279 void BoUpSLP::reorderNodeWithReuses(TreeEntry &TE, ArrayRef<int> Mask) const { 4280 // Reorder reuses mask. 4281 reorderReuses(TE.ReuseShuffleIndices, Mask); 4282 const unsigned Sz = TE.Scalars.size(); 4283 // For vectorized and non-clustered reused no need to do anything else. 4284 if (TE.State != TreeEntry::NeedToGather || 4285 !ShuffleVectorInst::isOneUseSingleSourceMask(TE.ReuseShuffleIndices, 4286 Sz) || 4287 !isRepeatedNonIdentityClusteredMask(TE.ReuseShuffleIndices, Sz)) 4288 return; 4289 SmallVector<int> NewMask; 4290 inversePermutation(TE.ReorderIndices, NewMask); 4291 addMask(NewMask, TE.ReuseShuffleIndices); 4292 // Clear reorder since it is going to be applied to the new mask. 4293 TE.ReorderIndices.clear(); 4294 // Try to improve gathered nodes with clustered reuses, if possible. 
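// Reorder the scalars by the first cluster of the combined mask and then
// rewrite the reuse mask as repeated identity submasks.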
4295 ArrayRef<int> Slice = ArrayRef(NewMask).slice(0, Sz);
4296 SmallVector<unsigned> NewOrder(Slice.begin(), Slice.end());
4297 inversePermutation(NewOrder, NewMask);
4298 reorderScalars(TE.Scalars, NewMask);
4299 // Fill the reuses mask with the identity submasks.
4300 for (auto *It = TE.ReuseShuffleIndices.begin(),
4301 *End = TE.ReuseShuffleIndices.end();
4302 It != End; std::advance(It, Sz))
4303 std::iota(It, std::next(It, Sz), 0);
4304 }
4305
4306 void BoUpSLP::reorderTopToBottom() {
4307 // Maps VF to the graph nodes.
4308 DenseMap<unsigned, SetVector<TreeEntry *>> VFToOrderedEntries;
4309 // ExtractElement gather nodes which can be vectorized and need to handle
4310 // their ordering.
4311 DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
4312
4313 // Phi nodes can have preferred ordering based on their result users.
4314 DenseMap<const TreeEntry *, OrdersType> PhisToOrders;
4315
4316 // AltShuffles can also have a preferred ordering that leads to fewer
4317 // instructions, e.g., the addsub instruction in x86.
4318 DenseMap<const TreeEntry *, OrdersType> AltShufflesToOrders;
4319
4320 // Maps a TreeEntry to the reorder indices of external users.
4321 DenseMap<const TreeEntry *, SmallVector<OrdersType, 1>>
4322 ExternalUserReorderMap;
4323 // FIXME: Workaround for syntax error reported by MSVC buildbots.
4324 TargetTransformInfo &TTIRef = *TTI;
4325 // Find all reorderable nodes with the given VF.
4326 // Currently these are vectorized stores, loads, extracts + some gathering
4327 // of extracts.
4328 for_each(VectorizableTree, [this, &TTIRef, &VFToOrderedEntries,
4329 &GathersToOrders, &ExternalUserReorderMap,
4330 &AltShufflesToOrders, &PhisToOrders](
4331 const std::unique_ptr<TreeEntry> &TE) {
4332 // Look for external users that will probably be vectorized.
4333 SmallVector<OrdersType, 1> ExternalUserReorderIndices =
4334 findExternalStoreUsersReorderIndices(TE.get());
4335 if (!ExternalUserReorderIndices.empty()) {
4336 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get());
4337 ExternalUserReorderMap.try_emplace(TE.get(),
4338 std::move(ExternalUserReorderIndices));
4339 }
4340
4341 // Patterns like [fadd,fsub] can be combined into a single instruction in
4342 // x86. Reordering them into [fsub,fadd] blocks this pattern. So we need
4343 // to take into account their order when looking for the most used order.
4344 if (TE->isAltShuffle()) {
4345 VectorType *VecTy =
4346 FixedVectorType::get(TE->Scalars[0]->getType(), TE->Scalars.size());
4347 unsigned Opcode0 = TE->getOpcode();
4348 unsigned Opcode1 = TE->getAltOpcode();
4349 // The opcode mask selects between the two opcodes.
4350 SmallBitVector OpcodeMask(TE->Scalars.size(), false);
4351 for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size()))
4352 if (cast<Instruction>(TE->Scalars[Lane])->getOpcode() == Opcode1)
4353 OpcodeMask.set(Lane);
4354 // If this pattern is supported by the target then we consider the order.
4355 if (TTIRef.isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask)) {
4356 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get());
4357 AltShufflesToOrders.try_emplace(TE.get(), OrdersType());
4358 }
4359 // TODO: Check the reverse order too.
4360 }
4361
4362 if (std::optional<OrdersType> CurrentOrder =
4363 getReorderingData(*TE, /*TopToBottom=*/true)) {
4364 // Do not include ordering for nodes used in the alt opcode vectorization;
4365 // it is better to reorder them during the bottom-to-top stage.
If follow the order 4366 // here, it causes reordering of the whole graph though actually it is 4367 // profitable just to reorder the subgraph that starts from the alternate 4368 // opcode vectorization node. Such nodes already end-up with the shuffle 4369 // instruction and it is just enough to change this shuffle rather than 4370 // rotate the scalars for the whole graph. 4371 unsigned Cnt = 0; 4372 const TreeEntry *UserTE = TE.get(); 4373 while (UserTE && Cnt < RecursionMaxDepth) { 4374 if (UserTE->UserTreeIndices.size() != 1) 4375 break; 4376 if (all_of(UserTE->UserTreeIndices, [](const EdgeInfo &EI) { 4377 return EI.UserTE->State == TreeEntry::Vectorize && 4378 EI.UserTE->isAltShuffle() && EI.UserTE->Idx != 0; 4379 })) 4380 return; 4381 UserTE = UserTE->UserTreeIndices.back().UserTE; 4382 ++Cnt; 4383 } 4384 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get()); 4385 if (!(TE->State == TreeEntry::Vectorize || 4386 TE->State == TreeEntry::PossibleStridedVectorize) || 4387 !TE->ReuseShuffleIndices.empty()) 4388 GathersToOrders.try_emplace(TE.get(), *CurrentOrder); 4389 if (TE->State == TreeEntry::Vectorize && 4390 TE->getOpcode() == Instruction::PHI) 4391 PhisToOrders.try_emplace(TE.get(), *CurrentOrder); 4392 } 4393 }); 4394 4395 // Reorder the graph nodes according to their vectorization factor. 4396 for (unsigned VF = VectorizableTree.front()->getVectorFactor(); VF > 1; 4397 VF /= 2) { 4398 auto It = VFToOrderedEntries.find(VF); 4399 if (It == VFToOrderedEntries.end()) 4400 continue; 4401 // Try to find the most profitable order. We just are looking for the most 4402 // used order and reorder scalar elements in the nodes according to this 4403 // mostly used order. 4404 ArrayRef<TreeEntry *> OrderedEntries = It->second.getArrayRef(); 4405 // All operands are reordered and used only in this node - propagate the 4406 // most used order to the user node. 4407 MapVector<OrdersType, unsigned, 4408 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>> 4409 OrdersUses; 4410 // Last chance orders - scatter vectorize. Try to use their orders if no 4411 // other orders or the order is counted already. 4412 SmallVector<OrdersType> StridedVectorizeOrders; 4413 SmallPtrSet<const TreeEntry *, 4> VisitedOps; 4414 for (const TreeEntry *OpTE : OrderedEntries) { 4415 // No need to reorder this nodes, still need to extend and to use shuffle, 4416 // just need to merge reordering shuffle and the reuse shuffle. 4417 if (!OpTE->ReuseShuffleIndices.empty() && !GathersToOrders.count(OpTE)) 4418 continue; 4419 // Count number of orders uses. 4420 const auto &Order = [OpTE, &GathersToOrders, &AltShufflesToOrders, 4421 &PhisToOrders]() -> const OrdersType & { 4422 if (OpTE->State == TreeEntry::NeedToGather || 4423 !OpTE->ReuseShuffleIndices.empty()) { 4424 auto It = GathersToOrders.find(OpTE); 4425 if (It != GathersToOrders.end()) 4426 return It->second; 4427 } 4428 if (OpTE->isAltShuffle()) { 4429 auto It = AltShufflesToOrders.find(OpTE); 4430 if (It != AltShufflesToOrders.end()) 4431 return It->second; 4432 } 4433 if (OpTE->State == TreeEntry::Vectorize && 4434 OpTE->getOpcode() == Instruction::PHI) { 4435 auto It = PhisToOrders.find(OpTE); 4436 if (It != PhisToOrders.end()) 4437 return It->second; 4438 } 4439 return OpTE->ReorderIndices; 4440 }(); 4441 // First consider the order of the external scalar users. 
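      // These are the orders preferred by stores outside the tree that use
      // this entry's scalars (collected in ExternalUserReorderMap above);
      // they are counted alongside the orders of the graph nodes themselves.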
4442 auto It = ExternalUserReorderMap.find(OpTE); 4443 if (It != ExternalUserReorderMap.end()) { 4444 const auto &ExternalUserReorderIndices = It->second; 4445 // If the OpTE vector factor != number of scalars - use natural order, 4446 // it is an attempt to reorder node with reused scalars but with 4447 // external uses. 4448 if (OpTE->getVectorFactor() != OpTE->Scalars.size()) { 4449 OrdersUses.insert(std::make_pair(OrdersType(), 0)).first->second += 4450 ExternalUserReorderIndices.size(); 4451 } else { 4452 for (const OrdersType &ExtOrder : ExternalUserReorderIndices) 4453 ++OrdersUses.insert(std::make_pair(ExtOrder, 0)).first->second; 4454 } 4455 // No other useful reorder data in this entry. 4456 if (Order.empty()) 4457 continue; 4458 } 4459 // Postpone scatter orders. 4460 if (OpTE->State == TreeEntry::PossibleStridedVectorize) { 4461 StridedVectorizeOrders.push_back(Order); 4462 continue; 4463 } 4464 // Stores actually store the mask, not the order, need to invert. 4465 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() && 4466 OpTE->getOpcode() == Instruction::Store && !Order.empty()) { 4467 SmallVector<int> Mask; 4468 inversePermutation(Order, Mask); 4469 unsigned E = Order.size(); 4470 OrdersType CurrentOrder(E, E); 4471 transform(Mask, CurrentOrder.begin(), [E](int Idx) { 4472 return Idx == PoisonMaskElem ? E : static_cast<unsigned>(Idx); 4473 }); 4474 fixupOrderingIndices(CurrentOrder); 4475 ++OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second; 4476 } else { 4477 ++OrdersUses.insert(std::make_pair(Order, 0)).first->second; 4478 } 4479 } 4480 // Set order of the user node. 4481 if (OrdersUses.empty()) { 4482 if (StridedVectorizeOrders.empty()) 4483 continue; 4484 // Add (potentially!) strided vectorize orders. 4485 for (OrdersType &Order : StridedVectorizeOrders) 4486 ++OrdersUses.insert(std::make_pair(Order, 0)).first->second; 4487 } else { 4488 // Account (potentially!) strided vectorize orders only if it was used 4489 // already. 4490 for (OrdersType &Order : StridedVectorizeOrders) { 4491 auto *It = OrdersUses.find(Order); 4492 if (It != OrdersUses.end()) 4493 ++It->second; 4494 } 4495 } 4496 // Choose the most used order. 4497 ArrayRef<unsigned> BestOrder = OrdersUses.front().first; 4498 unsigned Cnt = OrdersUses.front().second; 4499 for (const auto &Pair : drop_begin(OrdersUses)) { 4500 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) { 4501 BestOrder = Pair.first; 4502 Cnt = Pair.second; 4503 } 4504 } 4505 // Set order of the user node. 4506 if (BestOrder.empty()) 4507 continue; 4508 SmallVector<int> Mask; 4509 inversePermutation(BestOrder, Mask); 4510 SmallVector<int> MaskOrder(BestOrder.size(), PoisonMaskElem); 4511 unsigned E = BestOrder.size(); 4512 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) { 4513 return I < E ? static_cast<int>(I) : PoisonMaskElem; 4514 }); 4515 // Do an actual reordering, if profitable. 4516 for (std::unique_ptr<TreeEntry> &TE : VectorizableTree) { 4517 // Just do the reordering for the nodes with the given VF. 4518 if (TE->Scalars.size() != VF) { 4519 if (TE->ReuseShuffleIndices.size() == VF) { 4520 // Need to reorder the reuses masks of the operands with smaller VF to 4521 // be able to find the match between the graph nodes and scalar 4522 // operands of the given node during vectorization/cost estimation. 
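        // Such operands keep their own (smaller) number of scalars but carry
        // a reuse mask of the user's VF, so the reordering is applied through
        // that reuse mask via reorderNodeWithReuses().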
4523 assert(all_of(TE->UserTreeIndices, 4524 [VF, &TE](const EdgeInfo &EI) { 4525 return EI.UserTE->Scalars.size() == VF || 4526 EI.UserTE->Scalars.size() == 4527 TE->Scalars.size(); 4528 }) && 4529 "All users must be of VF size."); 4530 // Update ordering of the operands with the smaller VF than the given 4531 // one. 4532 reorderNodeWithReuses(*TE, Mask); 4533 } 4534 continue; 4535 } 4536 if ((TE->State == TreeEntry::Vectorize || 4537 TE->State == TreeEntry::PossibleStridedVectorize) && 4538 isa<ExtractElementInst, ExtractValueInst, LoadInst, StoreInst, 4539 InsertElementInst>(TE->getMainOp()) && 4540 !TE->isAltShuffle()) { 4541 // Build correct orders for extract{element,value}, loads and 4542 // stores. 4543 reorderOrder(TE->ReorderIndices, Mask); 4544 if (isa<InsertElementInst, StoreInst>(TE->getMainOp())) 4545 TE->reorderOperands(Mask); 4546 } else { 4547 // Reorder the node and its operands. 4548 TE->reorderOperands(Mask); 4549 assert(TE->ReorderIndices.empty() && 4550 "Expected empty reorder sequence."); 4551 reorderScalars(TE->Scalars, Mask); 4552 } 4553 if (!TE->ReuseShuffleIndices.empty()) { 4554 // Apply reversed order to keep the original ordering of the reused 4555 // elements to avoid extra reorder indices shuffling. 4556 OrdersType CurrentOrder; 4557 reorderOrder(CurrentOrder, MaskOrder); 4558 SmallVector<int> NewReuses; 4559 inversePermutation(CurrentOrder, NewReuses); 4560 addMask(NewReuses, TE->ReuseShuffleIndices); 4561 TE->ReuseShuffleIndices.swap(NewReuses); 4562 } 4563 } 4564 } 4565 } 4566 4567 bool BoUpSLP::canReorderOperands( 4568 TreeEntry *UserTE, SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges, 4569 ArrayRef<TreeEntry *> ReorderableGathers, 4570 SmallVectorImpl<TreeEntry *> &GatherOps) { 4571 for (unsigned I = 0, E = UserTE->getNumOperands(); I < E; ++I) { 4572 if (any_of(Edges, [I](const std::pair<unsigned, TreeEntry *> &OpData) { 4573 return OpData.first == I && 4574 OpData.second->State == TreeEntry::Vectorize; 4575 })) 4576 continue; 4577 if (TreeEntry *TE = getVectorizedOperand(UserTE, I)) { 4578 // FIXME: Do not reorder (possible!) strided vectorized nodes, they 4579 // require reordering of the operands, which is not implemented yet. 4580 if (TE->State == TreeEntry::PossibleStridedVectorize) 4581 return false; 4582 // Do not reorder if operand node is used by many user nodes. 4583 if (any_of(TE->UserTreeIndices, 4584 [UserTE](const EdgeInfo &EI) { return EI.UserTE != UserTE; })) 4585 return false; 4586 // Add the node to the list of the ordered nodes with the identity 4587 // order. 4588 Edges.emplace_back(I, TE); 4589 // Add ScatterVectorize nodes to the list of operands, where just 4590 // reordering of the scalars is required. Similar to the gathers, so 4591 // simply add to the list of gathered ops. 4592 // If there are reused scalars, process this node as a regular vectorize 4593 // node, just reorder reuses mask. 
4594 if (TE->State != TreeEntry::Vectorize && 4595 TE->ReuseShuffleIndices.empty() && TE->ReorderIndices.empty()) 4596 GatherOps.push_back(TE); 4597 continue; 4598 } 4599 TreeEntry *Gather = nullptr; 4600 if (count_if(ReorderableGathers, 4601 [&Gather, UserTE, I](TreeEntry *TE) { 4602 assert(TE->State != TreeEntry::Vectorize && 4603 "Only non-vectorized nodes are expected."); 4604 if (any_of(TE->UserTreeIndices, 4605 [UserTE, I](const EdgeInfo &EI) { 4606 return EI.UserTE == UserTE && EI.EdgeIdx == I; 4607 })) { 4608 assert(TE->isSame(UserTE->getOperand(I)) && 4609 "Operand entry does not match operands."); 4610 Gather = TE; 4611 return true; 4612 } 4613 return false; 4614 }) > 1 && 4615 !allConstant(UserTE->getOperand(I))) 4616 return false; 4617 if (Gather) 4618 GatherOps.push_back(Gather); 4619 } 4620 return true; 4621 } 4622 4623 void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) { 4624 SetVector<TreeEntry *> OrderedEntries; 4625 DenseMap<const TreeEntry *, OrdersType> GathersToOrders; 4626 // Find all reorderable leaf nodes with the given VF. 4627 // Currently the are vectorized loads,extracts without alternate operands + 4628 // some gathering of extracts. 4629 SmallVector<TreeEntry *> NonVectorized; 4630 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) { 4631 if (TE->State != TreeEntry::Vectorize && 4632 TE->State != TreeEntry::PossibleStridedVectorize) 4633 NonVectorized.push_back(TE.get()); 4634 if (std::optional<OrdersType> CurrentOrder = 4635 getReorderingData(*TE, /*TopToBottom=*/false)) { 4636 OrderedEntries.insert(TE.get()); 4637 if (!(TE->State == TreeEntry::Vectorize || 4638 TE->State == TreeEntry::PossibleStridedVectorize) || 4639 !TE->ReuseShuffleIndices.empty()) 4640 GathersToOrders.try_emplace(TE.get(), *CurrentOrder); 4641 } 4642 } 4643 4644 // 1. Propagate order to the graph nodes, which use only reordered nodes. 4645 // I.e., if the node has operands, that are reordered, try to make at least 4646 // one operand order in the natural order and reorder others + reorder the 4647 // user node itself. 4648 SmallPtrSet<const TreeEntry *, 4> Visited; 4649 while (!OrderedEntries.empty()) { 4650 // 1. Filter out only reordered nodes. 4651 // 2. If the entry has multiple uses - skip it and jump to the next node. 4652 DenseMap<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>> Users; 4653 SmallVector<TreeEntry *> Filtered; 4654 for (TreeEntry *TE : OrderedEntries) { 4655 if (!(TE->State == TreeEntry::Vectorize || 4656 TE->State == TreeEntry::PossibleStridedVectorize || 4657 (TE->State == TreeEntry::NeedToGather && 4658 GathersToOrders.count(TE))) || 4659 TE->UserTreeIndices.empty() || !TE->ReuseShuffleIndices.empty() || 4660 !all_of(drop_begin(TE->UserTreeIndices), 4661 [TE](const EdgeInfo &EI) { 4662 return EI.UserTE == TE->UserTreeIndices.front().UserTE; 4663 }) || 4664 !Visited.insert(TE).second) { 4665 Filtered.push_back(TE); 4666 continue; 4667 } 4668 // Build a map between user nodes and their operands order to speedup 4669 // search. The graph currently does not provide this dependency directly. 4670 for (EdgeInfo &EI : TE->UserTreeIndices) { 4671 TreeEntry *UserTE = EI.UserTE; 4672 auto It = Users.find(UserTE); 4673 if (It == Users.end()) 4674 It = Users.insert({UserTE, {}}).first; 4675 It->second.emplace_back(EI.EdgeIdx, TE); 4676 } 4677 } 4678 // Erase filtered entries. 
4679 for (TreeEntry *TE : Filtered)
4680 OrderedEntries.remove(TE);
4681 SmallVector<
4682 std::pair<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>>>
4683 UsersVec(Users.begin(), Users.end());
4684 sort(UsersVec, [](const auto &Data1, const auto &Data2) {
4685 return Data1.first->Idx > Data2.first->Idx;
4686 });
4687 for (auto &Data : UsersVec) {
4688 // Check that operands are used only in the User node.
4689 SmallVector<TreeEntry *> GatherOps;
4690 if (!canReorderOperands(Data.first, Data.second, NonVectorized,
4691 GatherOps)) {
4692 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second)
4693 OrderedEntries.remove(Op.second);
4694 continue;
4695 }
4696 // All operands are reordered and used only in this node - propagate the
4697 // most used order to the user node.
4698 MapVector<OrdersType, unsigned,
4699 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>>
4700 OrdersUses;
4701 // Last chance orders - scatter vectorize. Try to use their orders if no
4702 // other orders or the order is counted already.
4703 SmallVector<std::pair<OrdersType, unsigned>> StridedVectorizeOrders;
4704 // Do the analysis for each tree entry only once, otherwise the order of
4705 // the same node may be considered several times, though it might not be
4706 // profitable.
4707 SmallPtrSet<const TreeEntry *, 4> VisitedOps;
4708 SmallPtrSet<const TreeEntry *, 4> VisitedUsers;
4709 for (const auto &Op : Data.second) {
4710 TreeEntry *OpTE = Op.second;
4711 if (!VisitedOps.insert(OpTE).second)
4712 continue;
4713 if (!OpTE->ReuseShuffleIndices.empty() && !GathersToOrders.count(OpTE))
4714 continue;
4715 const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & {
4716 if (OpTE->State == TreeEntry::NeedToGather ||
4717 !OpTE->ReuseShuffleIndices.empty())
4718 return GathersToOrders.find(OpTE)->second;
4719 return OpTE->ReorderIndices;
4720 }();
4721 unsigned NumOps = count_if(
4722 Data.second, [OpTE](const std::pair<unsigned, TreeEntry *> &P) {
4723 return P.second == OpTE;
4724 });
4725 // Postpone scatter orders.
4726 if (OpTE->State == TreeEntry::PossibleStridedVectorize) {
4727 StridedVectorizeOrders.emplace_back(Order, NumOps);
4728 continue;
4729 }
4730 // Stores actually store the mask, not the order, need to invert.
4731 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() &&
4732 OpTE->getOpcode() == Instruction::Store && !Order.empty()) {
4733 SmallVector<int> Mask;
4734 inversePermutation(Order, Mask);
4735 unsigned E = Order.size();
4736 OrdersType CurrentOrder(E, E);
4737 transform(Mask, CurrentOrder.begin(), [E](int Idx) {
4738 return Idx == PoisonMaskElem ?
E : static_cast<unsigned>(Idx); 4739 }); 4740 fixupOrderingIndices(CurrentOrder); 4741 OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second += 4742 NumOps; 4743 } else { 4744 OrdersUses.insert(std::make_pair(Order, 0)).first->second += NumOps; 4745 } 4746 auto Res = OrdersUses.insert(std::make_pair(OrdersType(), 0)); 4747 const auto &&AllowsReordering = [IgnoreReorder, &GathersToOrders]( 4748 const TreeEntry *TE) { 4749 if (!TE->ReorderIndices.empty() || !TE->ReuseShuffleIndices.empty() || 4750 (TE->State == TreeEntry::Vectorize && TE->isAltShuffle()) || 4751 (IgnoreReorder && TE->Idx == 0)) 4752 return true; 4753 if (TE->State == TreeEntry::NeedToGather) { 4754 auto It = GathersToOrders.find(TE); 4755 if (It != GathersToOrders.end()) 4756 return !It->second.empty(); 4757 return true; 4758 } 4759 return false; 4760 }; 4761 for (const EdgeInfo &EI : OpTE->UserTreeIndices) { 4762 TreeEntry *UserTE = EI.UserTE; 4763 if (!VisitedUsers.insert(UserTE).second) 4764 continue; 4765 // May reorder user node if it requires reordering, has reused 4766 // scalars, is an alternate op vectorize node or its op nodes require 4767 // reordering. 4768 if (AllowsReordering(UserTE)) 4769 continue; 4770 // Check if users allow reordering. 4771 // Currently look up just 1 level of operands to avoid increase of 4772 // the compile time. 4773 // Profitable to reorder if definitely more operands allow 4774 // reordering rather than those with natural order. 4775 ArrayRef<std::pair<unsigned, TreeEntry *>> Ops = Users[UserTE]; 4776 if (static_cast<unsigned>(count_if( 4777 Ops, [UserTE, &AllowsReordering]( 4778 const std::pair<unsigned, TreeEntry *> &Op) { 4779 return AllowsReordering(Op.second) && 4780 all_of(Op.second->UserTreeIndices, 4781 [UserTE](const EdgeInfo &EI) { 4782 return EI.UserTE == UserTE; 4783 }); 4784 })) <= Ops.size() / 2) 4785 ++Res.first->second; 4786 } 4787 } 4788 // If no orders - skip current nodes and jump to the next one, if any. 4789 if (OrdersUses.empty()) { 4790 if (StridedVectorizeOrders.empty() || 4791 (Data.first->ReorderIndices.empty() && 4792 Data.first->ReuseShuffleIndices.empty() && 4793 !(IgnoreReorder && 4794 Data.first == VectorizableTree.front().get()))) { 4795 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) 4796 OrderedEntries.remove(Op.second); 4797 continue; 4798 } 4799 // Add (potentially!) strided vectorize orders. 4800 for (std::pair<OrdersType, unsigned> &Pair : StridedVectorizeOrders) 4801 OrdersUses.insert(std::make_pair(Pair.first, 0)).first->second += 4802 Pair.second; 4803 } else { 4804 // Account (potentially!) strided vectorize orders only if it was used 4805 // already. 4806 for (std::pair<OrdersType, unsigned> &Pair : StridedVectorizeOrders) { 4807 auto *It = OrdersUses.find(Pair.first); 4808 if (It != OrdersUses.end()) 4809 It->second += Pair.second; 4810 } 4811 } 4812 // Choose the best order. 4813 ArrayRef<unsigned> BestOrder = OrdersUses.front().first; 4814 unsigned Cnt = OrdersUses.front().second; 4815 for (const auto &Pair : drop_begin(OrdersUses)) { 4816 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) { 4817 BestOrder = Pair.first; 4818 Cnt = Pair.second; 4819 } 4820 } 4821 // Set order of the user node (reordering of operands and user nodes). 4822 if (BestOrder.empty()) { 4823 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) 4824 OrderedEntries.remove(Op.second); 4825 continue; 4826 } 4827 // Erase operands from OrderedEntries list and adjust their orders. 
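      // Convert the chosen order into shuffle masks: inversePermutation()
      // builds Mask with Mask[BestOrder[I]] == I (e.g. BestOrder <1,2,0>
      // gives Mask <2,0,1>), while MaskOrder keeps the order itself with
      // poison for the out-of-range sentinel entries.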
4828 VisitedOps.clear(); 4829 SmallVector<int> Mask; 4830 inversePermutation(BestOrder, Mask); 4831 SmallVector<int> MaskOrder(BestOrder.size(), PoisonMaskElem); 4832 unsigned E = BestOrder.size(); 4833 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) { 4834 return I < E ? static_cast<int>(I) : PoisonMaskElem; 4835 }); 4836 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) { 4837 TreeEntry *TE = Op.second; 4838 OrderedEntries.remove(TE); 4839 if (!VisitedOps.insert(TE).second) 4840 continue; 4841 if (TE->ReuseShuffleIndices.size() == BestOrder.size()) { 4842 reorderNodeWithReuses(*TE, Mask); 4843 continue; 4844 } 4845 // Gathers are processed separately. 4846 if (TE->State != TreeEntry::Vectorize && 4847 TE->State != TreeEntry::PossibleStridedVectorize && 4848 (TE->State != TreeEntry::ScatterVectorize || 4849 TE->ReorderIndices.empty())) 4850 continue; 4851 assert((BestOrder.size() == TE->ReorderIndices.size() || 4852 TE->ReorderIndices.empty()) && 4853 "Non-matching sizes of user/operand entries."); 4854 reorderOrder(TE->ReorderIndices, Mask); 4855 if (IgnoreReorder && TE == VectorizableTree.front().get()) 4856 IgnoreReorder = false; 4857 } 4858 // For gathers just need to reorder its scalars. 4859 for (TreeEntry *Gather : GatherOps) { 4860 assert(Gather->ReorderIndices.empty() && 4861 "Unexpected reordering of gathers."); 4862 if (!Gather->ReuseShuffleIndices.empty()) { 4863 // Just reorder reuses indices. 4864 reorderReuses(Gather->ReuseShuffleIndices, Mask); 4865 continue; 4866 } 4867 reorderScalars(Gather->Scalars, Mask); 4868 OrderedEntries.remove(Gather); 4869 } 4870 // Reorder operands of the user node and set the ordering for the user 4871 // node itself. 4872 if (Data.first->State != TreeEntry::Vectorize || 4873 !isa<ExtractElementInst, ExtractValueInst, LoadInst>( 4874 Data.first->getMainOp()) || 4875 Data.first->isAltShuffle()) 4876 Data.first->reorderOperands(Mask); 4877 if (!isa<InsertElementInst, StoreInst>(Data.first->getMainOp()) || 4878 Data.first->isAltShuffle() || 4879 Data.first->State == TreeEntry::PossibleStridedVectorize) { 4880 reorderScalars(Data.first->Scalars, Mask); 4881 reorderOrder(Data.first->ReorderIndices, MaskOrder); 4882 if (Data.first->ReuseShuffleIndices.empty() && 4883 !Data.first->ReorderIndices.empty() && 4884 !Data.first->isAltShuffle()) { 4885 // Insert user node to the list to try to sink reordering deeper in 4886 // the graph. 4887 OrderedEntries.insert(Data.first); 4888 } 4889 } else { 4890 reorderOrder(Data.first->ReorderIndices, Mask); 4891 } 4892 } 4893 } 4894 // If the reordering is unnecessary, just remove the reorder. 4895 if (IgnoreReorder && !VectorizableTree.front()->ReorderIndices.empty() && 4896 VectorizableTree.front()->ReuseShuffleIndices.empty()) 4897 VectorizableTree.front()->ReorderIndices.clear(); 4898 } 4899 4900 void BoUpSLP::buildExternalUses( 4901 const ExtraValueToDebugLocsMap &ExternallyUsedValues) { 4902 // Collect the values that we need to extract from the tree. 4903 for (auto &TEPtr : VectorizableTree) { 4904 TreeEntry *Entry = TEPtr.get(); 4905 4906 // No need to handle users of gathered values. 4907 if (Entry->State == TreeEntry::NeedToGather) 4908 continue; 4909 4910 // For each lane: 4911 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 4912 Value *Scalar = Entry->Scalars[Lane]; 4913 if (!isa<Instruction>(Scalar)) 4914 continue; 4915 int FoundLane = Entry->findLaneForValue(Scalar); 4916 4917 // Check if the scalar is externally used as an extra arg. 
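      // "Extra args" are values the caller marked as externally used; record
      // them with a null user so an extract for the found lane is emitted
      // even without a concrete user instruction.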
4918 const auto *ExtI = ExternallyUsedValues.find(Scalar); 4919 if (ExtI != ExternallyUsedValues.end()) { 4920 LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " 4921 << Lane << " from " << *Scalar << ".\n"); 4922 ExternalUses.emplace_back(Scalar, nullptr, FoundLane); 4923 } 4924 for (User *U : Scalar->users()) { 4925 LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n"); 4926 4927 Instruction *UserInst = dyn_cast<Instruction>(U); 4928 if (!UserInst || isDeleted(UserInst)) 4929 continue; 4930 4931 // Ignore users in the user ignore list. 4932 if (UserIgnoreList && UserIgnoreList->contains(UserInst)) 4933 continue; 4934 4935 // Skip in-tree scalars that become vectors 4936 if (TreeEntry *UseEntry = getTreeEntry(U)) { 4937 // Some in-tree scalars will remain as scalar in vectorized 4938 // instructions. If that is the case, the one in FoundLane will 4939 // be used. 4940 if (UseEntry->State == TreeEntry::ScatterVectorize || 4941 UseEntry->State == TreeEntry::PossibleStridedVectorize || 4942 !doesInTreeUserNeedToExtract( 4943 Scalar, cast<Instruction>(UseEntry->Scalars.front()), TLI)) { 4944 LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U 4945 << ".\n"); 4946 assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state"); 4947 continue; 4948 } 4949 U = nullptr; 4950 } 4951 4952 LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *UserInst 4953 << " from lane " << Lane << " from " << *Scalar 4954 << ".\n"); 4955 ExternalUses.emplace_back(Scalar, U, FoundLane); 4956 } 4957 } 4958 } 4959 } 4960 4961 DenseMap<Value *, SmallVector<StoreInst *>> 4962 BoUpSLP::collectUserStores(const BoUpSLP::TreeEntry *TE) const { 4963 DenseMap<Value *, SmallVector<StoreInst *>> PtrToStoresMap; 4964 for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size())) { 4965 Value *V = TE->Scalars[Lane]; 4966 // To save compilation time we don't visit if we have too many users. 4967 static constexpr unsigned UsersLimit = 4; 4968 if (V->hasNUsesOrMore(UsersLimit)) 4969 break; 4970 4971 // Collect stores per pointer object. 4972 for (User *U : V->users()) { 4973 auto *SI = dyn_cast<StoreInst>(U); 4974 if (SI == nullptr || !SI->isSimple() || 4975 !isValidElementType(SI->getValueOperand()->getType())) 4976 continue; 4977 // Skip entry if already 4978 if (getTreeEntry(U)) 4979 continue; 4980 4981 Value *Ptr = getUnderlyingObject(SI->getPointerOperand()); 4982 auto &StoresVec = PtrToStoresMap[Ptr]; 4983 // For now just keep one store per pointer object per lane. 4984 // TODO: Extend this to support multiple stores per pointer per lane 4985 if (StoresVec.size() > Lane) 4986 continue; 4987 // Skip if in different BBs. 4988 if (!StoresVec.empty() && 4989 SI->getParent() != StoresVec.back()->getParent()) 4990 continue; 4991 // Make sure that the stores are of the same type. 4992 if (!StoresVec.empty() && 4993 SI->getValueOperand()->getType() != 4994 StoresVec.back()->getValueOperand()->getType()) 4995 continue; 4996 StoresVec.push_back(SI); 4997 } 4998 } 4999 return PtrToStoresMap; 5000 } 5001 5002 bool BoUpSLP::canFormVector(ArrayRef<StoreInst *> StoresVec, 5003 OrdersType &ReorderIndices) const { 5004 // We check whether the stores in StoreVec can form a vector by sorting them 5005 // and checking whether they are consecutive. 5006 5007 // To avoid calling getPointersDiff() while sorting we create a vector of 5008 // pairs {store, offset from first} and sort this instead. 
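  // For example, for stores to p+2, p, p+3, p+1 the offsets relative to the
  // first store are {0, -2, 1, -1}; after sorting they form the consecutive
  // run -2,-1,0,1 and ReorderIndices ends up as {2, 0, 3, 1}.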
5009 SmallVector<std::pair<StoreInst *, int>> StoreOffsetVec(StoresVec.size()); 5010 StoreInst *S0 = StoresVec[0]; 5011 StoreOffsetVec[0] = {S0, 0}; 5012 Type *S0Ty = S0->getValueOperand()->getType(); 5013 Value *S0Ptr = S0->getPointerOperand(); 5014 for (unsigned Idx : seq<unsigned>(1, StoresVec.size())) { 5015 StoreInst *SI = StoresVec[Idx]; 5016 std::optional<int> Diff = 5017 getPointersDiff(S0Ty, S0Ptr, SI->getValueOperand()->getType(), 5018 SI->getPointerOperand(), *DL, *SE, 5019 /*StrictCheck=*/true); 5020 // We failed to compare the pointers so just abandon this StoresVec. 5021 if (!Diff) 5022 return false; 5023 StoreOffsetVec[Idx] = {StoresVec[Idx], *Diff}; 5024 } 5025 5026 // Sort the vector based on the pointers. We create a copy because we may 5027 // need the original later for calculating the reorder (shuffle) indices. 5028 stable_sort(StoreOffsetVec, [](const std::pair<StoreInst *, int> &Pair1, 5029 const std::pair<StoreInst *, int> &Pair2) { 5030 int Offset1 = Pair1.second; 5031 int Offset2 = Pair2.second; 5032 return Offset1 < Offset2; 5033 }); 5034 5035 // Check if the stores are consecutive by checking if their difference is 1. 5036 for (unsigned Idx : seq<unsigned>(1, StoreOffsetVec.size())) 5037 if (StoreOffsetVec[Idx].second != StoreOffsetVec[Idx - 1].second + 1) 5038 return false; 5039 5040 // Calculate the shuffle indices according to their offset against the sorted 5041 // StoreOffsetVec. 5042 ReorderIndices.reserve(StoresVec.size()); 5043 for (StoreInst *SI : StoresVec) { 5044 unsigned Idx = find_if(StoreOffsetVec, 5045 [SI](const std::pair<StoreInst *, int> &Pair) { 5046 return Pair.first == SI; 5047 }) - 5048 StoreOffsetVec.begin(); 5049 ReorderIndices.push_back(Idx); 5050 } 5051 // Identity order (e.g., {0,1,2,3}) is modeled as an empty OrdersType in 5052 // reorderTopToBottom() and reorderBottomToTop(), so we are following the 5053 // same convention here. 5054 auto IsIdentityOrder = [](const OrdersType &Order) { 5055 for (unsigned Idx : seq<unsigned>(0, Order.size())) 5056 if (Idx != Order[Idx]) 5057 return false; 5058 return true; 5059 }; 5060 if (IsIdentityOrder(ReorderIndices)) 5061 ReorderIndices.clear(); 5062 5063 return true; 5064 } 5065 5066 #ifndef NDEBUG 5067 LLVM_DUMP_METHOD static void dumpOrder(const BoUpSLP::OrdersType &Order) { 5068 for (unsigned Idx : Order) 5069 dbgs() << Idx << ", "; 5070 dbgs() << "\n"; 5071 } 5072 #endif 5073 5074 SmallVector<BoUpSLP::OrdersType, 1> 5075 BoUpSLP::findExternalStoreUsersReorderIndices(TreeEntry *TE) const { 5076 unsigned NumLanes = TE->Scalars.size(); 5077 5078 DenseMap<Value *, SmallVector<StoreInst *>> PtrToStoresMap = 5079 collectUserStores(TE); 5080 5081 // Holds the reorder indices for each candidate store vector that is a user of 5082 // the current TreeEntry. 5083 SmallVector<OrdersType, 1> ExternalReorderIndices; 5084 5085 // Now inspect the stores collected per pointer and look for vectorization 5086 // candidates. For each candidate calculate the reorder index vector and push 5087 // it into `ExternalReorderIndices` 5088 for (const auto &Pair : PtrToStoresMap) { 5089 auto &StoresVec = Pair.second; 5090 // If we have fewer than NumLanes stores, then we can't form a vector. 5091 if (StoresVec.size() != NumLanes) 5092 continue; 5093 5094 // If the stores are not consecutive then abandon this StoresVec. 
5095 OrdersType ReorderIndices; 5096 if (!canFormVector(StoresVec, ReorderIndices)) 5097 continue; 5098 5099 // We now know that the scalars in StoresVec can form a vector instruction, 5100 // so set the reorder indices. 5101 ExternalReorderIndices.push_back(ReorderIndices); 5102 } 5103 return ExternalReorderIndices; 5104 } 5105 5106 void BoUpSLP::buildTree(ArrayRef<Value *> Roots, 5107 const SmallDenseSet<Value *> &UserIgnoreLst) { 5108 deleteTree(); 5109 UserIgnoreList = &UserIgnoreLst; 5110 if (!allSameType(Roots)) 5111 return; 5112 buildTree_rec(Roots, 0, EdgeInfo()); 5113 } 5114 5115 void BoUpSLP::buildTree(ArrayRef<Value *> Roots) { 5116 deleteTree(); 5117 if (!allSameType(Roots)) 5118 return; 5119 buildTree_rec(Roots, 0, EdgeInfo()); 5120 } 5121 5122 /// \return true if the specified list of values has only one instruction that 5123 /// requires scheduling, false otherwise. 5124 #ifndef NDEBUG 5125 static bool needToScheduleSingleInstruction(ArrayRef<Value *> VL) { 5126 Value *NeedsScheduling = nullptr; 5127 for (Value *V : VL) { 5128 if (doesNotNeedToBeScheduled(V)) 5129 continue; 5130 if (!NeedsScheduling) { 5131 NeedsScheduling = V; 5132 continue; 5133 } 5134 return false; 5135 } 5136 return NeedsScheduling; 5137 } 5138 #endif 5139 5140 /// Generates key/subkey pair for the given value to provide effective sorting 5141 /// of the values and better detection of the vectorizable values sequences. The 5142 /// keys/subkeys can be used for better sorting of the values themselves (keys) 5143 /// and in values subgroups (subkeys). 5144 static std::pair<size_t, size_t> generateKeySubkey( 5145 Value *V, const TargetLibraryInfo *TLI, 5146 function_ref<hash_code(size_t, LoadInst *)> LoadsSubkeyGenerator, 5147 bool AllowAlternate) { 5148 hash_code Key = hash_value(V->getValueID() + 2); 5149 hash_code SubKey = hash_value(0); 5150 // Sort the loads by the distance between the pointers. 5151 if (auto *LI = dyn_cast<LoadInst>(V)) { 5152 Key = hash_combine(LI->getType(), hash_value(Instruction::Load), Key); 5153 if (LI->isSimple()) 5154 SubKey = hash_value(LoadsSubkeyGenerator(Key, LI)); 5155 else 5156 Key = SubKey = hash_value(LI); 5157 } else if (isVectorLikeInstWithConstOps(V)) { 5158 // Sort extracts by the vector operands. 5159 if (isa<ExtractElementInst, UndefValue>(V)) 5160 Key = hash_value(Value::UndefValueVal + 1); 5161 if (auto *EI = dyn_cast<ExtractElementInst>(V)) { 5162 if (!isUndefVector(EI->getVectorOperand()).all() && 5163 !isa<UndefValue>(EI->getIndexOperand())) 5164 SubKey = hash_value(EI->getVectorOperand()); 5165 } 5166 } else if (auto *I = dyn_cast<Instruction>(V)) { 5167 // Sort other instructions just by the opcodes except for CMPInst. 5168 // For CMP also sort by the predicate kind. 5169 if ((isa<BinaryOperator, CastInst>(I)) && 5170 isValidForAlternation(I->getOpcode())) { 5171 if (AllowAlternate) 5172 Key = hash_value(isa<BinaryOperator>(I) ? 1 : 0); 5173 else 5174 Key = hash_combine(hash_value(I->getOpcode()), Key); 5175 SubKey = hash_combine( 5176 hash_value(I->getOpcode()), hash_value(I->getType()), 5177 hash_value(isa<BinaryOperator>(I) 5178 ? I->getType() 5179 : cast<CastInst>(I)->getOperand(0)->getType())); 5180 // For casts, look through the only operand to improve compile time. 
5181 if (isa<CastInst>(I)) { 5182 std::pair<size_t, size_t> OpVals = 5183 generateKeySubkey(I->getOperand(0), TLI, LoadsSubkeyGenerator, 5184 /*AllowAlternate=*/true); 5185 Key = hash_combine(OpVals.first, Key); 5186 SubKey = hash_combine(OpVals.first, SubKey); 5187 } 5188 } else if (auto *CI = dyn_cast<CmpInst>(I)) { 5189 CmpInst::Predicate Pred = CI->getPredicate(); 5190 if (CI->isCommutative()) 5191 Pred = std::min(Pred, CmpInst::getInversePredicate(Pred)); 5192 CmpInst::Predicate SwapPred = CmpInst::getSwappedPredicate(Pred); 5193 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Pred), 5194 hash_value(SwapPred), 5195 hash_value(CI->getOperand(0)->getType())); 5196 } else if (auto *Call = dyn_cast<CallInst>(I)) { 5197 Intrinsic::ID ID = getVectorIntrinsicIDForCall(Call, TLI); 5198 if (isTriviallyVectorizable(ID)) { 5199 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(ID)); 5200 } else if (!VFDatabase(*Call).getMappings(*Call).empty()) { 5201 SubKey = hash_combine(hash_value(I->getOpcode()), 5202 hash_value(Call->getCalledFunction())); 5203 } else { 5204 Key = hash_combine(hash_value(Call), Key); 5205 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Call)); 5206 } 5207 for (const CallBase::BundleOpInfo &Op : Call->bundle_op_infos()) 5208 SubKey = hash_combine(hash_value(Op.Begin), hash_value(Op.End), 5209 hash_value(Op.Tag), SubKey); 5210 } else if (auto *Gep = dyn_cast<GetElementPtrInst>(I)) { 5211 if (Gep->getNumOperands() == 2 && isa<ConstantInt>(Gep->getOperand(1))) 5212 SubKey = hash_value(Gep->getPointerOperand()); 5213 else 5214 SubKey = hash_value(Gep); 5215 } else if (BinaryOperator::isIntDivRem(I->getOpcode()) && 5216 !isa<ConstantInt>(I->getOperand(1))) { 5217 // Do not try to vectorize instructions with potentially high cost. 5218 SubKey = hash_value(I); 5219 } else { 5220 SubKey = hash_value(I->getOpcode()); 5221 } 5222 Key = hash_combine(hash_value(I->getParent()), Key); 5223 } 5224 return std::make_pair(Key, SubKey); 5225 } 5226 5227 /// Checks if the specified instruction \p I is an alternate operation for 5228 /// the given \p MainOp and \p AltOp instructions. 5229 static bool isAlternateInstruction(const Instruction *I, 5230 const Instruction *MainOp, 5231 const Instruction *AltOp, 5232 const TargetLibraryInfo &TLI); 5233 5234 BoUpSLP::TreeEntry::EntryState BoUpSLP::getScalarsVectorizationState( 5235 InstructionsState &S, ArrayRef<Value *> VL, bool IsScatterVectorizeUserTE, 5236 OrdersType &CurrentOrder, SmallVectorImpl<Value *> &PointerOps) const { 5237 assert(S.MainOp && "Expected instructions with same/alternate opcodes only."); 5238 5239 unsigned ShuffleOrOp = 5240 S.isAltShuffle() ? (unsigned)Instruction::ShuffleVector : S.getOpcode(); 5241 auto *VL0 = cast<Instruction>(S.OpValue); 5242 switch (ShuffleOrOp) { 5243 case Instruction::PHI: { 5244 // Check for terminator values (e.g. invoke). 
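    // A value defined by a terminator (e.g. an invoke result) leaves no
    // insertion point in the incoming block for the vector build sequence,
    // so such bundles are gathered.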
5245 for (Value *V : VL) 5246 for (Value *Incoming : cast<PHINode>(V)->incoming_values()) { 5247 Instruction *Term = dyn_cast<Instruction>(Incoming); 5248 if (Term && Term->isTerminator()) { 5249 LLVM_DEBUG(dbgs() 5250 << "SLP: Need to swizzle PHINodes (terminator use).\n"); 5251 return TreeEntry::NeedToGather; 5252 } 5253 } 5254 5255 return TreeEntry::Vectorize; 5256 } 5257 case Instruction::ExtractValue: 5258 case Instruction::ExtractElement: { 5259 bool Reuse = canReuseExtract(VL, VL0, CurrentOrder); 5260 if (Reuse || !CurrentOrder.empty()) 5261 return TreeEntry::Vectorize; 5262 LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n"); 5263 return TreeEntry::NeedToGather; 5264 } 5265 case Instruction::InsertElement: { 5266 // Check that we have a buildvector and not a shuffle of 2 or more 5267 // different vectors. 5268 ValueSet SourceVectors; 5269 for (Value *V : VL) { 5270 SourceVectors.insert(cast<Instruction>(V)->getOperand(0)); 5271 assert(getInsertIndex(V) != std::nullopt && 5272 "Non-constant or undef index?"); 5273 } 5274 5275 if (count_if(VL, [&SourceVectors](Value *V) { 5276 return !SourceVectors.contains(V); 5277 }) >= 2) { 5278 // Found 2nd source vector - cancel. 5279 LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with " 5280 "different source vectors.\n"); 5281 return TreeEntry::NeedToGather; 5282 } 5283 5284 return TreeEntry::Vectorize; 5285 } 5286 case Instruction::Load: { 5287 // Check that a vectorized load would load the same memory as a scalar 5288 // load. For example, we don't want to vectorize loads that are smaller 5289 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 5290 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 5291 // from such a struct, we read/write packed bits disagreeing with the 5292 // unvectorized version. 
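    // canVectorizeLoads() classifies the bundle: consecutive simple loads are
    // vectorized directly, non-consecutive but addressable ones become
    // (possibly strided) masked gathers, and everything else is gathered.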
5293 switch (canVectorizeLoads(VL, VL0, *TTI, *DL, *SE, *LI, *TLI, CurrentOrder, 5294 PointerOps)) { 5295 case LoadsState::Vectorize: 5296 return TreeEntry::Vectorize; 5297 case LoadsState::ScatterVectorize: 5298 return TreeEntry::ScatterVectorize; 5299 case LoadsState::PossibleStridedVectorize: 5300 return TreeEntry::PossibleStridedVectorize; 5301 case LoadsState::Gather: 5302 #ifndef NDEBUG 5303 Type *ScalarTy = VL0->getType(); 5304 if (DL->getTypeSizeInBits(ScalarTy) != 5305 DL->getTypeAllocSizeInBits(ScalarTy)) 5306 LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n"); 5307 else if (any_of(VL, 5308 [](Value *V) { return !cast<LoadInst>(V)->isSimple(); })) 5309 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n"); 5310 else 5311 LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n"); 5312 #endif // NDEBUG 5313 return TreeEntry::NeedToGather; 5314 } 5315 llvm_unreachable("Unexpected state of loads"); 5316 } 5317 case Instruction::ZExt: 5318 case Instruction::SExt: 5319 case Instruction::FPToUI: 5320 case Instruction::FPToSI: 5321 case Instruction::FPExt: 5322 case Instruction::PtrToInt: 5323 case Instruction::IntToPtr: 5324 case Instruction::SIToFP: 5325 case Instruction::UIToFP: 5326 case Instruction::Trunc: 5327 case Instruction::FPTrunc: 5328 case Instruction::BitCast: { 5329 Type *SrcTy = VL0->getOperand(0)->getType(); 5330 for (Value *V : VL) { 5331 Type *Ty = cast<Instruction>(V)->getOperand(0)->getType(); 5332 if (Ty != SrcTy || !isValidElementType(Ty)) { 5333 LLVM_DEBUG( 5334 dbgs() << "SLP: Gathering casts with different src types.\n"); 5335 return TreeEntry::NeedToGather; 5336 } 5337 } 5338 return TreeEntry::Vectorize; 5339 } 5340 case Instruction::ICmp: 5341 case Instruction::FCmp: { 5342 // Check that all of the compares have the same predicate. 5343 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 5344 CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0); 5345 Type *ComparedTy = VL0->getOperand(0)->getType(); 5346 for (Value *V : VL) { 5347 CmpInst *Cmp = cast<CmpInst>(V); 5348 if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) || 5349 Cmp->getOperand(0)->getType() != ComparedTy) { 5350 LLVM_DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n"); 5351 return TreeEntry::NeedToGather; 5352 } 5353 } 5354 return TreeEntry::Vectorize; 5355 } 5356 case Instruction::Select: 5357 case Instruction::FNeg: 5358 case Instruction::Add: 5359 case Instruction::FAdd: 5360 case Instruction::Sub: 5361 case Instruction::FSub: 5362 case Instruction::Mul: 5363 case Instruction::FMul: 5364 case Instruction::UDiv: 5365 case Instruction::SDiv: 5366 case Instruction::FDiv: 5367 case Instruction::URem: 5368 case Instruction::SRem: 5369 case Instruction::FRem: 5370 case Instruction::Shl: 5371 case Instruction::LShr: 5372 case Instruction::AShr: 5373 case Instruction::And: 5374 case Instruction::Or: 5375 case Instruction::Xor: 5376 return TreeEntry::Vectorize; 5377 case Instruction::GetElementPtr: { 5378 // We don't combine GEPs with complicated (nested) indexing. 5379 for (Value *V : VL) { 5380 auto *I = dyn_cast<GetElementPtrInst>(V); 5381 if (!I) 5382 continue; 5383 if (I->getNumOperands() != 2) { 5384 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n"); 5385 return TreeEntry::NeedToGather; 5386 } 5387 } 5388 5389 // We can't combine several GEPs into one vector if they operate on 5390 // different types. 
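    // E.g. a GEP with source element type i32 and one with i64 scale their
    // indices differently, so they cannot be merged into one vector GEP.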
5391 Type *Ty0 = cast<GEPOperator>(VL0)->getSourceElementType(); 5392 for (Value *V : VL) { 5393 auto *GEP = dyn_cast<GEPOperator>(V); 5394 if (!GEP) 5395 continue; 5396 Type *CurTy = GEP->getSourceElementType(); 5397 if (Ty0 != CurTy) { 5398 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n"); 5399 return TreeEntry::NeedToGather; 5400 } 5401 } 5402 5403 // We don't combine GEPs with non-constant indexes. 5404 Type *Ty1 = VL0->getOperand(1)->getType(); 5405 for (Value *V : VL) { 5406 auto *I = dyn_cast<GetElementPtrInst>(V); 5407 if (!I) 5408 continue; 5409 auto *Op = I->getOperand(1); 5410 if ((!IsScatterVectorizeUserTE && !isa<ConstantInt>(Op)) || 5411 (Op->getType() != Ty1 && 5412 ((IsScatterVectorizeUserTE && !isa<ConstantInt>(Op)) || 5413 Op->getType()->getScalarSizeInBits() > 5414 DL->getIndexSizeInBits( 5415 V->getType()->getPointerAddressSpace())))) { 5416 LLVM_DEBUG( 5417 dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n"); 5418 return TreeEntry::NeedToGather; 5419 } 5420 } 5421 5422 return TreeEntry::Vectorize; 5423 } 5424 case Instruction::Store: { 5425 // Check if the stores are consecutive or if we need to swizzle them. 5426 llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType(); 5427 // Avoid types that are padded when being allocated as scalars, while 5428 // being packed together in a vector (such as i1). 5429 if (DL->getTypeSizeInBits(ScalarTy) != 5430 DL->getTypeAllocSizeInBits(ScalarTy)) { 5431 LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n"); 5432 return TreeEntry::NeedToGather; 5433 } 5434 // Make sure all stores in the bundle are simple - we can't vectorize 5435 // atomic or volatile stores. 5436 for (Value *V : VL) { 5437 auto *SI = cast<StoreInst>(V); 5438 if (!SI->isSimple()) { 5439 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n"); 5440 return TreeEntry::NeedToGather; 5441 } 5442 PointerOps.push_back(SI->getPointerOperand()); 5443 } 5444 5445 // Check the order of pointer operands. 5446 if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) { 5447 Value *Ptr0; 5448 Value *PtrN; 5449 if (CurrentOrder.empty()) { 5450 Ptr0 = PointerOps.front(); 5451 PtrN = PointerOps.back(); 5452 } else { 5453 Ptr0 = PointerOps[CurrentOrder.front()]; 5454 PtrN = PointerOps[CurrentOrder.back()]; 5455 } 5456 std::optional<int> Dist = 5457 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE); 5458 // Check that the sorted pointer operands are consecutive. 5459 if (static_cast<unsigned>(*Dist) == VL.size() - 1) 5460 return TreeEntry::Vectorize; 5461 } 5462 5463 LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n"); 5464 return TreeEntry::NeedToGather; 5465 } 5466 case Instruction::Call: { 5467 // Check if the calls are all to the same vectorizable intrinsic or 5468 // library function. 
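    // A call bundle is vectorizable only if there is either a trivially
    // vectorizable intrinsic or a vector library mapping (VFDatabase) for
    // the callee, every lane calls the same function with the same
    // operand-bundle schema, and arguments that must stay scalar are
    // identical across the lanes.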
5469 CallInst *CI = cast<CallInst>(VL0); 5470 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 5471 5472 VFShape Shape = VFShape::get( 5473 CI->getFunctionType(), 5474 ElementCount::getFixed(static_cast<unsigned int>(VL.size())), 5475 false /*HasGlobalPred*/); 5476 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 5477 5478 if (!VecFunc && !isTriviallyVectorizable(ID)) { 5479 LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n"); 5480 return TreeEntry::NeedToGather; 5481 } 5482 Function *F = CI->getCalledFunction(); 5483 unsigned NumArgs = CI->arg_size(); 5484 SmallVector<Value *, 4> ScalarArgs(NumArgs, nullptr); 5485 for (unsigned J = 0; J != NumArgs; ++J) 5486 if (isVectorIntrinsicWithScalarOpAtArg(ID, J)) 5487 ScalarArgs[J] = CI->getArgOperand(J); 5488 for (Value *V : VL) { 5489 CallInst *CI2 = dyn_cast<CallInst>(V); 5490 if (!CI2 || CI2->getCalledFunction() != F || 5491 getVectorIntrinsicIDForCall(CI2, TLI) != ID || 5492 (VecFunc && 5493 VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) || 5494 !CI->hasIdenticalOperandBundleSchema(*CI2)) { 5495 LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V 5496 << "\n"); 5497 return TreeEntry::NeedToGather; 5498 } 5499 // Some intrinsics have scalar arguments and should be same in order for 5500 // them to be vectorized. 5501 for (unsigned J = 0; J != NumArgs; ++J) { 5502 if (isVectorIntrinsicWithScalarOpAtArg(ID, J)) { 5503 Value *A1J = CI2->getArgOperand(J); 5504 if (ScalarArgs[J] != A1J) { 5505 LLVM_DEBUG(dbgs() 5506 << "SLP: mismatched arguments in call:" << *CI 5507 << " argument " << ScalarArgs[J] << "!=" << A1J << "\n"); 5508 return TreeEntry::NeedToGather; 5509 } 5510 } 5511 } 5512 // Verify that the bundle operands are identical between the two calls. 5513 if (CI->hasOperandBundles() && 5514 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(), 5515 CI->op_begin() + CI->getBundleOperandsEndIndex(), 5516 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) { 5517 LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI 5518 << "!=" << *V << '\n'); 5519 return TreeEntry::NeedToGather; 5520 } 5521 } 5522 5523 return TreeEntry::Vectorize; 5524 } 5525 case Instruction::ShuffleVector: { 5526 // If this is not an alternate sequence of opcode like add-sub 5527 // then do not vectorize this instruction. 5528 if (!S.isAltShuffle()) { 5529 LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n"); 5530 return TreeEntry::NeedToGather; 5531 } 5532 return TreeEntry::Vectorize; 5533 } 5534 default: 5535 LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n"); 5536 return TreeEntry::NeedToGather; 5537 } 5538 } 5539 5540 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, 5541 const EdgeInfo &UserTreeIdx) { 5542 assert((allConstant(VL) || allSameType(VL)) && "Invalid types!"); 5543 5544 SmallVector<int> ReuseShuffleIndicies; 5545 SmallVector<Value *> UniqueValues; 5546 SmallVector<Value *> NonUniqueValueVL; 5547 auto TryToFindDuplicates = [&](const InstructionsState &S, 5548 bool DoNotFail = false) { 5549 // Check that every instruction appears once in this bundle. 5550 DenseMap<Value *, unsigned> UniquePositions(VL.size()); 5551 for (Value *V : VL) { 5552 if (isConstant(V)) { 5553 ReuseShuffleIndicies.emplace_back( 5554 isa<UndefValue>(V) ? 
PoisonMaskElem : UniqueValues.size()); 5555 UniqueValues.emplace_back(V); 5556 continue; 5557 } 5558 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 5559 ReuseShuffleIndicies.emplace_back(Res.first->second); 5560 if (Res.second) 5561 UniqueValues.emplace_back(V); 5562 } 5563 size_t NumUniqueScalarValues = UniqueValues.size(); 5564 if (NumUniqueScalarValues == VL.size()) { 5565 ReuseShuffleIndicies.clear(); 5566 } else { 5567 LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n"); 5568 if (NumUniqueScalarValues <= 1 || 5569 (UniquePositions.size() == 1 && all_of(UniqueValues, 5570 [](Value *V) { 5571 return isa<UndefValue>(V) || 5572 !isConstant(V); 5573 })) || 5574 !llvm::has_single_bit<uint32_t>(NumUniqueScalarValues)) { 5575 if (DoNotFail && UniquePositions.size() > 1 && 5576 NumUniqueScalarValues > 1 && S.MainOp->isSafeToRemove() && 5577 all_of(UniqueValues, [=](Value *V) { 5578 return isa<ExtractElementInst>(V) || 5579 areAllUsersVectorized(cast<Instruction>(V), 5580 UserIgnoreList); 5581 })) { 5582 unsigned PWSz = PowerOf2Ceil(UniqueValues.size()); 5583 if (PWSz == VL.size()) { 5584 ReuseShuffleIndicies.clear(); 5585 } else { 5586 NonUniqueValueVL.assign(UniqueValues.begin(), UniqueValues.end()); 5587 NonUniqueValueVL.append(PWSz - UniqueValues.size(), 5588 UniqueValues.back()); 5589 VL = NonUniqueValueVL; 5590 } 5591 return true; 5592 } 5593 LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); 5594 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5595 return false; 5596 } 5597 VL = UniqueValues; 5598 } 5599 return true; 5600 }; 5601 5602 InstructionsState S = getSameOpcode(VL, *TLI); 5603 5604 // Don't vectorize ephemeral values. 5605 if (!EphValues.empty()) { 5606 for (Value *V : VL) { 5607 if (EphValues.count(V)) { 5608 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 5609 << ") is ephemeral.\n"); 5610 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5611 return; 5612 } 5613 } 5614 } 5615 5616 // Gather if we hit the RecursionMaxDepth, unless this is a load (or z/sext of 5617 // a load), in which case peek through to include it in the tree, without 5618 // ballooning over-budget. 5619 if (Depth >= RecursionMaxDepth && 5620 !(S.MainOp && isa<Instruction>(S.MainOp) && S.MainOp == S.AltOp && 5621 VL.size() >= 4 && 5622 (match(S.MainOp, m_Load(m_Value())) || all_of(VL, [&S](const Value *I) { 5623 return match(I, 5624 m_OneUse(m_ZExtOrSExt(m_OneUse(m_Load(m_Value()))))) && 5625 cast<Instruction>(I)->getOpcode() == 5626 cast<Instruction>(S.MainOp)->getOpcode(); 5627 })))) { 5628 LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n"); 5629 if (TryToFindDuplicates(S)) 5630 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5631 ReuseShuffleIndicies); 5632 return; 5633 } 5634 5635 // Don't handle scalable vectors 5636 if (S.getOpcode() == Instruction::ExtractElement && 5637 isa<ScalableVectorType>( 5638 cast<ExtractElementInst>(S.OpValue)->getVectorOperandType())) { 5639 LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n"); 5640 if (TryToFindDuplicates(S)) 5641 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5642 ReuseShuffleIndicies); 5643 return; 5644 } 5645 5646 // Don't handle vectors. 
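  // (Insertelement bundles are the exception: they produce a vector from
  // scalars and are handled as a buildvector below.)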
5647 if (S.OpValue->getType()->isVectorTy() && 5648 !isa<InsertElementInst>(S.OpValue)) { 5649 LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n"); 5650 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5651 return; 5652 } 5653 5654 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) 5655 if (SI->getValueOperand()->getType()->isVectorTy()) { 5656 LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n"); 5657 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5658 return; 5659 } 5660 5661 // If all of the operands are identical or constant we have a simple solution. 5662 // If we deal with insert/extract instructions, they all must have constant 5663 // indices, otherwise we should gather them, not try to vectorize. 5664 // If alternate op node with 2 elements with gathered operands - do not 5665 // vectorize. 5666 auto &&NotProfitableForVectorization = [&S, this, 5667 Depth](ArrayRef<Value *> VL) { 5668 if (!S.getOpcode() || !S.isAltShuffle() || VL.size() > 2) 5669 return false; 5670 if (VectorizableTree.size() < MinTreeSize) 5671 return false; 5672 if (Depth >= RecursionMaxDepth - 1) 5673 return true; 5674 // Check if all operands are extracts, part of vector node or can build a 5675 // regular vectorize node. 5676 SmallVector<unsigned, 2> InstsCount(VL.size(), 0); 5677 for (Value *V : VL) { 5678 auto *I = cast<Instruction>(V); 5679 InstsCount.push_back(count_if(I->operand_values(), [](Value *Op) { 5680 return isa<Instruction>(Op) || isVectorLikeInstWithConstOps(Op); 5681 })); 5682 } 5683 bool IsCommutative = isCommutative(S.MainOp) || isCommutative(S.AltOp); 5684 if ((IsCommutative && 5685 std::accumulate(InstsCount.begin(), InstsCount.end(), 0) < 2) || 5686 (!IsCommutative && 5687 all_of(InstsCount, [](unsigned ICnt) { return ICnt < 2; }))) 5688 return true; 5689 assert(VL.size() == 2 && "Expected only 2 alternate op instructions."); 5690 SmallVector<SmallVector<std::pair<Value *, Value *>>> Candidates; 5691 auto *I1 = cast<Instruction>(VL.front()); 5692 auto *I2 = cast<Instruction>(VL.back()); 5693 for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op) 5694 Candidates.emplace_back().emplace_back(I1->getOperand(Op), 5695 I2->getOperand(Op)); 5696 if (static_cast<unsigned>(count_if( 5697 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) { 5698 return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat); 5699 })) >= S.MainOp->getNumOperands() / 2) 5700 return false; 5701 if (S.MainOp->getNumOperands() > 2) 5702 return true; 5703 if (IsCommutative) { 5704 // Check permuted operands. 
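    // I.e. for the 2-element commutative case also try matching operand 0 of
    // the first instruction against operand 1 of the second (and vice versa)
    // before declaring the bundle unprofitable.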
5705 Candidates.clear(); 5706 for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op) 5707 Candidates.emplace_back().emplace_back(I1->getOperand(Op), 5708 I2->getOperand((Op + 1) % E)); 5709 if (any_of( 5710 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) { 5711 return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat); 5712 })) 5713 return false; 5714 } 5715 return true; 5716 }; 5717 SmallVector<unsigned> SortedIndices; 5718 BasicBlock *BB = nullptr; 5719 bool IsScatterVectorizeUserTE = 5720 UserTreeIdx.UserTE && 5721 (UserTreeIdx.UserTE->State == TreeEntry::ScatterVectorize || 5722 UserTreeIdx.UserTE->State == TreeEntry::PossibleStridedVectorize); 5723 bool AreAllSameInsts = 5724 (S.getOpcode() && allSameBlock(VL)) || 5725 (S.OpValue->getType()->isPointerTy() && IsScatterVectorizeUserTE && 5726 VL.size() > 2 && 5727 all_of(VL, 5728 [&BB](Value *V) { 5729 auto *I = dyn_cast<GetElementPtrInst>(V); 5730 if (!I) 5731 return doesNotNeedToBeScheduled(V); 5732 if (!BB) 5733 BB = I->getParent(); 5734 return BB == I->getParent() && I->getNumOperands() == 2; 5735 }) && 5736 BB && 5737 sortPtrAccesses(VL, UserTreeIdx.UserTE->getMainOp()->getType(), *DL, *SE, 5738 SortedIndices)); 5739 if (!AreAllSameInsts || allConstant(VL) || isSplat(VL) || 5740 (isa<InsertElementInst, ExtractValueInst, ExtractElementInst>( 5741 S.OpValue) && 5742 !all_of(VL, isVectorLikeInstWithConstOps)) || 5743 NotProfitableForVectorization(VL)) { 5744 LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O, small shuffle. \n"); 5745 if (TryToFindDuplicates(S)) 5746 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5747 ReuseShuffleIndicies); 5748 return; 5749 } 5750 5751 // We now know that this is a vector of instructions of the same type from 5752 // the same block. 5753 5754 // Check if this is a duplicate of another entry. 5755 if (TreeEntry *E = getTreeEntry(S.OpValue)) { 5756 LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n"); 5757 if (!E->isSame(VL)) { 5758 auto It = MultiNodeScalars.find(S.OpValue); 5759 if (It != MultiNodeScalars.end()) { 5760 auto *TEIt = find_if(It->getSecond(), 5761 [&](TreeEntry *ME) { return ME->isSame(VL); }); 5762 if (TEIt != It->getSecond().end()) 5763 E = *TEIt; 5764 else 5765 E = nullptr; 5766 } else { 5767 E = nullptr; 5768 } 5769 } 5770 if (!E) { 5771 if (!doesNotNeedToBeScheduled(S.OpValue)) { 5772 LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n"); 5773 if (TryToFindDuplicates(S)) 5774 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5775 ReuseShuffleIndicies); 5776 return; 5777 } 5778 } else { 5779 // Record the reuse of the tree node. FIXME, currently this is only used 5780 // to properly draw the graph rather than for the actual vectorization. 5781 E->UserTreeIndices.push_back(UserTreeIdx); 5782 LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue 5783 << ".\n"); 5784 return; 5785 } 5786 } 5787 5788 // Check that none of the instructions in the bundle are already in the tree. 5789 for (Value *V : VL) { 5790 if ((!IsScatterVectorizeUserTE && !isa<Instruction>(V)) || 5791 doesNotNeedToBeScheduled(V)) 5792 continue; 5793 if (getTreeEntry(V)) { 5794 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 5795 << ") is already in tree.\n"); 5796 if (TryToFindDuplicates(S)) 5797 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5798 ReuseShuffleIndicies); 5799 return; 5800 } 5801 } 5802 5803 // The reduction nodes (stored in UserIgnoreList) also should stay scalar. 
5804 if (UserIgnoreList && !UserIgnoreList->empty()) { 5805 for (Value *V : VL) { 5806 if (UserIgnoreList && UserIgnoreList->contains(V)) { 5807 LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n"); 5808 if (TryToFindDuplicates(S)) 5809 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5810 ReuseShuffleIndicies); 5811 return; 5812 } 5813 } 5814 } 5815 5816 // Special processing for sorted pointers for ScatterVectorize node with 5817 // constant indeces only. 5818 if (AreAllSameInsts && UserTreeIdx.UserTE && 5819 (UserTreeIdx.UserTE->State == TreeEntry::ScatterVectorize || 5820 UserTreeIdx.UserTE->State == TreeEntry::PossibleStridedVectorize) && 5821 !(S.getOpcode() && allSameBlock(VL))) { 5822 assert(S.OpValue->getType()->isPointerTy() && 5823 count_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); }) >= 5824 2 && 5825 "Expected pointers only."); 5826 // Reset S to make it GetElementPtr kind of node. 5827 const auto *It = find_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); }); 5828 assert(It != VL.end() && "Expected at least one GEP."); 5829 S = getSameOpcode(*It, *TLI); 5830 } 5831 5832 // Check that all of the users of the scalars that we want to vectorize are 5833 // schedulable. 5834 auto *VL0 = cast<Instruction>(S.OpValue); 5835 BB = VL0->getParent(); 5836 5837 if (!DT->isReachableFromEntry(BB)) { 5838 // Don't go into unreachable blocks. They may contain instructions with 5839 // dependency cycles which confuse the final scheduling. 5840 LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n"); 5841 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5842 return; 5843 } 5844 5845 // Don't go into catchswitch blocks, which can happen with PHIs. 5846 // Such blocks can only have PHIs and the catchswitch. There is no 5847 // place to insert a shuffle if we need to, so just avoid that issue. 5848 if (isa<CatchSwitchInst>(BB->getTerminator())) { 5849 LLVM_DEBUG(dbgs() << "SLP: bundle in catchswitch block.\n"); 5850 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5851 return; 5852 } 5853 5854 // Check that every instruction appears once in this bundle. 5855 if (!TryToFindDuplicates(S, /*DoNotFail=*/true)) 5856 return; 5857 5858 // Perform specific checks for each particular instruction kind. 5859 OrdersType CurrentOrder; 5860 SmallVector<Value *> PointerOps; 5861 TreeEntry::EntryState State = getScalarsVectorizationState( 5862 S, VL, IsScatterVectorizeUserTE, CurrentOrder, PointerOps); 5863 if (State == TreeEntry::NeedToGather) { 5864 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5865 ReuseShuffleIndicies); 5866 return; 5867 } 5868 5869 auto &BSRef = BlocksSchedules[BB]; 5870 if (!BSRef) 5871 BSRef = std::make_unique<BlockScheduling>(BB); 5872 5873 BlockScheduling &BS = *BSRef; 5874 5875 std::optional<ScheduleData *> Bundle = 5876 BS.tryScheduleBundle(UniqueValues, this, S); 5877 #ifdef EXPENSIVE_CHECKS 5878 // Make sure we didn't break any internal invariants 5879 BS.verify(); 5880 #endif 5881 if (!Bundle) { 5882 LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n"); 5883 assert((!BS.getScheduleData(VL0) || 5884 !BS.getScheduleData(VL0)->isPartOfBundle()) && 5885 "tryScheduleBundle should cancelScheduling on failure"); 5886 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5887 ReuseShuffleIndicies); 5888 return; 5889 } 5890 LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); 5891 5892 unsigned ShuffleOrOp = S.isAltShuffle() ? 
5893 (unsigned) Instruction::ShuffleVector : S.getOpcode();
5894 switch (ShuffleOrOp) {
5895 case Instruction::PHI: {
5896 auto *PH = cast<PHINode>(VL0);
5897
5898 TreeEntry *TE =
5899 newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies);
5900 LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");
5901
5902 // Keeps the reordered operands to avoid code duplication.
5903 SmallVector<ValueList, 2> OperandsVec;
5904 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) {
5905 if (!DT->isReachableFromEntry(PH->getIncomingBlock(I))) {
5906 ValueList Operands(VL.size(), PoisonValue::get(PH->getType()));
5907 TE->setOperand(I, Operands);
5908 OperandsVec.push_back(Operands);
5909 continue;
5910 }
5911 ValueList Operands;
5912 // Prepare the operand vector.
5913 for (Value *V : VL)
5914 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(
5915 PH->getIncomingBlock(I)));
5916 TE->setOperand(I, Operands);
5917 OperandsVec.push_back(Operands);
5918 }
5919 for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx)
5920 buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx});
5921 return;
5922 }
5923 case Instruction::ExtractValue:
5924 case Instruction::ExtractElement: {
5925 if (CurrentOrder.empty()) {
5926 LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n");
5927 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5928 ReuseShuffleIndicies);
5929 // This is a special case, as it does not gather, but at the same time
5930 // we are not extending buildTree_rec() towards the operands.
5931 ValueList Op0;
5932 Op0.assign(VL.size(), VL0->getOperand(0));
5933 VectorizableTree.back()->setOperand(0, Op0);
5934 return;
5935 }
5936 LLVM_DEBUG({
5937 dbgs() << "SLP: Reusing or shuffling of reordered extract sequence "
5938 "with order";
5939 for (unsigned Idx : CurrentOrder)
5940 dbgs() << " " << Idx;
5941 dbgs() << "\n";
5942 });
5943 fixupOrderingIndices(CurrentOrder);
5944 // Create the vectorized tree entry and record the non-identity order of
5945 // the extract indices so it can be applied when the bundle is vectorized.
5946 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5947 ReuseShuffleIndicies, CurrentOrder);
5948 // This is a special case, as it does not gather, but at the same time
5949 // we are not extending buildTree_rec() towards the operands.
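// Operand 0 is simply the common source vector of the extracts; record it
// directly instead of recursing into it.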
5950 ValueList Op0; 5951 Op0.assign(VL.size(), VL0->getOperand(0)); 5952 VectorizableTree.back()->setOperand(0, Op0); 5953 return; 5954 } 5955 case Instruction::InsertElement: { 5956 assert(ReuseShuffleIndicies.empty() && "All inserts should be unique"); 5957 5958 auto OrdCompare = [](const std::pair<int, int> &P1, 5959 const std::pair<int, int> &P2) { 5960 return P1.first > P2.first; 5961 }; 5962 PriorityQueue<std::pair<int, int>, SmallVector<std::pair<int, int>>, 5963 decltype(OrdCompare)> 5964 Indices(OrdCompare); 5965 for (int I = 0, E = VL.size(); I < E; ++I) { 5966 unsigned Idx = *getInsertIndex(VL[I]); 5967 Indices.emplace(Idx, I); 5968 } 5969 OrdersType CurrentOrder(VL.size(), VL.size()); 5970 bool IsIdentity = true; 5971 for (int I = 0, E = VL.size(); I < E; ++I) { 5972 CurrentOrder[Indices.top().second] = I; 5973 IsIdentity &= Indices.top().second == I; 5974 Indices.pop(); 5975 } 5976 if (IsIdentity) 5977 CurrentOrder.clear(); 5978 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 5979 std::nullopt, CurrentOrder); 5980 LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n"); 5981 5982 constexpr int NumOps = 2; 5983 ValueList VectorOperands[NumOps]; 5984 for (int I = 0; I < NumOps; ++I) { 5985 for (Value *V : VL) 5986 VectorOperands[I].push_back(cast<Instruction>(V)->getOperand(I)); 5987 5988 TE->setOperand(I, VectorOperands[I]); 5989 } 5990 buildTree_rec(VectorOperands[NumOps - 1], Depth + 1, {TE, NumOps - 1}); 5991 return; 5992 } 5993 case Instruction::Load: { 5994 // Check that a vectorized load would load the same memory as a scalar 5995 // load. For example, we don't want to vectorize loads that are smaller 5996 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 5997 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 5998 // from such a struct, we read/write packed bits disagreeing with the 5999 // unvectorized version. 6000 TreeEntry *TE = nullptr; 6001 fixupOrderingIndices(CurrentOrder); 6002 switch (State) { 6003 case TreeEntry::Vectorize: 6004 if (CurrentOrder.empty()) { 6005 // Original loads are consecutive and does not require reordering. 6006 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6007 ReuseShuffleIndicies); 6008 LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n"); 6009 } else { 6010 // Need to reorder. 6011 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6012 ReuseShuffleIndicies, CurrentOrder); 6013 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n"); 6014 } 6015 TE->setOperandsInOrder(); 6016 break; 6017 case TreeEntry::PossibleStridedVectorize: 6018 // Vectorizing non-consecutive loads with `llvm.masked.gather`. 6019 if (CurrentOrder.empty()) { 6020 TE = newTreeEntry(VL, TreeEntry::PossibleStridedVectorize, Bundle, S, 6021 UserTreeIdx, ReuseShuffleIndicies); 6022 } else { 6023 TE = newTreeEntry(VL, TreeEntry::PossibleStridedVectorize, Bundle, S, 6024 UserTreeIdx, ReuseShuffleIndicies, CurrentOrder); 6025 } 6026 TE->setOperandsInOrder(); 6027 buildTree_rec(PointerOps, Depth + 1, {TE, 0}); 6028 LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n"); 6029 break; 6030 case TreeEntry::ScatterVectorize: 6031 // Vectorizing non-consecutive loads with `llvm.masked.gather`. 
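// The pointer operands become the single child node (operand 0) below, so
// the vector of addresses for the gather is built by the normal recursion.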
6032 TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S, 6033 UserTreeIdx, ReuseShuffleIndicies); 6034 TE->setOperandsInOrder(); 6035 buildTree_rec(PointerOps, Depth + 1, {TE, 0}); 6036 LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n"); 6037 break; 6038 case TreeEntry::NeedToGather: 6039 llvm_unreachable("Unexpected loads state."); 6040 } 6041 return; 6042 } 6043 case Instruction::ZExt: 6044 case Instruction::SExt: 6045 case Instruction::FPToUI: 6046 case Instruction::FPToSI: 6047 case Instruction::FPExt: 6048 case Instruction::PtrToInt: 6049 case Instruction::IntToPtr: 6050 case Instruction::SIToFP: 6051 case Instruction::UIToFP: 6052 case Instruction::Trunc: 6053 case Instruction::FPTrunc: 6054 case Instruction::BitCast: { 6055 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6056 ReuseShuffleIndicies); 6057 LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n"); 6058 6059 TE->setOperandsInOrder(); 6060 for (unsigned I : seq<unsigned>(0, VL0->getNumOperands())) { 6061 ValueList Operands; 6062 // Prepare the operand vector. 6063 for (Value *V : VL) 6064 Operands.push_back(cast<Instruction>(V)->getOperand(I)); 6065 6066 buildTree_rec(Operands, Depth + 1, {TE, I}); 6067 } 6068 return; 6069 } 6070 case Instruction::ICmp: 6071 case Instruction::FCmp: { 6072 // Check that all of the compares have the same predicate. 6073 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 6074 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6075 ReuseShuffleIndicies); 6076 LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n"); 6077 6078 ValueList Left, Right; 6079 if (cast<CmpInst>(VL0)->isCommutative()) { 6080 // Commutative predicate - collect + sort operands of the instructions 6081 // so that each side is more likely to have the same opcode. 6082 assert(P0 == CmpInst::getSwappedPredicate(P0) && 6083 "Commutative Predicate mismatch"); 6084 reorderInputsAccordingToOpcode(VL, Left, Right, *TLI, *DL, *SE, *this); 6085 } else { 6086 // Collect operands - commute if it uses the swapped predicate. 6087 for (Value *V : VL) { 6088 auto *Cmp = cast<CmpInst>(V); 6089 Value *LHS = Cmp->getOperand(0); 6090 Value *RHS = Cmp->getOperand(1); 6091 if (Cmp->getPredicate() != P0) 6092 std::swap(LHS, RHS); 6093 Left.push_back(LHS); 6094 Right.push_back(RHS); 6095 } 6096 } 6097 TE->setOperand(0, Left); 6098 TE->setOperand(1, Right); 6099 buildTree_rec(Left, Depth + 1, {TE, 0}); 6100 buildTree_rec(Right, Depth + 1, {TE, 1}); 6101 return; 6102 } 6103 case Instruction::Select: 6104 case Instruction::FNeg: 6105 case Instruction::Add: 6106 case Instruction::FAdd: 6107 case Instruction::Sub: 6108 case Instruction::FSub: 6109 case Instruction::Mul: 6110 case Instruction::FMul: 6111 case Instruction::UDiv: 6112 case Instruction::SDiv: 6113 case Instruction::FDiv: 6114 case Instruction::URem: 6115 case Instruction::SRem: 6116 case Instruction::FRem: 6117 case Instruction::Shl: 6118 case Instruction::LShr: 6119 case Instruction::AShr: 6120 case Instruction::And: 6121 case Instruction::Or: 6122 case Instruction::Xor: { 6123 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6124 ReuseShuffleIndicies); 6125 LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n"); 6126 6127 // Sort operands of the instructions so that each side is more likely to 6128 // have the same opcode. 
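// E.g. for the bundle {a + (x * y), (z * w) + b} the commutative operands
// can be swapped per lane so that Left = {x * y, z * w} and Right = {a, b},
// giving each side a single opcode and a better chance to vectorize.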
6129 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) { 6130 ValueList Left, Right; 6131 reorderInputsAccordingToOpcode(VL, Left, Right, *TLI, *DL, *SE, *this); 6132 TE->setOperand(0, Left); 6133 TE->setOperand(1, Right); 6134 buildTree_rec(Left, Depth + 1, {TE, 0}); 6135 buildTree_rec(Right, Depth + 1, {TE, 1}); 6136 return; 6137 } 6138 6139 TE->setOperandsInOrder(); 6140 for (unsigned I : seq<unsigned>(0, VL0->getNumOperands())) { 6141 ValueList Operands; 6142 // Prepare the operand vector. 6143 for (Value *V : VL) 6144 Operands.push_back(cast<Instruction>(V)->getOperand(I)); 6145 6146 buildTree_rec(Operands, Depth + 1, {TE, I}); 6147 } 6148 return; 6149 } 6150 case Instruction::GetElementPtr: { 6151 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6152 ReuseShuffleIndicies); 6153 LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n"); 6154 SmallVector<ValueList, 2> Operands(2); 6155 // Prepare the operand vector for pointer operands. 6156 for (Value *V : VL) { 6157 auto *GEP = dyn_cast<GetElementPtrInst>(V); 6158 if (!GEP) { 6159 Operands.front().push_back(V); 6160 continue; 6161 } 6162 Operands.front().push_back(GEP->getPointerOperand()); 6163 } 6164 TE->setOperand(0, Operands.front()); 6165 // Need to cast all indices to the same type before vectorization to 6166 // avoid crash. 6167 // Required to be able to find correct matches between different gather 6168 // nodes and reuse the vectorized values rather than trying to gather them 6169 // again. 6170 int IndexIdx = 1; 6171 Type *VL0Ty = VL0->getOperand(IndexIdx)->getType(); 6172 Type *Ty = all_of(VL, 6173 [VL0Ty, IndexIdx](Value *V) { 6174 auto *GEP = dyn_cast<GetElementPtrInst>(V); 6175 if (!GEP) 6176 return true; 6177 return VL0Ty == GEP->getOperand(IndexIdx)->getType(); 6178 }) 6179 ? VL0Ty 6180 : DL->getIndexType(cast<GetElementPtrInst>(VL0) 6181 ->getPointerOperandType() 6182 ->getScalarType()); 6183 // Prepare the operand vector. 6184 for (Value *V : VL) { 6185 auto *I = dyn_cast<GetElementPtrInst>(V); 6186 if (!I) { 6187 Operands.back().push_back( 6188 ConstantInt::get(Ty, 0, /*isSigned=*/false)); 6189 continue; 6190 } 6191 auto *Op = I->getOperand(IndexIdx); 6192 auto *CI = dyn_cast<ConstantInt>(Op); 6193 if (!CI) 6194 Operands.back().push_back(Op); 6195 else 6196 Operands.back().push_back(ConstantFoldIntegerCast( 6197 CI, Ty, CI->getValue().isSignBitSet(), *DL)); 6198 } 6199 TE->setOperand(IndexIdx, Operands.back()); 6200 6201 for (unsigned I = 0, Ops = Operands.size(); I < Ops; ++I) 6202 buildTree_rec(Operands[I], Depth + 1, {TE, I}); 6203 return; 6204 } 6205 case Instruction::Store: { 6206 // Check if the stores are consecutive or if we need to swizzle them. 6207 ValueList Operands(VL.size()); 6208 auto *OIter = Operands.begin(); 6209 for (Value *V : VL) { 6210 auto *SI = cast<StoreInst>(V); 6211 *OIter = SI->getValueOperand(); 6212 ++OIter; 6213 } 6214 // Check that the sorted pointer operands are consecutive. 6215 if (CurrentOrder.empty()) { 6216 // Original stores are consecutive and does not require reordering. 
6217 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6218 ReuseShuffleIndicies); 6219 TE->setOperandsInOrder(); 6220 buildTree_rec(Operands, Depth + 1, {TE, 0}); 6221 LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n"); 6222 } else { 6223 fixupOrderingIndices(CurrentOrder); 6224 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6225 ReuseShuffleIndicies, CurrentOrder); 6226 TE->setOperandsInOrder(); 6227 buildTree_rec(Operands, Depth + 1, {TE, 0}); 6228 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n"); 6229 } 6230 return; 6231 } 6232 case Instruction::Call: { 6233 // Check if the calls are all to the same vectorizable intrinsic or 6234 // library function. 6235 CallInst *CI = cast<CallInst>(VL0); 6236 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6237 6238 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6239 ReuseShuffleIndicies); 6240 TE->setOperandsInOrder(); 6241 for (unsigned I : seq<unsigned>(0, CI->arg_size())) { 6242 // For scalar operands no need to create an entry since no need to 6243 // vectorize it. 6244 if (isVectorIntrinsicWithScalarOpAtArg(ID, I)) 6245 continue; 6246 ValueList Operands; 6247 // Prepare the operand vector. 6248 for (Value *V : VL) { 6249 auto *CI2 = cast<CallInst>(V); 6250 Operands.push_back(CI2->getArgOperand(I)); 6251 } 6252 buildTree_rec(Operands, Depth + 1, {TE, I}); 6253 } 6254 return; 6255 } 6256 case Instruction::ShuffleVector: { 6257 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6258 ReuseShuffleIndicies); 6259 LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n"); 6260 6261 // Reorder operands if reordering would enable vectorization. 6262 auto *CI = dyn_cast<CmpInst>(VL0); 6263 if (isa<BinaryOperator>(VL0) || CI) { 6264 ValueList Left, Right; 6265 if (!CI || all_of(VL, [](Value *V) { 6266 return cast<CmpInst>(V)->isCommutative(); 6267 })) { 6268 reorderInputsAccordingToOpcode(VL, Left, Right, *TLI, *DL, *SE, 6269 *this); 6270 } else { 6271 auto *MainCI = cast<CmpInst>(S.MainOp); 6272 auto *AltCI = cast<CmpInst>(S.AltOp); 6273 CmpInst::Predicate MainP = MainCI->getPredicate(); 6274 CmpInst::Predicate AltP = AltCI->getPredicate(); 6275 assert(MainP != AltP && 6276 "Expected different main/alternate predicates."); 6277 // Collect operands - commute if it uses the swapped predicate or 6278 // alternate operation. 6279 for (Value *V : VL) { 6280 auto *Cmp = cast<CmpInst>(V); 6281 Value *LHS = Cmp->getOperand(0); 6282 Value *RHS = Cmp->getOperand(1); 6283 6284 if (isAlternateInstruction(Cmp, MainCI, AltCI, *TLI)) { 6285 if (AltP == CmpInst::getSwappedPredicate(Cmp->getPredicate())) 6286 std::swap(LHS, RHS); 6287 } else { 6288 if (MainP == CmpInst::getSwappedPredicate(Cmp->getPredicate())) 6289 std::swap(LHS, RHS); 6290 } 6291 Left.push_back(LHS); 6292 Right.push_back(RHS); 6293 } 6294 } 6295 TE->setOperand(0, Left); 6296 TE->setOperand(1, Right); 6297 buildTree_rec(Left, Depth + 1, {TE, 0}); 6298 buildTree_rec(Right, Depth + 1, {TE, 1}); 6299 return; 6300 } 6301 6302 TE->setOperandsInOrder(); 6303 for (unsigned I : seq<unsigned>(0, VL0->getNumOperands())) { 6304 ValueList Operands; 6305 // Prepare the operand vector. 
6306 for (Value *V : VL) 6307 Operands.push_back(cast<Instruction>(V)->getOperand(I)); 6308 6309 buildTree_rec(Operands, Depth + 1, {TE, I}); 6310 } 6311 return; 6312 } 6313 default: 6314 break; 6315 } 6316 llvm_unreachable("Unexpected vectorization of the instructions."); 6317 } 6318 6319 unsigned BoUpSLP::canMapToVector(Type *T) const { 6320 unsigned N = 1; 6321 Type *EltTy = T; 6322 6323 while (isa<StructType, ArrayType, FixedVectorType>(EltTy)) { 6324 if (auto *ST = dyn_cast<StructType>(EltTy)) { 6325 // Check that struct is homogeneous. 6326 for (const auto *Ty : ST->elements()) 6327 if (Ty != *ST->element_begin()) 6328 return 0; 6329 N *= ST->getNumElements(); 6330 EltTy = *ST->element_begin(); 6331 } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) { 6332 N *= AT->getNumElements(); 6333 EltTy = AT->getElementType(); 6334 } else { 6335 auto *VT = cast<FixedVectorType>(EltTy); 6336 N *= VT->getNumElements(); 6337 EltTy = VT->getElementType(); 6338 } 6339 } 6340 6341 if (!isValidElementType(EltTy)) 6342 return 0; 6343 uint64_t VTSize = DL->getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N)); 6344 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || 6345 VTSize != DL->getTypeStoreSizeInBits(T)) 6346 return 0; 6347 return N; 6348 } 6349 6350 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 6351 SmallVectorImpl<unsigned> &CurrentOrder, 6352 bool ResizeAllowed) const { 6353 const auto *It = find_if(VL, [](Value *V) { 6354 return isa<ExtractElementInst, ExtractValueInst>(V); 6355 }); 6356 assert(It != VL.end() && "Expected at least one extract instruction."); 6357 auto *E0 = cast<Instruction>(*It); 6358 assert(all_of(VL, 6359 [](Value *V) { 6360 return isa<UndefValue, ExtractElementInst, ExtractValueInst>( 6361 V); 6362 }) && 6363 "Invalid opcode"); 6364 // Check if all of the extracts come from the same vector and from the 6365 // correct offset. 6366 Value *Vec = E0->getOperand(0); 6367 6368 CurrentOrder.clear(); 6369 6370 // We have to extract from a vector/aggregate with the same number of elements. 6371 unsigned NElts; 6372 if (E0->getOpcode() == Instruction::ExtractValue) { 6373 NElts = canMapToVector(Vec->getType()); 6374 if (!NElts) 6375 return false; 6376 // Check if load can be rewritten as load of vector. 6377 LoadInst *LI = dyn_cast<LoadInst>(Vec); 6378 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size())) 6379 return false; 6380 } else { 6381 NElts = cast<FixedVectorType>(Vec->getType())->getNumElements(); 6382 } 6383 6384 unsigned E = VL.size(); 6385 if (!ResizeAllowed && NElts != E) 6386 return false; 6387 SmallVector<int> Indices(E, PoisonMaskElem); 6388 unsigned MinIdx = NElts, MaxIdx = 0; 6389 for (auto [I, V] : enumerate(VL)) { 6390 auto *Inst = dyn_cast<Instruction>(V); 6391 if (!Inst) 6392 continue; 6393 if (Inst->getOperand(0) != Vec) 6394 return false; 6395 if (auto *EE = dyn_cast<ExtractElementInst>(Inst)) 6396 if (isa<UndefValue>(EE->getIndexOperand())) 6397 continue; 6398 std::optional<unsigned> Idx = getExtractIndex(Inst); 6399 if (!Idx) 6400 return false; 6401 const unsigned ExtIdx = *Idx; 6402 if (ExtIdx >= NElts) 6403 continue; 6404 Indices[I] = ExtIdx; 6405 if (MinIdx > ExtIdx) 6406 MinIdx = ExtIdx; 6407 if (MaxIdx < ExtIdx) 6408 MaxIdx = ExtIdx; 6409 } 6410 if (MaxIdx - MinIdx + 1 > E) 6411 return false; 6412 if (MaxIdx + 1 <= E) 6413 MinIdx = 0; 6414 6415 // Check that all of the indices extract from the correct offset. 
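// The loop below records, for each extract lane, the bundle position it has
// to move to. E.g. extracts of lanes {1, 0, 3, 2} yield CurrentOrder =
// {1, 0, 3, 2}, while the identity case {0, 1, 2, 3} clears the order.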
6416 bool ShouldKeepOrder = true;
6417 // Assign to all items the initial value E so we can check if the extract
6418 // instruction index was used already.
6419 // Also, later we can check that all the indices are used and we have a
6420 // consecutive access in the extract instructions, by checking that no
6421 // element of CurrentOrder still has value E.
6422 CurrentOrder.assign(E, E);
6423 for (unsigned I = 0; I < E; ++I) {
6424 if (Indices[I] == PoisonMaskElem)
6425 continue;
6426 const unsigned ExtIdx = Indices[I] - MinIdx;
6427 if (CurrentOrder[ExtIdx] != E) {
6428 CurrentOrder.clear();
6429 return false;
6430 }
6431 ShouldKeepOrder &= ExtIdx == I;
6432 CurrentOrder[ExtIdx] = I;
6433 }
6434 if (ShouldKeepOrder)
6435 CurrentOrder.clear();
6436
6437 return ShouldKeepOrder;
6438 }
6439
6440 bool BoUpSLP::areAllUsersVectorized(
6441 Instruction *I, const SmallDenseSet<Value *> *VectorizedVals) const {
6442 return (I->hasOneUse() && (!VectorizedVals || VectorizedVals->contains(I))) ||
6443 all_of(I->users(), [this](User *U) {
6444 return ScalarToTreeEntry.contains(U) ||
6445 isVectorLikeInstWithConstOps(U) ||
6446 (isa<ExtractElementInst>(U) && MustGather.contains(U));
6447 });
6448 }
6449
6450 static std::pair<InstructionCost, InstructionCost>
6451 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy,
6452 TargetTransformInfo *TTI, TargetLibraryInfo *TLI) {
6453 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
6454
6455 // Calculate the cost of the scalar and vector calls.
6456 SmallVector<Type *, 4> VecTys;
6457 for (Use &Arg : CI->args())
6458 VecTys.push_back(
6459 FixedVectorType::get(Arg->getType(), VecTy->getNumElements()));
6460 FastMathFlags FMF;
6461 if (auto *FPCI = dyn_cast<FPMathOperator>(CI))
6462 FMF = FPCI->getFastMathFlags();
6463 SmallVector<const Value *> Arguments(CI->args());
6464 IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, VecTys, FMF,
6465 dyn_cast<IntrinsicInst>(CI));
6466 auto IntrinsicCost =
6467 TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput);
6468
6469 auto Shape = VFShape::get(CI->getFunctionType(),
6470 ElementCount::getFixed(VecTy->getNumElements()),
6471 false /*HasGlobalPred*/);
6472 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
6473 auto LibCost = IntrinsicCost;
6474 if (!CI->isNoBuiltin() && VecFunc) {
6475 // Calculate the cost of the vector library call.
6476 // If the corresponding vector call is cheaper, return its cost.
6477 LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys, 6478 TTI::TCK_RecipThroughput); 6479 } 6480 return {IntrinsicCost, LibCost}; 6481 } 6482 6483 void BoUpSLP::TreeEntry::buildAltOpShuffleMask( 6484 const function_ref<bool(Instruction *)> IsAltOp, SmallVectorImpl<int> &Mask, 6485 SmallVectorImpl<Value *> *OpScalars, 6486 SmallVectorImpl<Value *> *AltScalars) const { 6487 unsigned Sz = Scalars.size(); 6488 Mask.assign(Sz, PoisonMaskElem); 6489 SmallVector<int> OrderMask; 6490 if (!ReorderIndices.empty()) 6491 inversePermutation(ReorderIndices, OrderMask); 6492 for (unsigned I = 0; I < Sz; ++I) { 6493 unsigned Idx = I; 6494 if (!ReorderIndices.empty()) 6495 Idx = OrderMask[I]; 6496 auto *OpInst = cast<Instruction>(Scalars[Idx]); 6497 if (IsAltOp(OpInst)) { 6498 Mask[I] = Sz + Idx; 6499 if (AltScalars) 6500 AltScalars->push_back(OpInst); 6501 } else { 6502 Mask[I] = Idx; 6503 if (OpScalars) 6504 OpScalars->push_back(OpInst); 6505 } 6506 } 6507 if (!ReuseShuffleIndices.empty()) { 6508 SmallVector<int> NewMask(ReuseShuffleIndices.size(), PoisonMaskElem); 6509 transform(ReuseShuffleIndices, NewMask.begin(), [&Mask](int Idx) { 6510 return Idx != PoisonMaskElem ? Mask[Idx] : PoisonMaskElem; 6511 }); 6512 Mask.swap(NewMask); 6513 } 6514 } 6515 6516 static bool isAlternateInstruction(const Instruction *I, 6517 const Instruction *MainOp, 6518 const Instruction *AltOp, 6519 const TargetLibraryInfo &TLI) { 6520 if (auto *MainCI = dyn_cast<CmpInst>(MainOp)) { 6521 auto *AltCI = cast<CmpInst>(AltOp); 6522 CmpInst::Predicate MainP = MainCI->getPredicate(); 6523 CmpInst::Predicate AltP = AltCI->getPredicate(); 6524 assert(MainP != AltP && "Expected different main/alternate predicates."); 6525 auto *CI = cast<CmpInst>(I); 6526 if (isCmpSameOrSwapped(MainCI, CI, TLI)) 6527 return false; 6528 if (isCmpSameOrSwapped(AltCI, CI, TLI)) 6529 return true; 6530 CmpInst::Predicate P = CI->getPredicate(); 6531 CmpInst::Predicate SwappedP = CmpInst::getSwappedPredicate(P); 6532 6533 assert((MainP == P || AltP == P || MainP == SwappedP || AltP == SwappedP) && 6534 "CmpInst expected to match either main or alternate predicate or " 6535 "their swap."); 6536 (void)AltP; 6537 return MainP != P && MainP != SwappedP; 6538 } 6539 return I->getOpcode() == AltOp->getOpcode(); 6540 } 6541 6542 TTI::OperandValueInfo BoUpSLP::getOperandInfo(ArrayRef<Value *> Ops) { 6543 assert(!Ops.empty()); 6544 const auto *Op0 = Ops.front(); 6545 6546 const bool IsConstant = all_of(Ops, [](Value *V) { 6547 // TODO: We should allow undef elements here 6548 return isConstant(V) && !isa<UndefValue>(V); 6549 }); 6550 const bool IsUniform = all_of(Ops, [=](Value *V) { 6551 // TODO: We should allow undef elements here 6552 return V == Op0; 6553 }); 6554 const bool IsPowerOfTwo = all_of(Ops, [](Value *V) { 6555 // TODO: We should allow undef elements here 6556 if (auto *CI = dyn_cast<ConstantInt>(V)) 6557 return CI->getValue().isPowerOf2(); 6558 return false; 6559 }); 6560 const bool IsNegatedPowerOfTwo = all_of(Ops, [](Value *V) { 6561 // TODO: We should allow undef elements here 6562 if (auto *CI = dyn_cast<ConstantInt>(V)) 6563 return CI->getValue().isNegatedPowerOf2(); 6564 return false; 6565 }); 6566 6567 TTI::OperandValueKind VK = TTI::OK_AnyValue; 6568 if (IsConstant && IsUniform) 6569 VK = TTI::OK_UniformConstantValue; 6570 else if (IsConstant) 6571 VK = TTI::OK_NonUniformConstantValue; 6572 else if (IsUniform) 6573 VK = TTI::OK_UniformValue; 6574 6575 TTI::OperandValueProperties VP = TTI::OP_None; 6576 VP = IsPowerOfTwo ? 
TTI::OP_PowerOf2 : VP; 6577 VP = IsNegatedPowerOfTwo ? TTI::OP_NegatedPowerOf2 : VP; 6578 6579 return {VK, VP}; 6580 } 6581 6582 namespace { 6583 /// The base class for shuffle instruction emission and shuffle cost estimation. 6584 class BaseShuffleAnalysis { 6585 protected: 6586 /// Checks if the mask is an identity mask. 6587 /// \param IsStrict if is true the function returns false if mask size does 6588 /// not match vector size. 6589 static bool isIdentityMask(ArrayRef<int> Mask, const FixedVectorType *VecTy, 6590 bool IsStrict) { 6591 int Limit = Mask.size(); 6592 int VF = VecTy->getNumElements(); 6593 int Index = -1; 6594 if (VF == Limit && ShuffleVectorInst::isIdentityMask(Mask, Limit)) 6595 return true; 6596 if (!IsStrict) { 6597 // Consider extract subvector starting from index 0. 6598 if (ShuffleVectorInst::isExtractSubvectorMask(Mask, VF, Index) && 6599 Index == 0) 6600 return true; 6601 // All VF-size submasks are identity (e.g. 6602 // <poison,poison,poison,poison,0,1,2,poison,poison,1,2,3> etc. for VF 4). 6603 if (Limit % VF == 0 && all_of(seq<int>(0, Limit / VF), [=](int Idx) { 6604 ArrayRef<int> Slice = Mask.slice(Idx * VF, VF); 6605 return all_of(Slice, [](int I) { return I == PoisonMaskElem; }) || 6606 ShuffleVectorInst::isIdentityMask(Slice, VF); 6607 })) 6608 return true; 6609 } 6610 return false; 6611 } 6612 6613 /// Tries to combine 2 different masks into single one. 6614 /// \param LocalVF Vector length of the permuted input vector. \p Mask may 6615 /// change the size of the vector, \p LocalVF is the original size of the 6616 /// shuffled vector. 6617 static void combineMasks(unsigned LocalVF, SmallVectorImpl<int> &Mask, 6618 ArrayRef<int> ExtMask) { 6619 unsigned VF = Mask.size(); 6620 SmallVector<int> NewMask(ExtMask.size(), PoisonMaskElem); 6621 for (int I = 0, Sz = ExtMask.size(); I < Sz; ++I) { 6622 if (ExtMask[I] == PoisonMaskElem) 6623 continue; 6624 int MaskedIdx = Mask[ExtMask[I] % VF]; 6625 NewMask[I] = 6626 MaskedIdx == PoisonMaskElem ? PoisonMaskElem : MaskedIdx % LocalVF; 6627 } 6628 Mask.swap(NewMask); 6629 } 6630 6631 /// Looks through shuffles trying to reduce final number of shuffles in the 6632 /// code. The function looks through the previously emitted shuffle 6633 /// instructions and properly mark indices in mask as undef. 6634 /// For example, given the code 6635 /// \code 6636 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0> 6637 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0> 6638 /// \endcode 6639 /// and if need to emit shuffle of %s1 and %s2 with mask <1, 0, 3, 2>, it will 6640 /// look through %s1 and %s2 and select vectors %0 and %1 with mask 6641 /// <0, 1, 2, 3> for the shuffle. 6642 /// If 2 operands are of different size, the smallest one will be resized and 6643 /// the mask recalculated properly. 6644 /// For example, given the code 6645 /// \code 6646 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0, 1, 0> 6647 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0, 1, 0> 6648 /// \endcode 6649 /// and if need to emit shuffle of %s1 and %s2 with mask <1, 0, 5, 4>, it will 6650 /// look through %s1 and %s2 and select vectors %0 and %1 with mask 6651 /// <0, 1, 2, 3> for the shuffle. 6652 /// So, it tries to transform permutations to simple vector merge, if 6653 /// possible. 6654 /// \param V The input vector which must be shuffled using the given \p Mask. 6655 /// If the better candidate is found, \p V is set to this best candidate 6656 /// vector. 6657 /// \param Mask The input mask for the shuffle. 
If the best candidate is found 6658 /// during looking-through-shuffles attempt, it is updated accordingly. 6659 /// \param SinglePermute true if the shuffle operation is originally a 6660 /// single-value-permutation. In this case the look-through-shuffles procedure 6661 /// may look for resizing shuffles as the best candidates. 6662 /// \return true if the shuffle results in the non-resizing identity shuffle 6663 /// (and thus can be ignored), false - otherwise. 6664 static bool peekThroughShuffles(Value *&V, SmallVectorImpl<int> &Mask, 6665 bool SinglePermute) { 6666 Value *Op = V; 6667 ShuffleVectorInst *IdentityOp = nullptr; 6668 SmallVector<int> IdentityMask; 6669 while (auto *SV = dyn_cast<ShuffleVectorInst>(Op)) { 6670 // Exit if not a fixed vector type or changing size shuffle. 6671 auto *SVTy = dyn_cast<FixedVectorType>(SV->getType()); 6672 if (!SVTy) 6673 break; 6674 // Remember the identity or broadcast mask, if it is not a resizing 6675 // shuffle. If no better candidates are found, this Op and Mask will be 6676 // used in the final shuffle. 6677 if (isIdentityMask(Mask, SVTy, /*IsStrict=*/false)) { 6678 if (!IdentityOp || !SinglePermute || 6679 (isIdentityMask(Mask, SVTy, /*IsStrict=*/true) && 6680 !ShuffleVectorInst::isZeroEltSplatMask(IdentityMask, 6681 IdentityMask.size()))) { 6682 IdentityOp = SV; 6683 // Store current mask in the IdentityMask so later we did not lost 6684 // this info if IdentityOp is selected as the best candidate for the 6685 // permutation. 6686 IdentityMask.assign(Mask); 6687 } 6688 } 6689 // Remember the broadcast mask. If no better candidates are found, this Op 6690 // and Mask will be used in the final shuffle. 6691 // Zero splat can be used as identity too, since it might be used with 6692 // mask <0, 1, 2, ...>, i.e. identity mask without extra reshuffling. 6693 // E.g. if need to shuffle the vector with the mask <3, 1, 2, 0>, which is 6694 // expensive, the analysis founds out, that the source vector is just a 6695 // broadcast, this original mask can be transformed to identity mask <0, 6696 // 1, 2, 3>. 6697 // \code 6698 // %0 = shuffle %v, poison, zeroinitalizer 6699 // %res = shuffle %0, poison, <3, 1, 2, 0> 6700 // \endcode 6701 // may be transformed to 6702 // \code 6703 // %0 = shuffle %v, poison, zeroinitalizer 6704 // %res = shuffle %0, poison, <0, 1, 2, 3> 6705 // \endcode 6706 if (SV->isZeroEltSplat()) { 6707 IdentityOp = SV; 6708 IdentityMask.assign(Mask); 6709 } 6710 int LocalVF = Mask.size(); 6711 if (auto *SVOpTy = 6712 dyn_cast<FixedVectorType>(SV->getOperand(0)->getType())) 6713 LocalVF = SVOpTy->getNumElements(); 6714 SmallVector<int> ExtMask(Mask.size(), PoisonMaskElem); 6715 for (auto [Idx, I] : enumerate(Mask)) { 6716 if (I == PoisonMaskElem || 6717 static_cast<unsigned>(I) >= SV->getShuffleMask().size()) 6718 continue; 6719 ExtMask[Idx] = SV->getMaskValue(I); 6720 } 6721 bool IsOp1Undef = 6722 isUndefVector(SV->getOperand(0), 6723 buildUseMask(LocalVF, ExtMask, UseMask::FirstArg)) 6724 .all(); 6725 bool IsOp2Undef = 6726 isUndefVector(SV->getOperand(1), 6727 buildUseMask(LocalVF, ExtMask, UseMask::SecondArg)) 6728 .all(); 6729 if (!IsOp1Undef && !IsOp2Undef) { 6730 // Update mask and mark undef elems. 
6731 for (int &I : Mask) { 6732 if (I == PoisonMaskElem) 6733 continue; 6734 if (SV->getMaskValue(I % SV->getShuffleMask().size()) == 6735 PoisonMaskElem) 6736 I = PoisonMaskElem; 6737 } 6738 break; 6739 } 6740 SmallVector<int> ShuffleMask(SV->getShuffleMask().begin(), 6741 SV->getShuffleMask().end()); 6742 combineMasks(LocalVF, ShuffleMask, Mask); 6743 Mask.swap(ShuffleMask); 6744 if (IsOp2Undef) 6745 Op = SV->getOperand(0); 6746 else 6747 Op = SV->getOperand(1); 6748 } 6749 if (auto *OpTy = dyn_cast<FixedVectorType>(Op->getType()); 6750 !OpTy || !isIdentityMask(Mask, OpTy, SinglePermute) || 6751 ShuffleVectorInst::isZeroEltSplatMask(Mask, Mask.size())) { 6752 if (IdentityOp) { 6753 V = IdentityOp; 6754 assert(Mask.size() == IdentityMask.size() && 6755 "Expected masks of same sizes."); 6756 // Clear known poison elements. 6757 for (auto [I, Idx] : enumerate(Mask)) 6758 if (Idx == PoisonMaskElem) 6759 IdentityMask[I] = PoisonMaskElem; 6760 Mask.swap(IdentityMask); 6761 auto *Shuffle = dyn_cast<ShuffleVectorInst>(V); 6762 return SinglePermute && 6763 (isIdentityMask(Mask, cast<FixedVectorType>(V->getType()), 6764 /*IsStrict=*/true) || 6765 (Shuffle && Mask.size() == Shuffle->getShuffleMask().size() && 6766 Shuffle->isZeroEltSplat() && 6767 ShuffleVectorInst::isZeroEltSplatMask(Mask, Mask.size()))); 6768 } 6769 V = Op; 6770 return false; 6771 } 6772 V = Op; 6773 return true; 6774 } 6775 6776 /// Smart shuffle instruction emission, walks through shuffles trees and 6777 /// tries to find the best matching vector for the actual shuffle 6778 /// instruction. 6779 template <typename T, typename ShuffleBuilderTy> 6780 static T createShuffle(Value *V1, Value *V2, ArrayRef<int> Mask, 6781 ShuffleBuilderTy &Builder) { 6782 assert(V1 && "Expected at least one vector value."); 6783 if (V2) 6784 Builder.resizeToMatch(V1, V2); 6785 int VF = Mask.size(); 6786 if (auto *FTy = dyn_cast<FixedVectorType>(V1->getType())) 6787 VF = FTy->getNumElements(); 6788 if (V2 && 6789 !isUndefVector(V2, buildUseMask(VF, Mask, UseMask::SecondArg)).all()) { 6790 // Peek through shuffles. 6791 Value *Op1 = V1; 6792 Value *Op2 = V2; 6793 int VF = 6794 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue(); 6795 SmallVector<int> CombinedMask1(Mask.size(), PoisonMaskElem); 6796 SmallVector<int> CombinedMask2(Mask.size(), PoisonMaskElem); 6797 for (int I = 0, E = Mask.size(); I < E; ++I) { 6798 if (Mask[I] < VF) 6799 CombinedMask1[I] = Mask[I]; 6800 else 6801 CombinedMask2[I] = Mask[I] - VF; 6802 } 6803 Value *PrevOp1; 6804 Value *PrevOp2; 6805 do { 6806 PrevOp1 = Op1; 6807 PrevOp2 = Op2; 6808 (void)peekThroughShuffles(Op1, CombinedMask1, /*SinglePermute=*/false); 6809 (void)peekThroughShuffles(Op2, CombinedMask2, /*SinglePermute=*/false); 6810 // Check if we have 2 resizing shuffles - need to peek through operands 6811 // again. 
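// E.g. two shuffles widening <2 x ty> sources to <4 x ty> whose second
// operands are unused can both be looked through: take the original
// <2 x ty> sources and fold the masks once more.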
6812 if (auto *SV1 = dyn_cast<ShuffleVectorInst>(Op1)) 6813 if (auto *SV2 = dyn_cast<ShuffleVectorInst>(Op2)) { 6814 SmallVector<int> ExtMask1(Mask.size(), PoisonMaskElem); 6815 for (auto [Idx, I] : enumerate(CombinedMask1)) { 6816 if (I == PoisonMaskElem) 6817 continue; 6818 ExtMask1[Idx] = SV1->getMaskValue(I); 6819 } 6820 SmallBitVector UseMask1 = buildUseMask( 6821 cast<FixedVectorType>(SV1->getOperand(1)->getType()) 6822 ->getNumElements(), 6823 ExtMask1, UseMask::SecondArg); 6824 SmallVector<int> ExtMask2(CombinedMask2.size(), PoisonMaskElem); 6825 for (auto [Idx, I] : enumerate(CombinedMask2)) { 6826 if (I == PoisonMaskElem) 6827 continue; 6828 ExtMask2[Idx] = SV2->getMaskValue(I); 6829 } 6830 SmallBitVector UseMask2 = buildUseMask( 6831 cast<FixedVectorType>(SV2->getOperand(1)->getType()) 6832 ->getNumElements(), 6833 ExtMask2, UseMask::SecondArg); 6834 if (SV1->getOperand(0)->getType() == 6835 SV2->getOperand(0)->getType() && 6836 SV1->getOperand(0)->getType() != SV1->getType() && 6837 isUndefVector(SV1->getOperand(1), UseMask1).all() && 6838 isUndefVector(SV2->getOperand(1), UseMask2).all()) { 6839 Op1 = SV1->getOperand(0); 6840 Op2 = SV2->getOperand(0); 6841 SmallVector<int> ShuffleMask1(SV1->getShuffleMask().begin(), 6842 SV1->getShuffleMask().end()); 6843 int LocalVF = ShuffleMask1.size(); 6844 if (auto *FTy = dyn_cast<FixedVectorType>(Op1->getType())) 6845 LocalVF = FTy->getNumElements(); 6846 combineMasks(LocalVF, ShuffleMask1, CombinedMask1); 6847 CombinedMask1.swap(ShuffleMask1); 6848 SmallVector<int> ShuffleMask2(SV2->getShuffleMask().begin(), 6849 SV2->getShuffleMask().end()); 6850 LocalVF = ShuffleMask2.size(); 6851 if (auto *FTy = dyn_cast<FixedVectorType>(Op2->getType())) 6852 LocalVF = FTy->getNumElements(); 6853 combineMasks(LocalVF, ShuffleMask2, CombinedMask2); 6854 CombinedMask2.swap(ShuffleMask2); 6855 } 6856 } 6857 } while (PrevOp1 != Op1 || PrevOp2 != Op2); 6858 Builder.resizeToMatch(Op1, Op2); 6859 VF = std::max(cast<VectorType>(Op1->getType()) 6860 ->getElementCount() 6861 .getKnownMinValue(), 6862 cast<VectorType>(Op2->getType()) 6863 ->getElementCount() 6864 .getKnownMinValue()); 6865 for (int I = 0, E = Mask.size(); I < E; ++I) { 6866 if (CombinedMask2[I] != PoisonMaskElem) { 6867 assert(CombinedMask1[I] == PoisonMaskElem && 6868 "Expected undefined mask element"); 6869 CombinedMask1[I] = CombinedMask2[I] + (Op1 == Op2 ? 0 : VF); 6870 } 6871 } 6872 if (Op1 == Op2 && 6873 (ShuffleVectorInst::isIdentityMask(CombinedMask1, VF) || 6874 (ShuffleVectorInst::isZeroEltSplatMask(CombinedMask1, VF) && 6875 isa<ShuffleVectorInst>(Op1) && 6876 cast<ShuffleVectorInst>(Op1)->getShuffleMask() == 6877 ArrayRef(CombinedMask1)))) 6878 return Builder.createIdentity(Op1); 6879 return Builder.createShuffleVector( 6880 Op1, Op1 == Op2 ? PoisonValue::get(Op1->getType()) : Op2, 6881 CombinedMask1); 6882 } 6883 if (isa<PoisonValue>(V1)) 6884 return Builder.createPoison( 6885 cast<VectorType>(V1->getType())->getElementType(), Mask.size()); 6886 SmallVector<int> NewMask(Mask.begin(), Mask.end()); 6887 bool IsIdentity = peekThroughShuffles(V1, NewMask, /*SinglePermute=*/true); 6888 assert(V1 && "Expected non-null value after looking through shuffles."); 6889 6890 if (!IsIdentity) 6891 return Builder.createShuffleVector(V1, NewMask); 6892 return Builder.createIdentity(V1); 6893 } 6894 }; 6895 } // namespace 6896 6897 /// Returns the cost of the shuffle instructions with the given \p Kind, vector 6898 /// type \p Tp and optional \p Mask. 
Adds SLP-specifc cost estimation for insert 6899 /// subvector pattern. 6900 static InstructionCost 6901 getShuffleCost(const TargetTransformInfo &TTI, TTI::ShuffleKind Kind, 6902 VectorType *Tp, ArrayRef<int> Mask = std::nullopt, 6903 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput, 6904 int Index = 0, VectorType *SubTp = nullptr, 6905 ArrayRef<const Value *> Args = std::nullopt) { 6906 if (Kind != TTI::SK_PermuteTwoSrc) 6907 return TTI.getShuffleCost(Kind, Tp, Mask, CostKind, Index, SubTp, Args); 6908 int NumSrcElts = Tp->getElementCount().getKnownMinValue(); 6909 int NumSubElts; 6910 if (Mask.size() > 2 && ShuffleVectorInst::isInsertSubvectorMask( 6911 Mask, NumSrcElts, NumSubElts, Index)) { 6912 if (Index + NumSubElts > NumSrcElts && 6913 Index + NumSrcElts <= static_cast<int>(Mask.size())) 6914 return TTI.getShuffleCost( 6915 TTI::SK_InsertSubvector, 6916 FixedVectorType::get(Tp->getElementType(), Mask.size()), std::nullopt, 6917 TTI::TCK_RecipThroughput, Index, Tp); 6918 } 6919 return TTI.getShuffleCost(Kind, Tp, Mask, CostKind, Index, SubTp, Args); 6920 } 6921 6922 /// Merges shuffle masks and emits final shuffle instruction, if required. It 6923 /// supports shuffling of 2 input vectors. It implements lazy shuffles emission, 6924 /// when the actual shuffle instruction is generated only if this is actually 6925 /// required. Otherwise, the shuffle instruction emission is delayed till the 6926 /// end of the process, to reduce the number of emitted instructions and further 6927 /// analysis/transformations. 6928 class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis { 6929 bool IsFinalized = false; 6930 SmallVector<int> CommonMask; 6931 SmallVector<PointerUnion<Value *, const TreeEntry *>, 2> InVectors; 6932 const TargetTransformInfo &TTI; 6933 InstructionCost Cost = 0; 6934 SmallDenseSet<Value *> VectorizedVals; 6935 BoUpSLP &R; 6936 SmallPtrSetImpl<Value *> &CheckedExtracts; 6937 constexpr static TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6938 /// While set, still trying to estimate the cost for the same nodes and we 6939 /// can delay actual cost estimation (virtual shuffle instruction emission). 6940 /// May help better estimate the cost if same nodes must be permuted + allows 6941 /// to move most of the long shuffles cost estimation to TTI. 6942 bool SameNodesEstimated = true; 6943 6944 static Constant *getAllOnesValue(const DataLayout &DL, Type *Ty) { 6945 if (Ty->getScalarType()->isPointerTy()) { 6946 Constant *Res = ConstantExpr::getIntToPtr( 6947 ConstantInt::getAllOnesValue( 6948 IntegerType::get(Ty->getContext(), 6949 DL.getTypeStoreSizeInBits(Ty->getScalarType()))), 6950 Ty->getScalarType()); 6951 if (auto *VTy = dyn_cast<VectorType>(Ty)) 6952 Res = ConstantVector::getSplat(VTy->getElementCount(), Res); 6953 return Res; 6954 } 6955 return Constant::getAllOnesValue(Ty); 6956 } 6957 6958 InstructionCost getBuildVectorCost(ArrayRef<Value *> VL, Value *Root) { 6959 if ((!Root && allConstant(VL)) || all_of(VL, UndefValue::classof)) 6960 return TTI::TCC_Free; 6961 auto *VecTy = FixedVectorType::get(VL.front()->getType(), VL.size()); 6962 InstructionCost GatherCost = 0; 6963 SmallVector<Value *> Gathers(VL.begin(), VL.end()); 6964 // Improve gather cost for gather of loads, if we can group some of the 6965 // loads into vector loads. 
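// The code below tries power-of-two slice sizes from VL.size() / 2 down to
// MinVF, remembers the slices that can be turned into consecutive or
// masked-gather loads, costs the remaining scalars as a gather and finally
// gives back the cost of the scalar loads that would become dead.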
6966 InstructionsState S = getSameOpcode(VL, *R.TLI); 6967 const unsigned Sz = R.DL->getTypeSizeInBits(VL.front()->getType()); 6968 unsigned MinVF = R.getMinVF(2 * Sz); 6969 if (VL.size() > 2 && 6970 ((S.getOpcode() == Instruction::Load && !S.isAltShuffle()) || 6971 (InVectors.empty() && 6972 any_of(seq<unsigned>(0, VL.size() / MinVF), 6973 [&](unsigned Idx) { 6974 ArrayRef<Value *> SubVL = VL.slice(Idx * MinVF, MinVF); 6975 InstructionsState S = getSameOpcode(SubVL, *R.TLI); 6976 return S.getOpcode() == Instruction::Load && 6977 !S.isAltShuffle(); 6978 }))) && 6979 !all_of(Gathers, [&](Value *V) { return R.getTreeEntry(V); }) && 6980 !isSplat(Gathers)) { 6981 SetVector<Value *> VectorizedLoads; 6982 SmallVector<LoadInst *> VectorizedStarts; 6983 SmallVector<std::pair<unsigned, unsigned>> ScatterVectorized; 6984 unsigned StartIdx = 0; 6985 unsigned VF = VL.size() / 2; 6986 for (; VF >= MinVF; VF /= 2) { 6987 for (unsigned Cnt = StartIdx, End = VL.size(); Cnt + VF <= End; 6988 Cnt += VF) { 6989 ArrayRef<Value *> Slice = VL.slice(Cnt, VF); 6990 if (S.getOpcode() != Instruction::Load || S.isAltShuffle()) { 6991 InstructionsState SliceS = getSameOpcode(Slice, *R.TLI); 6992 if (SliceS.getOpcode() != Instruction::Load || 6993 SliceS.isAltShuffle()) 6994 continue; 6995 } 6996 if (!VectorizedLoads.count(Slice.front()) && 6997 !VectorizedLoads.count(Slice.back()) && allSameBlock(Slice)) { 6998 SmallVector<Value *> PointerOps; 6999 OrdersType CurrentOrder; 7000 LoadsState LS = 7001 canVectorizeLoads(Slice, Slice.front(), TTI, *R.DL, *R.SE, 7002 *R.LI, *R.TLI, CurrentOrder, PointerOps); 7003 switch (LS) { 7004 case LoadsState::Vectorize: 7005 case LoadsState::ScatterVectorize: 7006 case LoadsState::PossibleStridedVectorize: 7007 // Mark the vectorized loads so that we don't vectorize them 7008 // again. 7009 // TODO: better handling of loads with reorders. 7010 if (LS == LoadsState::Vectorize && CurrentOrder.empty()) 7011 VectorizedStarts.push_back(cast<LoadInst>(Slice.front())); 7012 else 7013 ScatterVectorized.emplace_back(Cnt, VF); 7014 VectorizedLoads.insert(Slice.begin(), Slice.end()); 7015 // If we vectorized initial block, no need to try to vectorize 7016 // it again. 7017 if (Cnt == StartIdx) 7018 StartIdx += VF; 7019 break; 7020 case LoadsState::Gather: 7021 break; 7022 } 7023 } 7024 } 7025 // Check if the whole array was vectorized already - exit. 7026 if (StartIdx >= VL.size()) 7027 break; 7028 // Found vectorizable parts - exit. 7029 if (!VectorizedLoads.empty()) 7030 break; 7031 } 7032 if (!VectorizedLoads.empty()) { 7033 unsigned NumParts = TTI.getNumberOfParts(VecTy); 7034 bool NeedInsertSubvectorAnalysis = 7035 !NumParts || (VL.size() / VF) > NumParts; 7036 // Get the cost for gathered loads. 7037 for (unsigned I = 0, End = VL.size(); I < End; I += VF) { 7038 if (VectorizedLoads.contains(VL[I])) 7039 continue; 7040 GatherCost += getBuildVectorCost(VL.slice(I, VF), Root); 7041 } 7042 // Exclude potentially vectorized loads from list of gathered 7043 // scalars. 7044 Gathers.assign(Gathers.size(), PoisonValue::get(VL.front()->getType())); 7045 // The cost for vectorized loads. 
7046 InstructionCost ScalarsCost = 0; 7047 for (Value *V : VectorizedLoads) { 7048 auto *LI = cast<LoadInst>(V); 7049 ScalarsCost += 7050 TTI.getMemoryOpCost(Instruction::Load, LI->getType(), 7051 LI->getAlign(), LI->getPointerAddressSpace(), 7052 CostKind, TTI::OperandValueInfo(), LI); 7053 } 7054 auto *LoadTy = FixedVectorType::get(VL.front()->getType(), VF); 7055 for (LoadInst *LI : VectorizedStarts) { 7056 Align Alignment = LI->getAlign(); 7057 GatherCost += 7058 TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, 7059 LI->getPointerAddressSpace(), CostKind, 7060 TTI::OperandValueInfo(), LI); 7061 } 7062 for (std::pair<unsigned, unsigned> P : ScatterVectorized) { 7063 auto *LI0 = cast<LoadInst>(VL[P.first]); 7064 Align CommonAlignment = LI0->getAlign(); 7065 for (Value *V : VL.slice(P.first + 1, VF - 1)) 7066 CommonAlignment = 7067 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign()); 7068 GatherCost += TTI.getGatherScatterOpCost( 7069 Instruction::Load, LoadTy, LI0->getPointerOperand(), 7070 /*VariableMask=*/false, CommonAlignment, CostKind, LI0); 7071 } 7072 if (NeedInsertSubvectorAnalysis) { 7073 // Add the cost for the subvectors insert. 7074 for (int I = VF, E = VL.size(); I < E; I += VF) 7075 GatherCost += TTI.getShuffleCost(TTI::SK_InsertSubvector, VecTy, 7076 std::nullopt, CostKind, I, LoadTy); 7077 } 7078 GatherCost -= ScalarsCost; 7079 } 7080 } else if (!Root && isSplat(VL)) { 7081 // Found the broadcasting of the single scalar, calculate the cost as 7082 // the broadcast. 7083 const auto *It = 7084 find_if(VL, [](Value *V) { return !isa<UndefValue>(V); }); 7085 assert(It != VL.end() && "Expected at least one non-undef value."); 7086 // Add broadcast for non-identity shuffle only. 7087 bool NeedShuffle = 7088 count(VL, *It) > 1 && 7089 (VL.front() != *It || !all_of(VL.drop_front(), UndefValue::classof)); 7090 InstructionCost InsertCost = TTI.getVectorInstrCost( 7091 Instruction::InsertElement, VecTy, CostKind, 7092 NeedShuffle ? 0 : std::distance(VL.begin(), It), 7093 PoisonValue::get(VecTy), *It); 7094 return InsertCost + 7095 (NeedShuffle ? TTI.getShuffleCost( 7096 TargetTransformInfo::SK_Broadcast, VecTy, 7097 /*Mask=*/std::nullopt, CostKind, /*Index=*/0, 7098 /*SubTp=*/nullptr, /*Args=*/*It) 7099 : TTI::TCC_Free); 7100 } 7101 return GatherCost + 7102 (all_of(Gathers, UndefValue::classof) 7103 ? TTI::TCC_Free 7104 : R.getGatherCost(Gathers, !Root && VL.equals(Gathers))); 7105 }; 7106 7107 /// Compute the cost of creating a vector containing the extracted values from 7108 /// \p VL. 7109 InstructionCost 7110 computeExtractCost(ArrayRef<Value *> VL, ArrayRef<int> Mask, 7111 ArrayRef<std::optional<TTI::ShuffleKind>> ShuffleKinds, 7112 unsigned NumParts) { 7113 assert(VL.size() > NumParts && "Unexpected scalarized shuffle."); 7114 unsigned NumElts = 7115 std::accumulate(VL.begin(), VL.end(), 0, [](unsigned Sz, Value *V) { 7116 auto *EE = dyn_cast<ExtractElementInst>(V); 7117 if (!EE) 7118 return Sz; 7119 auto *VecTy = cast<FixedVectorType>(EE->getVectorOperandType()); 7120 return std::max(Sz, VecTy->getNumElements()); 7121 }); 7122 unsigned NumSrcRegs = TTI.getNumberOfParts( 7123 FixedVectorType::get(VL.front()->getType(), NumElts)); 7124 if (NumSrcRegs == 0) 7125 NumSrcRegs = 1; 7126 // FIXME: this must be moved to TTI for better estimation. 
7127 unsigned EltsPerVector = PowerOf2Ceil(std::max( 7128 divideCeil(VL.size(), NumParts), divideCeil(NumElts, NumSrcRegs))); 7129 auto CheckPerRegistersShuffle = 7130 [&](MutableArrayRef<int> Mask) -> std::optional<TTI::ShuffleKind> { 7131 DenseSet<int> RegIndices; 7132 // Check that if trying to permute same single/2 input vectors. 7133 TTI::ShuffleKind ShuffleKind = TTI::SK_PermuteSingleSrc; 7134 int FirstRegId = -1; 7135 for (int &I : Mask) { 7136 if (I == PoisonMaskElem) 7137 continue; 7138 int RegId = (I / NumElts) * NumParts + (I % NumElts) / EltsPerVector; 7139 if (FirstRegId < 0) 7140 FirstRegId = RegId; 7141 RegIndices.insert(RegId); 7142 if (RegIndices.size() > 2) 7143 return std::nullopt; 7144 if (RegIndices.size() == 2) 7145 ShuffleKind = TTI::SK_PermuteTwoSrc; 7146 I = (I % NumElts) % EltsPerVector + 7147 (RegId == FirstRegId ? 0 : EltsPerVector); 7148 } 7149 return ShuffleKind; 7150 }; 7151 InstructionCost Cost = 0; 7152 7153 // Process extracts in blocks of EltsPerVector to check if the source vector 7154 // operand can be re-used directly. If not, add the cost of creating a 7155 // shuffle to extract the values into a vector register. 7156 for (unsigned Part = 0; Part < NumParts; ++Part) { 7157 if (!ShuffleKinds[Part]) 7158 continue; 7159 ArrayRef<int> MaskSlice = 7160 Mask.slice(Part * EltsPerVector, 7161 (Part == NumParts - 1 && Mask.size() % EltsPerVector != 0) 7162 ? Mask.size() % EltsPerVector 7163 : EltsPerVector); 7164 SmallVector<int> SubMask(EltsPerVector, PoisonMaskElem); 7165 copy(MaskSlice, SubMask.begin()); 7166 std::optional<TTI::ShuffleKind> RegShuffleKind = 7167 CheckPerRegistersShuffle(SubMask); 7168 if (!RegShuffleKind) { 7169 Cost += ::getShuffleCost( 7170 TTI, *ShuffleKinds[Part], 7171 FixedVectorType::get(VL.front()->getType(), NumElts), MaskSlice); 7172 continue; 7173 } 7174 if (*RegShuffleKind != TTI::SK_PermuteSingleSrc || 7175 !ShuffleVectorInst::isIdentityMask(SubMask, EltsPerVector)) { 7176 Cost += ::getShuffleCost( 7177 TTI, *RegShuffleKind, 7178 FixedVectorType::get(VL.front()->getType(), EltsPerVector), 7179 SubMask); 7180 } 7181 } 7182 return Cost; 7183 } 7184 /// Transforms mask \p CommonMask per given \p Mask to make proper set after 7185 /// shuffle emission. 7186 static void transformMaskAfterShuffle(MutableArrayRef<int> CommonMask, 7187 ArrayRef<int> Mask) { 7188 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 7189 if (Mask[Idx] != PoisonMaskElem) 7190 CommonMask[Idx] = Idx; 7191 } 7192 /// Adds the cost of reshuffling \p E1 and \p E2 (if present), using given 7193 /// mask \p Mask, register number \p Part, that includes \p SliceSize 7194 /// elements. 7195 void estimateNodesPermuteCost(const TreeEntry &E1, const TreeEntry *E2, 7196 ArrayRef<int> Mask, unsigned Part, 7197 unsigned SliceSize) { 7198 if (SameNodesEstimated) { 7199 // Delay the cost estimation if the same nodes are reshuffling. 7200 // If we already requested the cost of reshuffling of E1 and E2 before, no 7201 // need to estimate another cost with the sub-Mask, instead include this 7202 // sub-Mask into the CommonMask to estimate it later and avoid double cost 7203 // estimation. 
7204 if ((InVectors.size() == 2 && 7205 InVectors.front().get<const TreeEntry *>() == &E1 && 7206 InVectors.back().get<const TreeEntry *>() == E2) || 7207 (!E2 && InVectors.front().get<const TreeEntry *>() == &E1)) { 7208 assert(all_of(ArrayRef(CommonMask).slice(Part * SliceSize, SliceSize), 7209 [](int Idx) { return Idx == PoisonMaskElem; }) && 7210 "Expected all poisoned elements."); 7211 ArrayRef<int> SubMask = 7212 ArrayRef(Mask).slice(Part * SliceSize, SliceSize); 7213 copy(SubMask, std::next(CommonMask.begin(), SliceSize * Part)); 7214 return; 7215 } 7216 // Found non-matching nodes - need to estimate the cost for the matched 7217 // and transform mask. 7218 Cost += createShuffle(InVectors.front(), 7219 InVectors.size() == 1 ? nullptr : InVectors.back(), 7220 CommonMask); 7221 transformMaskAfterShuffle(CommonMask, CommonMask); 7222 } 7223 SameNodesEstimated = false; 7224 Cost += createShuffle(&E1, E2, Mask); 7225 transformMaskAfterShuffle(CommonMask, Mask); 7226 } 7227 7228 class ShuffleCostBuilder { 7229 const TargetTransformInfo &TTI; 7230 7231 static bool isEmptyOrIdentity(ArrayRef<int> Mask, unsigned VF) { 7232 int Index = -1; 7233 return Mask.empty() || 7234 (VF == Mask.size() && 7235 ShuffleVectorInst::isIdentityMask(Mask, VF)) || 7236 (ShuffleVectorInst::isExtractSubvectorMask(Mask, VF, Index) && 7237 Index == 0); 7238 } 7239 7240 public: 7241 ShuffleCostBuilder(const TargetTransformInfo &TTI) : TTI(TTI) {} 7242 ~ShuffleCostBuilder() = default; 7243 InstructionCost createShuffleVector(Value *V1, Value *, 7244 ArrayRef<int> Mask) const { 7245 // Empty mask or identity mask are free. 7246 unsigned VF = 7247 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue(); 7248 if (isEmptyOrIdentity(Mask, VF)) 7249 return TTI::TCC_Free; 7250 return ::getShuffleCost(TTI, TTI::SK_PermuteTwoSrc, 7251 cast<VectorType>(V1->getType()), Mask); 7252 } 7253 InstructionCost createShuffleVector(Value *V1, ArrayRef<int> Mask) const { 7254 // Empty mask or identity mask are free. 7255 unsigned VF = 7256 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue(); 7257 if (isEmptyOrIdentity(Mask, VF)) 7258 return TTI::TCC_Free; 7259 return TTI.getShuffleCost(TTI::SK_PermuteSingleSrc, 7260 cast<VectorType>(V1->getType()), Mask); 7261 } 7262 InstructionCost createIdentity(Value *) const { return TTI::TCC_Free; } 7263 InstructionCost createPoison(Type *Ty, unsigned VF) const { 7264 return TTI::TCC_Free; 7265 } 7266 void resizeToMatch(Value *&, Value *&) const {} 7267 }; 7268 7269 /// Smart shuffle instruction emission, walks through shuffles trees and 7270 /// tries to find the best matching vector for the actual shuffle 7271 /// instruction. 7272 InstructionCost 7273 createShuffle(const PointerUnion<Value *, const TreeEntry *> &P1, 7274 const PointerUnion<Value *, const TreeEntry *> &P2, 7275 ArrayRef<int> Mask) { 7276 ShuffleCostBuilder Builder(TTI); 7277 SmallVector<int> CommonMask(Mask.begin(), Mask.end()); 7278 Value *V1 = P1.dyn_cast<Value *>(), *V2 = P2.dyn_cast<Value *>(); 7279 unsigned CommonVF = Mask.size(); 7280 if (!V1 && !V2 && !P2.isNull()) { 7281 // Shuffle 2 entry nodes. 
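// The tree entries have no IR vectors yet, so placeholder constant vectors
// of the common width (zeros and all-ones, to keep the two sources distinct)
// are materialized below; the cost model only needs their types and the mask.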
7282 const TreeEntry *E = P1.get<const TreeEntry *>(); 7283 unsigned VF = E->getVectorFactor(); 7284 const TreeEntry *E2 = P2.get<const TreeEntry *>(); 7285 CommonVF = std::max(VF, E2->getVectorFactor()); 7286 assert(all_of(Mask, 7287 [=](int Idx) { 7288 return Idx < 2 * static_cast<int>(CommonVF); 7289 }) && 7290 "All elements in mask must be less than 2 * CommonVF."); 7291 if (E->Scalars.size() == E2->Scalars.size()) { 7292 SmallVector<int> EMask = E->getCommonMask(); 7293 SmallVector<int> E2Mask = E2->getCommonMask(); 7294 if (!EMask.empty() || !E2Mask.empty()) { 7295 for (int &Idx : CommonMask) { 7296 if (Idx == PoisonMaskElem) 7297 continue; 7298 if (Idx < static_cast<int>(CommonVF) && !EMask.empty()) 7299 Idx = EMask[Idx]; 7300 else if (Idx >= static_cast<int>(CommonVF)) 7301 Idx = (E2Mask.empty() ? Idx - CommonVF : E2Mask[Idx - CommonVF]) + 7302 E->Scalars.size(); 7303 } 7304 } 7305 CommonVF = E->Scalars.size(); 7306 } 7307 V1 = Constant::getNullValue( 7308 FixedVectorType::get(E->Scalars.front()->getType(), CommonVF)); 7309 V2 = getAllOnesValue( 7310 *R.DL, FixedVectorType::get(E->Scalars.front()->getType(), CommonVF)); 7311 } else if (!V1 && P2.isNull()) { 7312 // Shuffle single entry node. 7313 const TreeEntry *E = P1.get<const TreeEntry *>(); 7314 unsigned VF = E->getVectorFactor(); 7315 CommonVF = VF; 7316 assert( 7317 all_of(Mask, 7318 [=](int Idx) { return Idx < static_cast<int>(CommonVF); }) && 7319 "All elements in mask must be less than CommonVF."); 7320 if (E->Scalars.size() == Mask.size() && VF != Mask.size()) { 7321 SmallVector<int> EMask = E->getCommonMask(); 7322 assert(!EMask.empty() && "Expected non-empty common mask."); 7323 for (int &Idx : CommonMask) { 7324 if (Idx != PoisonMaskElem) 7325 Idx = EMask[Idx]; 7326 } 7327 CommonVF = E->Scalars.size(); 7328 } 7329 V1 = Constant::getNullValue( 7330 FixedVectorType::get(E->Scalars.front()->getType(), CommonVF)); 7331 } else if (V1 && P2.isNull()) { 7332 // Shuffle single vector. 7333 CommonVF = cast<FixedVectorType>(V1->getType())->getNumElements(); 7334 assert( 7335 all_of(Mask, 7336 [=](int Idx) { return Idx < static_cast<int>(CommonVF); }) && 7337 "All elements in mask must be less than CommonVF."); 7338 } else if (V1 && !V2) { 7339 // Shuffle vector and tree node. 7340 unsigned VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 7341 const TreeEntry *E2 = P2.get<const TreeEntry *>(); 7342 CommonVF = std::max(VF, E2->getVectorFactor()); 7343 assert(all_of(Mask, 7344 [=](int Idx) { 7345 return Idx < 2 * static_cast<int>(CommonVF); 7346 }) && 7347 "All elements in mask must be less than 2 * CommonVF."); 7348 if (E2->Scalars.size() == VF && VF != CommonVF) { 7349 SmallVector<int> E2Mask = E2->getCommonMask(); 7350 assert(!E2Mask.empty() && "Expected non-empty common mask."); 7351 for (int &Idx : CommonMask) { 7352 if (Idx == PoisonMaskElem) 7353 continue; 7354 if (Idx >= static_cast<int>(CommonVF)) 7355 Idx = E2Mask[Idx - CommonVF] + VF; 7356 } 7357 CommonVF = VF; 7358 } 7359 V1 = Constant::getNullValue( 7360 FixedVectorType::get(E2->Scalars.front()->getType(), CommonVF)); 7361 V2 = getAllOnesValue( 7362 *R.DL, 7363 FixedVectorType::get(E2->Scalars.front()->getType(), CommonVF)); 7364 } else if (!V1 && V2) { 7365 // Shuffle vector and tree node. 
7366 unsigned VF = cast<FixedVectorType>(V2->getType())->getNumElements(); 7367 const TreeEntry *E1 = P1.get<const TreeEntry *>(); 7368 CommonVF = std::max(VF, E1->getVectorFactor()); 7369 assert(all_of(Mask, 7370 [=](int Idx) { 7371 return Idx < 2 * static_cast<int>(CommonVF); 7372 }) && 7373 "All elements in mask must be less than 2 * CommonVF."); 7374 if (E1->Scalars.size() == VF && VF != CommonVF) { 7375 SmallVector<int> E1Mask = E1->getCommonMask(); 7376 assert(!E1Mask.empty() && "Expected non-empty common mask."); 7377 for (int &Idx : CommonMask) { 7378 if (Idx == PoisonMaskElem) 7379 continue; 7380 if (Idx >= static_cast<int>(CommonVF)) 7381 Idx = E1Mask[Idx - CommonVF] + VF; 7382 } 7383 CommonVF = VF; 7384 } 7385 V1 = Constant::getNullValue( 7386 FixedVectorType::get(E1->Scalars.front()->getType(), CommonVF)); 7387 V2 = getAllOnesValue( 7388 *R.DL, 7389 FixedVectorType::get(E1->Scalars.front()->getType(), CommonVF)); 7390 } else { 7391 assert(V1 && V2 && "Expected both vectors."); 7392 unsigned VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 7393 CommonVF = 7394 std::max(VF, cast<FixedVectorType>(V2->getType())->getNumElements()); 7395 assert(all_of(Mask, 7396 [=](int Idx) { 7397 return Idx < 2 * static_cast<int>(CommonVF); 7398 }) && 7399 "All elements in mask must be less than 2 * CommonVF."); 7400 if (V1->getType() != V2->getType()) { 7401 V1 = Constant::getNullValue(FixedVectorType::get( 7402 cast<FixedVectorType>(V1->getType())->getElementType(), CommonVF)); 7403 V2 = getAllOnesValue( 7404 *R.DL, FixedVectorType::get( 7405 cast<FixedVectorType>(V1->getType())->getElementType(), 7406 CommonVF)); 7407 } 7408 } 7409 InVectors.front() = Constant::getNullValue(FixedVectorType::get( 7410 cast<FixedVectorType>(V1->getType())->getElementType(), 7411 CommonMask.size())); 7412 if (InVectors.size() == 2) 7413 InVectors.pop_back(); 7414 return BaseShuffleAnalysis::createShuffle<InstructionCost>( 7415 V1, V2, CommonMask, Builder); 7416 } 7417 7418 public: 7419 ShuffleCostEstimator(TargetTransformInfo &TTI, 7420 ArrayRef<Value *> VectorizedVals, BoUpSLP &R, 7421 SmallPtrSetImpl<Value *> &CheckedExtracts) 7422 : TTI(TTI), VectorizedVals(VectorizedVals.begin(), VectorizedVals.end()), 7423 R(R), CheckedExtracts(CheckedExtracts) {} 7424 Value *adjustExtracts(const TreeEntry *E, MutableArrayRef<int> Mask, 7425 ArrayRef<std::optional<TTI::ShuffleKind>> ShuffleKinds, 7426 unsigned NumParts, bool &UseVecBaseAsInput) { 7427 UseVecBaseAsInput = false; 7428 if (Mask.empty()) 7429 return nullptr; 7430 Value *VecBase = nullptr; 7431 ArrayRef<Value *> VL = E->Scalars; 7432 // If the resulting type is scalarized, do not adjust the cost. 7433 if (NumParts == VL.size()) 7434 return nullptr; 7435 // Check if it can be considered reused if same extractelements were 7436 // vectorized already. 
7437 bool PrevNodeFound = any_of( 7438 ArrayRef(R.VectorizableTree).take_front(E->Idx), 7439 [&](const std::unique_ptr<TreeEntry> &TE) { 7440 return ((!TE->isAltShuffle() && 7441 TE->getOpcode() == Instruction::ExtractElement) || 7442 TE->State == TreeEntry::NeedToGather) && 7443 all_of(enumerate(TE->Scalars), [&](auto &&Data) { 7444 return VL.size() > Data.index() && 7445 (Mask[Data.index()] == PoisonMaskElem || 7446 isa<UndefValue>(VL[Data.index()]) || 7447 Data.value() == VL[Data.index()]); 7448 }); 7449 }); 7450 SmallPtrSet<Value *, 4> UniqueBases; 7451 unsigned SliceSize = VL.size() / NumParts; 7452 for (unsigned Part = 0; Part < NumParts; ++Part) { 7453 ArrayRef<int> SubMask = Mask.slice(Part * SliceSize, SliceSize); 7454 for (auto [I, V] : enumerate(VL.slice(Part * SliceSize, SliceSize))) { 7455 // Ignore non-extractelement scalars. 7456 if (isa<UndefValue>(V) || 7457 (!SubMask.empty() && SubMask[I] == PoisonMaskElem)) 7458 continue; 7459 // If all users of instruction are going to be vectorized and this 7460 // instruction itself is not going to be vectorized, consider this 7461 // instruction as dead and remove its cost from the final cost of the 7462 // vectorized tree. 7463 // Also, avoid adjusting the cost for extractelements with multiple uses 7464 // in different graph entries. 7465 auto *EE = cast<ExtractElementInst>(V); 7466 VecBase = EE->getVectorOperand(); 7467 UniqueBases.insert(VecBase); 7468 const TreeEntry *VE = R.getTreeEntry(V); 7469 if (!CheckedExtracts.insert(V).second || 7470 !R.areAllUsersVectorized(cast<Instruction>(V), &VectorizedVals) || 7471 (VE && VE != E)) 7472 continue; 7473 std::optional<unsigned> EEIdx = getExtractIndex(EE); 7474 if (!EEIdx) 7475 continue; 7476 unsigned Idx = *EEIdx; 7477 // Take credit for instruction that will become dead. 7478 if (EE->hasOneUse() || !PrevNodeFound) { 7479 Instruction *Ext = EE->user_back(); 7480 if (isa<SExtInst, ZExtInst>(Ext) && all_of(Ext->users(), [](User *U) { 7481 return isa<GetElementPtrInst>(U); 7482 })) { 7483 // Use getExtractWithExtendCost() to calculate the cost of 7484 // extractelement/ext pair. 7485 Cost -= 7486 TTI.getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(), 7487 EE->getVectorOperandType(), Idx); 7488 // Add back the cost of s|zext which is subtracted separately. 7489 Cost += TTI.getCastInstrCost( 7490 Ext->getOpcode(), Ext->getType(), EE->getType(), 7491 TTI::getCastContextHint(Ext), CostKind, Ext); 7492 continue; 7493 } 7494 } 7495 Cost -= TTI.getVectorInstrCost(*EE, EE->getVectorOperandType(), 7496 CostKind, Idx); 7497 } 7498 } 7499 // Check that gather of extractelements can be represented as just a 7500 // shuffle of a single/two vectors the scalars are extracted from. 7501 // Found the bunch of extractelement instructions that must be gathered 7502 // into a vector and can be represented as a permutation elements in a 7503 // single input vector or of 2 input vectors. 7504 // Done for reused if same extractelements were vectorized already. 
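// Illustrative (hypothetical) IR for the case handled here:
//   %e0 = extractelement <4 x i32> %v, i32 0
//   %e1 = extractelement <4 x i32> %v, i32 1
// Gathering {%e0, %e1} can be modeled as a (sub)vector shuffle of %v, so
// computeExtractCost() can charge a (possibly free) permutation rather than
// a per-element buildvector.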
7505 if (!PrevNodeFound) 7506 Cost += computeExtractCost(VL, Mask, ShuffleKinds, NumParts); 7507 InVectors.assign(1, E); 7508 CommonMask.assign(Mask.begin(), Mask.end()); 7509 transformMaskAfterShuffle(CommonMask, CommonMask); 7510 SameNodesEstimated = false; 7511 if (NumParts != 1 && UniqueBases.size() != 1) { 7512 UseVecBaseAsInput = true; 7513 VecBase = Constant::getNullValue( 7514 FixedVectorType::get(VL.front()->getType(), CommonMask.size())); 7515 } 7516 return VecBase; 7517 } 7518 /// Checks if the specified entry \p E needs to be delayed because of its 7519 /// dependency nodes. 7520 std::optional<InstructionCost> 7521 needToDelay(const TreeEntry *, 7522 ArrayRef<SmallVector<const TreeEntry *>>) const { 7523 // No need to delay the cost estimation during analysis. 7524 return std::nullopt; 7525 } 7526 void add(const TreeEntry &E1, const TreeEntry &E2, ArrayRef<int> Mask) { 7527 if (&E1 == &E2) { 7528 assert(all_of(Mask, 7529 [&](int Idx) { 7530 return Idx < static_cast<int>(E1.getVectorFactor()); 7531 }) && 7532 "Expected single vector shuffle mask."); 7533 add(E1, Mask); 7534 return; 7535 } 7536 if (InVectors.empty()) { 7537 CommonMask.assign(Mask.begin(), Mask.end()); 7538 InVectors.assign({&E1, &E2}); 7539 return; 7540 } 7541 assert(!CommonMask.empty() && "Expected non-empty common mask."); 7542 auto *MaskVecTy = 7543 FixedVectorType::get(E1.Scalars.front()->getType(), Mask.size()); 7544 unsigned NumParts = TTI.getNumberOfParts(MaskVecTy); 7545 if (NumParts == 0 || NumParts >= Mask.size()) 7546 NumParts = 1; 7547 unsigned SliceSize = Mask.size() / NumParts; 7548 const auto *It = 7549 find_if(Mask, [](int Idx) { return Idx != PoisonMaskElem; }); 7550 unsigned Part = std::distance(Mask.begin(), It) / SliceSize; 7551 estimateNodesPermuteCost(E1, &E2, Mask, Part, SliceSize); 7552 } 7553 void add(const TreeEntry &E1, ArrayRef<int> Mask) { 7554 if (InVectors.empty()) { 7555 CommonMask.assign(Mask.begin(), Mask.end()); 7556 InVectors.assign(1, &E1); 7557 return; 7558 } 7559 assert(!CommonMask.empty() && "Expected non-empty common mask."); 7560 auto *MaskVecTy = 7561 FixedVectorType::get(E1.Scalars.front()->getType(), Mask.size()); 7562 unsigned NumParts = TTI.getNumberOfParts(MaskVecTy); 7563 if (NumParts == 0 || NumParts >= Mask.size()) 7564 NumParts = 1; 7565 unsigned SliceSize = Mask.size() / NumParts; 7566 const auto *It = 7567 find_if(Mask, [](int Idx) { return Idx != PoisonMaskElem; }); 7568 unsigned Part = std::distance(Mask.begin(), It) / SliceSize; 7569 estimateNodesPermuteCost(E1, nullptr, Mask, Part, SliceSize); 7570 if (!SameNodesEstimated && InVectors.size() == 1) 7571 InVectors.emplace_back(&E1); 7572 } 7573 /// Adds 2 input vectors and the mask for their shuffling. 7574 void add(Value *V1, Value *V2, ArrayRef<int> Mask) { 7575 // May come only for shuffling of 2 vectors with extractelements, already 7576 // handled in adjustExtracts. 7577 assert(InVectors.size() == 1 && 7578 all_of(enumerate(CommonMask), 7579 [&](auto P) { 7580 if (P.value() == PoisonMaskElem) 7581 return Mask[P.index()] == PoisonMaskElem; 7582 auto *EI = 7583 cast<ExtractElementInst>(InVectors.front() 7584 .get<const TreeEntry *>() 7585 ->Scalars[P.index()]); 7586 return EI->getVectorOperand() == V1 || 7587 EI->getVectorOperand() == V2; 7588 }) && 7589 "Expected extractelement vectors."); 7590 } 7591 /// Adds another one input vector and the mask for the shuffling. 
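/// (Sketch of the behavior below: when \p ForExtracts is true this is
/// effectively a no-op, since adjustExtracts() already registered the
/// vectors; otherwise, once inputs exist, \p V1 is appended and the
/// still-poison lanes of CommonMask are filled from \p Mask, offset by the
/// accumulated VF.)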
7592 void add(Value *V1, ArrayRef<int> Mask, bool ForExtracts = false) { 7593 if (InVectors.empty()) { 7594 assert(CommonMask.empty() && !ForExtracts && 7595 "Expected empty input mask/vectors."); 7596 CommonMask.assign(Mask.begin(), Mask.end()); 7597 InVectors.assign(1, V1); 7598 return; 7599 } 7600 if (ForExtracts) { 7601 // No need to add vectors here, already handled them in adjustExtracts. 7602 assert(InVectors.size() == 1 && 7603 InVectors.front().is<const TreeEntry *>() && !CommonMask.empty() && 7604 all_of(enumerate(CommonMask), 7605 [&](auto P) { 7606 Value *Scalar = InVectors.front() 7607 .get<const TreeEntry *>() 7608 ->Scalars[P.index()]; 7609 if (P.value() == PoisonMaskElem) 7610 return P.value() == Mask[P.index()] || 7611 isa<UndefValue>(Scalar); 7612 if (isa<Constant>(V1)) 7613 return true; 7614 auto *EI = cast<ExtractElementInst>(Scalar); 7615 return EI->getVectorOperand() == V1; 7616 }) && 7617 "Expected only tree entry for extractelement vectors."); 7618 return; 7619 } 7620 assert(!InVectors.empty() && !CommonMask.empty() && 7621 "Expected only tree entries from extracts/reused buildvectors."); 7622 unsigned VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 7623 if (InVectors.size() == 2) { 7624 Cost += createShuffle(InVectors.front(), InVectors.back(), CommonMask); 7625 transformMaskAfterShuffle(CommonMask, CommonMask); 7626 VF = std::max<unsigned>(VF, CommonMask.size()); 7627 } else if (const auto *InTE = 7628 InVectors.front().dyn_cast<const TreeEntry *>()) { 7629 VF = std::max(VF, InTE->getVectorFactor()); 7630 } else { 7631 VF = std::max( 7632 VF, cast<FixedVectorType>(InVectors.front().get<Value *>()->getType()) 7633 ->getNumElements()); 7634 } 7635 InVectors.push_back(V1); 7636 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 7637 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem) 7638 CommonMask[Idx] = Mask[Idx] + VF; 7639 } 7640 Value *gather(ArrayRef<Value *> VL, unsigned MaskVF = 0, 7641 Value *Root = nullptr) { 7642 Cost += getBuildVectorCost(VL, Root); 7643 if (!Root) { 7644 // FIXME: Need to find a way to avoid use of getNullValue here. 7645 SmallVector<Constant *> Vals; 7646 unsigned VF = VL.size(); 7647 if (MaskVF != 0) 7648 VF = std::min(VF, MaskVF); 7649 for (Value *V : VL.take_front(VF)) { 7650 if (isa<UndefValue>(V)) { 7651 Vals.push_back(cast<Constant>(V)); 7652 continue; 7653 } 7654 Vals.push_back(Constant::getNullValue(V->getType())); 7655 } 7656 return ConstantVector::get(Vals); 7657 } 7658 return ConstantVector::getSplat( 7659 ElementCount::getFixed( 7660 cast<FixedVectorType>(Root->getType())->getNumElements()), 7661 getAllOnesValue(*R.DL, VL.front()->getType())); 7662 } 7663 InstructionCost createFreeze(InstructionCost Cost) { return Cost; } 7664 /// Finalize emission of the shuffles. 
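/// (Sketch: if an Action callback is supplied, the inputs gathered so far
/// are first combined with CommonMask and the callback may rewrite the
/// intermediate value/mask; \p ExtMask is then folded in and one final
/// shuffle cost is added if any mask remains.)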
7665 InstructionCost 7666 finalize(ArrayRef<int> ExtMask, unsigned VF = 0, 7667 function_ref<void(Value *&, SmallVectorImpl<int> &)> Action = {}) { 7668 IsFinalized = true; 7669 if (Action) { 7670 const PointerUnion<Value *, const TreeEntry *> &Vec = InVectors.front(); 7671 if (InVectors.size() == 2) 7672 Cost += createShuffle(Vec, InVectors.back(), CommonMask); 7673 else 7674 Cost += createShuffle(Vec, nullptr, CommonMask); 7675 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 7676 if (CommonMask[Idx] != PoisonMaskElem) 7677 CommonMask[Idx] = Idx; 7678 assert(VF > 0 && 7679 "Expected vector length for the final value before action."); 7680 Value *V = Vec.get<Value *>(); 7681 Action(V, CommonMask); 7682 InVectors.front() = V; 7683 } 7684 ::addMask(CommonMask, ExtMask, /*ExtendingManyInputs=*/true); 7685 if (CommonMask.empty()) { 7686 assert(InVectors.size() == 1 && "Expected only one vector with no mask"); 7687 return Cost; 7688 } 7689 return Cost + 7690 createShuffle(InVectors.front(), 7691 InVectors.size() == 2 ? InVectors.back() : nullptr, 7692 CommonMask); 7693 } 7694 7695 ~ShuffleCostEstimator() { 7696 assert((IsFinalized || CommonMask.empty()) && 7697 "Shuffle construction must be finalized."); 7698 } 7699 }; 7700 7701 const BoUpSLP::TreeEntry *BoUpSLP::getOperandEntry(const TreeEntry *E, 7702 unsigned Idx) const { 7703 Value *Op = E->getOperand(Idx).front(); 7704 if (const TreeEntry *TE = getTreeEntry(Op)) { 7705 if (find_if(E->UserTreeIndices, [&](const EdgeInfo &EI) { 7706 return EI.EdgeIdx == Idx && EI.UserTE == E; 7707 }) != TE->UserTreeIndices.end()) 7708 return TE; 7709 auto MIt = MultiNodeScalars.find(Op); 7710 if (MIt != MultiNodeScalars.end()) { 7711 for (const TreeEntry *TE : MIt->second) { 7712 if (find_if(TE->UserTreeIndices, [&](const EdgeInfo &EI) { 7713 return EI.EdgeIdx == Idx && EI.UserTE == E; 7714 }) != TE->UserTreeIndices.end()) 7715 return TE; 7716 } 7717 } 7718 } 7719 const auto *It = 7720 find_if(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) { 7721 return TE->State == TreeEntry::NeedToGather && 7722 find_if(TE->UserTreeIndices, [&](const EdgeInfo &EI) { 7723 return EI.EdgeIdx == Idx && EI.UserTE == E; 7724 }) != TE->UserTreeIndices.end(); 7725 }); 7726 assert(It != VectorizableTree.end() && "Expected vectorizable entry."); 7727 return It->get(); 7728 } 7729 7730 InstructionCost 7731 BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals, 7732 SmallPtrSetImpl<Value *> &CheckedExtracts) { 7733 ArrayRef<Value *> VL = E->Scalars; 7734 7735 Type *ScalarTy = VL[0]->getType(); 7736 if (E->State != TreeEntry::NeedToGather) { 7737 if (auto *SI = dyn_cast<StoreInst>(VL[0])) 7738 ScalarTy = SI->getValueOperand()->getType(); 7739 else if (auto *CI = dyn_cast<CmpInst>(VL[0])) 7740 ScalarTy = CI->getOperand(0)->getType(); 7741 else if (auto *IE = dyn_cast<InsertElementInst>(VL[0])) 7742 ScalarTy = IE->getOperand(1)->getType(); 7743 } 7744 if (!FixedVectorType::isValidElementType(ScalarTy)) 7745 return InstructionCost::getInvalid(); 7746 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 7747 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7748 7749 // If we have computed a smaller type for the expression, update VecTy so 7750 // that the costs will be accurate. 
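// E.g. (illustration only): if MinBWs records that this entry needs just 16
// bits, a bundle of four i32 operations is costed as <4 x i16> rather than
// <4 x i32>.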
7751 auto It = MinBWs.find(E); 7752 if (It != MinBWs.end()) { 7753 ScalarTy = IntegerType::get(F->getContext(), It->second.first); 7754 VecTy = FixedVectorType::get(ScalarTy, VL.size()); 7755 } 7756 unsigned EntryVF = E->getVectorFactor(); 7757 auto *FinalVecTy = FixedVectorType::get(ScalarTy, EntryVF); 7758 7759 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); 7760 if (E->State == TreeEntry::NeedToGather) { 7761 if (allConstant(VL)) 7762 return 0; 7763 if (isa<InsertElementInst>(VL[0])) 7764 return InstructionCost::getInvalid(); 7765 return processBuildVector<ShuffleCostEstimator, InstructionCost>( 7766 E, *TTI, VectorizedVals, *this, CheckedExtracts); 7767 } 7768 InstructionCost CommonCost = 0; 7769 SmallVector<int> Mask; 7770 if (!E->ReorderIndices.empty() && 7771 E->State != TreeEntry::PossibleStridedVectorize) { 7772 SmallVector<int> NewMask; 7773 if (E->getOpcode() == Instruction::Store) { 7774 // For stores the order is actually a mask. 7775 NewMask.resize(E->ReorderIndices.size()); 7776 copy(E->ReorderIndices, NewMask.begin()); 7777 } else { 7778 inversePermutation(E->ReorderIndices, NewMask); 7779 } 7780 ::addMask(Mask, NewMask); 7781 } 7782 if (NeedToShuffleReuses) 7783 ::addMask(Mask, E->ReuseShuffleIndices); 7784 if (!Mask.empty() && !ShuffleVectorInst::isIdentityMask(Mask, Mask.size())) 7785 CommonCost = 7786 TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FinalVecTy, Mask); 7787 assert((E->State == TreeEntry::Vectorize || 7788 E->State == TreeEntry::ScatterVectorize || 7789 E->State == TreeEntry::PossibleStridedVectorize) && 7790 "Unhandled state"); 7791 assert(E->getOpcode() && 7792 ((allSameType(VL) && allSameBlock(VL)) || 7793 (E->getOpcode() == Instruction::GetElementPtr && 7794 E->getMainOp()->getType()->isPointerTy())) && 7795 "Invalid VL"); 7796 Instruction *VL0 = E->getMainOp(); 7797 unsigned ShuffleOrOp = 7798 E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode(); 7799 SetVector<Value *> UniqueValues(VL.begin(), VL.end()); 7800 const unsigned Sz = UniqueValues.size(); 7801 SmallBitVector UsedScalars(Sz, false); 7802 for (unsigned I = 0; I < Sz; ++I) { 7803 if (getTreeEntry(UniqueValues[I]) == E) 7804 continue; 7805 UsedScalars.set(I); 7806 } 7807 auto GetCastContextHint = [&](Value *V) { 7808 if (const TreeEntry *OpTE = getTreeEntry(V)) { 7809 if (OpTE->State == TreeEntry::ScatterVectorize) 7810 return TTI::CastContextHint::GatherScatter; 7811 if (OpTE->State == TreeEntry::Vectorize && 7812 OpTE->getOpcode() == Instruction::Load && !OpTE->isAltShuffle()) { 7813 if (OpTE->ReorderIndices.empty()) 7814 return TTI::CastContextHint::Normal; 7815 SmallVector<int> Mask; 7816 inversePermutation(OpTE->ReorderIndices, Mask); 7817 if (ShuffleVectorInst::isReverseMask(Mask, Mask.size())) 7818 return TTI::CastContextHint::Reversed; 7819 } 7820 } else { 7821 InstructionsState SrcState = getSameOpcode(E->getOperand(0), *TLI); 7822 if (SrcState.getOpcode() == Instruction::Load && !SrcState.isAltShuffle()) 7823 return TTI::CastContextHint::GatherScatter; 7824 } 7825 return TTI::CastContextHint::None; 7826 }; 7827 auto GetCostDiff = 7828 [=](function_ref<InstructionCost(unsigned)> ScalarEltCost, 7829 function_ref<InstructionCost(InstructionCost)> VectorCost) { 7830 // Calculate the cost of this instruction. 
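// Note: the lambda returns VecCost - ScalarCost, so a negative result means
// the vectorized form is expected to be cheaper than the scalar instructions
// it replaces.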
7831 InstructionCost ScalarCost = 0; 7832 if (isa<CastInst, CmpInst, SelectInst, CallInst>(VL0)) { 7833 // For some of the instructions no need to calculate cost for each 7834 // particular instruction, we can use the cost of the single 7835 // instruction x total number of scalar instructions. 7836 ScalarCost = (Sz - UsedScalars.count()) * ScalarEltCost(0); 7837 } else { 7838 for (unsigned I = 0; I < Sz; ++I) { 7839 if (UsedScalars.test(I)) 7840 continue; 7841 ScalarCost += ScalarEltCost(I); 7842 } 7843 } 7844 7845 InstructionCost VecCost = VectorCost(CommonCost); 7846 // Check if the current node must be resized, if the parent node is not 7847 // resized. 7848 if (!UnaryInstruction::isCast(E->getOpcode()) && E->Idx != 0) { 7849 const EdgeInfo &EI = E->UserTreeIndices.front(); 7850 if ((EI.UserTE->getOpcode() != Instruction::Select || 7851 EI.EdgeIdx != 0) && 7852 It != MinBWs.end()) { 7853 auto UserBWIt = MinBWs.find(EI.UserTE); 7854 Type *UserScalarTy = 7855 EI.UserTE->getOperand(EI.EdgeIdx).front()->getType(); 7856 if (UserBWIt != MinBWs.end()) 7857 UserScalarTy = IntegerType::get(ScalarTy->getContext(), 7858 UserBWIt->second.first); 7859 if (ScalarTy != UserScalarTy) { 7860 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy); 7861 unsigned SrcBWSz = DL->getTypeSizeInBits(UserScalarTy); 7862 unsigned VecOpcode; 7863 auto *SrcVecTy = 7864 FixedVectorType::get(UserScalarTy, E->getVectorFactor()); 7865 if (BWSz > SrcBWSz) 7866 VecOpcode = Instruction::Trunc; 7867 else 7868 VecOpcode = 7869 It->second.second ? Instruction::SExt : Instruction::ZExt; 7870 TTI::CastContextHint CCH = GetCastContextHint(VL0); 7871 VecCost += TTI->getCastInstrCost(VecOpcode, VecTy, SrcVecTy, CCH, 7872 CostKind); 7873 ScalarCost += 7874 Sz * TTI->getCastInstrCost(VecOpcode, ScalarTy, UserScalarTy, 7875 CCH, CostKind); 7876 } 7877 } 7878 } 7879 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost - CommonCost, 7880 ScalarCost, "Calculated costs for Tree")); 7881 return VecCost - ScalarCost; 7882 }; 7883 // Calculate cost difference from vectorizing set of GEPs. 7884 // Negative value means vectorizing is profitable. 7885 auto GetGEPCostDiff = [=](ArrayRef<Value *> Ptrs, Value *BasePtr) { 7886 InstructionCost ScalarCost = 0; 7887 InstructionCost VecCost = 0; 7888 // Here we differentiate two cases: (1) when Ptrs represent a regular 7889 // vectorization tree node (as they are pointer arguments of scattered 7890 // loads) or (2) when Ptrs are the arguments of loads or stores being 7891 // vectorized as plane wide unit-stride load/store since all the 7892 // loads/stores are known to be from/to adjacent locations. 7893 assert(E->State == TreeEntry::Vectorize && 7894 "Entry state expected to be Vectorize here."); 7895 if (isa<LoadInst, StoreInst>(VL0)) { 7896 // Case 2: estimate costs for pointer related costs when vectorizing to 7897 // a wide load/store. 7898 // Scalar cost is estimated as a set of pointers with known relationship 7899 // between them. 7900 // For vector code we will use BasePtr as argument for the wide load/store 7901 // but we also need to account all the instructions which are going to 7902 // stay in vectorized code due to uses outside of these scalar 7903 // loads/stores. 
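// Illustration (hypothetical): for stores to p, p+1, p+2, p+3 only the base
// pointer feeds the wide store; single-use GEPs for the other lanes are
// expected to die, which is what the PtrsRetainedInVecCode filtering below
// models.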
7904 ScalarCost = TTI->getPointersChainCost( 7905 Ptrs, BasePtr, TTI::PointersChainInfo::getUnitStride(), ScalarTy, 7906 CostKind); 7907 7908 SmallVector<const Value *> PtrsRetainedInVecCode; 7909 for (Value *V : Ptrs) { 7910 if (V == BasePtr) { 7911 PtrsRetainedInVecCode.push_back(V); 7912 continue; 7913 } 7914 auto *Ptr = dyn_cast<GetElementPtrInst>(V); 7915 // For simplicity assume Ptr to stay in vectorized code if it's not a 7916 // GEP instruction. We don't care since it's cost considered free. 7917 // TODO: We should check for any uses outside of vectorizable tree 7918 // rather than just single use. 7919 if (!Ptr || !Ptr->hasOneUse()) 7920 PtrsRetainedInVecCode.push_back(V); 7921 } 7922 7923 if (PtrsRetainedInVecCode.size() == Ptrs.size()) { 7924 // If all pointers stay in vectorized code then we don't have 7925 // any savings on that. 7926 LLVM_DEBUG(dumpTreeCosts(E, 0, ScalarCost, ScalarCost, 7927 "Calculated GEPs cost for Tree")); 7928 return InstructionCost{TTI::TCC_Free}; 7929 } 7930 VecCost = TTI->getPointersChainCost( 7931 PtrsRetainedInVecCode, BasePtr, 7932 TTI::PointersChainInfo::getKnownStride(), VecTy, CostKind); 7933 } else { 7934 // Case 1: Ptrs are the arguments of loads that we are going to transform 7935 // into masked gather load intrinsic. 7936 // All the scalar GEPs will be removed as a result of vectorization. 7937 // For any external uses of some lanes extract element instructions will 7938 // be generated (which cost is estimated separately). 7939 TTI::PointersChainInfo PtrsInfo = 7940 all_of(Ptrs, 7941 [](const Value *V) { 7942 auto *Ptr = dyn_cast<GetElementPtrInst>(V); 7943 return Ptr && !Ptr->hasAllConstantIndices(); 7944 }) 7945 ? TTI::PointersChainInfo::getUnknownStride() 7946 : TTI::PointersChainInfo::getKnownStride(); 7947 7948 ScalarCost = TTI->getPointersChainCost(Ptrs, BasePtr, PtrsInfo, ScalarTy, 7949 CostKind); 7950 if (auto *BaseGEP = dyn_cast<GEPOperator>(BasePtr)) { 7951 SmallVector<const Value *> Indices(BaseGEP->indices()); 7952 VecCost = TTI->getGEPCost(BaseGEP->getSourceElementType(), 7953 BaseGEP->getPointerOperand(), Indices, VecTy, 7954 CostKind); 7955 } 7956 } 7957 7958 LLVM_DEBUG(dumpTreeCosts(E, 0, VecCost, ScalarCost, 7959 "Calculated GEPs cost for Tree")); 7960 7961 return VecCost - ScalarCost; 7962 }; 7963 7964 switch (ShuffleOrOp) { 7965 case Instruction::PHI: { 7966 // Count reused scalars. 
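// Vectorized PHIs add no extra vector cost in this case; the only scalar
// credit taken below is TCC_Basic per extra reuse-shuffled operand lane,
// so the case returns CommonCost minus that credit.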
7967 InstructionCost ScalarCost = 0; 7968 SmallPtrSet<const TreeEntry *, 4> CountedOps; 7969 for (Value *V : UniqueValues) { 7970 auto *PHI = dyn_cast<PHINode>(V); 7971 if (!PHI) 7972 continue; 7973 7974 ValueList Operands(PHI->getNumIncomingValues(), nullptr); 7975 for (unsigned I = 0, N = PHI->getNumIncomingValues(); I < N; ++I) { 7976 Value *Op = PHI->getIncomingValue(I); 7977 Operands[I] = Op; 7978 } 7979 if (const TreeEntry *OpTE = getTreeEntry(Operands.front())) 7980 if (OpTE->isSame(Operands) && CountedOps.insert(OpTE).second) 7981 if (!OpTE->ReuseShuffleIndices.empty()) 7982 ScalarCost += TTI::TCC_Basic * (OpTE->ReuseShuffleIndices.size() - 7983 OpTE->Scalars.size()); 7984 } 7985 7986 return CommonCost - ScalarCost; 7987 } 7988 case Instruction::ExtractValue: 7989 case Instruction::ExtractElement: { 7990 auto GetScalarCost = [&](unsigned Idx) { 7991 auto *I = cast<Instruction>(UniqueValues[Idx]); 7992 VectorType *SrcVecTy; 7993 if (ShuffleOrOp == Instruction::ExtractElement) { 7994 auto *EE = cast<ExtractElementInst>(I); 7995 SrcVecTy = EE->getVectorOperandType(); 7996 } else { 7997 auto *EV = cast<ExtractValueInst>(I); 7998 Type *AggregateTy = EV->getAggregateOperand()->getType(); 7999 unsigned NumElts; 8000 if (auto *ATy = dyn_cast<ArrayType>(AggregateTy)) 8001 NumElts = ATy->getNumElements(); 8002 else 8003 NumElts = AggregateTy->getStructNumElements(); 8004 SrcVecTy = FixedVectorType::get(ScalarTy, NumElts); 8005 } 8006 if (I->hasOneUse()) { 8007 Instruction *Ext = I->user_back(); 8008 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 8009 all_of(Ext->users(), 8010 [](User *U) { return isa<GetElementPtrInst>(U); })) { 8011 // Use getExtractWithExtendCost() to calculate the cost of 8012 // extractelement/ext pair. 8013 InstructionCost Cost = TTI->getExtractWithExtendCost( 8014 Ext->getOpcode(), Ext->getType(), SrcVecTy, *getExtractIndex(I)); 8015 // Subtract the cost of s|zext which is subtracted separately. 
8016 Cost -= TTI->getCastInstrCost( 8017 Ext->getOpcode(), Ext->getType(), I->getType(), 8018 TTI::getCastContextHint(Ext), CostKind, Ext); 8019 return Cost; 8020 } 8021 } 8022 return TTI->getVectorInstrCost(Instruction::ExtractElement, SrcVecTy, 8023 CostKind, *getExtractIndex(I)); 8024 }; 8025 auto GetVectorCost = [](InstructionCost CommonCost) { return CommonCost; }; 8026 return GetCostDiff(GetScalarCost, GetVectorCost); 8027 } 8028 case Instruction::InsertElement: { 8029 assert(E->ReuseShuffleIndices.empty() && 8030 "Unique insertelements only are expected."); 8031 auto *SrcVecTy = cast<FixedVectorType>(VL0->getType()); 8032 unsigned const NumElts = SrcVecTy->getNumElements(); 8033 unsigned const NumScalars = VL.size(); 8034 8035 unsigned NumOfParts = TTI->getNumberOfParts(SrcVecTy); 8036 8037 SmallVector<int> InsertMask(NumElts, PoisonMaskElem); 8038 unsigned OffsetBeg = *getInsertIndex(VL.front()); 8039 unsigned OffsetEnd = OffsetBeg; 8040 InsertMask[OffsetBeg] = 0; 8041 for (auto [I, V] : enumerate(VL.drop_front())) { 8042 unsigned Idx = *getInsertIndex(V); 8043 if (OffsetBeg > Idx) 8044 OffsetBeg = Idx; 8045 else if (OffsetEnd < Idx) 8046 OffsetEnd = Idx; 8047 InsertMask[Idx] = I + 1; 8048 } 8049 unsigned VecScalarsSz = PowerOf2Ceil(NumElts); 8050 if (NumOfParts > 0) 8051 VecScalarsSz = PowerOf2Ceil((NumElts + NumOfParts - 1) / NumOfParts); 8052 unsigned VecSz = (1 + OffsetEnd / VecScalarsSz - OffsetBeg / VecScalarsSz) * 8053 VecScalarsSz; 8054 unsigned Offset = VecScalarsSz * (OffsetBeg / VecScalarsSz); 8055 unsigned InsertVecSz = std::min<unsigned>( 8056 PowerOf2Ceil(OffsetEnd - OffsetBeg + 1), 8057 ((OffsetEnd - OffsetBeg + VecScalarsSz) / VecScalarsSz) * VecScalarsSz); 8058 bool IsWholeSubvector = 8059 OffsetBeg == Offset && ((OffsetEnd + 1) % VecScalarsSz == 0); 8060 // Check if we can safely insert a subvector. If it is not possible, just 8061 // generate a whole-sized vector and shuffle the source vector and the new 8062 // subvector. 8063 if (OffsetBeg + InsertVecSz > VecSz) { 8064 // Align OffsetBeg to generate correct mask. 8065 OffsetBeg = alignDown(OffsetBeg, VecSz, Offset); 8066 InsertVecSz = VecSz; 8067 } 8068 8069 APInt DemandedElts = APInt::getZero(NumElts); 8070 // TODO: Add support for Instruction::InsertValue. 8071 SmallVector<int> Mask; 8072 if (!E->ReorderIndices.empty()) { 8073 inversePermutation(E->ReorderIndices, Mask); 8074 Mask.append(InsertVecSz - Mask.size(), PoisonMaskElem); 8075 } else { 8076 Mask.assign(VecSz, PoisonMaskElem); 8077 std::iota(Mask.begin(), std::next(Mask.begin(), InsertVecSz), 0); 8078 } 8079 bool IsIdentity = true; 8080 SmallVector<int> PrevMask(InsertVecSz, PoisonMaskElem); 8081 Mask.swap(PrevMask); 8082 for (unsigned I = 0; I < NumScalars; ++I) { 8083 unsigned InsertIdx = *getInsertIndex(VL[PrevMask[I]]); 8084 DemandedElts.setBit(InsertIdx); 8085 IsIdentity &= InsertIdx - OffsetBeg == I; 8086 Mask[InsertIdx - OffsetBeg] = I; 8087 } 8088 assert(Offset < NumElts && "Failed to find vector index offset"); 8089 8090 InstructionCost Cost = 0; 8091 Cost -= TTI->getScalarizationOverhead(SrcVecTy, DemandedElts, 8092 /*Insert*/ true, /*Extract*/ false, 8093 CostKind); 8094 8095 // First cost - resize to actual vector size if not identity shuffle or 8096 // need to shift the vector. 8097 // Do not calculate the cost if the actual size is the register size and 8098 // we can merge this shuffle with the following SK_Select. 
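// Illustrative numbers (hypothetical): inserting scalars into lanes 2..5 of
// a <8 x float> gives OffsetBeg = 2, OffsetEnd = 5 and InsertVecSz = 4; if
// the inserts are out of lane order, the resulting non-identity mask incurs
// the SK_PermuteSingleSrc resize cost below.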
8099 auto *InsertVecTy = FixedVectorType::get(ScalarTy, InsertVecSz); 8100 if (!IsIdentity) 8101 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, 8102 InsertVecTy, Mask); 8103 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) { 8104 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0)); 8105 })); 8106 // Second cost - permutation with subvector, if some elements are from the 8107 // initial vector or inserting a subvector. 8108 // TODO: Implement the analysis of the FirstInsert->getOperand(0) 8109 // subvector of ActualVecTy. 8110 SmallBitVector InMask = 8111 isUndefVector(FirstInsert->getOperand(0), 8112 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask)); 8113 if (!InMask.all() && NumScalars != NumElts && !IsWholeSubvector) { 8114 if (InsertVecSz != VecSz) { 8115 auto *ActualVecTy = FixedVectorType::get(ScalarTy, VecSz); 8116 Cost += TTI->getShuffleCost(TTI::SK_InsertSubvector, ActualVecTy, 8117 std::nullopt, CostKind, OffsetBeg - Offset, 8118 InsertVecTy); 8119 } else { 8120 for (unsigned I = 0, End = OffsetBeg - Offset; I < End; ++I) 8121 Mask[I] = InMask.test(I) ? PoisonMaskElem : I; 8122 for (unsigned I = OffsetBeg - Offset, End = OffsetEnd - Offset; 8123 I <= End; ++I) 8124 if (Mask[I] != PoisonMaskElem) 8125 Mask[I] = I + VecSz; 8126 for (unsigned I = OffsetEnd + 1 - Offset; I < VecSz; ++I) 8127 Mask[I] = 8128 ((I >= InMask.size()) || InMask.test(I)) ? PoisonMaskElem : I; 8129 Cost += 8130 ::getShuffleCost(*TTI, TTI::SK_PermuteTwoSrc, InsertVecTy, Mask); 8131 } 8132 } 8133 return Cost; 8134 } 8135 case Instruction::ZExt: 8136 case Instruction::SExt: 8137 case Instruction::FPToUI: 8138 case Instruction::FPToSI: 8139 case Instruction::FPExt: 8140 case Instruction::PtrToInt: 8141 case Instruction::IntToPtr: 8142 case Instruction::SIToFP: 8143 case Instruction::UIToFP: 8144 case Instruction::Trunc: 8145 case Instruction::FPTrunc: 8146 case Instruction::BitCast: { 8147 auto SrcIt = MinBWs.find(getOperandEntry(E, 0)); 8148 Type *SrcScalarTy = VL0->getOperand(0)->getType(); 8149 auto *SrcVecTy = FixedVectorType::get(SrcScalarTy, VL.size()); 8150 unsigned Opcode = ShuffleOrOp; 8151 unsigned VecOpcode = Opcode; 8152 if (!ScalarTy->isFloatingPointTy() && !SrcScalarTy->isFloatingPointTy() && 8153 (SrcIt != MinBWs.end() || It != MinBWs.end())) { 8154 // Check if the values are candidates to demote. 8155 unsigned SrcBWSz = DL->getTypeSizeInBits(SrcScalarTy); 8156 if (SrcIt != MinBWs.end()) { 8157 SrcBWSz = SrcIt->second.first; 8158 SrcScalarTy = IntegerType::get(F->getContext(), SrcBWSz); 8159 SrcVecTy = FixedVectorType::get(SrcScalarTy, VL.size()); 8160 } 8161 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy); 8162 if (BWSz == SrcBWSz) { 8163 VecOpcode = Instruction::BitCast; 8164 } else if (BWSz < SrcBWSz) { 8165 VecOpcode = Instruction::Trunc; 8166 } else if (It != MinBWs.end()) { 8167 assert(BWSz > SrcBWSz && "Invalid cast!"); 8168 VecOpcode = It->second.second ? Instruction::SExt : Instruction::ZExt; 8169 } 8170 } 8171 auto GetScalarCost = [&](unsigned Idx) -> InstructionCost { 8172 // Do not count cost here if minimum bitwidth is in effect and it is just 8173 // a bitcast (here it is just a noop). 8174 if (VecOpcode != Opcode && VecOpcode == Instruction::BitCast) 8175 return TTI::TCC_Free; 8176 auto *VI = VL0->getOpcode() == Opcode 8177 ? 
cast<Instruction>(UniqueValues[Idx]) 8178 : nullptr; 8179 return TTI->getCastInstrCost(Opcode, VL0->getType(), 8180 VL0->getOperand(0)->getType(), 8181 TTI::getCastContextHint(VI), CostKind, VI); 8182 }; 8183 auto GetVectorCost = [=](InstructionCost CommonCost) { 8184 // Do not count cost here if minimum bitwidth is in effect and it is just 8185 // a bitcast (here it is just a noop). 8186 if (VecOpcode != Opcode && VecOpcode == Instruction::BitCast) 8187 return CommonCost; 8188 auto *VI = VL0->getOpcode() == Opcode ? VL0 : nullptr; 8189 TTI::CastContextHint CCH = GetCastContextHint(VL0->getOperand(0)); 8190 return CommonCost + 8191 TTI->getCastInstrCost(VecOpcode, VecTy, SrcVecTy, CCH, CostKind, 8192 VecOpcode == Opcode ? VI : nullptr); 8193 }; 8194 return GetCostDiff(GetScalarCost, GetVectorCost); 8195 } 8196 case Instruction::FCmp: 8197 case Instruction::ICmp: 8198 case Instruction::Select: { 8199 CmpInst::Predicate VecPred, SwappedVecPred; 8200 auto MatchCmp = m_Cmp(VecPred, m_Value(), m_Value()); 8201 if (match(VL0, m_Select(MatchCmp, m_Value(), m_Value())) || 8202 match(VL0, MatchCmp)) 8203 SwappedVecPred = CmpInst::getSwappedPredicate(VecPred); 8204 else 8205 SwappedVecPred = VecPred = ScalarTy->isFloatingPointTy() 8206 ? CmpInst::BAD_FCMP_PREDICATE 8207 : CmpInst::BAD_ICMP_PREDICATE; 8208 auto GetScalarCost = [&](unsigned Idx) { 8209 auto *VI = cast<Instruction>(UniqueValues[Idx]); 8210 CmpInst::Predicate CurrentPred = ScalarTy->isFloatingPointTy() 8211 ? CmpInst::BAD_FCMP_PREDICATE 8212 : CmpInst::BAD_ICMP_PREDICATE; 8213 auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value()); 8214 if ((!match(VI, m_Select(MatchCmp, m_Value(), m_Value())) && 8215 !match(VI, MatchCmp)) || 8216 (CurrentPred != VecPred && CurrentPred != SwappedVecPred)) 8217 VecPred = SwappedVecPred = ScalarTy->isFloatingPointTy() 8218 ? CmpInst::BAD_FCMP_PREDICATE 8219 : CmpInst::BAD_ICMP_PREDICATE; 8220 8221 return TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy, 8222 Builder.getInt1Ty(), CurrentPred, CostKind, 8223 VI); 8224 }; 8225 auto GetVectorCost = [&](InstructionCost CommonCost) { 8226 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size()); 8227 8228 InstructionCost VecCost = TTI->getCmpSelInstrCost( 8229 E->getOpcode(), VecTy, MaskTy, VecPred, CostKind, VL0); 8230 // Check if it is possible and profitable to use min/max for selects 8231 // in VL. 8232 // 8233 auto IntrinsicAndUse = canConvertToMinOrMaxIntrinsic(VL); 8234 if (IntrinsicAndUse.first != Intrinsic::not_intrinsic) { 8235 IntrinsicCostAttributes CostAttrs(IntrinsicAndUse.first, VecTy, 8236 {VecTy, VecTy}); 8237 InstructionCost IntrinsicCost = 8238 TTI->getIntrinsicInstrCost(CostAttrs, CostKind); 8239 // If the selects are the only uses of the compares, they will be 8240 // dead and we can adjust the cost by removing their cost. 
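// E.g. a bundle of `%c = icmp slt ...; %s = select i1 %c, ...` pairs may be
// costed as a single smin/smax-style intrinsic; the compare cost is only
// subtracted when the selects are the sole users of the compares.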
8241 if (IntrinsicAndUse.second) 8242 IntrinsicCost -= TTI->getCmpSelInstrCost(Instruction::ICmp, VecTy, 8243 MaskTy, VecPred, CostKind); 8244 VecCost = std::min(VecCost, IntrinsicCost); 8245 } 8246 return VecCost + CommonCost; 8247 }; 8248 return GetCostDiff(GetScalarCost, GetVectorCost); 8249 } 8250 case Instruction::FNeg: 8251 case Instruction::Add: 8252 case Instruction::FAdd: 8253 case Instruction::Sub: 8254 case Instruction::FSub: 8255 case Instruction::Mul: 8256 case Instruction::FMul: 8257 case Instruction::UDiv: 8258 case Instruction::SDiv: 8259 case Instruction::FDiv: 8260 case Instruction::URem: 8261 case Instruction::SRem: 8262 case Instruction::FRem: 8263 case Instruction::Shl: 8264 case Instruction::LShr: 8265 case Instruction::AShr: 8266 case Instruction::And: 8267 case Instruction::Or: 8268 case Instruction::Xor: { 8269 auto GetScalarCost = [&](unsigned Idx) { 8270 auto *VI = cast<Instruction>(UniqueValues[Idx]); 8271 unsigned OpIdx = isa<UnaryOperator>(VI) ? 0 : 1; 8272 TTI::OperandValueInfo Op1Info = TTI::getOperandInfo(VI->getOperand(0)); 8273 TTI::OperandValueInfo Op2Info = 8274 TTI::getOperandInfo(VI->getOperand(OpIdx)); 8275 SmallVector<const Value *> Operands(VI->operand_values()); 8276 return TTI->getArithmeticInstrCost(ShuffleOrOp, ScalarTy, CostKind, 8277 Op1Info, Op2Info, Operands, VI); 8278 }; 8279 auto GetVectorCost = [=](InstructionCost CommonCost) { 8280 unsigned OpIdx = isa<UnaryOperator>(VL0) ? 0 : 1; 8281 TTI::OperandValueInfo Op1Info = getOperandInfo(E->getOperand(0)); 8282 TTI::OperandValueInfo Op2Info = getOperandInfo(E->getOperand(OpIdx)); 8283 return TTI->getArithmeticInstrCost(ShuffleOrOp, VecTy, CostKind, Op1Info, 8284 Op2Info) + 8285 CommonCost; 8286 }; 8287 return GetCostDiff(GetScalarCost, GetVectorCost); 8288 } 8289 case Instruction::GetElementPtr: { 8290 return CommonCost + GetGEPCostDiff(VL, VL0); 8291 } 8292 case Instruction::Load: { 8293 auto GetScalarCost = [&](unsigned Idx) { 8294 auto *VI = cast<LoadInst>(UniqueValues[Idx]); 8295 return TTI->getMemoryOpCost(Instruction::Load, ScalarTy, VI->getAlign(), 8296 VI->getPointerAddressSpace(), CostKind, 8297 TTI::OperandValueInfo(), VI); 8298 }; 8299 auto *LI0 = cast<LoadInst>(VL0); 8300 auto GetVectorCost = [&](InstructionCost CommonCost) { 8301 InstructionCost VecLdCost; 8302 if (E->State == TreeEntry::Vectorize) { 8303 VecLdCost = TTI->getMemoryOpCost( 8304 Instruction::Load, VecTy, LI0->getAlign(), 8305 LI0->getPointerAddressSpace(), CostKind, TTI::OperandValueInfo()); 8306 } else { 8307 assert((E->State == TreeEntry::ScatterVectorize || 8308 E->State == TreeEntry::PossibleStridedVectorize) && 8309 "Unknown EntryState"); 8310 Align CommonAlignment = LI0->getAlign(); 8311 for (Value *V : UniqueValues) 8312 CommonAlignment = 8313 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign()); 8314 VecLdCost = TTI->getGatherScatterOpCost( 8315 Instruction::Load, VecTy, LI0->getPointerOperand(), 8316 /*VariableMask=*/false, CommonAlignment, CostKind); 8317 } 8318 return VecLdCost + CommonCost; 8319 }; 8320 8321 InstructionCost Cost = GetCostDiff(GetScalarCost, GetVectorCost); 8322 // If this node generates masked gather load then it is not a terminal node. 8323 // Hence address operand cost is estimated separately. 8324 if (E->State == TreeEntry::ScatterVectorize || 8325 E->State == TreeEntry::PossibleStridedVectorize) 8326 return Cost; 8327 8328 // Estimate cost of GEPs since this tree node is a terminator. 
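// (The scalar loads' pointer operands are collected below and passed to
// GetGEPCostDiff, so address-computation savings are credited to this load
// bundle.)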
8329 SmallVector<Value *> PointerOps(VL.size()); 8330 for (auto [I, V] : enumerate(VL)) 8331 PointerOps[I] = cast<LoadInst>(V)->getPointerOperand(); 8332 return Cost + GetGEPCostDiff(PointerOps, LI0->getPointerOperand()); 8333 } 8334 case Instruction::Store: { 8335 bool IsReorder = !E->ReorderIndices.empty(); 8336 auto GetScalarCost = [=](unsigned Idx) { 8337 auto *VI = cast<StoreInst>(VL[Idx]); 8338 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(VI->getValueOperand()); 8339 return TTI->getMemoryOpCost(Instruction::Store, ScalarTy, VI->getAlign(), 8340 VI->getPointerAddressSpace(), CostKind, 8341 OpInfo, VI); 8342 }; 8343 auto *BaseSI = 8344 cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0); 8345 auto GetVectorCost = [=](InstructionCost CommonCost) { 8346 // We know that we can merge the stores. Calculate the cost. 8347 TTI::OperandValueInfo OpInfo = getOperandInfo(E->getOperand(0)); 8348 return TTI->getMemoryOpCost(Instruction::Store, VecTy, BaseSI->getAlign(), 8349 BaseSI->getPointerAddressSpace(), CostKind, 8350 OpInfo) + 8351 CommonCost; 8352 }; 8353 SmallVector<Value *> PointerOps(VL.size()); 8354 for (auto [I, V] : enumerate(VL)) { 8355 unsigned Idx = IsReorder ? E->ReorderIndices[I] : I; 8356 PointerOps[Idx] = cast<StoreInst>(V)->getPointerOperand(); 8357 } 8358 8359 return GetCostDiff(GetScalarCost, GetVectorCost) + 8360 GetGEPCostDiff(PointerOps, BaseSI->getPointerOperand()); 8361 } 8362 case Instruction::Call: { 8363 auto GetScalarCost = [&](unsigned Idx) { 8364 auto *CI = cast<CallInst>(UniqueValues[Idx]); 8365 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8366 if (ID != Intrinsic::not_intrinsic) { 8367 IntrinsicCostAttributes CostAttrs(ID, *CI, 1); 8368 return TTI->getIntrinsicInstrCost(CostAttrs, CostKind); 8369 } 8370 return TTI->getCallInstrCost(CI->getCalledFunction(), 8371 CI->getFunctionType()->getReturnType(), 8372 CI->getFunctionType()->params(), CostKind); 8373 }; 8374 auto GetVectorCost = [=](InstructionCost CommonCost) { 8375 auto *CI = cast<CallInst>(VL0); 8376 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); 8377 return std::min(VecCallCosts.first, VecCallCosts.second) + CommonCost; 8378 }; 8379 return GetCostDiff(GetScalarCost, GetVectorCost); 8380 } 8381 case Instruction::ShuffleVector: { 8382 assert(E->isAltShuffle() && 8383 ((Instruction::isBinaryOp(E->getOpcode()) && 8384 Instruction::isBinaryOp(E->getAltOpcode())) || 8385 (Instruction::isCast(E->getOpcode()) && 8386 Instruction::isCast(E->getAltOpcode())) || 8387 (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) && 8388 "Invalid Shuffle Vector Operand"); 8389 // Try to find the previous shuffle node with the same operands and same 8390 // main/alternate ops. 8391 auto TryFindNodeWithEqualOperands = [=]() { 8392 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) { 8393 if (TE.get() == E) 8394 break; 8395 if (TE->isAltShuffle() && 8396 ((TE->getOpcode() == E->getOpcode() && 8397 TE->getAltOpcode() == E->getAltOpcode()) || 8398 (TE->getOpcode() == E->getAltOpcode() && 8399 TE->getAltOpcode() == E->getOpcode())) && 8400 TE->hasEqualOperands(*E)) 8401 return true; 8402 } 8403 return false; 8404 }; 8405 auto GetScalarCost = [&](unsigned Idx) { 8406 auto *VI = cast<Instruction>(UniqueValues[Idx]); 8407 assert(E->isOpcodeOrAlt(VI) && "Unexpected main/alternate opcode"); 8408 (void)E; 8409 return TTI->getInstructionCost(VI, CostKind); 8410 }; 8411 // FIXME: Workaround for syntax error reported by MSVC buildbots. 
8412 TargetTransformInfo &TTIRef = *TTI; 8413 // Need to clear CommonCost since the final shuffle cost is included into 8414 // vector cost. 8415 auto GetVectorCost = [&](InstructionCost) { 8416 // VecCost is equal to sum of the cost of creating 2 vectors 8417 // and the cost of creating shuffle. 8418 InstructionCost VecCost = 0; 8419 if (TryFindNodeWithEqualOperands()) { 8420 LLVM_DEBUG({ 8421 dbgs() << "SLP: diamond match for alternate node found.\n"; 8422 E->dump(); 8423 }); 8424 // No need to add new vector costs here since we're going to reuse 8425 // same main/alternate vector ops, just do different shuffling. 8426 } else if (Instruction::isBinaryOp(E->getOpcode())) { 8427 VecCost = 8428 TTIRef.getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind); 8429 VecCost += 8430 TTIRef.getArithmeticInstrCost(E->getAltOpcode(), VecTy, CostKind); 8431 } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) { 8432 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size()); 8433 VecCost = TTIRef.getCmpSelInstrCost(E->getOpcode(), VecTy, MaskTy, 8434 CI0->getPredicate(), CostKind, VL0); 8435 VecCost += TTIRef.getCmpSelInstrCost( 8436 E->getOpcode(), VecTy, MaskTy, 8437 cast<CmpInst>(E->getAltOp())->getPredicate(), CostKind, 8438 E->getAltOp()); 8439 } else { 8440 Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType(); 8441 Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType(); 8442 auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size()); 8443 auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size()); 8444 VecCost = TTIRef.getCastInstrCost(E->getOpcode(), VecTy, Src0Ty, 8445 TTI::CastContextHint::None, CostKind); 8446 VecCost += 8447 TTIRef.getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty, 8448 TTI::CastContextHint::None, CostKind); 8449 } 8450 SmallVector<int> Mask; 8451 E->buildAltOpShuffleMask( 8452 [E](Instruction *I) { 8453 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 8454 return I->getOpcode() == E->getAltOpcode(); 8455 }, 8456 Mask); 8457 VecCost += ::getShuffleCost(TTIRef, TargetTransformInfo::SK_PermuteTwoSrc, 8458 FinalVecTy, Mask); 8459 // Patterns like [fadd,fsub] can be combined into a single instruction 8460 // in x86. Reordering them into [fsub,fadd] blocks this pattern. So we 8461 // need to take into account their order when looking for the most used 8462 // order. 8463 unsigned Opcode0 = E->getOpcode(); 8464 unsigned Opcode1 = E->getAltOpcode(); 8465 // The opcode mask selects between the two opcodes. 8466 SmallBitVector OpcodeMask(E->Scalars.size(), false); 8467 for (unsigned Lane : seq<unsigned>(0, E->Scalars.size())) 8468 if (cast<Instruction>(E->Scalars[Lane])->getOpcode() == Opcode1) 8469 OpcodeMask.set(Lane); 8470 // If this pattern is supported by the target then we consider the 8471 // order. 8472 if (TTIRef.isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask)) { 8473 InstructionCost AltVecCost = TTIRef.getAltInstrCost( 8474 VecTy, Opcode0, Opcode1, OpcodeMask, CostKind); 8475 return AltVecCost < VecCost ? AltVecCost : VecCost; 8476 } 8477 // TODO: Check the reverse order too. 
8478 return VecCost; 8479 }; 8480 return GetCostDiff(GetScalarCost, GetVectorCost); 8481 } 8482 default: 8483 llvm_unreachable("Unknown instruction"); 8484 } 8485 } 8486 8487 bool BoUpSLP::isFullyVectorizableTinyTree(bool ForReduction) const { 8488 LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height " 8489 << VectorizableTree.size() << " is fully vectorizable .\n"); 8490 8491 auto &&AreVectorizableGathers = [this](const TreeEntry *TE, unsigned Limit) { 8492 SmallVector<int> Mask; 8493 return TE->State == TreeEntry::NeedToGather && 8494 !any_of(TE->Scalars, 8495 [this](Value *V) { return EphValues.contains(V); }) && 8496 (allConstant(TE->Scalars) || isSplat(TE->Scalars) || 8497 TE->Scalars.size() < Limit || 8498 ((TE->getOpcode() == Instruction::ExtractElement || 8499 all_of(TE->Scalars, 8500 [](Value *V) { 8501 return isa<ExtractElementInst, UndefValue>(V); 8502 })) && 8503 isFixedVectorShuffle(TE->Scalars, Mask)) || 8504 (TE->State == TreeEntry::NeedToGather && 8505 TE->getOpcode() == Instruction::Load && !TE->isAltShuffle())); 8506 }; 8507 8508 // We only handle trees of heights 1 and 2. 8509 if (VectorizableTree.size() == 1 && 8510 (VectorizableTree[0]->State == TreeEntry::Vectorize || 8511 (ForReduction && 8512 AreVectorizableGathers(VectorizableTree[0].get(), 8513 VectorizableTree[0]->Scalars.size()) && 8514 VectorizableTree[0]->getVectorFactor() > 2))) 8515 return true; 8516 8517 if (VectorizableTree.size() != 2) 8518 return false; 8519 8520 // Handle splat and all-constants stores. Also try to vectorize tiny trees 8521 // with the second gather nodes if they have less scalar operands rather than 8522 // the initial tree element (may be profitable to shuffle the second gather) 8523 // or they are extractelements, which form shuffle. 8524 SmallVector<int> Mask; 8525 if (VectorizableTree[0]->State == TreeEntry::Vectorize && 8526 AreVectorizableGathers(VectorizableTree[1].get(), 8527 VectorizableTree[0]->Scalars.size())) 8528 return true; 8529 8530 // Gathering cost would be too much for tiny trees. 8531 if (VectorizableTree[0]->State == TreeEntry::NeedToGather || 8532 (VectorizableTree[1]->State == TreeEntry::NeedToGather && 8533 VectorizableTree[0]->State != TreeEntry::ScatterVectorize && 8534 VectorizableTree[0]->State != TreeEntry::PossibleStridedVectorize)) 8535 return false; 8536 8537 return true; 8538 } 8539 8540 static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts, 8541 TargetTransformInfo *TTI, 8542 bool MustMatchOrInst) { 8543 // Look past the root to find a source value. Arbitrarily follow the 8544 // path through operand 0 of any 'or'. Also, peek through optional 8545 // shift-left-by-multiple-of-8-bits. 8546 Value *ZextLoad = Root; 8547 const APInt *ShAmtC; 8548 bool FoundOr = false; 8549 while (!isa<ConstantExpr>(ZextLoad) && 8550 (match(ZextLoad, m_Or(m_Value(), m_Value())) || 8551 (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) && 8552 ShAmtC->urem(8) == 0))) { 8553 auto *BinOp = cast<BinaryOperator>(ZextLoad); 8554 ZextLoad = BinOp->getOperand(0); 8555 if (BinOp->getOpcode() == Instruction::Or) 8556 FoundOr = true; 8557 } 8558 // Check if the input is an extended load of the required or/shift expression. 8559 Value *Load; 8560 if ((MustMatchOrInst && !FoundOr) || ZextLoad == Root || 8561 !match(ZextLoad, m_ZExt(m_Value(Load))) || !isa<LoadInst>(Load)) 8562 return false; 8563 8564 // Require that the total load bit width is a legal integer type. 8565 // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target. 
8566 // But <16 x i8> --> i128 is not, so the backend probably can't reduce it.
8567 Type *SrcTy = Load->getType();
8568 unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts;
8569 if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth)))
8570 return false;
8571
8572 // Everything matched - assume that we can fold the whole sequence using
8573 // load combining.
8574 LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at "
8575 << *(cast<Instruction>(Root)) << "\n");
8576
8577 return true;
8578 }
8579
8580 bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const {
8581 if (RdxKind != RecurKind::Or)
8582 return false;
8583
8584 unsigned NumElts = VectorizableTree[0]->Scalars.size();
8585 Value *FirstReduced = VectorizableTree[0]->Scalars[0];
8586 return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI,
8587 /* MatchOr */ false);
8588 }
8589
8590 bool BoUpSLP::isLoadCombineCandidate() const {
8591 // Peek through a final sequence of stores and check if all operations are
8592 // likely to be load-combined.
8593 unsigned NumElts = VectorizableTree[0]->Scalars.size();
8594 for (Value *Scalar : VectorizableTree[0]->Scalars) {
8595 Value *X;
8596 if (!match(Scalar, m_Store(m_Value(X), m_Value())) ||
8597 !isLoadCombineCandidateImpl(X, NumElts, TTI, /* MatchOr */ true))
8598 return false;
8599 }
8600 return true;
8601 }
8602
8603 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable(bool ForReduction) const {
8604 // No need to vectorize inserts of gathered values.
8605 if (VectorizableTree.size() == 2 &&
8606 isa<InsertElementInst>(VectorizableTree[0]->Scalars[0]) &&
8607 VectorizableTree[1]->State == TreeEntry::NeedToGather &&
8608 (VectorizableTree[1]->getVectorFactor() <= 2 ||
8609 !(isSplat(VectorizableTree[1]->Scalars) ||
8610 allConstant(VectorizableTree[1]->Scalars))))
8611 return true;
8612
8613 // If the graph includes only PHI nodes and gathers, it is definitely not
8614 // profitable to vectorize, so we can skip it if the cost threshold is the
8615 // default. The cost of vectorized PHI nodes is almost always 0 plus the cost
8616 // of the gathers/buildvectors.
8617 constexpr int Limit = 4;
8618 if (!ForReduction && !SLPCostThreshold.getNumOccurrences() &&
8619 !VectorizableTree.empty() &&
8620 all_of(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) {
8621 return (TE->State == TreeEntry::NeedToGather &&
8622 TE->getOpcode() != Instruction::ExtractElement &&
8623 count_if(TE->Scalars,
8624 [](Value *V) { return isa<ExtractElementInst>(V); }) <=
8625 Limit) ||
8626 TE->getOpcode() == Instruction::PHI;
8627 }))
8628 return true;
8629
8630 // We can vectorize the tree if its size is greater than or equal to the
8631 // minimum size specified by the MinTreeSize command line option.
8632 if (VectorizableTree.size() >= MinTreeSize)
8633 return false;
8634
8635 // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
8636 // can vectorize it if we can prove it fully vectorizable.
8637 if (isFullyVectorizableTinyTree(ForReduction))
8638 return false;
8639
8640 assert(VectorizableTree.empty()
8641 ? ExternalUses.empty()
8642 : true && "We shouldn't have any external users");
8643
8644 // Otherwise, we can't vectorize the tree. It is both tiny and not fully
8645 // vectorizable.
8646 return true;
8647 }
8648
8649 InstructionCost BoUpSLP::getSpillCost() const {
8650 // Walk from the bottom of the tree to the top, tracking which values are
8651 // live.
When we see a call instruction that is not part of our tree, 8652 // query TTI to see if there is a cost to keeping values live over it 8653 // (for example, if spills and fills are required). 8654 unsigned BundleWidth = VectorizableTree.front()->Scalars.size(); 8655 InstructionCost Cost = 0; 8656 8657 SmallPtrSet<Instruction *, 4> LiveValues; 8658 Instruction *PrevInst = nullptr; 8659 8660 // The entries in VectorizableTree are not necessarily ordered by their 8661 // position in basic blocks. Collect them and order them by dominance so later 8662 // instructions are guaranteed to be visited first. For instructions in 8663 // different basic blocks, we only scan to the beginning of the block, so 8664 // their order does not matter, as long as all instructions in a basic block 8665 // are grouped together. Using dominance ensures a deterministic order. 8666 SmallVector<Instruction *, 16> OrderedScalars; 8667 for (const auto &TEPtr : VectorizableTree) { 8668 if (TEPtr->State != TreeEntry::Vectorize) 8669 continue; 8670 Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]); 8671 if (!Inst) 8672 continue; 8673 OrderedScalars.push_back(Inst); 8674 } 8675 llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) { 8676 auto *NodeA = DT->getNode(A->getParent()); 8677 auto *NodeB = DT->getNode(B->getParent()); 8678 assert(NodeA && "Should only process reachable instructions"); 8679 assert(NodeB && "Should only process reachable instructions"); 8680 assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) && 8681 "Different nodes should have different DFS numbers"); 8682 if (NodeA != NodeB) 8683 return NodeA->getDFSNumIn() > NodeB->getDFSNumIn(); 8684 return B->comesBefore(A); 8685 }); 8686 8687 for (Instruction *Inst : OrderedScalars) { 8688 if (!PrevInst) { 8689 PrevInst = Inst; 8690 continue; 8691 } 8692 8693 // Update LiveValues. 8694 LiveValues.erase(PrevInst); 8695 for (auto &J : PrevInst->operands()) { 8696 if (isa<Instruction>(&*J) && getTreeEntry(&*J)) 8697 LiveValues.insert(cast<Instruction>(&*J)); 8698 } 8699 8700 LLVM_DEBUG({ 8701 dbgs() << "SLP: #LV: " << LiveValues.size(); 8702 for (auto *X : LiveValues) 8703 dbgs() << " " << X->getName(); 8704 dbgs() << ", Looking at "; 8705 Inst->dump(); 8706 }); 8707 8708 // Now find the sequence of instructions between PrevInst and Inst. 8709 unsigned NumCalls = 0; 8710 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(), 8711 PrevInstIt = 8712 PrevInst->getIterator().getReverse(); 8713 while (InstIt != PrevInstIt) { 8714 if (PrevInstIt == PrevInst->getParent()->rend()) { 8715 PrevInstIt = Inst->getParent()->rbegin(); 8716 continue; 8717 } 8718 8719 auto NoCallIntrinsic = [this](Instruction *I) { 8720 if (auto *II = dyn_cast<IntrinsicInst>(I)) { 8721 if (II->isAssumeLikeIntrinsic()) 8722 return true; 8723 FastMathFlags FMF; 8724 SmallVector<Type *, 4> Tys; 8725 for (auto &ArgOp : II->args()) 8726 Tys.push_back(ArgOp->getType()); 8727 if (auto *FPMO = dyn_cast<FPMathOperator>(II)) 8728 FMF = FPMO->getFastMathFlags(); 8729 IntrinsicCostAttributes ICA(II->getIntrinsicID(), II->getType(), Tys, 8730 FMF); 8731 InstructionCost IntrCost = 8732 TTI->getIntrinsicInstrCost(ICA, TTI::TCK_RecipThroughput); 8733 InstructionCost CallCost = TTI->getCallInstrCost( 8734 nullptr, II->getType(), Tys, TTI::TCK_RecipThroughput); 8735 if (IntrCost < CallCost) 8736 return true; 8737 } 8738 return false; 8739 }; 8740 8741 // Debug information does not impact spill cost. 
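// (Each call found between the two scalars makes every currently live tree
// value a potential spill; NumCalls later scales
// getCostOfKeepingLiveOverCall over the live bundle types.)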
8742 if (isa<CallBase>(&*PrevInstIt) && !NoCallIntrinsic(&*PrevInstIt) &&
8743 &*PrevInstIt != PrevInst)
8744 NumCalls++;
8745
8746 ++PrevInstIt;
8747 }
8748
8749 if (NumCalls) {
8750 SmallVector<Type *, 4> V;
8751 for (auto *II : LiveValues) {
8752 auto *ScalarTy = II->getType();
8753 if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy))
8754 ScalarTy = VectorTy->getElementType();
8755 V.push_back(FixedVectorType::get(ScalarTy, BundleWidth));
8756 }
8757 Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V);
8758 }
8759
8760 PrevInst = Inst;
8761 }
8762
8763 return Cost;
8764 }
8765
8766 /// Checks if the \p IE1 instruction is followed by the \p IE2 instruction in
8767 /// the buildvector sequence.
8768 static bool isFirstInsertElement(const InsertElementInst *IE1,
8769 const InsertElementInst *IE2) {
8770 if (IE1 == IE2)
8771 return false;
8772 const auto *I1 = IE1;
8773 const auto *I2 = IE2;
8774 const InsertElementInst *PrevI1;
8775 const InsertElementInst *PrevI2;
8776 unsigned Idx1 = *getInsertIndex(IE1);
8777 unsigned Idx2 = *getInsertIndex(IE2);
8778 do {
8779 if (I2 == IE1)
8780 return true;
8781 if (I1 == IE2)
8782 return false;
8783 PrevI1 = I1;
8784 PrevI2 = I2;
8785 if (I1 && (I1 == IE1 || I1->hasOneUse()) &&
8786 getInsertIndex(I1).value_or(Idx2) != Idx2)
8787 I1 = dyn_cast<InsertElementInst>(I1->getOperand(0));
8788 if (I2 && ((I2 == IE2 || I2->hasOneUse())) &&
8789 getInsertIndex(I2).value_or(Idx1) != Idx1)
8790 I2 = dyn_cast<InsertElementInst>(I2->getOperand(0));
8791 } while ((I1 && PrevI1 != I1) || (I2 && PrevI2 != I2));
8792 llvm_unreachable("Two different buildvectors not expected.");
8793 }
8794
8795 namespace {
8796 /// Returns the incoming Value * if the requested type is Value * too, or a
8797 /// default value otherwise.
8798 struct ValueSelect {
8799 template <typename U>
8800 static std::enable_if_t<std::is_same_v<Value *, U>, Value *> get(Value *V) {
8801 return V;
8802 }
8803 template <typename U>
8804 static std::enable_if_t<!std::is_same_v<Value *, U>, U> get(Value *) {
8805 return U();
8806 }
8807 };
8808 } // namespace
8809
8810 /// Analyzes the provided shuffle masks and performs the requested actions on
8811 /// the vectors with the given shuffle masks. It tries to do it in several
8812 /// steps.
8813 /// 1. If the Base vector is not an undef vector, resize the very first mask to
8814 /// have a common VF and perform the action for 2 input vectors (including the
8815 /// non-undef Base). Other shuffle masks are combined with the result of the
8816 /// first stage and processed as a shuffle of 2 vectors.
8817 /// 2. If the Base is an undef vector and there is only 1 shuffle mask, perform
8818 /// the action only for 1 vector with the given mask, if it is not the identity
8819 /// mask.
8820 /// 3. If > 2 masks are used, perform the remaining shuffle actions for 2
8821 /// vectors, combining the masks properly between the steps.
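/// A worked illustration (hypothetical inputs): with an undef Base and two
/// masks <0,1,u,u> and <u,u,0,1> over two inputs of equal VF 2, the inputs
/// are shuffled directly with the merged mask <0,1,2,3> (second-input lanes
/// offset by the first VF) and Action runs once; any further masks are then
/// folded in pairwise as described in step 3.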
8822 template <typename T> 8823 static T *performExtractsShuffleAction( 8824 MutableArrayRef<std::pair<T *, SmallVector<int>>> ShuffleMask, Value *Base, 8825 function_ref<unsigned(T *)> GetVF, 8826 function_ref<std::pair<T *, bool>(T *, ArrayRef<int>, bool)> ResizeAction, 8827 function_ref<T *(ArrayRef<int>, ArrayRef<T *>)> Action) { 8828 assert(!ShuffleMask.empty() && "Empty list of shuffles for inserts."); 8829 SmallVector<int> Mask(ShuffleMask.begin()->second); 8830 auto VMIt = std::next(ShuffleMask.begin()); 8831 T *Prev = nullptr; 8832 SmallBitVector UseMask = 8833 buildUseMask(Mask.size(), Mask, UseMask::UndefsAsMask); 8834 SmallBitVector IsBaseUndef = isUndefVector(Base, UseMask); 8835 if (!IsBaseUndef.all()) { 8836 // Base is not undef, need to combine it with the next subvectors. 8837 std::pair<T *, bool> Res = 8838 ResizeAction(ShuffleMask.begin()->first, Mask, /*ForSingleMask=*/false); 8839 SmallBitVector IsBasePoison = isUndefVector<true>(Base, UseMask); 8840 for (unsigned Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) { 8841 if (Mask[Idx] == PoisonMaskElem) 8842 Mask[Idx] = IsBasePoison.test(Idx) ? PoisonMaskElem : Idx; 8843 else 8844 Mask[Idx] = (Res.second ? Idx : Mask[Idx]) + VF; 8845 } 8846 auto *V = ValueSelect::get<T *>(Base); 8847 (void)V; 8848 assert((!V || GetVF(V) == Mask.size()) && 8849 "Expected base vector of VF number of elements."); 8850 Prev = Action(Mask, {nullptr, Res.first}); 8851 } else if (ShuffleMask.size() == 1) { 8852 // Base is undef and only 1 vector is shuffled - perform the action only for 8853 // single vector, if the mask is not the identity mask. 8854 std::pair<T *, bool> Res = ResizeAction(ShuffleMask.begin()->first, Mask, 8855 /*ForSingleMask=*/true); 8856 if (Res.second) 8857 // Identity mask is found. 8858 Prev = Res.first; 8859 else 8860 Prev = Action(Mask, {ShuffleMask.begin()->first}); 8861 } else { 8862 // Base is undef and at least 2 input vectors shuffled - perform 2 vectors 8863 // shuffles step by step, combining shuffle between the steps. 8864 unsigned Vec1VF = GetVF(ShuffleMask.begin()->first); 8865 unsigned Vec2VF = GetVF(VMIt->first); 8866 if (Vec1VF == Vec2VF) { 8867 // No need to resize the input vectors since they are of the same size, we 8868 // can shuffle them directly. 8869 ArrayRef<int> SecMask = VMIt->second; 8870 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) { 8871 if (SecMask[I] != PoisonMaskElem) { 8872 assert(Mask[I] == PoisonMaskElem && "Multiple uses of scalars."); 8873 Mask[I] = SecMask[I] + Vec1VF; 8874 } 8875 } 8876 Prev = Action(Mask, {ShuffleMask.begin()->first, VMIt->first}); 8877 } else { 8878 // Vectors of different sizes - resize and reshuffle. 8879 std::pair<T *, bool> Res1 = ResizeAction(ShuffleMask.begin()->first, Mask, 8880 /*ForSingleMask=*/false); 8881 std::pair<T *, bool> Res2 = 8882 ResizeAction(VMIt->first, VMIt->second, /*ForSingleMask=*/false); 8883 ArrayRef<int> SecMask = VMIt->second; 8884 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) { 8885 if (Mask[I] != PoisonMaskElem) { 8886 assert(SecMask[I] == PoisonMaskElem && "Multiple uses of scalars."); 8887 if (Res1.second) 8888 Mask[I] = I; 8889 } else if (SecMask[I] != PoisonMaskElem) { 8890 assert(Mask[I] == PoisonMaskElem && "Multiple uses of scalars."); 8891 Mask[I] = (Res2.second ? 
I : SecMask[I]) + VF; 8892 } 8893 } 8894 Prev = Action(Mask, {Res1.first, Res2.first}); 8895 } 8896 VMIt = std::next(VMIt); 8897 } 8898 bool IsBaseNotUndef = !IsBaseUndef.all(); 8899 (void)IsBaseNotUndef; 8900 // Perform requested actions for the remaining masks/vectors. 8901 for (auto E = ShuffleMask.end(); VMIt != E; ++VMIt) { 8902 // Shuffle other input vectors, if any. 8903 std::pair<T *, bool> Res = 8904 ResizeAction(VMIt->first, VMIt->second, /*ForSingleMask=*/false); 8905 ArrayRef<int> SecMask = VMIt->second; 8906 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) { 8907 if (SecMask[I] != PoisonMaskElem) { 8908 assert((Mask[I] == PoisonMaskElem || IsBaseNotUndef) && 8909 "Multiple uses of scalars."); 8910 Mask[I] = (Res.second ? I : SecMask[I]) + VF; 8911 } else if (Mask[I] != PoisonMaskElem) { 8912 Mask[I] = I; 8913 } 8914 } 8915 Prev = Action(Mask, {Prev, Res.first}); 8916 } 8917 return Prev; 8918 } 8919 8920 InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) { 8921 InstructionCost Cost = 0; 8922 LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size " 8923 << VectorizableTree.size() << ".\n"); 8924 8925 unsigned BundleWidth = VectorizableTree[0]->Scalars.size(); 8926 8927 SmallPtrSet<Value *, 4> CheckedExtracts; 8928 for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) { 8929 TreeEntry &TE = *VectorizableTree[I]; 8930 if (TE.State == TreeEntry::NeedToGather) { 8931 if (const TreeEntry *E = getTreeEntry(TE.getMainOp()); 8932 E && E->getVectorFactor() == TE.getVectorFactor() && 8933 E->isSame(TE.Scalars)) { 8934 // Some gather nodes might be absolutely the same as some vectorizable 8935 // nodes after reordering, need to handle it. 8936 LLVM_DEBUG(dbgs() << "SLP: Adding cost 0 for bundle " 8937 << shortBundleName(TE.Scalars) << ".\n" 8938 << "SLP: Current total cost = " << Cost << "\n"); 8939 continue; 8940 } 8941 } 8942 8943 InstructionCost C = getEntryCost(&TE, VectorizedVals, CheckedExtracts); 8944 Cost += C; 8945 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle " 8946 << shortBundleName(TE.Scalars) << ".\n" 8947 << "SLP: Current total cost = " << Cost << "\n"); 8948 } 8949 8950 SmallPtrSet<Value *, 16> ExtractCostCalculated; 8951 InstructionCost ExtractCost = 0; 8952 SmallVector<MapVector<const TreeEntry *, SmallVector<int>>> ShuffleMasks; 8953 SmallVector<std::pair<Value *, const TreeEntry *>> FirstUsers; 8954 SmallVector<APInt> DemandedElts; 8955 SmallDenseSet<Value *, 4> UsedInserts; 8956 DenseSet<Value *> VectorCasts; 8957 for (ExternalUser &EU : ExternalUses) { 8958 // We only add extract cost once for the same scalar. 8959 if (!isa_and_nonnull<InsertElementInst>(EU.User) && 8960 !ExtractCostCalculated.insert(EU.Scalar).second) 8961 continue; 8962 8963 // Uses by ephemeral values are free (because the ephemeral value will be 8964 // removed prior to code generation, and so the extraction will be 8965 // removed as well). 8966 if (EphValues.count(EU.User)) 8967 continue; 8968 8969 // No extract cost for vector "scalar" 8970 if (isa<FixedVectorType>(EU.Scalar->getType())) 8971 continue; 8972 8973 // If found user is an insertelement, do not calculate extract cost but try 8974 // to detect it as a final shuffled/identity match. 
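    // For example (illustrative IR), a buildvector chain such as
    //   %i0 = insertelement <4 x float> poison, float %s0, i32 0
    //   %i1 = insertelement <4 x float> %i0, float %s1, i32 1
    // where %s0 and %s1 come from the vectorized tree is modeled as a
    // (possibly identity) shuffle of the vectorized value instead of per-lane
    // extracts.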
8975 if (auto *VU = dyn_cast_or_null<InsertElementInst>(EU.User)) { 8976 if (auto *FTy = dyn_cast<FixedVectorType>(VU->getType())) { 8977 if (!UsedInserts.insert(VU).second) 8978 continue; 8979 std::optional<unsigned> InsertIdx = getInsertIndex(VU); 8980 if (InsertIdx) { 8981 const TreeEntry *ScalarTE = getTreeEntry(EU.Scalar); 8982 auto *It = find_if( 8983 FirstUsers, 8984 [this, VU](const std::pair<Value *, const TreeEntry *> &Pair) { 8985 return areTwoInsertFromSameBuildVector( 8986 VU, cast<InsertElementInst>(Pair.first), 8987 [this](InsertElementInst *II) -> Value * { 8988 Value *Op0 = II->getOperand(0); 8989 if (getTreeEntry(II) && !getTreeEntry(Op0)) 8990 return nullptr; 8991 return Op0; 8992 }); 8993 }); 8994 int VecId = -1; 8995 if (It == FirstUsers.end()) { 8996 (void)ShuffleMasks.emplace_back(); 8997 SmallVectorImpl<int> &Mask = ShuffleMasks.back()[ScalarTE]; 8998 if (Mask.empty()) 8999 Mask.assign(FTy->getNumElements(), PoisonMaskElem); 9000 // Find the insertvector, vectorized in tree, if any. 9001 Value *Base = VU; 9002 while (auto *IEBase = dyn_cast<InsertElementInst>(Base)) { 9003 if (IEBase != EU.User && 9004 (!IEBase->hasOneUse() || 9005 getInsertIndex(IEBase).value_or(*InsertIdx) == *InsertIdx)) 9006 break; 9007 // Build the mask for the vectorized insertelement instructions. 9008 if (const TreeEntry *E = getTreeEntry(IEBase)) { 9009 VU = IEBase; 9010 do { 9011 IEBase = cast<InsertElementInst>(Base); 9012 int Idx = *getInsertIndex(IEBase); 9013 assert(Mask[Idx] == PoisonMaskElem && 9014 "InsertElementInstruction used already."); 9015 Mask[Idx] = Idx; 9016 Base = IEBase->getOperand(0); 9017 } while (E == getTreeEntry(Base)); 9018 break; 9019 } 9020 Base = cast<InsertElementInst>(Base)->getOperand(0); 9021 } 9022 FirstUsers.emplace_back(VU, ScalarTE); 9023 DemandedElts.push_back(APInt::getZero(FTy->getNumElements())); 9024 VecId = FirstUsers.size() - 1; 9025 auto It = MinBWs.find(ScalarTE); 9026 if (It != MinBWs.end() && VectorCasts.insert(EU.Scalar).second) { 9027 unsigned BWSz = It->second.second; 9028 unsigned SrcBWSz = DL->getTypeSizeInBits(FTy->getElementType()); 9029 unsigned VecOpcode; 9030 if (BWSz < SrcBWSz) 9031 VecOpcode = Instruction::Trunc; 9032 else 9033 VecOpcode = 9034 It->second.second ? Instruction::SExt : Instruction::ZExt; 9035 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 9036 InstructionCost C = TTI->getCastInstrCost( 9037 VecOpcode, FTy, 9038 FixedVectorType::get( 9039 IntegerType::get(FTy->getContext(), It->second.first), 9040 FTy->getNumElements()), 9041 TTI::CastContextHint::None, CostKind); 9042 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 9043 << " for extending externally used vector with " 9044 "non-equal minimum bitwidth.\n"); 9045 Cost += C; 9046 } 9047 } else { 9048 if (isFirstInsertElement(VU, cast<InsertElementInst>(It->first))) 9049 It->first = VU; 9050 VecId = std::distance(FirstUsers.begin(), It); 9051 } 9052 int InIdx = *InsertIdx; 9053 SmallVectorImpl<int> &Mask = ShuffleMasks[VecId][ScalarTE]; 9054 if (Mask.empty()) 9055 Mask.assign(FTy->getNumElements(), PoisonMaskElem); 9056 Mask[InIdx] = EU.Lane; 9057 DemandedElts[VecId].setBit(InIdx); 9058 continue; 9059 } 9060 } 9061 } 9062 9063 // If we plan to rewrite the tree in a smaller type, we will need to sign 9064 // extend the extracted value back to the original type. Here, we account 9065 // for the extract and the added cost of the sign extend if needed. 
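    // For example (illustrative types), if the tree is demoted to i16 while
    // the external user still consumes i32, the cost below is an extract from
    // a <BundleWidth x i16> vector plus the sext/zext back to i32, modeled via
    // TTI::getExtractWithExtendCost().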
9066 auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth); 9067 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 9068 auto It = MinBWs.find(getTreeEntry(EU.Scalar)); 9069 if (It != MinBWs.end()) { 9070 auto *MinTy = IntegerType::get(F->getContext(), It->second.first); 9071 unsigned Extend = 9072 It->second.second ? Instruction::SExt : Instruction::ZExt; 9073 VecTy = FixedVectorType::get(MinTy, BundleWidth); 9074 ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(), 9075 VecTy, EU.Lane); 9076 } else { 9077 ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, 9078 CostKind, EU.Lane); 9079 } 9080 } 9081 // Add reduced value cost, if resized. 9082 if (!VectorizedVals.empty()) { 9083 auto BWIt = MinBWs.find(VectorizableTree.front().get()); 9084 if (BWIt != MinBWs.end()) { 9085 Type *DstTy = VectorizableTree.front()->Scalars.front()->getType(); 9086 unsigned OriginalSz = DL->getTypeSizeInBits(DstTy); 9087 unsigned Opcode = Instruction::Trunc; 9088 if (OriginalSz < BWIt->second.first) 9089 Opcode = BWIt->second.second ? Instruction::SExt : Instruction::ZExt; 9090 Type *SrcTy = IntegerType::get(DstTy->getContext(), BWIt->second.first); 9091 Cost += TTI->getCastInstrCost(Opcode, DstTy, SrcTy, 9092 TTI::CastContextHint::None, 9093 TTI::TCK_RecipThroughput); 9094 } 9095 } 9096 9097 InstructionCost SpillCost = getSpillCost(); 9098 Cost += SpillCost + ExtractCost; 9099 auto &&ResizeToVF = [this, &Cost](const TreeEntry *TE, ArrayRef<int> Mask, 9100 bool) { 9101 InstructionCost C = 0; 9102 unsigned VF = Mask.size(); 9103 unsigned VecVF = TE->getVectorFactor(); 9104 if (VF != VecVF && 9105 (any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); }) || 9106 !ShuffleVectorInst::isIdentityMask(Mask, VF))) { 9107 SmallVector<int> OrigMask(VecVF, PoisonMaskElem); 9108 std::copy(Mask.begin(), std::next(Mask.begin(), std::min(VF, VecVF)), 9109 OrigMask.begin()); 9110 C = TTI->getShuffleCost( 9111 TTI::SK_PermuteSingleSrc, 9112 FixedVectorType::get(TE->getMainOp()->getType(), VecVF), OrigMask); 9113 LLVM_DEBUG( 9114 dbgs() << "SLP: Adding cost " << C 9115 << " for final shuffle of insertelement external users.\n"; 9116 TE->dump(); dbgs() << "SLP: Current total cost = " << Cost << "\n"); 9117 Cost += C; 9118 return std::make_pair(TE, true); 9119 } 9120 return std::make_pair(TE, false); 9121 }; 9122 // Calculate the cost of the reshuffled vectors, if any. 
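  // For example (illustrative), if a group of external insertelement users
  // forms a 4-wide buildvector that can be produced as a single shuffle of a
  // vectorized node, the shuffle cost is added below while the scalarization
  // overhead of the original 4 inserts (tracked in DemandedElts) is
  // subtracted, since those inserts become redundant.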
9123 for (int I = 0, E = FirstUsers.size(); I < E; ++I) { 9124 Value *Base = cast<Instruction>(FirstUsers[I].first)->getOperand(0); 9125 auto Vector = ShuffleMasks[I].takeVector(); 9126 unsigned VF = 0; 9127 auto EstimateShufflesCost = [&](ArrayRef<int> Mask, 9128 ArrayRef<const TreeEntry *> TEs) { 9129 assert((TEs.size() == 1 || TEs.size() == 2) && 9130 "Expected exactly 1 or 2 tree entries."); 9131 if (TEs.size() == 1) { 9132 if (VF == 0) 9133 VF = TEs.front()->getVectorFactor(); 9134 auto *FTy = 9135 FixedVectorType::get(TEs.back()->Scalars.front()->getType(), VF); 9136 if (!ShuffleVectorInst::isIdentityMask(Mask, VF) && 9137 !all_of(enumerate(Mask), [=](const auto &Data) { 9138 return Data.value() == PoisonMaskElem || 9139 (Data.index() < VF && 9140 static_cast<int>(Data.index()) == Data.value()); 9141 })) { 9142 InstructionCost C = 9143 TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FTy, Mask); 9144 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 9145 << " for final shuffle of insertelement " 9146 "external users.\n"; 9147 TEs.front()->dump(); 9148 dbgs() << "SLP: Current total cost = " << Cost << "\n"); 9149 Cost += C; 9150 } 9151 } else { 9152 if (VF == 0) { 9153 if (TEs.front() && 9154 TEs.front()->getVectorFactor() == TEs.back()->getVectorFactor()) 9155 VF = TEs.front()->getVectorFactor(); 9156 else 9157 VF = Mask.size(); 9158 } 9159 auto *FTy = 9160 FixedVectorType::get(TEs.back()->Scalars.front()->getType(), VF); 9161 InstructionCost C = 9162 ::getShuffleCost(*TTI, TTI::SK_PermuteTwoSrc, FTy, Mask); 9163 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 9164 << " for final shuffle of vector node and external " 9165 "insertelement users.\n"; 9166 if (TEs.front()) { TEs.front()->dump(); } TEs.back()->dump(); 9167 dbgs() << "SLP: Current total cost = " << Cost << "\n"); 9168 Cost += C; 9169 } 9170 VF = Mask.size(); 9171 return TEs.back(); 9172 }; 9173 (void)performExtractsShuffleAction<const TreeEntry>( 9174 MutableArrayRef(Vector.data(), Vector.size()), Base, 9175 [](const TreeEntry *E) { return E->getVectorFactor(); }, ResizeToVF, 9176 EstimateShufflesCost); 9177 InstructionCost InsertCost = TTI->getScalarizationOverhead( 9178 cast<FixedVectorType>(FirstUsers[I].first->getType()), DemandedElts[I], 9179 /*Insert*/ true, /*Extract*/ false, TTI::TCK_RecipThroughput); 9180 Cost -= InsertCost; 9181 } 9182 9183 #ifndef NDEBUG 9184 SmallString<256> Str; 9185 { 9186 raw_svector_ostream OS(Str); 9187 OS << "SLP: Spill Cost = " << SpillCost << ".\n" 9188 << "SLP: Extract Cost = " << ExtractCost << ".\n" 9189 << "SLP: Total Cost = " << Cost << ".\n"; 9190 } 9191 LLVM_DEBUG(dbgs() << Str); 9192 if (ViewSLPTree) 9193 ViewGraph(this, "SLP" + F->getName(), false, Str); 9194 #endif 9195 9196 return Cost; 9197 } 9198 9199 /// Tries to find extractelement instructions with constant indices from fixed 9200 /// vector type and gather such instructions into a bunch, which highly likely 9201 /// might be detected as a shuffle of 1 or 2 input vectors. If this attempt was 9202 /// successful, the matched scalars are replaced by poison values in \p VL for 9203 /// future analysis. 9204 std::optional<TTI::ShuffleKind> 9205 BoUpSLP::tryToGatherSingleRegisterExtractElements( 9206 MutableArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) const { 9207 // Scan list of gathered scalars for extractelements that can be represented 9208 // as shuffles. 
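  // For example (illustrative IR), a gather of
  //   %x0 = extractelement <4 x i32> %a, i32 0
  //   %x1 = extractelement <4 x i32> %a, i32 1
  //   %x2 = extractelement <4 x i32> %b, i32 2
  //   %x3 = extractelement <4 x i32> %b, i32 3
  // can be represented as a two-source shuffle of %a and %b with the mask
  // <0, 1, 6, 7> instead of four scalar extracts and inserts.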
9209 MapVector<Value *, SmallVector<int>> VectorOpToIdx; 9210 SmallVector<int> UndefVectorExtracts; 9211 for (int I = 0, E = VL.size(); I < E; ++I) { 9212 auto *EI = dyn_cast<ExtractElementInst>(VL[I]); 9213 if (!EI) { 9214 if (isa<UndefValue>(VL[I])) 9215 UndefVectorExtracts.push_back(I); 9216 continue; 9217 } 9218 auto *VecTy = dyn_cast<FixedVectorType>(EI->getVectorOperandType()); 9219 if (!VecTy || !isa<ConstantInt, UndefValue>(EI->getIndexOperand())) 9220 continue; 9221 std::optional<unsigned> Idx = getExtractIndex(EI); 9222 // Undefined index. 9223 if (!Idx) { 9224 UndefVectorExtracts.push_back(I); 9225 continue; 9226 } 9227 SmallBitVector ExtractMask(VecTy->getNumElements(), true); 9228 ExtractMask.reset(*Idx); 9229 if (isUndefVector(EI->getVectorOperand(), ExtractMask).all()) { 9230 UndefVectorExtracts.push_back(I); 9231 continue; 9232 } 9233 VectorOpToIdx[EI->getVectorOperand()].push_back(I); 9234 } 9235 // Sort the vector operands by the maximum number of uses in extractelements. 9236 MapVector<unsigned, SmallVector<Value *>> VFToVector; 9237 for (const auto &Data : VectorOpToIdx) 9238 VFToVector[cast<FixedVectorType>(Data.first->getType())->getNumElements()] 9239 .push_back(Data.first); 9240 for (auto &Data : VFToVector) { 9241 stable_sort(Data.second, [&VectorOpToIdx](Value *V1, Value *V2) { 9242 return VectorOpToIdx.find(V1)->second.size() > 9243 VectorOpToIdx.find(V2)->second.size(); 9244 }); 9245 } 9246 // Find the best pair of the vectors with the same number of elements or a 9247 // single vector. 9248 const int UndefSz = UndefVectorExtracts.size(); 9249 unsigned SingleMax = 0; 9250 Value *SingleVec = nullptr; 9251 unsigned PairMax = 0; 9252 std::pair<Value *, Value *> PairVec(nullptr, nullptr); 9253 for (auto &Data : VFToVector) { 9254 Value *V1 = Data.second.front(); 9255 if (SingleMax < VectorOpToIdx[V1].size() + UndefSz) { 9256 SingleMax = VectorOpToIdx[V1].size() + UndefSz; 9257 SingleVec = V1; 9258 } 9259 Value *V2 = nullptr; 9260 if (Data.second.size() > 1) 9261 V2 = *std::next(Data.second.begin()); 9262 if (V2 && PairMax < VectorOpToIdx[V1].size() + VectorOpToIdx[V2].size() + 9263 UndefSz) { 9264 PairMax = VectorOpToIdx[V1].size() + VectorOpToIdx[V2].size() + UndefSz; 9265 PairVec = std::make_pair(V1, V2); 9266 } 9267 } 9268 if (SingleMax == 0 && PairMax == 0 && UndefSz == 0) 9269 return std::nullopt; 9270 // Check if better to perform a shuffle of 2 vectors or just of a single 9271 // vector. 9272 SmallVector<Value *> SavedVL(VL.begin(), VL.end()); 9273 SmallVector<Value *> GatheredExtracts( 9274 VL.size(), PoisonValue::get(VL.front()->getType())); 9275 if (SingleMax >= PairMax && SingleMax) { 9276 for (int Idx : VectorOpToIdx[SingleVec]) 9277 std::swap(GatheredExtracts[Idx], VL[Idx]); 9278 } else { 9279 for (Value *V : {PairVec.first, PairVec.second}) 9280 for (int Idx : VectorOpToIdx[V]) 9281 std::swap(GatheredExtracts[Idx], VL[Idx]); 9282 } 9283 // Add extracts from undefs too. 9284 for (int Idx : UndefVectorExtracts) 9285 std::swap(GatheredExtracts[Idx], VL[Idx]); 9286 // Check that gather of extractelements can be represented as just a 9287 // shuffle of a single/two vectors the scalars are extracted from. 9288 std::optional<TTI::ShuffleKind> Res = 9289 isFixedVectorShuffle(GatheredExtracts, Mask); 9290 if (!Res) { 9291 // TODO: try to check other subsets if possible. 9292 // Restore the original VL if attempt was not successful. 
9293 copy(SavedVL, VL.begin()); 9294 return std::nullopt; 9295 } 9296 // Restore unused scalars from mask, if some of the extractelements were not 9297 // selected for shuffle. 9298 for (int I = 0, E = GatheredExtracts.size(); I < E; ++I) { 9299 if (Mask[I] == PoisonMaskElem && !isa<PoisonValue>(GatheredExtracts[I]) && 9300 isa<UndefValue>(GatheredExtracts[I])) { 9301 std::swap(VL[I], GatheredExtracts[I]); 9302 continue; 9303 } 9304 auto *EI = dyn_cast<ExtractElementInst>(VL[I]); 9305 if (!EI || !isa<FixedVectorType>(EI->getVectorOperandType()) || 9306 !isa<ConstantInt, UndefValue>(EI->getIndexOperand()) || 9307 is_contained(UndefVectorExtracts, I)) 9308 continue; 9309 } 9310 return Res; 9311 } 9312 9313 /// Tries to find extractelement instructions with constant indices from fixed 9314 /// vector type and gather such instructions into a bunch, which highly likely 9315 /// might be detected as a shuffle of 1 or 2 input vectors. If this attempt was 9316 /// successful, the matched scalars are replaced by poison values in \p VL for 9317 /// future analysis. 9318 SmallVector<std::optional<TTI::ShuffleKind>> 9319 BoUpSLP::tryToGatherExtractElements(SmallVectorImpl<Value *> &VL, 9320 SmallVectorImpl<int> &Mask, 9321 unsigned NumParts) const { 9322 assert(NumParts > 0 && "NumParts expected be greater than or equal to 1."); 9323 SmallVector<std::optional<TTI::ShuffleKind>> ShufflesRes(NumParts); 9324 Mask.assign(VL.size(), PoisonMaskElem); 9325 unsigned SliceSize = VL.size() / NumParts; 9326 for (unsigned Part = 0; Part < NumParts; ++Part) { 9327 // Scan list of gathered scalars for extractelements that can be represented 9328 // as shuffles. 9329 MutableArrayRef<Value *> SubVL = 9330 MutableArrayRef(VL).slice(Part * SliceSize, SliceSize); 9331 SmallVector<int> SubMask; 9332 std::optional<TTI::ShuffleKind> Res = 9333 tryToGatherSingleRegisterExtractElements(SubVL, SubMask); 9334 ShufflesRes[Part] = Res; 9335 copy(SubMask, std::next(Mask.begin(), Part * SliceSize)); 9336 } 9337 if (none_of(ShufflesRes, [](const std::optional<TTI::ShuffleKind> &Res) { 9338 return Res.has_value(); 9339 })) 9340 ShufflesRes.clear(); 9341 return ShufflesRes; 9342 } 9343 9344 std::optional<TargetTransformInfo::ShuffleKind> 9345 BoUpSLP::isGatherShuffledSingleRegisterEntry( 9346 const TreeEntry *TE, ArrayRef<Value *> VL, MutableArrayRef<int> Mask, 9347 SmallVectorImpl<const TreeEntry *> &Entries, unsigned Part) { 9348 Entries.clear(); 9349 // TODO: currently checking only for Scalars in the tree entry, need to count 9350 // reused elements too for better cost estimation. 9351 const EdgeInfo &TEUseEI = TE->UserTreeIndices.front(); 9352 const Instruction *TEInsertPt = &getLastInstructionInBundle(TEUseEI.UserTE); 9353 const BasicBlock *TEInsertBlock = nullptr; 9354 // Main node of PHI entries keeps the correct order of operands/incoming 9355 // blocks. 9356 if (auto *PHI = dyn_cast<PHINode>(TEUseEI.UserTE->getMainOp())) { 9357 TEInsertBlock = PHI->getIncomingBlock(TEUseEI.EdgeIdx); 9358 TEInsertPt = TEInsertBlock->getTerminator(); 9359 } else { 9360 TEInsertBlock = TEInsertPt->getParent(); 9361 } 9362 auto *NodeUI = DT->getNode(TEInsertBlock); 9363 assert(NodeUI && "Should only process reachable instructions"); 9364 SmallPtrSet<Value *, 4> GatheredScalars(VL.begin(), VL.end()); 9365 auto CheckOrdering = [&](const Instruction *InsertPt) { 9366 // Argument InsertPt is an instruction where vector code for some other 9367 // tree entry (one that shares one or more scalars with TE) is going to be 9368 // generated. 
This lambda returns true if insertion point of vector code 9369 // for the TE dominates that point (otherwise dependency is the other way 9370 // around). The other node is not limited to be of a gather kind. Gather 9371 // nodes are not scheduled and their vector code is inserted before their 9372 // first user. If user is PHI, that is supposed to be at the end of a 9373 // predecessor block. Otherwise it is the last instruction among scalars of 9374 // the user node. So, instead of checking dependency between instructions 9375 // themselves, we check dependency between their insertion points for vector 9376 // code (since each scalar instruction ends up as a lane of a vector 9377 // instruction). 9378 const BasicBlock *InsertBlock = InsertPt->getParent(); 9379 auto *NodeEUI = DT->getNode(InsertBlock); 9380 if (!NodeEUI) 9381 return false; 9382 assert((NodeUI == NodeEUI) == 9383 (NodeUI->getDFSNumIn() == NodeEUI->getDFSNumIn()) && 9384 "Different nodes should have different DFS numbers"); 9385 // Check the order of the gather nodes users. 9386 if (TEInsertPt->getParent() != InsertBlock && 9387 (DT->dominates(NodeUI, NodeEUI) || !DT->dominates(NodeEUI, NodeUI))) 9388 return false; 9389 if (TEInsertPt->getParent() == InsertBlock && 9390 TEInsertPt->comesBefore(InsertPt)) 9391 return false; 9392 return true; 9393 }; 9394 // Find all tree entries used by the gathered values. If no common entries 9395 // found - not a shuffle. 9396 // Here we build a set of tree nodes for each gathered value and trying to 9397 // find the intersection between these sets. If we have at least one common 9398 // tree node for each gathered value - we have just a permutation of the 9399 // single vector. If we have 2 different sets, we're in situation where we 9400 // have a permutation of 2 input vectors. 9401 SmallVector<SmallPtrSet<const TreeEntry *, 4>> UsedTEs; 9402 DenseMap<Value *, int> UsedValuesEntry; 9403 for (Value *V : VL) { 9404 if (isConstant(V)) 9405 continue; 9406 // Build a list of tree entries where V is used. 9407 SmallPtrSet<const TreeEntry *, 4> VToTEs; 9408 for (const TreeEntry *TEPtr : ValueToGatherNodes.find(V)->second) { 9409 if (TEPtr == TE) 9410 continue; 9411 assert(any_of(TEPtr->Scalars, 9412 [&](Value *V) { return GatheredScalars.contains(V); }) && 9413 "Must contain at least single gathered value."); 9414 assert(TEPtr->UserTreeIndices.size() == 1 && 9415 "Expected only single user of a gather node."); 9416 const EdgeInfo &UseEI = TEPtr->UserTreeIndices.front(); 9417 9418 PHINode *UserPHI = dyn_cast<PHINode>(UseEI.UserTE->getMainOp()); 9419 const Instruction *InsertPt = 9420 UserPHI ? UserPHI->getIncomingBlock(UseEI.EdgeIdx)->getTerminator() 9421 : &getLastInstructionInBundle(UseEI.UserTE); 9422 if (TEInsertPt == InsertPt) { 9423 // If 2 gathers are operands of the same entry (regardless of whether 9424 // user is PHI or else), compare operands indices, use the earlier one 9425 // as the base. 9426 if (TEUseEI.UserTE == UseEI.UserTE && TEUseEI.EdgeIdx < UseEI.EdgeIdx) 9427 continue; 9428 // If the user instruction is used for some reason in different 9429 // vectorized nodes - make it depend on index. 9430 if (TEUseEI.UserTE != UseEI.UserTE && 9431 TEUseEI.UserTE->Idx < UseEI.UserTE->Idx) 9432 continue; 9433 } 9434 9435 // Check if the user node of the TE comes after user node of TEPtr, 9436 // otherwise TEPtr depends on TE. 
9437 if ((TEInsertBlock != InsertPt->getParent() || 9438 TEUseEI.EdgeIdx < UseEI.EdgeIdx || TEUseEI.UserTE != UseEI.UserTE) && 9439 !CheckOrdering(InsertPt)) 9440 continue; 9441 VToTEs.insert(TEPtr); 9442 } 9443 if (const TreeEntry *VTE = getTreeEntry(V)) { 9444 Instruction &LastBundleInst = getLastInstructionInBundle(VTE); 9445 if (&LastBundleInst == TEInsertPt || !CheckOrdering(&LastBundleInst)) 9446 continue; 9447 auto It = MinBWs.find(VTE); 9448 // If vectorize node is demoted - do not match. 9449 if (It != MinBWs.end() && 9450 It->second.first != DL->getTypeSizeInBits(V->getType())) 9451 continue; 9452 VToTEs.insert(VTE); 9453 } 9454 if (VToTEs.empty()) 9455 continue; 9456 if (UsedTEs.empty()) { 9457 // The first iteration, just insert the list of nodes to vector. 9458 UsedTEs.push_back(VToTEs); 9459 UsedValuesEntry.try_emplace(V, 0); 9460 } else { 9461 // Need to check if there are any previously used tree nodes which use V. 9462 // If there are no such nodes, consider that we have another one input 9463 // vector. 9464 SmallPtrSet<const TreeEntry *, 4> SavedVToTEs(VToTEs); 9465 unsigned Idx = 0; 9466 for (SmallPtrSet<const TreeEntry *, 4> &Set : UsedTEs) { 9467 // Do we have a non-empty intersection of previously listed tree entries 9468 // and tree entries using current V? 9469 set_intersect(VToTEs, Set); 9470 if (!VToTEs.empty()) { 9471 // Yes, write the new subset and continue analysis for the next 9472 // scalar. 9473 Set.swap(VToTEs); 9474 break; 9475 } 9476 VToTEs = SavedVToTEs; 9477 ++Idx; 9478 } 9479 // No non-empty intersection found - need to add a second set of possible 9480 // source vectors. 9481 if (Idx == UsedTEs.size()) { 9482 // If the number of input vectors is greater than 2 - not a permutation, 9483 // fallback to the regular gather. 9484 // TODO: support multiple reshuffled nodes. 9485 if (UsedTEs.size() == 2) 9486 continue; 9487 UsedTEs.push_back(SavedVToTEs); 9488 Idx = UsedTEs.size() - 1; 9489 } 9490 UsedValuesEntry.try_emplace(V, Idx); 9491 } 9492 } 9493 9494 if (UsedTEs.empty()) { 9495 Entries.clear(); 9496 return std::nullopt; 9497 } 9498 9499 unsigned VF = 0; 9500 if (UsedTEs.size() == 1) { 9501 // Keep the order to avoid non-determinism. 9502 SmallVector<const TreeEntry *> FirstEntries(UsedTEs.front().begin(), 9503 UsedTEs.front().end()); 9504 sort(FirstEntries, [](const TreeEntry *TE1, const TreeEntry *TE2) { 9505 return TE1->Idx < TE2->Idx; 9506 }); 9507 // Try to find the perfect match in another gather node at first. 9508 auto *It = find_if(FirstEntries, [=](const TreeEntry *EntryPtr) { 9509 return EntryPtr->isSame(VL) || EntryPtr->isSame(TE->Scalars); 9510 }); 9511 if (It != FirstEntries.end() && 9512 ((*It)->getVectorFactor() == VL.size() || 9513 ((*It)->getVectorFactor() == TE->Scalars.size() && 9514 TE->ReuseShuffleIndices.size() == VL.size() && 9515 (*It)->isSame(TE->Scalars)))) { 9516 Entries.push_back(*It); 9517 if ((*It)->getVectorFactor() == VL.size()) { 9518 std::iota(std::next(Mask.begin(), Part * VL.size()), 9519 std::next(Mask.begin(), (Part + 1) * VL.size()), 0); 9520 } else { 9521 SmallVector<int> CommonMask = TE->getCommonMask(); 9522 copy(CommonMask, Mask.begin()); 9523 } 9524 // Clear undef scalars. 9525 for (int I = 0, Sz = VL.size(); I < Sz; ++I) 9526 if (isa<PoisonValue>(VL[I])) 9527 Mask[I] = PoisonMaskElem; 9528 return TargetTransformInfo::SK_PermuteSingleSrc; 9529 } 9530 // No perfect match, just shuffle, so choose the first tree node from the 9531 // tree. 
9532 Entries.push_back(FirstEntries.front()); 9533 } else { 9534 // Try to find nodes with the same vector factor. 9535 assert(UsedTEs.size() == 2 && "Expected at max 2 permuted entries."); 9536 // Keep the order of tree nodes to avoid non-determinism. 9537 DenseMap<int, const TreeEntry *> VFToTE; 9538 for (const TreeEntry *TE : UsedTEs.front()) { 9539 unsigned VF = TE->getVectorFactor(); 9540 auto It = VFToTE.find(VF); 9541 if (It != VFToTE.end()) { 9542 if (It->second->Idx > TE->Idx) 9543 It->getSecond() = TE; 9544 continue; 9545 } 9546 VFToTE.try_emplace(VF, TE); 9547 } 9548 // Same, keep the order to avoid non-determinism. 9549 SmallVector<const TreeEntry *> SecondEntries(UsedTEs.back().begin(), 9550 UsedTEs.back().end()); 9551 sort(SecondEntries, [](const TreeEntry *TE1, const TreeEntry *TE2) { 9552 return TE1->Idx < TE2->Idx; 9553 }); 9554 for (const TreeEntry *TE : SecondEntries) { 9555 auto It = VFToTE.find(TE->getVectorFactor()); 9556 if (It != VFToTE.end()) { 9557 VF = It->first; 9558 Entries.push_back(It->second); 9559 Entries.push_back(TE); 9560 break; 9561 } 9562 } 9563 // No 2 source vectors with the same vector factor - just choose 2 with max 9564 // index. 9565 if (Entries.empty()) { 9566 Entries.push_back( 9567 *std::max_element(UsedTEs.front().begin(), UsedTEs.front().end(), 9568 [](const TreeEntry *TE1, const TreeEntry *TE2) { 9569 return TE1->Idx < TE2->Idx; 9570 })); 9571 Entries.push_back(SecondEntries.front()); 9572 VF = std::max(Entries.front()->getVectorFactor(), 9573 Entries.back()->getVectorFactor()); 9574 } 9575 } 9576 9577 bool IsSplatOrUndefs = isSplat(VL) || all_of(VL, UndefValue::classof); 9578 // Checks if the 2 PHIs are compatible in terms of high possibility to be 9579 // vectorized. 9580 auto AreCompatiblePHIs = [&](Value *V, Value *V1) { 9581 auto *PHI = cast<PHINode>(V); 9582 auto *PHI1 = cast<PHINode>(V1); 9583 // Check that all incoming values are compatible/from same parent (if they 9584 // are instructions). 9585 // The incoming values are compatible if they all are constants, or 9586 // instruction with the same/alternate opcodes from the same basic block. 9587 for (int I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) { 9588 Value *In = PHI->getIncomingValue(I); 9589 Value *In1 = PHI1->getIncomingValue(I); 9590 if (isConstant(In) && isConstant(In1)) 9591 continue; 9592 if (!getSameOpcode({In, In1}, *TLI).getOpcode()) 9593 return false; 9594 if (cast<Instruction>(In)->getParent() != 9595 cast<Instruction>(In1)->getParent()) 9596 return false; 9597 } 9598 return true; 9599 }; 9600 // Check if the value can be ignored during analysis for shuffled gathers. 9601 // We suppose it is better to ignore instruction, which do not form splats, 9602 // are not vectorized/not extractelements (these instructions will be handled 9603 // by extractelements processing) or may form vector node in future. 9604 auto MightBeIgnored = [=](Value *V) { 9605 auto *I = dyn_cast<Instruction>(V); 9606 return I && !IsSplatOrUndefs && !ScalarToTreeEntry.count(I) && 9607 !isVectorLikeInstWithConstOps(I) && 9608 !areAllUsersVectorized(I, UserIgnoreList) && isSimple(I); 9609 }; 9610 // Check that the neighbor instruction may form a full vector node with the 9611 // current instruction V. It is possible, if they have same/alternate opcode 9612 // and same parent basic block. 
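  // For example (illustrative), in a gathered list { %add0, %add1, %x, %y }
  // where %add0 and %add1 are adds from the same basic block that are not
  // part of any tree entry yet, the pair is better left out of this shuffle:
  // it may form its own vectorizable node later.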
9613 auto NeighborMightBeIgnored = [&](Value *V, int Idx) { 9614 Value *V1 = VL[Idx]; 9615 bool UsedInSameVTE = false; 9616 auto It = UsedValuesEntry.find(V1); 9617 if (It != UsedValuesEntry.end()) 9618 UsedInSameVTE = It->second == UsedValuesEntry.find(V)->second; 9619 return V != V1 && MightBeIgnored(V1) && !UsedInSameVTE && 9620 getSameOpcode({V, V1}, *TLI).getOpcode() && 9621 cast<Instruction>(V)->getParent() == 9622 cast<Instruction>(V1)->getParent() && 9623 (!isa<PHINode>(V1) || AreCompatiblePHIs(V, V1)); 9624 }; 9625 // Build a shuffle mask for better cost estimation and vector emission. 9626 SmallBitVector UsedIdxs(Entries.size()); 9627 SmallVector<std::pair<unsigned, int>> EntryLanes; 9628 for (int I = 0, E = VL.size(); I < E; ++I) { 9629 Value *V = VL[I]; 9630 auto It = UsedValuesEntry.find(V); 9631 if (It == UsedValuesEntry.end()) 9632 continue; 9633 // Do not try to shuffle scalars, if they are constants, or instructions 9634 // that can be vectorized as a result of the following vector build 9635 // vectorization. 9636 if (isConstant(V) || (MightBeIgnored(V) && 9637 ((I > 0 && NeighborMightBeIgnored(V, I - 1)) || 9638 (I != E - 1 && NeighborMightBeIgnored(V, I + 1))))) 9639 continue; 9640 unsigned Idx = It->second; 9641 EntryLanes.emplace_back(Idx, I); 9642 UsedIdxs.set(Idx); 9643 } 9644 // Iterate through all shuffled scalars and select entries, which can be used 9645 // for final shuffle. 9646 SmallVector<const TreeEntry *> TempEntries; 9647 for (unsigned I = 0, Sz = Entries.size(); I < Sz; ++I) { 9648 if (!UsedIdxs.test(I)) 9649 continue; 9650 // Fix the entry number for the given scalar. If it is the first entry, set 9651 // Pair.first to 0, otherwise to 1 (currently select at max 2 nodes). 9652 // These indices are used when calculating final shuffle mask as the vector 9653 // offset. 9654 for (std::pair<unsigned, int> &Pair : EntryLanes) 9655 if (Pair.first == I) 9656 Pair.first = TempEntries.size(); 9657 TempEntries.push_back(Entries[I]); 9658 } 9659 Entries.swap(TempEntries); 9660 if (EntryLanes.size() == Entries.size() && 9661 !VL.equals(ArrayRef(TE->Scalars) 9662 .slice(Part * VL.size(), 9663 std::min<int>(VL.size(), TE->Scalars.size())))) { 9664 // We may have here 1 or 2 entries only. If the number of scalars is equal 9665 // to the number of entries, no need to do the analysis, it is not very 9666 // profitable. Since VL is not the same as TE->Scalars, it means we already 9667 // have some shuffles before. Cut off not profitable case. 9668 Entries.clear(); 9669 return std::nullopt; 9670 } 9671 // Build the final mask, check for the identity shuffle, if possible. 9672 bool IsIdentity = Entries.size() == 1; 9673 // Pair.first is the offset to the vector, while Pair.second is the index of 9674 // scalar in the list. 9675 for (const std::pair<unsigned, int> &Pair : EntryLanes) { 9676 unsigned Idx = Part * VL.size() + Pair.second; 9677 Mask[Idx] = Pair.first * VF + 9678 Entries[Pair.first]->findLaneForValue(VL[Pair.second]); 9679 IsIdentity &= Mask[Idx] == Pair.second; 9680 } 9681 switch (Entries.size()) { 9682 case 1: 9683 if (IsIdentity || EntryLanes.size() > 1 || VL.size() <= 2) 9684 return TargetTransformInfo::SK_PermuteSingleSrc; 9685 break; 9686 case 2: 9687 if (EntryLanes.size() > 2 || VL.size() <= 2) 9688 return TargetTransformInfo::SK_PermuteTwoSrc; 9689 break; 9690 default: 9691 break; 9692 } 9693 Entries.clear(); 9694 // Clear the corresponding mask elements. 
9695 std::fill(std::next(Mask.begin(), Part * VL.size()), 9696 std::next(Mask.begin(), (Part + 1) * VL.size()), PoisonMaskElem); 9697 return std::nullopt; 9698 } 9699 9700 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> 9701 BoUpSLP::isGatherShuffledEntry( 9702 const TreeEntry *TE, ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask, 9703 SmallVectorImpl<SmallVector<const TreeEntry *>> &Entries, 9704 unsigned NumParts) { 9705 assert(NumParts > 0 && NumParts < VL.size() && 9706 "Expected positive number of registers."); 9707 Entries.clear(); 9708 // No need to check for the topmost gather node. 9709 if (TE == VectorizableTree.front().get()) 9710 return {}; 9711 Mask.assign(VL.size(), PoisonMaskElem); 9712 assert(TE->UserTreeIndices.size() == 1 && 9713 "Expected only single user of the gather node."); 9714 assert(VL.size() % NumParts == 0 && 9715 "Number of scalars must be divisible by NumParts."); 9716 unsigned SliceSize = VL.size() / NumParts; 9717 SmallVector<std::optional<TTI::ShuffleKind>> Res; 9718 for (unsigned Part = 0; Part < NumParts; ++Part) { 9719 ArrayRef<Value *> SubVL = VL.slice(Part * SliceSize, SliceSize); 9720 SmallVectorImpl<const TreeEntry *> &SubEntries = Entries.emplace_back(); 9721 std::optional<TTI::ShuffleKind> SubRes = 9722 isGatherShuffledSingleRegisterEntry(TE, SubVL, Mask, SubEntries, Part); 9723 if (!SubRes) 9724 SubEntries.clear(); 9725 Res.push_back(SubRes); 9726 if (SubEntries.size() == 1 && *SubRes == TTI::SK_PermuteSingleSrc && 9727 SubEntries.front()->getVectorFactor() == VL.size() && 9728 (SubEntries.front()->isSame(TE->Scalars) || 9729 SubEntries.front()->isSame(VL))) { 9730 SmallVector<const TreeEntry *> LocalSubEntries; 9731 LocalSubEntries.swap(SubEntries); 9732 Entries.clear(); 9733 Res.clear(); 9734 std::iota(Mask.begin(), Mask.end(), 0); 9735 // Clear undef scalars. 9736 for (int I = 0, Sz = VL.size(); I < Sz; ++I) 9737 if (isa<PoisonValue>(VL[I])) 9738 Mask[I] = PoisonMaskElem; 9739 Entries.emplace_back(1, LocalSubEntries.front()); 9740 Res.push_back(TargetTransformInfo::SK_PermuteSingleSrc); 9741 return Res; 9742 } 9743 } 9744 if (all_of(Res, 9745 [](const std::optional<TTI::ShuffleKind> &SK) { return !SK; })) { 9746 Entries.clear(); 9747 return {}; 9748 } 9749 return Res; 9750 } 9751 9752 InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL, 9753 bool ForPoisonSrc) const { 9754 // Find the type of the operands in VL. 9755 Type *ScalarTy = VL[0]->getType(); 9756 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 9757 ScalarTy = SI->getValueOperand()->getType(); 9758 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 9759 bool DuplicateNonConst = false; 9760 // Find the cost of inserting/extracting values from the vector. 9761 // Check if the same elements are inserted several times and count them as 9762 // shuffle candidates. 9763 APInt ShuffledElements = APInt::getZero(VL.size()); 9764 DenseSet<Value *> UniqueElements; 9765 constexpr TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 9766 InstructionCost Cost; 9767 auto EstimateInsertCost = [&](unsigned I, Value *V) { 9768 if (!ForPoisonSrc) 9769 Cost += 9770 TTI->getVectorInstrCost(Instruction::InsertElement, VecTy, CostKind, 9771 I, Constant::getNullValue(VecTy), V); 9772 }; 9773 for (unsigned I = 0, E = VL.size(); I < E; ++I) { 9774 Value *V = VL[I]; 9775 // No need to shuffle duplicates for constants. 
9776 if ((ForPoisonSrc && isConstant(V)) || isa<UndefValue>(V)) { 9777 ShuffledElements.setBit(I); 9778 continue; 9779 } 9780 if (!UniqueElements.insert(V).second) { 9781 DuplicateNonConst = true; 9782 ShuffledElements.setBit(I); 9783 continue; 9784 } 9785 EstimateInsertCost(I, V); 9786 } 9787 if (ForPoisonSrc) 9788 Cost = 9789 TTI->getScalarizationOverhead(VecTy, ~ShuffledElements, /*Insert*/ true, 9790 /*Extract*/ false, CostKind); 9791 if (DuplicateNonConst) 9792 Cost += 9793 TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy); 9794 return Cost; 9795 } 9796 9797 // Perform operand reordering on the instructions in VL and return the reordered 9798 // operands in Left and Right. 9799 void BoUpSLP::reorderInputsAccordingToOpcode( 9800 ArrayRef<Value *> VL, SmallVectorImpl<Value *> &Left, 9801 SmallVectorImpl<Value *> &Right, const TargetLibraryInfo &TLI, 9802 const DataLayout &DL, ScalarEvolution &SE, const BoUpSLP &R) { 9803 if (VL.empty()) 9804 return; 9805 VLOperands Ops(VL, TLI, DL, SE, R); 9806 // Reorder the operands in place. 9807 Ops.reorder(); 9808 Left = Ops.getVL(0); 9809 Right = Ops.getVL(1); 9810 } 9811 9812 Instruction &BoUpSLP::getLastInstructionInBundle(const TreeEntry *E) { 9813 auto &Res = EntryToLastInstruction.FindAndConstruct(E); 9814 if (Res.second) 9815 return *Res.second; 9816 // Get the basic block this bundle is in. All instructions in the bundle 9817 // should be in this block (except for extractelement-like instructions with 9818 // constant indeces). 9819 auto *Front = E->getMainOp(); 9820 auto *BB = Front->getParent(); 9821 assert(llvm::all_of(E->Scalars, [=](Value *V) -> bool { 9822 if (E->getOpcode() == Instruction::GetElementPtr && 9823 !isa<GetElementPtrInst>(V)) 9824 return true; 9825 auto *I = cast<Instruction>(V); 9826 return !E->isOpcodeOrAlt(I) || I->getParent() == BB || 9827 isVectorLikeInstWithConstOps(I); 9828 })); 9829 9830 auto FindLastInst = [&]() { 9831 Instruction *LastInst = Front; 9832 for (Value *V : E->Scalars) { 9833 auto *I = dyn_cast<Instruction>(V); 9834 if (!I) 9835 continue; 9836 if (LastInst->getParent() == I->getParent()) { 9837 if (LastInst->comesBefore(I)) 9838 LastInst = I; 9839 continue; 9840 } 9841 assert(((E->getOpcode() == Instruction::GetElementPtr && 9842 !isa<GetElementPtrInst>(I)) || 9843 (isVectorLikeInstWithConstOps(LastInst) && 9844 isVectorLikeInstWithConstOps(I))) && 9845 "Expected vector-like or non-GEP in GEP node insts only."); 9846 if (!DT->isReachableFromEntry(LastInst->getParent())) { 9847 LastInst = I; 9848 continue; 9849 } 9850 if (!DT->isReachableFromEntry(I->getParent())) 9851 continue; 9852 auto *NodeA = DT->getNode(LastInst->getParent()); 9853 auto *NodeB = DT->getNode(I->getParent()); 9854 assert(NodeA && "Should only process reachable instructions"); 9855 assert(NodeB && "Should only process reachable instructions"); 9856 assert((NodeA == NodeB) == 9857 (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) && 9858 "Different nodes should have different DFS numbers"); 9859 if (NodeA->getDFSNumIn() < NodeB->getDFSNumIn()) 9860 LastInst = I; 9861 } 9862 BB = LastInst->getParent(); 9863 return LastInst; 9864 }; 9865 9866 auto FindFirstInst = [&]() { 9867 Instruction *FirstInst = Front; 9868 for (Value *V : E->Scalars) { 9869 auto *I = dyn_cast<Instruction>(V); 9870 if (!I) 9871 continue; 9872 if (FirstInst->getParent() == I->getParent()) { 9873 if (I->comesBefore(FirstInst)) 9874 FirstInst = I; 9875 continue; 9876 } 9877 assert(((E->getOpcode() == Instruction::GetElementPtr && 9878 
!isa<GetElementPtrInst>(I)) || 9879 (isVectorLikeInstWithConstOps(FirstInst) && 9880 isVectorLikeInstWithConstOps(I))) && 9881 "Expected vector-like or non-GEP in GEP node insts only."); 9882 if (!DT->isReachableFromEntry(FirstInst->getParent())) { 9883 FirstInst = I; 9884 continue; 9885 } 9886 if (!DT->isReachableFromEntry(I->getParent())) 9887 continue; 9888 auto *NodeA = DT->getNode(FirstInst->getParent()); 9889 auto *NodeB = DT->getNode(I->getParent()); 9890 assert(NodeA && "Should only process reachable instructions"); 9891 assert(NodeB && "Should only process reachable instructions"); 9892 assert((NodeA == NodeB) == 9893 (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) && 9894 "Different nodes should have different DFS numbers"); 9895 if (NodeA->getDFSNumIn() > NodeB->getDFSNumIn()) 9896 FirstInst = I; 9897 } 9898 return FirstInst; 9899 }; 9900 9901 // Set the insert point to the beginning of the basic block if the entry 9902 // should not be scheduled. 9903 if (doesNotNeedToSchedule(E->Scalars) || 9904 (E->State != TreeEntry::NeedToGather && 9905 all_of(E->Scalars, isVectorLikeInstWithConstOps))) { 9906 if ((E->getOpcode() == Instruction::GetElementPtr && 9907 any_of(E->Scalars, 9908 [](Value *V) { 9909 return !isa<GetElementPtrInst>(V) && isa<Instruction>(V); 9910 })) || 9911 all_of(E->Scalars, [](Value *V) { 9912 return !isVectorLikeInstWithConstOps(V) && isUsedOutsideBlock(V); 9913 })) 9914 Res.second = FindLastInst(); 9915 else 9916 Res.second = FindFirstInst(); 9917 return *Res.second; 9918 } 9919 9920 // Find the last instruction. The common case should be that BB has been 9921 // scheduled, and the last instruction is VL.back(). So we start with 9922 // VL.back() and iterate over schedule data until we reach the end of the 9923 // bundle. The end of the bundle is marked by null ScheduleData. 9924 if (BlocksSchedules.count(BB)) { 9925 Value *V = E->isOneOf(E->Scalars.back()); 9926 if (doesNotNeedToBeScheduled(V)) 9927 V = *find_if_not(E->Scalars, doesNotNeedToBeScheduled); 9928 auto *Bundle = BlocksSchedules[BB]->getScheduleData(V); 9929 if (Bundle && Bundle->isPartOfBundle()) 9930 for (; Bundle; Bundle = Bundle->NextInBundle) 9931 if (Bundle->OpValue == Bundle->Inst) 9932 Res.second = Bundle->Inst; 9933 } 9934 9935 // LastInst can still be null at this point if there's either not an entry 9936 // for BB in BlocksSchedules or there's no ScheduleData available for 9937 // VL.back(). This can be the case if buildTree_rec aborts for various 9938 // reasons (e.g., the maximum recursion depth is reached, the maximum region 9939 // size is reached, etc.). ScheduleData is initialized in the scheduling 9940 // "dry-run". 9941 // 9942 // If this happens, we can still find the last instruction by brute force. We 9943 // iterate forwards from Front (inclusive) until we either see all 9944 // instructions in the bundle or reach the end of the block. If Front is the 9945 // last instruction in program order, LastInst will be set to Front, and we 9946 // will visit all the remaining instructions in the block. 9947 // 9948 // One of the reasons we exit early from buildTree_rec is to place an upper 9949 // bound on compile-time. Thus, taking an additional compile-time hit here is 9950 // not ideal. However, this should be exceedingly rare since it requires that 9951 // we both exit early from buildTree_rec and that the bundle be out-of-order 9952 // (causing us to iterate all the way to the end of the block). 
9953 if (!Res.second) 9954 Res.second = FindLastInst(); 9955 assert(Res.second && "Failed to find last instruction in bundle"); 9956 return *Res.second; 9957 } 9958 9959 void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) { 9960 auto *Front = E->getMainOp(); 9961 Instruction *LastInst = &getLastInstructionInBundle(E); 9962 assert(LastInst && "Failed to find last instruction in bundle"); 9963 BasicBlock::iterator LastInstIt = LastInst->getIterator(); 9964 // If the instruction is PHI, set the insert point after all the PHIs. 9965 bool IsPHI = isa<PHINode>(LastInst); 9966 if (IsPHI) 9967 LastInstIt = LastInst->getParent()->getFirstNonPHIIt(); 9968 if (IsPHI || (E->State != TreeEntry::NeedToGather && 9969 doesNotNeedToSchedule(E->Scalars))) { 9970 Builder.SetInsertPoint(LastInst->getParent(), LastInstIt); 9971 } else { 9972 // Set the insertion point after the last instruction in the bundle. Set the 9973 // debug location to Front. 9974 Builder.SetInsertPoint( 9975 LastInst->getParent(), 9976 LastInst->getNextNonDebugInstruction()->getIterator()); 9977 } 9978 Builder.SetCurrentDebugLocation(Front->getDebugLoc()); 9979 } 9980 9981 Value *BoUpSLP::gather(ArrayRef<Value *> VL, Value *Root) { 9982 // List of instructions/lanes from current block and/or the blocks which are 9983 // part of the current loop. These instructions will be inserted at the end to 9984 // make it possible to optimize loops and hoist invariant instructions out of 9985 // the loops body with better chances for success. 9986 SmallVector<std::pair<Value *, unsigned>, 4> PostponedInsts; 9987 SmallSet<int, 4> PostponedIndices; 9988 Loop *L = LI->getLoopFor(Builder.GetInsertBlock()); 9989 auto &&CheckPredecessor = [](BasicBlock *InstBB, BasicBlock *InsertBB) { 9990 SmallPtrSet<BasicBlock *, 4> Visited; 9991 while (InsertBB && InsertBB != InstBB && Visited.insert(InsertBB).second) 9992 InsertBB = InsertBB->getSinglePredecessor(); 9993 return InsertBB && InsertBB == InstBB; 9994 }; 9995 for (int I = 0, E = VL.size(); I < E; ++I) { 9996 if (auto *Inst = dyn_cast<Instruction>(VL[I])) 9997 if ((CheckPredecessor(Inst->getParent(), Builder.GetInsertBlock()) || 9998 getTreeEntry(Inst) || 9999 (L && (!Root || L->isLoopInvariant(Root)) && L->contains(Inst))) && 10000 PostponedIndices.insert(I).second) 10001 PostponedInsts.emplace_back(Inst, I); 10002 } 10003 10004 auto &&CreateInsertElement = [this](Value *Vec, Value *V, unsigned Pos) { 10005 Vec = Builder.CreateInsertElement(Vec, V, Builder.getInt32(Pos)); 10006 auto *InsElt = dyn_cast<InsertElementInst>(Vec); 10007 if (!InsElt) 10008 return Vec; 10009 GatherShuffleExtractSeq.insert(InsElt); 10010 CSEBlocks.insert(InsElt->getParent()); 10011 // Add to our 'need-to-extract' list. 10012 if (isa<Instruction>(V)) { 10013 if (TreeEntry *Entry = getTreeEntry(V)) { 10014 // Find which lane we need to extract. 10015 unsigned FoundLane = Entry->findLaneForValue(V); 10016 ExternalUses.emplace_back(V, InsElt, FoundLane); 10017 } 10018 } 10019 return Vec; 10020 }; 10021 Value *Val0 = 10022 isa<StoreInst>(VL[0]) ? cast<StoreInst>(VL[0])->getValueOperand() : VL[0]; 10023 FixedVectorType *VecTy = FixedVectorType::get(Val0->getType(), VL.size()); 10024 Value *Vec = Root ? Root : PoisonValue::get(VecTy); 10025 SmallVector<int> NonConsts; 10026 // Insert constant values at first. 
  for (int I = 0, E = VL.size(); I < E; ++I) {
    if (PostponedIndices.contains(I))
      continue;
    if (!isConstant(VL[I])) {
      NonConsts.push_back(I);
      continue;
    }
    if (Root) {
      if (!isa<UndefValue>(VL[I])) {
        NonConsts.push_back(I);
        continue;
      }
      if (isa<PoisonValue>(VL[I]))
        continue;
      if (auto *SV = dyn_cast<ShuffleVectorInst>(Root)) {
        if (SV->getMaskValue(I) == PoisonMaskElem)
          continue;
      }
    }
    Vec = CreateInsertElement(Vec, VL[I], I);
  }
  // Insert non-constant values.
  for (int I : NonConsts)
    Vec = CreateInsertElement(Vec, VL[I], I);
  // Append instructions, which are/may be part of the loop, at the end to make
  // it possible to hoist non-loop-based instructions.
  for (const std::pair<Value *, unsigned> &Pair : PostponedInsts)
    Vec = CreateInsertElement(Vec, Pair.first, Pair.second);

  return Vec;
}

/// Merges shuffle masks and emits the final shuffle instruction, if required.
/// It supports shuffling of 2 input vectors. It implements lazy shuffle
/// emission: the actual shuffle instruction is generated only if it is really
/// required. Otherwise, the shuffle instruction emission is delayed till the
/// end of the process, to reduce the number of emitted instructions and
/// further analysis/transformations.
/// The class will also look through the previously emitted shuffle
/// instructions and properly mark indices in the mask as undef.
/// For example, given the code
/// \code
/// %s1 = shufflevector <2 x ty> %0, poison, <1, 0>
/// %s2 = shufflevector <2 x ty> %1, poison, <1, 0>
/// \endcode
/// and if we need to emit a shuffle of %s1 and %s2 with mask <1, 0, 3, 2>, it
/// will look through %s1 and %s2 and emit
/// \code
/// %res = shufflevector <2 x ty> %0, %1, <0, 1, 2, 3>
/// \endcode
/// instead.
/// If the 2 operands are of different sizes, the smaller one will be resized
/// and the mask recalculated properly.
/// For example, given the code
/// \code
/// %s1 = shufflevector <2 x ty> %0, poison, <1, 0, 1, 0>
/// %s2 = shufflevector <2 x ty> %1, poison, <1, 0, 1, 0>
/// \endcode
/// and if we need to emit a shuffle of %s1 and %s2 with mask <1, 0, 5, 4>, it
/// will look through %s1 and %s2 and emit
/// \code
/// %res = shufflevector <2 x ty> %0, %1, <0, 1, 2, 3>
/// \endcode
/// instead.
class BoUpSLP::ShuffleInstructionBuilder final : public BaseShuffleAnalysis {
  bool IsFinalized = false;
  /// Combined mask for all applied operands and masks. It is built during
  /// analysis and actual emission of shuffle vector instructions.
  SmallVector<int> CommonMask;
  /// List of operands for the shuffle vector instruction. It holds at most 2
  /// operands. If the 3rd one is going to be added, the first 2 are combined
  /// into a shuffle with the \p CommonMask mask; the first operand is then set
  /// to be the resulting shuffle and the second operand is set to be the newly
  /// added operand. The \p CommonMask is transformed in the proper way after
  /// that.
  SmallVector<Value *, 2> InVectors;
  IRBuilderBase &Builder;
  BoUpSLP &R;

  class ShuffleIRBuilder {
    IRBuilderBase &Builder;
    /// Holds all of the instructions that we gathered.
    SetVector<Instruction *> &GatherShuffleExtractSeq;
    /// A list of blocks that we are going to CSE.
    DenseSet<BasicBlock *> &CSEBlocks;

  public:
    ShuffleIRBuilder(IRBuilderBase &Builder,
                     SetVector<Instruction *> &GatherShuffleExtractSeq,
                     DenseSet<BasicBlock *> &CSEBlocks)
        : Builder(Builder), GatherShuffleExtractSeq(GatherShuffleExtractSeq),
          CSEBlocks(CSEBlocks) {}
    ~ShuffleIRBuilder() = default;
    /// Creates a shufflevector for the 2 operands with the given mask.
    Value *createShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask) {
      Value *Vec = Builder.CreateShuffleVector(V1, V2, Mask);
      if (auto *I = dyn_cast<Instruction>(Vec)) {
        GatherShuffleExtractSeq.insert(I);
        CSEBlocks.insert(I->getParent());
      }
      return Vec;
    }
    /// Creates a permutation of the single vector operand with the given mask,
    /// if it is not the identity mask.
    Value *createShuffleVector(Value *V1, ArrayRef<int> Mask) {
      if (Mask.empty())
        return V1;
      unsigned VF = Mask.size();
      unsigned LocalVF = cast<FixedVectorType>(V1->getType())->getNumElements();
      if (VF == LocalVF && ShuffleVectorInst::isIdentityMask(Mask, VF))
        return V1;
      Value *Vec = Builder.CreateShuffleVector(V1, Mask);
      if (auto *I = dyn_cast<Instruction>(Vec)) {
        GatherShuffleExtractSeq.insert(I);
        CSEBlocks.insert(I->getParent());
      }
      return Vec;
    }
    Value *createIdentity(Value *V) { return V; }
    Value *createPoison(Type *Ty, unsigned VF) {
      return PoisonValue::get(FixedVectorType::get(Ty, VF));
    }
    /// Resizes the 2 input vectors to match their sizes, if they are not equal
    /// yet. The smaller vector is resized to the size of the larger vector.
    void resizeToMatch(Value *&V1, Value *&V2) {
      if (V1->getType() == V2->getType())
        return;
      int V1VF = cast<FixedVectorType>(V1->getType())->getNumElements();
      int V2VF = cast<FixedVectorType>(V2->getType())->getNumElements();
      int VF = std::max(V1VF, V2VF);
      int MinVF = std::min(V1VF, V2VF);
      SmallVector<int> IdentityMask(VF, PoisonMaskElem);
      std::iota(IdentityMask.begin(), std::next(IdentityMask.begin(), MinVF),
                0);
      Value *&Op = MinVF == V1VF ? V1 : V2;
      Op = Builder.CreateShuffleVector(Op, IdentityMask);
      if (auto *I = dyn_cast<Instruction>(Op)) {
        GatherShuffleExtractSeq.insert(I);
        CSEBlocks.insert(I->getParent());
      }
      if (MinVF == V1VF)
        V1 = Op;
      else
        V2 = Op;
    }
  };

  /// Smart shuffle instruction emission: walks through shuffle trees and tries
  /// to find the best matching vector for the actual shuffle instruction.
  Value *createShuffle(Value *V1, Value *V2, ArrayRef<int> Mask) {
    assert(V1 && "Expected at least one vector value.");
    ShuffleIRBuilder ShuffleBuilder(Builder, R.GatherShuffleExtractSeq,
                                    R.CSEBlocks);
    return BaseShuffleAnalysis::createShuffle<Value *>(V1, V2, Mask,
                                                       ShuffleBuilder);
  }

  /// Transforms mask \p CommonMask per the given \p Mask to make it a proper
  /// mask after the shuffle emission.
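  /// For example (illustrative values): if \p Mask is <2, poison, 0, poison>,
  /// lanes 0 and 2 have just been produced by the emitted shuffle, so they are
  /// rewritten to refer to the new vector directly:
  /// \code
  ///   CommonMask: <a, b, c, d>  ->  <0, b, 2, d>
  /// \endcode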
10186 static void transformMaskAfterShuffle(MutableArrayRef<int> CommonMask, 10187 ArrayRef<int> Mask) { 10188 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10189 if (Mask[Idx] != PoisonMaskElem) 10190 CommonMask[Idx] = Idx; 10191 } 10192 10193 public: 10194 ShuffleInstructionBuilder(IRBuilderBase &Builder, BoUpSLP &R) 10195 : Builder(Builder), R(R) {} 10196 10197 /// Adjusts extractelements after reusing them. 10198 Value *adjustExtracts(const TreeEntry *E, MutableArrayRef<int> Mask, 10199 ArrayRef<std::optional<TTI::ShuffleKind>> ShuffleKinds, 10200 unsigned NumParts, bool &UseVecBaseAsInput) { 10201 UseVecBaseAsInput = false; 10202 SmallPtrSet<Value *, 4> UniqueBases; 10203 Value *VecBase = nullptr; 10204 for (int I = 0, Sz = Mask.size(); I < Sz; ++I) { 10205 int Idx = Mask[I]; 10206 if (Idx == PoisonMaskElem) 10207 continue; 10208 auto *EI = cast<ExtractElementInst>(E->Scalars[I]); 10209 VecBase = EI->getVectorOperand(); 10210 if (const TreeEntry *TE = R.getTreeEntry(VecBase)) 10211 VecBase = TE->VectorizedValue; 10212 assert(VecBase && "Expected vectorized value."); 10213 UniqueBases.insert(VecBase); 10214 // If the only one use is vectorized - can delete the extractelement 10215 // itself. 10216 if (!EI->hasOneUse() || any_of(EI->users(), [&](User *U) { 10217 return !R.ScalarToTreeEntry.count(U); 10218 })) 10219 continue; 10220 R.eraseInstruction(EI); 10221 } 10222 if (NumParts == 1 || UniqueBases.size() == 1) 10223 return VecBase; 10224 UseVecBaseAsInput = true; 10225 auto TransformToIdentity = [](MutableArrayRef<int> Mask) { 10226 for (auto [I, Idx] : enumerate(Mask)) 10227 if (Idx != PoisonMaskElem) 10228 Idx = I; 10229 }; 10230 // Perform multi-register vector shuffle, joining them into a single virtual 10231 // long vector. 10232 // Need to shuffle each part independently and then insert all this parts 10233 // into a long virtual vector register, forming the original vector. 10234 Value *Vec = nullptr; 10235 SmallVector<int> VecMask(Mask.size(), PoisonMaskElem); 10236 unsigned SliceSize = E->Scalars.size() / NumParts; 10237 for (unsigned Part = 0; Part < NumParts; ++Part) { 10238 ArrayRef<Value *> VL = 10239 ArrayRef(E->Scalars).slice(Part * SliceSize, SliceSize); 10240 MutableArrayRef<int> SubMask = Mask.slice(Part * SliceSize, SliceSize); 10241 constexpr int MaxBases = 2; 10242 SmallVector<Value *, MaxBases> Bases(MaxBases); 10243 #ifndef NDEBUG 10244 int PrevSize = 0; 10245 #endif // NDEBUG 10246 for (const auto [I, V]: enumerate(VL)) { 10247 if (SubMask[I] == PoisonMaskElem) 10248 continue; 10249 Value *VecOp = cast<ExtractElementInst>(V)->getVectorOperand(); 10250 if (const TreeEntry *TE = R.getTreeEntry(VecOp)) 10251 VecOp = TE->VectorizedValue; 10252 assert(VecOp && "Expected vectorized value."); 10253 const int Size = 10254 cast<FixedVectorType>(VecOp->getType())->getNumElements(); 10255 #ifndef NDEBUG 10256 assert((PrevSize == Size || PrevSize == 0) && 10257 "Expected vectors of the same size."); 10258 PrevSize = Size; 10259 #endif // NDEBUG 10260 Bases[SubMask[I] < Size ? 
0 : 1] = VecOp; 10261 } 10262 if (!Bases.front()) 10263 continue; 10264 Value *SubVec; 10265 if (Bases.back()) { 10266 SubVec = createShuffle(Bases.front(), Bases.back(), SubMask); 10267 TransformToIdentity(SubMask); 10268 } else { 10269 SubVec = Bases.front(); 10270 } 10271 if (!Vec) { 10272 Vec = SubVec; 10273 assert((Part == 0 || all_of(seq<unsigned>(0, Part), 10274 [&](unsigned P) { 10275 ArrayRef<int> SubMask = 10276 Mask.slice(P * SliceSize, SliceSize); 10277 return all_of(SubMask, [](int Idx) { 10278 return Idx == PoisonMaskElem; 10279 }); 10280 })) && 10281 "Expected first part or all previous parts masked."); 10282 copy(SubMask, std::next(VecMask.begin(), Part * SliceSize)); 10283 } else { 10284 unsigned VF = cast<FixedVectorType>(Vec->getType())->getNumElements(); 10285 if (Vec->getType() != SubVec->getType()) { 10286 unsigned SubVecVF = 10287 cast<FixedVectorType>(SubVec->getType())->getNumElements(); 10288 VF = std::max(VF, SubVecVF); 10289 } 10290 // Adjust SubMask. 10291 for (auto [I, Idx] : enumerate(SubMask)) 10292 if (Idx != PoisonMaskElem) 10293 Idx += VF; 10294 copy(SubMask, std::next(VecMask.begin(), Part * SliceSize)); 10295 Vec = createShuffle(Vec, SubVec, VecMask); 10296 TransformToIdentity(VecMask); 10297 } 10298 } 10299 copy(VecMask, Mask.begin()); 10300 return Vec; 10301 } 10302 /// Checks if the specified entry \p E needs to be delayed because of its 10303 /// dependency nodes. 10304 std::optional<Value *> 10305 needToDelay(const TreeEntry *E, 10306 ArrayRef<SmallVector<const TreeEntry *>> Deps) const { 10307 // No need to delay emission if all deps are ready. 10308 if (all_of(Deps, [](ArrayRef<const TreeEntry *> TEs) { 10309 return all_of( 10310 TEs, [](const TreeEntry *TE) { return TE->VectorizedValue; }); 10311 })) 10312 return std::nullopt; 10313 // Postpone gather emission, will be emitted after the end of the 10314 // process to keep correct order. 10315 auto *VecTy = FixedVectorType::get(E->Scalars.front()->getType(), 10316 E->getVectorFactor()); 10317 return Builder.CreateAlignedLoad( 10318 VecTy, PoisonValue::get(PointerType::getUnqual(VecTy->getContext())), 10319 MaybeAlign()); 10320 } 10321 /// Adds 2 input vectors (in form of tree entries) and the mask for their 10322 /// shuffling. 10323 void add(const TreeEntry &E1, const TreeEntry &E2, ArrayRef<int> Mask) { 10324 add(E1.VectorizedValue, E2.VectorizedValue, Mask); 10325 } 10326 /// Adds single input vector (in form of tree entry) and the mask for its 10327 /// shuffling. 10328 void add(const TreeEntry &E1, ArrayRef<int> Mask) { 10329 add(E1.VectorizedValue, Mask); 10330 } 10331 /// Adds 2 input vectors and the mask for their shuffling. 
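  /// If the builder already holds input vectors, they are first combined into
  /// a single vector via \p CommonMask (when needed), the new pair is shuffled
  /// with \p Mask, and \p CommonMask is updated so that the lanes selected by
  /// \p Mask point into the newly added operand.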
10332 void add(Value *V1, Value *V2, ArrayRef<int> Mask) { 10333 assert(V1 && V2 && !Mask.empty() && "Expected non-empty input vectors."); 10334 if (InVectors.empty()) { 10335 InVectors.push_back(V1); 10336 InVectors.push_back(V2); 10337 CommonMask.assign(Mask.begin(), Mask.end()); 10338 return; 10339 } 10340 Value *Vec = InVectors.front(); 10341 if (InVectors.size() == 2) { 10342 Vec = createShuffle(Vec, InVectors.back(), CommonMask); 10343 transformMaskAfterShuffle(CommonMask, CommonMask); 10344 } else if (cast<FixedVectorType>(Vec->getType())->getNumElements() != 10345 Mask.size()) { 10346 Vec = createShuffle(Vec, nullptr, CommonMask); 10347 transformMaskAfterShuffle(CommonMask, CommonMask); 10348 } 10349 V1 = createShuffle(V1, V2, Mask); 10350 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10351 if (Mask[Idx] != PoisonMaskElem) 10352 CommonMask[Idx] = Idx + Sz; 10353 InVectors.front() = Vec; 10354 if (InVectors.size() == 2) 10355 InVectors.back() = V1; 10356 else 10357 InVectors.push_back(V1); 10358 } 10359 /// Adds another one input vector and the mask for the shuffling. 10360 void add(Value *V1, ArrayRef<int> Mask, bool = false) { 10361 if (InVectors.empty()) { 10362 if (!isa<FixedVectorType>(V1->getType())) { 10363 V1 = createShuffle(V1, nullptr, CommonMask); 10364 CommonMask.assign(Mask.size(), PoisonMaskElem); 10365 transformMaskAfterShuffle(CommonMask, Mask); 10366 } 10367 InVectors.push_back(V1); 10368 CommonMask.assign(Mask.begin(), Mask.end()); 10369 return; 10370 } 10371 const auto *It = find(InVectors, V1); 10372 if (It == InVectors.end()) { 10373 if (InVectors.size() == 2 || 10374 InVectors.front()->getType() != V1->getType() || 10375 !isa<FixedVectorType>(V1->getType())) { 10376 Value *V = InVectors.front(); 10377 if (InVectors.size() == 2) { 10378 V = createShuffle(InVectors.front(), InVectors.back(), CommonMask); 10379 transformMaskAfterShuffle(CommonMask, CommonMask); 10380 } else if (cast<FixedVectorType>(V->getType())->getNumElements() != 10381 CommonMask.size()) { 10382 V = createShuffle(InVectors.front(), nullptr, CommonMask); 10383 transformMaskAfterShuffle(CommonMask, CommonMask); 10384 } 10385 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10386 if (CommonMask[Idx] == PoisonMaskElem && Mask[Idx] != PoisonMaskElem) 10387 CommonMask[Idx] = 10388 V->getType() != V1->getType() 10389 ? Idx + Sz 10390 : Mask[Idx] + cast<FixedVectorType>(V1->getType()) 10391 ->getNumElements(); 10392 if (V->getType() != V1->getType()) 10393 V1 = createShuffle(V1, nullptr, Mask); 10394 InVectors.front() = V; 10395 if (InVectors.size() == 2) 10396 InVectors.back() = V1; 10397 else 10398 InVectors.push_back(V1); 10399 return; 10400 } 10401 // Check if second vector is required if the used elements are already 10402 // used from the first one. 10403 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10404 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem) { 10405 InVectors.push_back(V1); 10406 break; 10407 } 10408 } 10409 int VF = CommonMask.size(); 10410 if (auto *FTy = dyn_cast<FixedVectorType>(V1->getType())) 10411 VF = FTy->getNumElements(); 10412 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10413 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem) 10414 CommonMask[Idx] = Mask[Idx] + (It == InVectors.begin() ? 0 : VF); 10415 } 10416 /// Adds another one input vector and the mask for the shuffling. 
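  /// The \p Order is converted into a shuffle mask via inversePermutation
  /// before being added, e.g. (illustrative) the order <2, 0, 1> becomes the
  /// mask <1, 2, 0>.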
10417 void addOrdered(Value *V1, ArrayRef<unsigned> Order) { 10418 SmallVector<int> NewMask; 10419 inversePermutation(Order, NewMask); 10420 add(V1, NewMask); 10421 } 10422 Value *gather(ArrayRef<Value *> VL, unsigned MaskVF = 0, 10423 Value *Root = nullptr) { 10424 return R.gather(VL, Root); 10425 } 10426 Value *createFreeze(Value *V) { return Builder.CreateFreeze(V); } 10427 /// Finalize emission of the shuffles. 10428 /// \param Action the action (if any) to be performed before final applying of 10429 /// the \p ExtMask mask. 10430 Value * 10431 finalize(ArrayRef<int> ExtMask, unsigned VF = 0, 10432 function_ref<void(Value *&, SmallVectorImpl<int> &)> Action = {}) { 10433 IsFinalized = true; 10434 if (Action) { 10435 Value *Vec = InVectors.front(); 10436 if (InVectors.size() == 2) { 10437 Vec = createShuffle(Vec, InVectors.back(), CommonMask); 10438 InVectors.pop_back(); 10439 } else { 10440 Vec = createShuffle(Vec, nullptr, CommonMask); 10441 } 10442 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10443 if (CommonMask[Idx] != PoisonMaskElem) 10444 CommonMask[Idx] = Idx; 10445 assert(VF > 0 && 10446 "Expected vector length for the final value before action."); 10447 unsigned VecVF = cast<FixedVectorType>(Vec->getType())->getNumElements(); 10448 if (VecVF < VF) { 10449 SmallVector<int> ResizeMask(VF, PoisonMaskElem); 10450 std::iota(ResizeMask.begin(), std::next(ResizeMask.begin(), VecVF), 0); 10451 Vec = createShuffle(Vec, nullptr, ResizeMask); 10452 } 10453 Action(Vec, CommonMask); 10454 InVectors.front() = Vec; 10455 } 10456 if (!ExtMask.empty()) { 10457 if (CommonMask.empty()) { 10458 CommonMask.assign(ExtMask.begin(), ExtMask.end()); 10459 } else { 10460 SmallVector<int> NewMask(ExtMask.size(), PoisonMaskElem); 10461 for (int I = 0, Sz = ExtMask.size(); I < Sz; ++I) { 10462 if (ExtMask[I] == PoisonMaskElem) 10463 continue; 10464 NewMask[I] = CommonMask[ExtMask[I]]; 10465 } 10466 CommonMask.swap(NewMask); 10467 } 10468 } 10469 if (CommonMask.empty()) { 10470 assert(InVectors.size() == 1 && "Expected only one vector with no mask"); 10471 return InVectors.front(); 10472 } 10473 if (InVectors.size() == 2) 10474 return createShuffle(InVectors.front(), InVectors.back(), CommonMask); 10475 return createShuffle(InVectors.front(), nullptr, CommonMask); 10476 } 10477 10478 ~ShuffleInstructionBuilder() { 10479 assert((IsFinalized || CommonMask.empty()) && 10480 "Shuffle construction must be finalized."); 10481 } 10482 }; 10483 10484 Value *BoUpSLP::vectorizeOperand(TreeEntry *E, unsigned NodeIdx, 10485 bool PostponedPHIs) { 10486 ValueList &VL = E->getOperand(NodeIdx); 10487 if (E->State == TreeEntry::PossibleStridedVectorize && 10488 !E->ReorderIndices.empty()) { 10489 SmallVector<int> Mask(E->ReorderIndices.begin(), E->ReorderIndices.end()); 10490 reorderScalars(VL, Mask); 10491 } 10492 const unsigned VF = VL.size(); 10493 InstructionsState S = getSameOpcode(VL, *TLI); 10494 // Special processing for GEPs bundle, which may include non-gep values. 
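  // E.g. (illustrative) a bundle such as { %gep0, %ptr.phi, %gep1 } has no
  // common opcode, so the instruction state is taken from the first GEP found
  // in the list instead.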
10495 if (!S.getOpcode() && VL.front()->getType()->isPointerTy()) { 10496 const auto *It = 10497 find_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); }); 10498 if (It != VL.end()) 10499 S = getSameOpcode(*It, *TLI); 10500 } 10501 if (S.getOpcode()) { 10502 auto CheckSameVE = [&](const TreeEntry *VE) { 10503 return VE->isSame(VL) && 10504 (any_of(VE->UserTreeIndices, 10505 [E, NodeIdx](const EdgeInfo &EI) { 10506 return EI.UserTE == E && EI.EdgeIdx == NodeIdx; 10507 }) || 10508 any_of(VectorizableTree, 10509 [E, NodeIdx, VE](const std::unique_ptr<TreeEntry> &TE) { 10510 return TE->isOperandGatherNode({E, NodeIdx}) && 10511 VE->isSame(TE->Scalars); 10512 })); 10513 }; 10514 TreeEntry *VE = getTreeEntry(S.OpValue); 10515 bool IsSameVE = VE && CheckSameVE(VE); 10516 if (!IsSameVE) { 10517 auto It = MultiNodeScalars.find(S.OpValue); 10518 if (It != MultiNodeScalars.end()) { 10519 auto *I = find_if(It->getSecond(), [&](const TreeEntry *TE) { 10520 return TE != VE && CheckSameVE(TE); 10521 }); 10522 if (I != It->getSecond().end()) { 10523 VE = *I; 10524 IsSameVE = true; 10525 } 10526 } 10527 } 10528 if (IsSameVE) { 10529 auto FinalShuffle = [&](Value *V, ArrayRef<int> Mask) { 10530 ShuffleInstructionBuilder ShuffleBuilder(Builder, *this); 10531 ShuffleBuilder.add(V, Mask); 10532 return ShuffleBuilder.finalize(std::nullopt); 10533 }; 10534 Value *V = vectorizeTree(VE, PostponedPHIs); 10535 if (VF != cast<FixedVectorType>(V->getType())->getNumElements()) { 10536 if (!VE->ReuseShuffleIndices.empty()) { 10537 // Reshuffle to get only unique values. 10538 // If some of the scalars are duplicated in the vectorization 10539 // tree entry, we do not vectorize them but instead generate a 10540 // mask for the reuses. But if there are several users of the 10541 // same entry, they may have different vectorization factors. 10542 // This is especially important for PHI nodes. In this case, we 10543 // need to adapt the resulting instruction for the user 10544 // vectorization factor and have to reshuffle it again to take 10545 // only unique elements of the vector. Without this code the 10546 // function incorrectly returns reduced vector instruction with 10547 // the same elements, not with the unique ones. 10548 10549 // block: 10550 // %phi = phi <2 x > { .., %entry} {%shuffle, %block} 10551 // %2 = shuffle <2 x > %phi, poison, <4 x > <1, 1, 0, 0> 10552 // ... (use %2) 10553 // %shuffle = shuffle <2 x> %2, poison, <2 x> {2, 0} 10554 // br %block 10555 SmallVector<int> UniqueIdxs(VF, PoisonMaskElem); 10556 SmallSet<int, 4> UsedIdxs; 10557 int Pos = 0; 10558 for (int Idx : VE->ReuseShuffleIndices) { 10559 if (Idx != static_cast<int>(VF) && Idx != PoisonMaskElem && 10560 UsedIdxs.insert(Idx).second) 10561 UniqueIdxs[Idx] = Pos; 10562 ++Pos; 10563 } 10564 assert(VF >= UsedIdxs.size() && "Expected vectorization factor " 10565 "less than original vector size."); 10566 UniqueIdxs.append(VF - UsedIdxs.size(), PoisonMaskElem); 10567 V = FinalShuffle(V, UniqueIdxs); 10568 } else { 10569 assert(VF < cast<FixedVectorType>(V->getType())->getNumElements() && 10570 "Expected vectorization factor less " 10571 "than original vector size."); 10572 SmallVector<int> UniformMask(VF, 0); 10573 std::iota(UniformMask.begin(), UniformMask.end(), 0); 10574 V = FinalShuffle(V, UniformMask); 10575 } 10576 } 10577 // Need to update the operand gather node, if actually the operand is not a 10578 // vectorized node, but the buildvector/gather node, which matches one of 10579 // the vectorized nodes. 
10580 if (find_if(VE->UserTreeIndices, [&](const EdgeInfo &EI) { 10581 return EI.UserTE == E && EI.EdgeIdx == NodeIdx; 10582 }) == VE->UserTreeIndices.end()) { 10583 auto *It = find_if( 10584 VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) { 10585 return TE->State == TreeEntry::NeedToGather && 10586 TE->UserTreeIndices.front().UserTE == E && 10587 TE->UserTreeIndices.front().EdgeIdx == NodeIdx; 10588 }); 10589 assert(It != VectorizableTree.end() && "Expected gather node operand."); 10590 (*It)->VectorizedValue = V; 10591 } 10592 return V; 10593 } 10594 } 10595 10596 // Find the corresponding gather entry and vectorize it. 10597 // Allows to be more accurate with tree/graph transformations, checks for the 10598 // correctness of the transformations in many cases. 10599 auto *I = find_if(VectorizableTree, 10600 [E, NodeIdx](const std::unique_ptr<TreeEntry> &TE) { 10601 return TE->isOperandGatherNode({E, NodeIdx}); 10602 }); 10603 assert(I != VectorizableTree.end() && "Gather node is not in the graph."); 10604 assert(I->get()->UserTreeIndices.size() == 1 && 10605 "Expected only single user for the gather node."); 10606 assert(I->get()->isSame(VL) && "Expected same list of scalars."); 10607 return vectorizeTree(I->get(), PostponedPHIs); 10608 } 10609 10610 template <typename BVTy, typename ResTy, typename... Args> 10611 ResTy BoUpSLP::processBuildVector(const TreeEntry *E, Args &...Params) { 10612 assert(E->State == TreeEntry::NeedToGather && "Expected gather node."); 10613 unsigned VF = E->getVectorFactor(); 10614 10615 bool NeedFreeze = false; 10616 SmallVector<int> ReuseShuffleIndicies(E->ReuseShuffleIndices.begin(), 10617 E->ReuseShuffleIndices.end()); 10618 SmallVector<Value *> GatheredScalars(E->Scalars.begin(), E->Scalars.end()); 10619 // Build a mask out of the reorder indices and reorder scalars per this 10620 // mask. 
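  // E.g. (illustrative) the reorder indices <1, 2, 0> are inverted into the
  // shuffle mask <2, 0, 1>, and the gathered scalars are then permuted per
  // that mask.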
10621 SmallVector<int> ReorderMask; 10622 inversePermutation(E->ReorderIndices, ReorderMask); 10623 if (!ReorderMask.empty()) 10624 reorderScalars(GatheredScalars, ReorderMask); 10625 auto FindReusedSplat = [&](MutableArrayRef<int> Mask, unsigned InputVF, 10626 unsigned I, unsigned SliceSize) { 10627 if (!isSplat(E->Scalars) || none_of(E->Scalars, [](Value *V) { 10628 return isa<UndefValue>(V) && !isa<PoisonValue>(V); 10629 })) 10630 return false; 10631 TreeEntry *UserTE = E->UserTreeIndices.back().UserTE; 10632 unsigned EdgeIdx = E->UserTreeIndices.back().EdgeIdx; 10633 if (UserTE->getNumOperands() != 2) 10634 return false; 10635 auto *It = 10636 find_if(VectorizableTree, [=](const std::unique_ptr<TreeEntry> &TE) { 10637 return find_if(TE->UserTreeIndices, [=](const EdgeInfo &EI) { 10638 return EI.UserTE == UserTE && EI.EdgeIdx != EdgeIdx; 10639 }) != TE->UserTreeIndices.end(); 10640 }); 10641 if (It == VectorizableTree.end()) 10642 return false; 10643 int Idx; 10644 if ((Mask.size() < InputVF && 10645 ShuffleVectorInst::isExtractSubvectorMask(Mask, InputVF, Idx) && 10646 Idx == 0) || 10647 (Mask.size() == InputVF && 10648 ShuffleVectorInst::isIdentityMask(Mask, Mask.size()))) { 10649 std::iota(std::next(Mask.begin(), I * SliceSize), 10650 std::next(Mask.begin(), (I + 1) * SliceSize), 0); 10651 } else { 10652 unsigned IVal = 10653 *find_if_not(Mask, [](int Idx) { return Idx == PoisonMaskElem; }); 10654 std::fill(std::next(Mask.begin(), I * SliceSize), 10655 std::next(Mask.begin(), (I + 1) * SliceSize), IVal); 10656 } 10657 return true; 10658 }; 10659 BVTy ShuffleBuilder(Params...); 10660 ResTy Res = ResTy(); 10661 SmallVector<int> Mask; 10662 SmallVector<int> ExtractMask(GatheredScalars.size(), PoisonMaskElem); 10663 SmallVector<std::optional<TTI::ShuffleKind>> ExtractShuffles; 10664 Value *ExtractVecBase = nullptr; 10665 bool UseVecBaseAsInput = false; 10666 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> GatherShuffles; 10667 SmallVector<SmallVector<const TreeEntry *>> Entries; 10668 Type *ScalarTy = GatheredScalars.front()->getType(); 10669 auto *VecTy = FixedVectorType::get(ScalarTy, GatheredScalars.size()); 10670 unsigned NumParts = TTI->getNumberOfParts(VecTy); 10671 if (NumParts == 0 || NumParts >= GatheredScalars.size()) 10672 NumParts = 1; 10673 if (!all_of(GatheredScalars, UndefValue::classof)) { 10674 // Check for gathered extracts. 10675 bool Resized = false; 10676 ExtractShuffles = 10677 tryToGatherExtractElements(GatheredScalars, ExtractMask, NumParts); 10678 if (!ExtractShuffles.empty()) { 10679 SmallVector<const TreeEntry *> ExtractEntries; 10680 for (auto [Idx, I] : enumerate(ExtractMask)) { 10681 if (I == PoisonMaskElem) 10682 continue; 10683 if (const auto *TE = getTreeEntry( 10684 cast<ExtractElementInst>(E->Scalars[Idx])->getVectorOperand())) 10685 ExtractEntries.push_back(TE); 10686 } 10687 if (std::optional<ResTy> Delayed = 10688 ShuffleBuilder.needToDelay(E, ExtractEntries)) { 10689 // Delay emission of gathers which are not ready yet. 10690 PostponedGathers.insert(E); 10691 // Postpone gather emission, will be emitted after the end of the 10692 // process to keep correct order. 
10693 return *Delayed; 10694 } 10695 if (Value *VecBase = ShuffleBuilder.adjustExtracts( 10696 E, ExtractMask, ExtractShuffles, NumParts, UseVecBaseAsInput)) { 10697 ExtractVecBase = VecBase; 10698 if (auto *VecBaseTy = dyn_cast<FixedVectorType>(VecBase->getType())) 10699 if (VF == VecBaseTy->getNumElements() && 10700 GatheredScalars.size() != VF) { 10701 Resized = true; 10702 GatheredScalars.append(VF - GatheredScalars.size(), 10703 PoisonValue::get(ScalarTy)); 10704 } 10705 } 10706 } 10707 // Gather extracts after we check for full matched gathers only. 10708 if (!ExtractShuffles.empty() || E->getOpcode() != Instruction::Load || 10709 E->isAltShuffle() || 10710 all_of(E->Scalars, [this](Value *V) { return getTreeEntry(V); }) || 10711 isSplat(E->Scalars) || 10712 (E->Scalars != GatheredScalars && GatheredScalars.size() <= 2)) { 10713 GatherShuffles = 10714 isGatherShuffledEntry(E, GatheredScalars, Mask, Entries, NumParts); 10715 } 10716 if (!GatherShuffles.empty()) { 10717 if (std::optional<ResTy> Delayed = 10718 ShuffleBuilder.needToDelay(E, Entries)) { 10719 // Delay emission of gathers which are not ready yet. 10720 PostponedGathers.insert(E); 10721 // Postpone gather emission, will be emitted after the end of the 10722 // process to keep correct order. 10723 return *Delayed; 10724 } 10725 if (GatherShuffles.size() == 1 && 10726 *GatherShuffles.front() == TTI::SK_PermuteSingleSrc && 10727 Entries.front().front()->isSame(E->Scalars)) { 10728 // Perfect match in the graph, will reuse the previously vectorized 10729 // node. Cost is 0. 10730 LLVM_DEBUG( 10731 dbgs() 10732 << "SLP: perfect diamond match for gather bundle " 10733 << shortBundleName(E->Scalars) << ".\n"); 10734 // Restore the mask for previous partially matched values. 10735 Mask.resize(E->Scalars.size()); 10736 const TreeEntry *FrontTE = Entries.front().front(); 10737 if (FrontTE->ReorderIndices.empty() && 10738 ((FrontTE->ReuseShuffleIndices.empty() && 10739 E->Scalars.size() == FrontTE->Scalars.size()) || 10740 (E->Scalars.size() == FrontTE->ReuseShuffleIndices.size()))) { 10741 std::iota(Mask.begin(), Mask.end(), 0); 10742 } else { 10743 for (auto [I, V] : enumerate(E->Scalars)) { 10744 if (isa<PoisonValue>(V)) { 10745 Mask[I] = PoisonMaskElem; 10746 continue; 10747 } 10748 Mask[I] = FrontTE->findLaneForValue(V); 10749 } 10750 } 10751 ShuffleBuilder.add(*FrontTE, Mask); 10752 Res = ShuffleBuilder.finalize(E->getCommonMask()); 10753 return Res; 10754 } 10755 if (!Resized) { 10756 if (GatheredScalars.size() != VF && 10757 any_of(Entries, [&](ArrayRef<const TreeEntry *> TEs) { 10758 return any_of(TEs, [&](const TreeEntry *TE) { 10759 return TE->getVectorFactor() == VF; 10760 }); 10761 })) 10762 GatheredScalars.append(VF - GatheredScalars.size(), 10763 PoisonValue::get(ScalarTy)); 10764 } 10765 // Remove shuffled elements from list of gathers. 10766 for (int I = 0, Sz = Mask.size(); I < Sz; ++I) { 10767 if (Mask[I] != PoisonMaskElem) 10768 GatheredScalars[I] = PoisonValue::get(ScalarTy); 10769 } 10770 } 10771 } 10772 auto TryPackScalars = [&](SmallVectorImpl<Value *> &Scalars, 10773 SmallVectorImpl<int> &ReuseMask, 10774 bool IsRootPoison) { 10775 // For splats with can emit broadcasts instead of gathers, so try to find 10776 // such sequences. 
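    // E.g. (illustrative) a gather of <%a, %a, %a, %a> can be emitted as a
    // single insertelement of %a into lane 0 followed by a zero-mask
    // (broadcast) shuffle, instead of four separate inserts.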
10777 bool IsSplat = IsRootPoison && isSplat(Scalars) && 10778 (Scalars.size() > 2 || Scalars.front() == Scalars.back()); 10779 Scalars.append(VF - Scalars.size(), PoisonValue::get(ScalarTy)); 10780 SmallVector<int> UndefPos; 10781 DenseMap<Value *, unsigned> UniquePositions; 10782 // Gather unique non-const values and all constant values. 10783 // For repeated values, just shuffle them. 10784 int NumNonConsts = 0; 10785 int SinglePos = 0; 10786 for (auto [I, V] : enumerate(Scalars)) { 10787 if (isa<UndefValue>(V)) { 10788 if (!isa<PoisonValue>(V)) { 10789 ReuseMask[I] = I; 10790 UndefPos.push_back(I); 10791 } 10792 continue; 10793 } 10794 if (isConstant(V)) { 10795 ReuseMask[I] = I; 10796 continue; 10797 } 10798 ++NumNonConsts; 10799 SinglePos = I; 10800 Value *OrigV = V; 10801 Scalars[I] = PoisonValue::get(ScalarTy); 10802 if (IsSplat) { 10803 Scalars.front() = OrigV; 10804 ReuseMask[I] = 0; 10805 } else { 10806 const auto Res = UniquePositions.try_emplace(OrigV, I); 10807 Scalars[Res.first->second] = OrigV; 10808 ReuseMask[I] = Res.first->second; 10809 } 10810 } 10811 if (NumNonConsts == 1) { 10812 // Restore single insert element. 10813 if (IsSplat) { 10814 ReuseMask.assign(VF, PoisonMaskElem); 10815 std::swap(Scalars.front(), Scalars[SinglePos]); 10816 if (!UndefPos.empty() && UndefPos.front() == 0) 10817 Scalars.front() = UndefValue::get(ScalarTy); 10818 } 10819 ReuseMask[SinglePos] = SinglePos; 10820 } else if (!UndefPos.empty() && IsSplat) { 10821 // For undef values, try to replace them with the simple broadcast. 10822 // We can do it if the broadcasted value is guaranteed to be 10823 // non-poisonous, or by freezing the incoming scalar value first. 10824 auto *It = find_if(Scalars, [this, E](Value *V) { 10825 return !isa<UndefValue>(V) && 10826 (getTreeEntry(V) || isGuaranteedNotToBePoison(V) || 10827 (E->UserTreeIndices.size() == 1 && 10828 any_of(V->uses(), [E](const Use &U) { 10829 // Check if the value already used in the same operation in 10830 // one of the nodes already. 10831 return E->UserTreeIndices.front().EdgeIdx != 10832 U.getOperandNo() && 10833 is_contained( 10834 E->UserTreeIndices.front().UserTE->Scalars, 10835 U.getUser()); 10836 }))); 10837 }); 10838 if (It != Scalars.end()) { 10839 // Replace undefs by the non-poisoned scalars and emit broadcast. 10840 int Pos = std::distance(Scalars.begin(), It); 10841 for (int I : UndefPos) { 10842 // Set the undef position to the non-poisoned scalar. 10843 ReuseMask[I] = Pos; 10844 // Replace the undef by the poison, in the mask it is replaced by 10845 // non-poisoned scalar already. 10846 if (I != Pos) 10847 Scalars[I] = PoisonValue::get(ScalarTy); 10848 } 10849 } else { 10850 // Replace undefs by the poisons, emit broadcast and then emit 10851 // freeze. 10852 for (int I : UndefPos) { 10853 ReuseMask[I] = PoisonMaskElem; 10854 if (isa<UndefValue>(Scalars[I])) 10855 Scalars[I] = PoisonValue::get(ScalarTy); 10856 } 10857 NeedFreeze = true; 10858 } 10859 } 10860 }; 10861 if (!ExtractShuffles.empty() || !GatherShuffles.empty()) { 10862 bool IsNonPoisoned = true; 10863 bool IsUsedInExpr = true; 10864 Value *Vec1 = nullptr; 10865 if (!ExtractShuffles.empty()) { 10866 // Gather of extractelements can be represented as just a shuffle of 10867 // a single/two vectors the scalars are extracted from. 10868 // Find input vectors. 
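      // E.g. (illustrative) gathering
      //   %x = extractelement <4 x i32> %v, i32 0
      //   %y = extractelement <4 x i32> %v, i32 1
      // is just a shuffle of %v with mask <0, 1>.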
10869 Value *Vec2 = nullptr; 10870 for (unsigned I = 0, Sz = ExtractMask.size(); I < Sz; ++I) { 10871 if (!Mask.empty() && Mask[I] != PoisonMaskElem) 10872 ExtractMask[I] = PoisonMaskElem; 10873 } 10874 if (UseVecBaseAsInput) { 10875 Vec1 = ExtractVecBase; 10876 } else { 10877 for (unsigned I = 0, Sz = ExtractMask.size(); I < Sz; ++I) { 10878 if (ExtractMask[I] == PoisonMaskElem) 10879 continue; 10880 if (isa<UndefValue>(E->Scalars[I])) 10881 continue; 10882 auto *EI = cast<ExtractElementInst>(E->Scalars[I]); 10883 Value *VecOp = EI->getVectorOperand(); 10884 if (const auto *TE = getTreeEntry(VecOp)) 10885 if (TE->VectorizedValue) 10886 VecOp = TE->VectorizedValue; 10887 if (!Vec1) { 10888 Vec1 = VecOp; 10889 } else if (Vec1 != EI->getVectorOperand()) { 10890 assert((!Vec2 || Vec2 == EI->getVectorOperand()) && 10891 "Expected only 1 or 2 vectors shuffle."); 10892 Vec2 = VecOp; 10893 } 10894 } 10895 } 10896 if (Vec2) { 10897 IsUsedInExpr = false; 10898 IsNonPoisoned &= 10899 isGuaranteedNotToBePoison(Vec1) && isGuaranteedNotToBePoison(Vec2); 10900 ShuffleBuilder.add(Vec1, Vec2, ExtractMask); 10901 } else if (Vec1) { 10902 IsUsedInExpr &= FindReusedSplat( 10903 ExtractMask, 10904 cast<FixedVectorType>(Vec1->getType())->getNumElements(), 0, 10905 ExtractMask.size()); 10906 ShuffleBuilder.add(Vec1, ExtractMask, /*ForExtracts=*/true); 10907 IsNonPoisoned &= isGuaranteedNotToBePoison(Vec1); 10908 } else { 10909 IsUsedInExpr = false; 10910 ShuffleBuilder.add(PoisonValue::get(FixedVectorType::get( 10911 ScalarTy, GatheredScalars.size())), 10912 ExtractMask, /*ForExtracts=*/true); 10913 } 10914 } 10915 if (!GatherShuffles.empty()) { 10916 unsigned SliceSize = E->Scalars.size() / NumParts; 10917 SmallVector<int> VecMask(Mask.size(), PoisonMaskElem); 10918 for (const auto [I, TEs] : enumerate(Entries)) { 10919 if (TEs.empty()) { 10920 assert(!GatherShuffles[I] && 10921 "No shuffles with empty entries list expected."); 10922 continue; 10923 } 10924 assert((TEs.size() == 1 || TEs.size() == 2) && 10925 "Expected shuffle of 1 or 2 entries."); 10926 auto SubMask = ArrayRef(Mask).slice(I * SliceSize, SliceSize); 10927 VecMask.assign(VecMask.size(), PoisonMaskElem); 10928 copy(SubMask, std::next(VecMask.begin(), I * SliceSize)); 10929 if (TEs.size() == 1) { 10930 IsUsedInExpr &= 10931 FindReusedSplat(VecMask, TEs.front()->getVectorFactor(), I, SliceSize); 10932 ShuffleBuilder.add(*TEs.front(), VecMask); 10933 if (TEs.front()->VectorizedValue) 10934 IsNonPoisoned &= 10935 isGuaranteedNotToBePoison(TEs.front()->VectorizedValue); 10936 } else { 10937 IsUsedInExpr = false; 10938 ShuffleBuilder.add(*TEs.front(), *TEs.back(), VecMask); 10939 if (TEs.front()->VectorizedValue && TEs.back()->VectorizedValue) 10940 IsNonPoisoned &= 10941 isGuaranteedNotToBePoison(TEs.front()->VectorizedValue) && 10942 isGuaranteedNotToBePoison(TEs.back()->VectorizedValue); 10943 } 10944 } 10945 } 10946 // Try to figure out best way to combine values: build a shuffle and insert 10947 // elements or just build several shuffles. 10948 // Insert non-constant scalars. 10949 SmallVector<Value *> NonConstants(GatheredScalars); 10950 int EMSz = ExtractMask.size(); 10951 int MSz = Mask.size(); 10952 // Try to build constant vector and shuffle with it only if currently we 10953 // have a single permutation and more than 1 scalar constants. 
10954 bool IsSingleShuffle = ExtractShuffles.empty() || GatherShuffles.empty(); 10955 bool IsIdentityShuffle = 10956 ((UseVecBaseAsInput || 10957 all_of(ExtractShuffles, 10958 [](const std::optional<TTI::ShuffleKind> &SK) { 10959 return SK.value_or(TTI::SK_PermuteTwoSrc) == 10960 TTI::SK_PermuteSingleSrc; 10961 })) && 10962 none_of(ExtractMask, [&](int I) { return I >= EMSz; }) && 10963 ShuffleVectorInst::isIdentityMask(ExtractMask, EMSz)) || 10964 (!GatherShuffles.empty() && 10965 all_of(GatherShuffles, 10966 [](const std::optional<TTI::ShuffleKind> &SK) { 10967 return SK.value_or(TTI::SK_PermuteTwoSrc) == 10968 TTI::SK_PermuteSingleSrc; 10969 }) && 10970 none_of(Mask, [&](int I) { return I >= MSz; }) && 10971 ShuffleVectorInst::isIdentityMask(Mask, MSz)); 10972 bool EnoughConstsForShuffle = 10973 IsSingleShuffle && 10974 (none_of(GatheredScalars, 10975 [](Value *V) { 10976 return isa<UndefValue>(V) && !isa<PoisonValue>(V); 10977 }) || 10978 any_of(GatheredScalars, 10979 [](Value *V) { 10980 return isa<Constant>(V) && !isa<UndefValue>(V); 10981 })) && 10982 (!IsIdentityShuffle || 10983 (GatheredScalars.size() == 2 && 10984 any_of(GatheredScalars, 10985 [](Value *V) { return !isa<UndefValue>(V); })) || 10986 count_if(GatheredScalars, [](Value *V) { 10987 return isa<Constant>(V) && !isa<PoisonValue>(V); 10988 }) > 1); 10989 // NonConstants array contains just non-constant values, GatheredScalars 10990 // contains only constant to build final vector and then shuffle. 10991 for (int I = 0, Sz = GatheredScalars.size(); I < Sz; ++I) { 10992 if (EnoughConstsForShuffle && isa<Constant>(GatheredScalars[I])) 10993 NonConstants[I] = PoisonValue::get(ScalarTy); 10994 else 10995 GatheredScalars[I] = PoisonValue::get(ScalarTy); 10996 } 10997 // Generate constants for final shuffle and build a mask for them. 10998 if (!all_of(GatheredScalars, PoisonValue::classof)) { 10999 SmallVector<int> BVMask(GatheredScalars.size(), PoisonMaskElem); 11000 TryPackScalars(GatheredScalars, BVMask, /*IsRootPoison=*/true); 11001 Value *BV = ShuffleBuilder.gather(GatheredScalars, BVMask.size()); 11002 ShuffleBuilder.add(BV, BVMask); 11003 } 11004 if (all_of(NonConstants, [=](Value *V) { 11005 return isa<PoisonValue>(V) || 11006 (IsSingleShuffle && ((IsIdentityShuffle && 11007 IsNonPoisoned) || IsUsedInExpr) && isa<UndefValue>(V)); 11008 })) 11009 Res = ShuffleBuilder.finalize(E->ReuseShuffleIndices); 11010 else 11011 Res = ShuffleBuilder.finalize( 11012 E->ReuseShuffleIndices, E->Scalars.size(), 11013 [&](Value *&Vec, SmallVectorImpl<int> &Mask) { 11014 TryPackScalars(NonConstants, Mask, /*IsRootPoison=*/false); 11015 Vec = ShuffleBuilder.gather(NonConstants, Mask.size(), Vec); 11016 }); 11017 } else if (!allConstant(GatheredScalars)) { 11018 // Gather unique scalars and all constants. 11019 SmallVector<int> ReuseMask(GatheredScalars.size(), PoisonMaskElem); 11020 TryPackScalars(GatheredScalars, ReuseMask, /*IsRootPoison=*/true); 11021 Value *BV = ShuffleBuilder.gather(GatheredScalars, ReuseMask.size()); 11022 ShuffleBuilder.add(BV, ReuseMask); 11023 Res = ShuffleBuilder.finalize(E->ReuseShuffleIndices); 11024 } else { 11025 // Gather all constants. 
11026 SmallVector<int> Mask(E->Scalars.size(), PoisonMaskElem); 11027 for (auto [I, V] : enumerate(E->Scalars)) { 11028 if (!isa<PoisonValue>(V)) 11029 Mask[I] = I; 11030 } 11031 Value *BV = ShuffleBuilder.gather(E->Scalars); 11032 ShuffleBuilder.add(BV, Mask); 11033 Res = ShuffleBuilder.finalize(E->ReuseShuffleIndices); 11034 } 11035 11036 if (NeedFreeze) 11037 Res = ShuffleBuilder.createFreeze(Res); 11038 return Res; 11039 } 11040 11041 Value *BoUpSLP::createBuildVector(const TreeEntry *E) { 11042 return processBuildVector<ShuffleInstructionBuilder, Value *>(E, Builder, 11043 *this); 11044 } 11045 11046 Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) { 11047 IRBuilder<>::InsertPointGuard Guard(Builder); 11048 11049 if (E->VectorizedValue && 11050 (E->State != TreeEntry::Vectorize || E->getOpcode() != Instruction::PHI || 11051 E->isAltShuffle())) { 11052 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n"); 11053 return E->VectorizedValue; 11054 } 11055 11056 if (E->State == TreeEntry::NeedToGather) { 11057 // Set insert point for non-reduction initial nodes. 11058 if (E->getMainOp() && E->Idx == 0 && !UserIgnoreList) 11059 setInsertPointAfterBundle(E); 11060 Value *Vec = createBuildVector(E); 11061 E->VectorizedValue = Vec; 11062 return Vec; 11063 } 11064 11065 auto FinalShuffle = [&](Value *V, const TreeEntry *E, VectorType *VecTy, 11066 bool IsSigned) { 11067 if (V->getType() != VecTy) 11068 V = Builder.CreateIntCast(V, VecTy, IsSigned); 11069 ShuffleInstructionBuilder ShuffleBuilder(Builder, *this); 11070 if (E->getOpcode() == Instruction::Store) { 11071 ArrayRef<int> Mask = 11072 ArrayRef(reinterpret_cast<const int *>(E->ReorderIndices.begin()), 11073 E->ReorderIndices.size()); 11074 ShuffleBuilder.add(V, Mask); 11075 } else if (E->State == TreeEntry::PossibleStridedVectorize) { 11076 ShuffleBuilder.addOrdered(V, std::nullopt); 11077 } else { 11078 ShuffleBuilder.addOrdered(V, E->ReorderIndices); 11079 } 11080 return ShuffleBuilder.finalize(E->ReuseShuffleIndices); 11081 }; 11082 11083 assert((E->State == TreeEntry::Vectorize || 11084 E->State == TreeEntry::ScatterVectorize || 11085 E->State == TreeEntry::PossibleStridedVectorize) && 11086 "Unhandled state"); 11087 unsigned ShuffleOrOp = 11088 E->isAltShuffle() ? 
(unsigned)Instruction::ShuffleVector : E->getOpcode(); 11089 Instruction *VL0 = E->getMainOp(); 11090 Type *ScalarTy = VL0->getType(); 11091 if (auto *Store = dyn_cast<StoreInst>(VL0)) 11092 ScalarTy = Store->getValueOperand()->getType(); 11093 else if (auto *IE = dyn_cast<InsertElementInst>(VL0)) 11094 ScalarTy = IE->getOperand(1)->getType(); 11095 bool IsSigned = false; 11096 auto It = MinBWs.find(E); 11097 if (It != MinBWs.end()) { 11098 ScalarTy = IntegerType::get(F->getContext(), It->second.first); 11099 IsSigned = It->second.second; 11100 } 11101 auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size()); 11102 switch (ShuffleOrOp) { 11103 case Instruction::PHI: { 11104 assert((E->ReorderIndices.empty() || 11105 E != VectorizableTree.front().get() || 11106 !E->UserTreeIndices.empty()) && 11107 "PHI reordering is free."); 11108 if (PostponedPHIs && E->VectorizedValue) 11109 return E->VectorizedValue; 11110 auto *PH = cast<PHINode>(VL0); 11111 Builder.SetInsertPoint(PH->getParent(), 11112 PH->getParent()->getFirstNonPHIIt()); 11113 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 11114 if (PostponedPHIs || !E->VectorizedValue) { 11115 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues()); 11116 E->PHI = NewPhi; 11117 Value *V = NewPhi; 11118 11119 // Adjust insertion point once all PHI's have been generated. 11120 Builder.SetInsertPoint(PH->getParent(), 11121 PH->getParent()->getFirstInsertionPt()); 11122 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 11123 11124 V = FinalShuffle(V, E, VecTy, IsSigned); 11125 11126 E->VectorizedValue = V; 11127 if (PostponedPHIs) 11128 return V; 11129 } 11130 PHINode *NewPhi = cast<PHINode>(E->PHI); 11131 // If phi node is fully emitted - exit. 11132 if (NewPhi->getNumIncomingValues() != 0) 11133 return NewPhi; 11134 11135 // PHINodes may have multiple entries from the same block. We want to 11136 // visit every block once. 11137 SmallPtrSet<BasicBlock *, 4> VisitedBBs; 11138 11139 for (unsigned I : seq<unsigned>(0, PH->getNumIncomingValues())) { 11140 ValueList Operands; 11141 BasicBlock *IBB = PH->getIncomingBlock(I); 11142 11143 // Stop emission if all incoming values are generated. 
11144 if (NewPhi->getNumIncomingValues() == PH->getNumIncomingValues()) { 11145 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11146 return NewPhi; 11147 } 11148 11149 if (!VisitedBBs.insert(IBB).second) { 11150 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB); 11151 continue; 11152 } 11153 11154 Builder.SetInsertPoint(IBB->getTerminator()); 11155 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 11156 Value *Vec = vectorizeOperand(E, I, /*PostponedPHIs=*/true); 11157 if (VecTy != Vec->getType()) { 11158 assert(MinBWs.contains(getOperandEntry(E, I)) && 11159 "Expected item in MinBWs."); 11160 Vec = Builder.CreateIntCast(Vec, VecTy, It->second.second); 11161 } 11162 NewPhi->addIncoming(Vec, IBB); 11163 } 11164 11165 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() && 11166 "Invalid number of incoming values"); 11167 return NewPhi; 11168 } 11169 11170 case Instruction::ExtractElement: { 11171 Value *V = E->getSingleOperand(0); 11172 if (const TreeEntry *TE = getTreeEntry(V)) 11173 V = TE->VectorizedValue; 11174 setInsertPointAfterBundle(E); 11175 V = FinalShuffle(V, E, VecTy, IsSigned); 11176 E->VectorizedValue = V; 11177 return V; 11178 } 11179 case Instruction::ExtractValue: { 11180 auto *LI = cast<LoadInst>(E->getSingleOperand(0)); 11181 Builder.SetInsertPoint(LI); 11182 Value *Ptr = LI->getPointerOperand(); 11183 LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign()); 11184 Value *NewV = propagateMetadata(V, E->Scalars); 11185 NewV = FinalShuffle(NewV, E, VecTy, IsSigned); 11186 E->VectorizedValue = NewV; 11187 return NewV; 11188 } 11189 case Instruction::InsertElement: { 11190 assert(E->ReuseShuffleIndices.empty() && "All inserts should be unique"); 11191 Builder.SetInsertPoint(cast<Instruction>(E->Scalars.back())); 11192 Value *V = vectorizeOperand(E, 1, PostponedPHIs); 11193 ArrayRef<Value *> Op = E->getOperand(1); 11194 Type *ScalarTy = Op.front()->getType(); 11195 if (cast<VectorType>(V->getType())->getElementType() != ScalarTy) { 11196 assert(ScalarTy->isIntegerTy() && "Expected item in MinBWs."); 11197 std::pair<unsigned, bool> Res = MinBWs.lookup(getOperandEntry(E, 1)); 11198 assert(Res.first > 0 && "Expected item in MinBWs."); 11199 V = Builder.CreateIntCast( 11200 V, 11201 FixedVectorType::get( 11202 ScalarTy, 11203 cast<FixedVectorType>(V->getType())->getNumElements()), 11204 Res.second); 11205 } 11206 11207 // Create InsertVector shuffle if necessary 11208 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) { 11209 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0)); 11210 })); 11211 const unsigned NumElts = 11212 cast<FixedVectorType>(FirstInsert->getType())->getNumElements(); 11213 const unsigned NumScalars = E->Scalars.size(); 11214 11215 unsigned Offset = *getInsertIndex(VL0); 11216 assert(Offset < NumElts && "Failed to find vector index offset"); 11217 11218 // Create shuffle to resize vector 11219 SmallVector<int> Mask; 11220 if (!E->ReorderIndices.empty()) { 11221 inversePermutation(E->ReorderIndices, Mask); 11222 Mask.append(NumElts - NumScalars, PoisonMaskElem); 11223 } else { 11224 Mask.assign(NumElts, PoisonMaskElem); 11225 std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0); 11226 } 11227 // Create InsertVector shuffle if necessary 11228 bool IsIdentity = true; 11229 SmallVector<int> PrevMask(NumElts, PoisonMaskElem); 11230 Mask.swap(PrevMask); 11231 for (unsigned I = 0; I < NumScalars; ++I) { 11232 Value *Scalar = E->Scalars[PrevMask[I]]; 11233 
unsigned InsertIdx = *getInsertIndex(Scalar); 11234 IsIdentity &= InsertIdx - Offset == I; 11235 Mask[InsertIdx - Offset] = I; 11236 } 11237 if (!IsIdentity || NumElts != NumScalars) { 11238 Value *V2 = nullptr; 11239 bool IsVNonPoisonous = isGuaranteedNotToBePoison(V) && !isConstant(V); 11240 SmallVector<int> InsertMask(Mask); 11241 if (NumElts != NumScalars && Offset == 0) { 11242 // Follow all insert element instructions from the current buildvector 11243 // sequence. 11244 InsertElementInst *Ins = cast<InsertElementInst>(VL0); 11245 do { 11246 std::optional<unsigned> InsertIdx = getInsertIndex(Ins); 11247 if (!InsertIdx) 11248 break; 11249 if (InsertMask[*InsertIdx] == PoisonMaskElem) 11250 InsertMask[*InsertIdx] = *InsertIdx; 11251 if (!Ins->hasOneUse()) 11252 break; 11253 Ins = dyn_cast_or_null<InsertElementInst>( 11254 Ins->getUniqueUndroppableUser()); 11255 } while (Ins); 11256 SmallBitVector UseMask = 11257 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask); 11258 SmallBitVector IsFirstPoison = 11259 isUndefVector<true>(FirstInsert->getOperand(0), UseMask); 11260 SmallBitVector IsFirstUndef = 11261 isUndefVector(FirstInsert->getOperand(0), UseMask); 11262 if (!IsFirstPoison.all()) { 11263 unsigned Idx = 0; 11264 for (unsigned I = 0; I < NumElts; I++) { 11265 if (InsertMask[I] == PoisonMaskElem && !IsFirstPoison.test(I) && 11266 IsFirstUndef.test(I)) { 11267 if (IsVNonPoisonous) { 11268 InsertMask[I] = I < NumScalars ? I : 0; 11269 continue; 11270 } 11271 if (!V2) 11272 V2 = UndefValue::get(V->getType()); 11273 if (Idx >= NumScalars) 11274 Idx = NumScalars - 1; 11275 InsertMask[I] = NumScalars + Idx; 11276 ++Idx; 11277 } else if (InsertMask[I] != PoisonMaskElem && 11278 Mask[I] == PoisonMaskElem) { 11279 InsertMask[I] = PoisonMaskElem; 11280 } 11281 } 11282 } else { 11283 InsertMask = Mask; 11284 } 11285 } 11286 if (!V2) 11287 V2 = PoisonValue::get(V->getType()); 11288 V = Builder.CreateShuffleVector(V, V2, InsertMask); 11289 if (auto *I = dyn_cast<Instruction>(V)) { 11290 GatherShuffleExtractSeq.insert(I); 11291 CSEBlocks.insert(I->getParent()); 11292 } 11293 } 11294 11295 SmallVector<int> InsertMask(NumElts, PoisonMaskElem); 11296 for (unsigned I = 0; I < NumElts; I++) { 11297 if (Mask[I] != PoisonMaskElem) 11298 InsertMask[Offset + I] = I; 11299 } 11300 SmallBitVector UseMask = 11301 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask); 11302 SmallBitVector IsFirstUndef = 11303 isUndefVector(FirstInsert->getOperand(0), UseMask); 11304 if ((!IsIdentity || Offset != 0 || !IsFirstUndef.all()) && 11305 NumElts != NumScalars) { 11306 if (IsFirstUndef.all()) { 11307 if (!ShuffleVectorInst::isIdentityMask(InsertMask, NumElts)) { 11308 SmallBitVector IsFirstPoison = 11309 isUndefVector<true>(FirstInsert->getOperand(0), UseMask); 11310 if (!IsFirstPoison.all()) { 11311 for (unsigned I = 0; I < NumElts; I++) { 11312 if (InsertMask[I] == PoisonMaskElem && !IsFirstPoison.test(I)) 11313 InsertMask[I] = I + NumElts; 11314 } 11315 } 11316 V = Builder.CreateShuffleVector( 11317 V, 11318 IsFirstPoison.all() ? 
PoisonValue::get(V->getType()) 11319 : FirstInsert->getOperand(0), 11320 InsertMask, cast<Instruction>(E->Scalars.back())->getName()); 11321 if (auto *I = dyn_cast<Instruction>(V)) { 11322 GatherShuffleExtractSeq.insert(I); 11323 CSEBlocks.insert(I->getParent()); 11324 } 11325 } 11326 } else { 11327 SmallBitVector IsFirstPoison = 11328 isUndefVector<true>(FirstInsert->getOperand(0), UseMask); 11329 for (unsigned I = 0; I < NumElts; I++) { 11330 if (InsertMask[I] == PoisonMaskElem) 11331 InsertMask[I] = IsFirstPoison.test(I) ? PoisonMaskElem : I; 11332 else 11333 InsertMask[I] += NumElts; 11334 } 11335 V = Builder.CreateShuffleVector( 11336 FirstInsert->getOperand(0), V, InsertMask, 11337 cast<Instruction>(E->Scalars.back())->getName()); 11338 if (auto *I = dyn_cast<Instruction>(V)) { 11339 GatherShuffleExtractSeq.insert(I); 11340 CSEBlocks.insert(I->getParent()); 11341 } 11342 } 11343 } 11344 11345 ++NumVectorInstructions; 11346 E->VectorizedValue = V; 11347 return V; 11348 } 11349 case Instruction::ZExt: 11350 case Instruction::SExt: 11351 case Instruction::FPToUI: 11352 case Instruction::FPToSI: 11353 case Instruction::FPExt: 11354 case Instruction::PtrToInt: 11355 case Instruction::IntToPtr: 11356 case Instruction::SIToFP: 11357 case Instruction::UIToFP: 11358 case Instruction::Trunc: 11359 case Instruction::FPTrunc: 11360 case Instruction::BitCast: { 11361 setInsertPointAfterBundle(E); 11362 11363 Value *InVec = vectorizeOperand(E, 0, PostponedPHIs); 11364 if (E->VectorizedValue) { 11365 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11366 return E->VectorizedValue; 11367 } 11368 11369 auto *CI = cast<CastInst>(VL0); 11370 Instruction::CastOps VecOpcode = CI->getOpcode(); 11371 Type *SrcScalarTy = VL0->getOperand(0)->getType(); 11372 auto SrcIt = MinBWs.find(getOperandEntry(E, 0)); 11373 if (!ScalarTy->isFloatingPointTy() && !SrcScalarTy->isFloatingPointTy() && 11374 (SrcIt != MinBWs.end() || It != MinBWs.end())) { 11375 // Check if the values are candidates to demote. 11376 unsigned SrcBWSz = DL->getTypeSizeInBits(SrcScalarTy); 11377 if (SrcIt != MinBWs.end()) 11378 SrcBWSz = SrcIt->second.first; 11379 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy); 11380 if (BWSz == SrcBWSz) { 11381 VecOpcode = Instruction::BitCast; 11382 } else if (BWSz < SrcBWSz) { 11383 VecOpcode = Instruction::Trunc; 11384 } else if (It != MinBWs.end()) { 11385 assert(BWSz > SrcBWSz && "Invalid cast!"); 11386 VecOpcode = It->second.second ? Instruction::SExt : Instruction::ZExt; 11387 } 11388 } 11389 Value *V = (VecOpcode != ShuffleOrOp && VecOpcode == Instruction::BitCast) 11390 ? 
InVec 11391 : Builder.CreateCast(VecOpcode, InVec, VecTy); 11392 V = FinalShuffle(V, E, VecTy, IsSigned); 11393 11394 E->VectorizedValue = V; 11395 ++NumVectorInstructions; 11396 return V; 11397 } 11398 case Instruction::FCmp: 11399 case Instruction::ICmp: { 11400 setInsertPointAfterBundle(E); 11401 11402 Value *L = vectorizeOperand(E, 0, PostponedPHIs); 11403 if (E->VectorizedValue) { 11404 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11405 return E->VectorizedValue; 11406 } 11407 Value *R = vectorizeOperand(E, 1, PostponedPHIs); 11408 if (E->VectorizedValue) { 11409 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11410 return E->VectorizedValue; 11411 } 11412 if (L->getType() != R->getType()) { 11413 assert((MinBWs.contains(getOperandEntry(E, 0)) || 11414 MinBWs.contains(getOperandEntry(E, 1))) && 11415 "Expected item in MinBWs."); 11416 L = Builder.CreateIntCast(L, VecTy, IsSigned); 11417 R = Builder.CreateIntCast(R, VecTy, IsSigned); 11418 } 11419 11420 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 11421 Value *V = Builder.CreateCmp(P0, L, R); 11422 propagateIRFlags(V, E->Scalars, VL0); 11423 // Do not cast for cmps. 11424 VecTy = cast<FixedVectorType>(V->getType()); 11425 V = FinalShuffle(V, E, VecTy, IsSigned); 11426 11427 E->VectorizedValue = V; 11428 ++NumVectorInstructions; 11429 return V; 11430 } 11431 case Instruction::Select: { 11432 setInsertPointAfterBundle(E); 11433 11434 Value *Cond = vectorizeOperand(E, 0, PostponedPHIs); 11435 if (E->VectorizedValue) { 11436 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11437 return E->VectorizedValue; 11438 } 11439 Value *True = vectorizeOperand(E, 1, PostponedPHIs); 11440 if (E->VectorizedValue) { 11441 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11442 return E->VectorizedValue; 11443 } 11444 Value *False = vectorizeOperand(E, 2, PostponedPHIs); 11445 if (E->VectorizedValue) { 11446 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11447 return E->VectorizedValue; 11448 } 11449 if (True->getType() != False->getType()) { 11450 assert((MinBWs.contains(getOperandEntry(E, 1)) || 11451 MinBWs.contains(getOperandEntry(E, 2))) && 11452 "Expected item in MinBWs."); 11453 True = Builder.CreateIntCast(True, VecTy, IsSigned); 11454 False = Builder.CreateIntCast(False, VecTy, IsSigned); 11455 } 11456 11457 Value *V = Builder.CreateSelect(Cond, True, False); 11458 V = FinalShuffle(V, E, VecTy, IsSigned); 11459 11460 E->VectorizedValue = V; 11461 ++NumVectorInstructions; 11462 return V; 11463 } 11464 case Instruction::FNeg: { 11465 setInsertPointAfterBundle(E); 11466 11467 Value *Op = vectorizeOperand(E, 0, PostponedPHIs); 11468 11469 if (E->VectorizedValue) { 11470 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11471 return E->VectorizedValue; 11472 } 11473 11474 Value *V = Builder.CreateUnOp( 11475 static_cast<Instruction::UnaryOps>(E->getOpcode()), Op); 11476 propagateIRFlags(V, E->Scalars, VL0); 11477 if (auto *I = dyn_cast<Instruction>(V)) 11478 V = propagateMetadata(I, E->Scalars); 11479 11480 V = FinalShuffle(V, E, VecTy, IsSigned); 11481 11482 E->VectorizedValue = V; 11483 ++NumVectorInstructions; 11484 11485 return V; 11486 } 11487 case Instruction::Add: 11488 case Instruction::FAdd: 11489 case Instruction::Sub: 11490 case Instruction::FSub: 11491 case Instruction::Mul: 11492 case Instruction::FMul: 11493 case Instruction::UDiv: 11494 case Instruction::SDiv: 11495 case Instruction::FDiv: 11496 case 
Instruction::URem: 11497 case Instruction::SRem: 11498 case Instruction::FRem: 11499 case Instruction::Shl: 11500 case Instruction::LShr: 11501 case Instruction::AShr: 11502 case Instruction::And: 11503 case Instruction::Or: 11504 case Instruction::Xor: { 11505 setInsertPointAfterBundle(E); 11506 11507 Value *LHS = vectorizeOperand(E, 0, PostponedPHIs); 11508 if (E->VectorizedValue) { 11509 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11510 return E->VectorizedValue; 11511 } 11512 Value *RHS = vectorizeOperand(E, 1, PostponedPHIs); 11513 if (E->VectorizedValue) { 11514 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11515 return E->VectorizedValue; 11516 } 11517 if (LHS->getType() != RHS->getType()) { 11518 assert((MinBWs.contains(getOperandEntry(E, 0)) || 11519 MinBWs.contains(getOperandEntry(E, 1))) && 11520 "Expected item in MinBWs."); 11521 LHS = Builder.CreateIntCast(LHS, VecTy, IsSigned); 11522 RHS = Builder.CreateIntCast(RHS, VecTy, IsSigned); 11523 } 11524 11525 Value *V = Builder.CreateBinOp( 11526 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, 11527 RHS); 11528 propagateIRFlags(V, E->Scalars, VL0, !MinBWs.contains(E)); 11529 if (auto *I = dyn_cast<Instruction>(V)) 11530 V = propagateMetadata(I, E->Scalars); 11531 11532 V = FinalShuffle(V, E, VecTy, IsSigned); 11533 11534 E->VectorizedValue = V; 11535 ++NumVectorInstructions; 11536 11537 return V; 11538 } 11539 case Instruction::Load: { 11540 // Loads are inserted at the head of the tree because we don't want to 11541 // sink them all the way down past store instructions. 11542 setInsertPointAfterBundle(E); 11543 11544 LoadInst *LI = cast<LoadInst>(VL0); 11545 Instruction *NewLI; 11546 Value *PO = LI->getPointerOperand(); 11547 if (E->State == TreeEntry::Vectorize) { 11548 NewLI = Builder.CreateAlignedLoad(VecTy, PO, LI->getAlign()); 11549 } else { 11550 assert((E->State == TreeEntry::ScatterVectorize || 11551 E->State == TreeEntry::PossibleStridedVectorize) && 11552 "Unhandled state"); 11553 Value *VecPtr = vectorizeOperand(E, 0, PostponedPHIs); 11554 if (E->VectorizedValue) { 11555 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11556 return E->VectorizedValue; 11557 } 11558 // Use the minimum alignment of the gathered loads. 
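      // E.g. (illustrative) if the scalar loads have alignments 16, 8 and 4,
      // the masked gather is emitted with alignment 4.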
11559 Align CommonAlignment = LI->getAlign(); 11560 for (Value *V : E->Scalars) 11561 CommonAlignment = 11562 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign()); 11563 NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment); 11564 } 11565 Value *V = propagateMetadata(NewLI, E->Scalars); 11566 11567 V = FinalShuffle(V, E, VecTy, IsSigned); 11568 E->VectorizedValue = V; 11569 ++NumVectorInstructions; 11570 return V; 11571 } 11572 case Instruction::Store: { 11573 auto *SI = cast<StoreInst>(VL0); 11574 11575 setInsertPointAfterBundle(E); 11576 11577 Value *VecValue = vectorizeOperand(E, 0, PostponedPHIs); 11578 VecValue = FinalShuffle(VecValue, E, VecTy, IsSigned); 11579 11580 Value *Ptr = SI->getPointerOperand(); 11581 StoreInst *ST = 11582 Builder.CreateAlignedStore(VecValue, Ptr, SI->getAlign()); 11583 11584 Value *V = propagateMetadata(ST, E->Scalars); 11585 11586 E->VectorizedValue = V; 11587 ++NumVectorInstructions; 11588 return V; 11589 } 11590 case Instruction::GetElementPtr: { 11591 auto *GEP0 = cast<GetElementPtrInst>(VL0); 11592 setInsertPointAfterBundle(E); 11593 11594 Value *Op0 = vectorizeOperand(E, 0, PostponedPHIs); 11595 if (E->VectorizedValue) { 11596 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11597 return E->VectorizedValue; 11598 } 11599 11600 SmallVector<Value *> OpVecs; 11601 for (int J = 1, N = GEP0->getNumOperands(); J < N; ++J) { 11602 Value *OpVec = vectorizeOperand(E, J, PostponedPHIs); 11603 if (E->VectorizedValue) { 11604 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11605 return E->VectorizedValue; 11606 } 11607 OpVecs.push_back(OpVec); 11608 } 11609 11610 Value *V = Builder.CreateGEP(GEP0->getSourceElementType(), Op0, OpVecs); 11611 if (Instruction *I = dyn_cast<GetElementPtrInst>(V)) { 11612 SmallVector<Value *> GEPs; 11613 for (Value *V : E->Scalars) { 11614 if (isa<GetElementPtrInst>(V)) 11615 GEPs.push_back(V); 11616 } 11617 V = propagateMetadata(I, GEPs); 11618 } 11619 11620 V = FinalShuffle(V, E, VecTy, IsSigned); 11621 11622 E->VectorizedValue = V; 11623 ++NumVectorInstructions; 11624 11625 return V; 11626 } 11627 case Instruction::Call: { 11628 CallInst *CI = cast<CallInst>(VL0); 11629 setInsertPointAfterBundle(E); 11630 11631 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 11632 11633 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); 11634 bool UseIntrinsic = ID != Intrinsic::not_intrinsic && 11635 VecCallCosts.first <= VecCallCosts.second; 11636 11637 Value *ScalarArg = nullptr; 11638 SmallVector<Value *> OpVecs; 11639 SmallVector<Type *, 2> TysForDecl; 11640 // Add return type if intrinsic is overloaded on it. 11641 if (UseIntrinsic && isVectorIntrinsicWithOverloadTypeAtArg(ID, -1)) 11642 TysForDecl.push_back( 11643 FixedVectorType::get(CI->getType(), E->Scalars.size())); 11644 for (unsigned I : seq<unsigned>(0, CI->arg_size())) { 11645 ValueList OpVL; 11646 // Some intrinsics have scalar arguments. This argument should not be 11647 // vectorized. 
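      // E.g. the integer exponent operand of llvm.powi.* is kept scalar, while
      // the floating-point operand is vectorized.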
11648 if (UseIntrinsic && isVectorIntrinsicWithScalarOpAtArg(ID, I)) { 11649 CallInst *CEI = cast<CallInst>(VL0); 11650 ScalarArg = CEI->getArgOperand(I); 11651 OpVecs.push_back(CEI->getArgOperand(I)); 11652 if (isVectorIntrinsicWithOverloadTypeAtArg(ID, I)) 11653 TysForDecl.push_back(ScalarArg->getType()); 11654 continue; 11655 } 11656 11657 Value *OpVec = vectorizeOperand(E, I, PostponedPHIs); 11658 if (E->VectorizedValue) { 11659 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11660 return E->VectorizedValue; 11661 } 11662 LLVM_DEBUG(dbgs() << "SLP: OpVec[" << I << "]: " << *OpVec << "\n"); 11663 OpVecs.push_back(OpVec); 11664 if (UseIntrinsic && isVectorIntrinsicWithOverloadTypeAtArg(ID, I)) 11665 TysForDecl.push_back(OpVec->getType()); 11666 } 11667 11668 Function *CF; 11669 if (!UseIntrinsic) { 11670 VFShape Shape = 11671 VFShape::get(CI->getFunctionType(), 11672 ElementCount::getFixed( 11673 static_cast<unsigned>(VecTy->getNumElements())), 11674 false /*HasGlobalPred*/); 11675 CF = VFDatabase(*CI).getVectorizedFunction(Shape); 11676 } else { 11677 CF = Intrinsic::getDeclaration(F->getParent(), ID, TysForDecl); 11678 } 11679 11680 SmallVector<OperandBundleDef, 1> OpBundles; 11681 CI->getOperandBundlesAsDefs(OpBundles); 11682 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles); 11683 11684 propagateIRFlags(V, E->Scalars, VL0); 11685 V = FinalShuffle(V, E, VecTy, IsSigned); 11686 11687 E->VectorizedValue = V; 11688 ++NumVectorInstructions; 11689 return V; 11690 } 11691 case Instruction::ShuffleVector: { 11692 assert(E->isAltShuffle() && 11693 ((Instruction::isBinaryOp(E->getOpcode()) && 11694 Instruction::isBinaryOp(E->getAltOpcode())) || 11695 (Instruction::isCast(E->getOpcode()) && 11696 Instruction::isCast(E->getAltOpcode())) || 11697 (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) && 11698 "Invalid Shuffle Vector Operand"); 11699 11700 Value *LHS = nullptr, *RHS = nullptr; 11701 if (Instruction::isBinaryOp(E->getOpcode()) || isa<CmpInst>(VL0)) { 11702 setInsertPointAfterBundle(E); 11703 LHS = vectorizeOperand(E, 0, PostponedPHIs); 11704 if (E->VectorizedValue) { 11705 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11706 return E->VectorizedValue; 11707 } 11708 RHS = vectorizeOperand(E, 1, PostponedPHIs); 11709 } else { 11710 setInsertPointAfterBundle(E); 11711 LHS = vectorizeOperand(E, 0, PostponedPHIs); 11712 } 11713 if (E->VectorizedValue) { 11714 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11715 return E->VectorizedValue; 11716 } 11717 if (LHS && RHS && LHS->getType() != RHS->getType()) { 11718 assert((MinBWs.contains(getOperandEntry(E, 0)) || 11719 MinBWs.contains(getOperandEntry(E, 1))) && 11720 "Expected item in MinBWs."); 11721 LHS = Builder.CreateIntCast(LHS, VecTy, IsSigned); 11722 RHS = Builder.CreateIntCast(RHS, VecTy, IsSigned); 11723 } 11724 11725 Value *V0, *V1; 11726 if (Instruction::isBinaryOp(E->getOpcode())) { 11727 V0 = Builder.CreateBinOp( 11728 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS); 11729 V1 = Builder.CreateBinOp( 11730 static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS); 11731 } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) { 11732 V0 = Builder.CreateCmp(CI0->getPredicate(), LHS, RHS); 11733 auto *AltCI = cast<CmpInst>(E->getAltOp()); 11734 CmpInst::Predicate AltPred = AltCI->getPredicate(); 11735 V1 = Builder.CreateCmp(AltPred, LHS, RHS); 11736 } else { 11737 V0 = Builder.CreateCast( 11738 static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy); 
11739 V1 = Builder.CreateCast( 11740 static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy); 11741 } 11742 // Add V0 and V1 to later analysis to try to find and remove matching 11743 // instruction, if any. 11744 for (Value *V : {V0, V1}) { 11745 if (auto *I = dyn_cast<Instruction>(V)) { 11746 GatherShuffleExtractSeq.insert(I); 11747 CSEBlocks.insert(I->getParent()); 11748 } 11749 } 11750 11751 // Create shuffle to take alternate operations from the vector. 11752 // Also, gather up main and alt scalar ops to propagate IR flags to 11753 // each vector operation. 11754 ValueList OpScalars, AltScalars; 11755 SmallVector<int> Mask; 11756 E->buildAltOpShuffleMask( 11757 [E, this](Instruction *I) { 11758 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 11759 return isAlternateInstruction(I, E->getMainOp(), E->getAltOp(), 11760 *TLI); 11761 }, 11762 Mask, &OpScalars, &AltScalars); 11763 11764 propagateIRFlags(V0, OpScalars); 11765 propagateIRFlags(V1, AltScalars); 11766 11767 Value *V = Builder.CreateShuffleVector(V0, V1, Mask); 11768 if (auto *I = dyn_cast<Instruction>(V)) { 11769 V = propagateMetadata(I, E->Scalars); 11770 GatherShuffleExtractSeq.insert(I); 11771 CSEBlocks.insert(I->getParent()); 11772 } 11773 11774 if (V->getType() != VecTy && !isa<CmpInst>(VL0)) 11775 V = Builder.CreateIntCast( 11776 V, FixedVectorType::get(ScalarTy, E->getVectorFactor()), IsSigned); 11777 E->VectorizedValue = V; 11778 ++NumVectorInstructions; 11779 11780 return V; 11781 } 11782 default: 11783 llvm_unreachable("unknown inst"); 11784 } 11785 return nullptr; 11786 } 11787 11788 Value *BoUpSLP::vectorizeTree() { 11789 ExtraValueToDebugLocsMap ExternallyUsedValues; 11790 SmallVector<std::pair<Value *, Value *>> ReplacedExternals; 11791 return vectorizeTree(ExternallyUsedValues, ReplacedExternals); 11792 } 11793 11794 namespace { 11795 /// Data type for handling buildvector sequences with the reused scalars from 11796 /// other tree entries. 11797 struct ShuffledInsertData { 11798 /// List of insertelements to be replaced by shuffles. 11799 SmallVector<InsertElementInst *> InsertElements; 11800 /// The parent vectors and shuffle mask for the given list of inserts. 11801 MapVector<Value *, SmallVector<int>> ValueMasks; 11802 }; 11803 } // namespace 11804 11805 Value *BoUpSLP::vectorizeTree( 11806 const ExtraValueToDebugLocsMap &ExternallyUsedValues, 11807 SmallVectorImpl<std::pair<Value *, Value *>> &ReplacedExternals, 11808 Instruction *ReductionRoot) { 11809 // All blocks must be scheduled before any instructions are inserted. 11810 for (auto &BSIter : BlocksSchedules) { 11811 scheduleBlock(BSIter.second.get()); 11812 } 11813 // Clean Entry-to-LastInstruction table. It can be affected after scheduling, 11814 // need to rebuild it. 11815 EntryToLastInstruction.clear(); 11816 11817 if (ReductionRoot) 11818 Builder.SetInsertPoint(ReductionRoot->getParent(), 11819 ReductionRoot->getIterator()); 11820 else 11821 Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin()); 11822 11823 // Postpone emission of PHIs operands to avoid cyclic dependencies issues. 
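// The whole tree is emitted first with PostponedPHIs=true, and the vectorized
// PHI entries are then revisited with PostponedPHIs=false (see the loop below)
// so that their incoming values are filled in only after all feeding vectors
// have been created.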
11824 (void)vectorizeTree(VectorizableTree[0].get(), /*PostponedPHIs=*/true);
11825 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree)
11826 if (TE->State == TreeEntry::Vectorize &&
11827 TE->getOpcode() == Instruction::PHI && !TE->isAltShuffle() &&
11828 TE->VectorizedValue)
11829 (void)vectorizeTree(TE.get(), /*PostponedPHIs=*/false);
11830 // Run through the list of postponed gathers and emit them, replacing the temp
11831 // emitted allocas with actual vector instructions.
11832 ArrayRef<const TreeEntry *> PostponedNodes = PostponedGathers.getArrayRef();
11833 DenseMap<Value *, SmallVector<TreeEntry *>> PostponedValues;
11834 for (const TreeEntry *E : PostponedNodes) {
11835 auto *TE = const_cast<TreeEntry *>(E);
11836 if (auto *VecTE = getTreeEntry(TE->Scalars.front()))
11837 if (VecTE->isSame(TE->UserTreeIndices.front().UserTE->getOperand(
11838 TE->UserTreeIndices.front().EdgeIdx)))
11839 // Found a gather node which is exactly the same as one of the
11840 // vectorized nodes. It may happen after reordering.
11841 continue;
11842 auto *PrevVec = cast<Instruction>(TE->VectorizedValue);
11843 TE->VectorizedValue = nullptr;
11844 auto *UserI =
11845 cast<Instruction>(TE->UserTreeIndices.front().UserTE->VectorizedValue);
11846 // If the user is a PHI node, its vector code has to be inserted right before
11847 // the block terminator. Since the node was delayed, there were some unresolved
11848 // dependencies at the moment when the stub instruction was emitted. If any of
11849 // these dependencies turns out to be an operand of another PHI coming from
11850 // this same block, the position of the stub instruction becomes invalid. This
11851 // is because the source vector that is supposed to feed this gather node was
11852 // inserted at the end of the block [after the stub instruction]. So we need
11853 // to adjust the insertion point again to the end of the block.
11854 if (isa<PHINode>(UserI)) {
11855 // Insert before all users.
11856 Instruction *InsertPt = PrevVec->getParent()->getTerminator();
11857 for (User *U : PrevVec->users()) {
11858 if (U == UserI)
11859 continue;
11860 auto *UI = dyn_cast<Instruction>(U);
11861 if (!UI || isa<PHINode>(UI) || UI->getParent() != InsertPt->getParent())
11862 continue;
11863 if (UI->comesBefore(InsertPt))
11864 InsertPt = UI;
11865 }
11866 Builder.SetInsertPoint(InsertPt);
11867 } else {
11868 Builder.SetInsertPoint(PrevVec);
11869 }
11870 Builder.SetCurrentDebugLocation(UserI->getDebugLoc());
11871 Value *Vec = vectorizeTree(TE, /*PostponedPHIs=*/false);
11872 PrevVec->replaceAllUsesWith(Vec);
11873 PostponedValues.try_emplace(Vec).first->second.push_back(TE);
11874 // Replace the stub vector node, if it was used before for one of the
11875 // buildvector nodes already.
11876 auto It = PostponedValues.find(PrevVec);
11877 if (It != PostponedValues.end()) {
11878 for (TreeEntry *VTE : It->getSecond())
11879 VTE->VectorizedValue = Vec;
11880 }
11881 eraseInstruction(PrevVec);
11882 }
11883 
11884 LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size()
11885 << " values .\n");
11886 
11887 SmallVector<ShuffledInsertData> ShuffledInserts;
11888 // Maps a vector instruction to the original insertelement instruction.
11889 DenseMap<Value *, InsertElementInst *> VectorToInsertElement;
11890 // Maps extract Scalar to the corresponding extractelement instruction in the
11891 // basic block. Only one extractelement per block should be emitted.
11892 DenseMap<Value *, DenseMap<BasicBlock *, Instruction *>> ScalarToEEs; 11893 SmallDenseSet<Value *, 4> UsedInserts; 11894 DenseMap<Value *, Value *> VectorCasts; 11895 SmallDenseSet<Value *, 4> ScalarsWithNullptrUser; 11896 // Extract all of the elements with the external uses. 11897 for (const auto &ExternalUse : ExternalUses) { 11898 Value *Scalar = ExternalUse.Scalar; 11899 llvm::User *User = ExternalUse.User; 11900 11901 // Skip users that we already RAUW. This happens when one instruction 11902 // has multiple uses of the same value. 11903 if (User && !is_contained(Scalar->users(), User)) 11904 continue; 11905 TreeEntry *E = getTreeEntry(Scalar); 11906 assert(E && "Invalid scalar"); 11907 assert(E->State != TreeEntry::NeedToGather && 11908 "Extracting from a gather list"); 11909 // Non-instruction pointers are not deleted, just skip them. 11910 if (E->getOpcode() == Instruction::GetElementPtr && 11911 !isa<GetElementPtrInst>(Scalar)) 11912 continue; 11913 11914 Value *Vec = E->VectorizedValue; 11915 assert(Vec && "Can't find vectorizable value"); 11916 11917 Value *Lane = Builder.getInt32(ExternalUse.Lane); 11918 auto ExtractAndExtendIfNeeded = [&](Value *Vec) { 11919 if (Scalar->getType() != Vec->getType()) { 11920 Value *Ex = nullptr; 11921 auto It = ScalarToEEs.find(Scalar); 11922 if (It != ScalarToEEs.end()) { 11923 // No need to emit many extracts, just move the only one in the 11924 // current block. 11925 auto EEIt = It->second.find(Builder.GetInsertBlock()); 11926 if (EEIt != It->second.end()) { 11927 Instruction *I = EEIt->second; 11928 if (Builder.GetInsertPoint() != Builder.GetInsertBlock()->end() && 11929 Builder.GetInsertPoint()->comesBefore(I)) 11930 I->moveBefore(*Builder.GetInsertPoint()->getParent(), 11931 Builder.GetInsertPoint()); 11932 Ex = I; 11933 } 11934 } 11935 if (!Ex) { 11936 // "Reuse" the existing extract to improve final codegen. 11937 if (auto *ES = dyn_cast<ExtractElementInst>(Scalar)) { 11938 Value *V = ES->getVectorOperand(); 11939 if (const TreeEntry *ETE = getTreeEntry(V)) 11940 V = ETE->VectorizedValue; 11941 Ex = Builder.CreateExtractElement(V, ES->getIndexOperand()); 11942 } else { 11943 Ex = Builder.CreateExtractElement(Vec, Lane); 11944 } 11945 if (auto *I = dyn_cast<Instruction>(Ex)) 11946 ScalarToEEs[Scalar].try_emplace(Builder.GetInsertBlock(), I); 11947 } 11948 // The then branch of the previous if may produce constants, since 0 11949 // operand might be a constant. 11950 if (auto *ExI = dyn_cast<Instruction>(Ex)) { 11951 GatherShuffleExtractSeq.insert(ExI); 11952 CSEBlocks.insert(ExI->getParent()); 11953 } 11954 // If necessary, sign-extend or zero-extend ScalarRoot 11955 // to the larger type. 11956 if (Scalar->getType() != Ex->getType()) 11957 return Builder.CreateIntCast(Ex, Scalar->getType(), 11958 MinBWs.find(E)->second.second); 11959 return Ex; 11960 } 11961 assert(isa<FixedVectorType>(Scalar->getType()) && 11962 isa<InsertElementInst>(Scalar) && 11963 "In-tree scalar of vector type is not insertelement?"); 11964 auto *IE = cast<InsertElementInst>(Scalar); 11965 VectorToInsertElement.try_emplace(Vec, IE); 11966 return Vec; 11967 }; 11968 // If User == nullptr, the Scalar remains as scalar in vectorized 11969 // instructions or is used as extra arg. Generate ExtractElement instruction 11970 // and update the record for this scalar in ExternallyUsedValues. 
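// (This happens, e.g., for values that feed a horizontal reduction emitted
// separately and registered in ExternallyUsedValues.)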
11971 if (!User) { 11972 if (!ScalarsWithNullptrUser.insert(Scalar).second) 11973 continue; 11974 assert((ExternallyUsedValues.count(Scalar) || 11975 any_of(Scalar->users(), 11976 [&](llvm::User *U) { 11977 TreeEntry *UseEntry = getTreeEntry(U); 11978 return UseEntry && 11979 UseEntry->State == TreeEntry::Vectorize && 11980 E->State == TreeEntry::Vectorize && 11981 doesInTreeUserNeedToExtract( 11982 Scalar, 11983 cast<Instruction>(UseEntry->Scalars.front()), 11984 TLI); 11985 })) && 11986 "Scalar with nullptr User must be registered in " 11987 "ExternallyUsedValues map or remain as scalar in vectorized " 11988 "instructions"); 11989 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 11990 if (auto *PHI = dyn_cast<PHINode>(VecI)) 11991 Builder.SetInsertPoint(PHI->getParent(), 11992 PHI->getParent()->getFirstNonPHIIt()); 11993 else 11994 Builder.SetInsertPoint(VecI->getParent(), 11995 std::next(VecI->getIterator())); 11996 } else { 11997 Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin()); 11998 } 11999 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 12000 // Required to update internally referenced instructions. 12001 Scalar->replaceAllUsesWith(NewInst); 12002 ReplacedExternals.emplace_back(Scalar, NewInst); 12003 continue; 12004 } 12005 12006 if (auto *VU = dyn_cast<InsertElementInst>(User)) { 12007 // Skip if the scalar is another vector op or Vec is not an instruction. 12008 if (!Scalar->getType()->isVectorTy() && isa<Instruction>(Vec)) { 12009 if (auto *FTy = dyn_cast<FixedVectorType>(User->getType())) { 12010 if (!UsedInserts.insert(VU).second) 12011 continue; 12012 // Need to use original vector, if the root is truncated. 12013 auto BWIt = MinBWs.find(E); 12014 if (BWIt != MinBWs.end() && Vec->getType() != VU->getType()) { 12015 auto VecIt = VectorCasts.find(Scalar); 12016 if (VecIt == VectorCasts.end()) { 12017 IRBuilder<>::InsertPointGuard Guard(Builder); 12018 if (auto *IVec = dyn_cast<Instruction>(Vec)) 12019 Builder.SetInsertPoint(IVec->getNextNonDebugInstruction()); 12020 Vec = Builder.CreateIntCast( 12021 Vec, 12022 FixedVectorType::get( 12023 cast<VectorType>(VU->getType())->getElementType(), 12024 cast<FixedVectorType>(Vec->getType())->getNumElements()), 12025 BWIt->second.second); 12026 VectorCasts.try_emplace(Scalar, Vec); 12027 } else { 12028 Vec = VecIt->second; 12029 } 12030 } 12031 12032 std::optional<unsigned> InsertIdx = getInsertIndex(VU); 12033 if (InsertIdx) { 12034 auto *It = 12035 find_if(ShuffledInserts, [VU](const ShuffledInsertData &Data) { 12036 // Checks if 2 insertelements are from the same buildvector. 12037 InsertElementInst *VecInsert = Data.InsertElements.front(); 12038 return areTwoInsertFromSameBuildVector( 12039 VU, VecInsert, 12040 [](InsertElementInst *II) { return II->getOperand(0); }); 12041 }); 12042 unsigned Idx = *InsertIdx; 12043 if (It == ShuffledInserts.end()) { 12044 (void)ShuffledInserts.emplace_back(); 12045 It = std::next(ShuffledInserts.begin(), 12046 ShuffledInserts.size() - 1); 12047 SmallVectorImpl<int> &Mask = It->ValueMasks[Vec]; 12048 if (Mask.empty()) 12049 Mask.assign(FTy->getNumElements(), PoisonMaskElem); 12050 // Find the insertvector, vectorized in tree, if any. 12051 Value *Base = VU; 12052 while (auto *IEBase = dyn_cast<InsertElementInst>(Base)) { 12053 if (IEBase != User && 12054 (!IEBase->hasOneUse() || 12055 getInsertIndex(IEBase).value_or(Idx) == Idx)) 12056 break; 12057 // Build the mask for the vectorized insertelement instructions. 
12058 if (const TreeEntry *E = getTreeEntry(IEBase)) { 12059 do { 12060 IEBase = cast<InsertElementInst>(Base); 12061 int IEIdx = *getInsertIndex(IEBase); 12062 assert(Mask[Idx] == PoisonMaskElem && 12063 "InsertElementInstruction used already."); 12064 Mask[IEIdx] = IEIdx; 12065 Base = IEBase->getOperand(0); 12066 } while (E == getTreeEntry(Base)); 12067 break; 12068 } 12069 Base = cast<InsertElementInst>(Base)->getOperand(0); 12070 // After the vectorization the def-use chain has changed, need 12071 // to look through original insertelement instructions, if they 12072 // get replaced by vector instructions. 12073 auto It = VectorToInsertElement.find(Base); 12074 if (It != VectorToInsertElement.end()) 12075 Base = It->second; 12076 } 12077 } 12078 SmallVectorImpl<int> &Mask = It->ValueMasks[Vec]; 12079 if (Mask.empty()) 12080 Mask.assign(FTy->getNumElements(), PoisonMaskElem); 12081 Mask[Idx] = ExternalUse.Lane; 12082 It->InsertElements.push_back(cast<InsertElementInst>(User)); 12083 continue; 12084 } 12085 } 12086 } 12087 } 12088 12089 // Generate extracts for out-of-tree users. 12090 // Find the insertion point for the extractelement lane. 12091 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 12092 if (PHINode *PH = dyn_cast<PHINode>(User)) { 12093 for (unsigned I : seq<unsigned>(0, PH->getNumIncomingValues())) { 12094 if (PH->getIncomingValue(I) == Scalar) { 12095 Instruction *IncomingTerminator = 12096 PH->getIncomingBlock(I)->getTerminator(); 12097 if (isa<CatchSwitchInst>(IncomingTerminator)) { 12098 Builder.SetInsertPoint(VecI->getParent(), 12099 std::next(VecI->getIterator())); 12100 } else { 12101 Builder.SetInsertPoint(PH->getIncomingBlock(I)->getTerminator()); 12102 } 12103 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 12104 PH->setOperand(I, NewInst); 12105 } 12106 } 12107 } else { 12108 Builder.SetInsertPoint(cast<Instruction>(User)); 12109 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 12110 User->replaceUsesOfWith(Scalar, NewInst); 12111 } 12112 } else { 12113 Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin()); 12114 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 12115 User->replaceUsesOfWith(Scalar, NewInst); 12116 } 12117 12118 LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); 12119 } 12120 12121 auto CreateShuffle = [&](Value *V1, Value *V2, ArrayRef<int> Mask) { 12122 SmallVector<int> CombinedMask1(Mask.size(), PoisonMaskElem); 12123 SmallVector<int> CombinedMask2(Mask.size(), PoisonMaskElem); 12124 int VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 12125 for (int I = 0, E = Mask.size(); I < E; ++I) { 12126 if (Mask[I] < VF) 12127 CombinedMask1[I] = Mask[I]; 12128 else 12129 CombinedMask2[I] = Mask[I] - VF; 12130 } 12131 ShuffleInstructionBuilder ShuffleBuilder(Builder, *this); 12132 ShuffleBuilder.add(V1, CombinedMask1); 12133 if (V2) 12134 ShuffleBuilder.add(V2, CombinedMask2); 12135 return ShuffleBuilder.finalize(std::nullopt); 12136 }; 12137 12138 auto &&ResizeToVF = [&CreateShuffle](Value *Vec, ArrayRef<int> Mask, 12139 bool ForSingleMask) { 12140 unsigned VF = Mask.size(); 12141 unsigned VecVF = cast<FixedVectorType>(Vec->getType())->getNumElements(); 12142 if (VF != VecVF) { 12143 if (any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); })) { 12144 Vec = CreateShuffle(Vec, nullptr, Mask); 12145 return std::make_pair(Vec, true); 12146 } 12147 if (!ForSingleMask) { 12148 SmallVector<int> ResizeMask(VF, PoisonMaskElem); 12149 for (unsigned I = 0; I < VF; ++I) { 12150 if (Mask[I] != PoisonMaskElem) 12151 
ResizeMask[Mask[I]] = Mask[I]; 12152 } 12153 Vec = CreateShuffle(Vec, nullptr, ResizeMask); 12154 } 12155 } 12156 12157 return std::make_pair(Vec, false); 12158 }; 12159 // Perform shuffling of the vectorize tree entries for better handling of 12160 // external extracts. 12161 for (int I = 0, E = ShuffledInserts.size(); I < E; ++I) { 12162 // Find the first and the last instruction in the list of insertelements. 12163 sort(ShuffledInserts[I].InsertElements, isFirstInsertElement); 12164 InsertElementInst *FirstInsert = ShuffledInserts[I].InsertElements.front(); 12165 InsertElementInst *LastInsert = ShuffledInserts[I].InsertElements.back(); 12166 Builder.SetInsertPoint(LastInsert); 12167 auto Vector = ShuffledInserts[I].ValueMasks.takeVector(); 12168 Value *NewInst = performExtractsShuffleAction<Value>( 12169 MutableArrayRef(Vector.data(), Vector.size()), 12170 FirstInsert->getOperand(0), 12171 [](Value *Vec) { 12172 return cast<VectorType>(Vec->getType()) 12173 ->getElementCount() 12174 .getKnownMinValue(); 12175 }, 12176 ResizeToVF, 12177 [FirstInsert, &CreateShuffle](ArrayRef<int> Mask, 12178 ArrayRef<Value *> Vals) { 12179 assert((Vals.size() == 1 || Vals.size() == 2) && 12180 "Expected exactly 1 or 2 input values."); 12181 if (Vals.size() == 1) { 12182 // Do not create shuffle if the mask is a simple identity 12183 // non-resizing mask. 12184 if (Mask.size() != cast<FixedVectorType>(Vals.front()->getType()) 12185 ->getNumElements() || 12186 !ShuffleVectorInst::isIdentityMask(Mask, Mask.size())) 12187 return CreateShuffle(Vals.front(), nullptr, Mask); 12188 return Vals.front(); 12189 } 12190 return CreateShuffle(Vals.front() ? Vals.front() 12191 : FirstInsert->getOperand(0), 12192 Vals.back(), Mask); 12193 }); 12194 auto It = ShuffledInserts[I].InsertElements.rbegin(); 12195 // Rebuild buildvector chain. 12196 InsertElementInst *II = nullptr; 12197 if (It != ShuffledInserts[I].InsertElements.rend()) 12198 II = *It; 12199 SmallVector<Instruction *> Inserts; 12200 while (It != ShuffledInserts[I].InsertElements.rend()) { 12201 assert(II && "Must be an insertelement instruction."); 12202 if (*It == II) 12203 ++It; 12204 else 12205 Inserts.push_back(cast<Instruction>(II)); 12206 II = dyn_cast<InsertElementInst>(II->getOperand(0)); 12207 } 12208 for (Instruction *II : reverse(Inserts)) { 12209 II->replaceUsesOfWith(II->getOperand(0), NewInst); 12210 if (auto *NewI = dyn_cast<Instruction>(NewInst)) 12211 if (II->getParent() == NewI->getParent() && II->comesBefore(NewI)) 12212 II->moveAfter(NewI); 12213 NewInst = II; 12214 } 12215 LastInsert->replaceAllUsesWith(NewInst); 12216 for (InsertElementInst *IE : reverse(ShuffledInserts[I].InsertElements)) { 12217 IE->replaceUsesOfWith(IE->getOperand(0), 12218 PoisonValue::get(IE->getOperand(0)->getType())); 12219 IE->replaceUsesOfWith(IE->getOperand(1), 12220 PoisonValue::get(IE->getOperand(1)->getType())); 12221 eraseInstruction(IE); 12222 } 12223 CSEBlocks.insert(LastInsert->getParent()); 12224 } 12225 12226 SmallVector<Instruction *> RemovedInsts; 12227 // For each vectorized value: 12228 for (auto &TEPtr : VectorizableTree) { 12229 TreeEntry *Entry = TEPtr.get(); 12230 12231 // No need to handle users of gathered values. 
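// Gather entries keep their original scalar definitions and do not get their
// scalars erased below.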
12232 if (Entry->State == TreeEntry::NeedToGather) 12233 continue; 12234 12235 assert(Entry->VectorizedValue && "Can't find vectorizable value"); 12236 12237 // For each lane: 12238 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 12239 Value *Scalar = Entry->Scalars[Lane]; 12240 12241 if (Entry->getOpcode() == Instruction::GetElementPtr && 12242 !isa<GetElementPtrInst>(Scalar)) 12243 continue; 12244 #ifndef NDEBUG 12245 Type *Ty = Scalar->getType(); 12246 if (!Ty->isVoidTy()) { 12247 for (User *U : Scalar->users()) { 12248 LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); 12249 12250 // It is legal to delete users in the ignorelist. 12251 assert((getTreeEntry(U) || 12252 (UserIgnoreList && UserIgnoreList->contains(U)) || 12253 (isa_and_nonnull<Instruction>(U) && 12254 isDeleted(cast<Instruction>(U)))) && 12255 "Deleting out-of-tree value"); 12256 } 12257 } 12258 #endif 12259 LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); 12260 eraseInstruction(cast<Instruction>(Scalar)); 12261 // Retain to-be-deleted instructions for some debug-info 12262 // bookkeeping. NOTE: eraseInstruction only marks the instruction for 12263 // deletion - instructions are not deleted until later. 12264 RemovedInsts.push_back(cast<Instruction>(Scalar)); 12265 } 12266 } 12267 12268 // Merge the DIAssignIDs from the about-to-be-deleted instructions into the 12269 // new vector instruction. 12270 if (auto *V = dyn_cast<Instruction>(VectorizableTree[0]->VectorizedValue)) 12271 V->mergeDIAssignID(RemovedInsts); 12272 12273 Builder.ClearInsertionPoint(); 12274 InstrElementSize.clear(); 12275 12276 return VectorizableTree[0]->VectorizedValue; 12277 } 12278 12279 void BoUpSLP::optimizeGatherSequence() { 12280 LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherShuffleExtractSeq.size() 12281 << " gather sequences instructions.\n"); 12282 // LICM InsertElementInst sequences. 12283 for (Instruction *I : GatherShuffleExtractSeq) { 12284 if (isDeleted(I)) 12285 continue; 12286 12287 // Check if this block is inside a loop. 12288 Loop *L = LI->getLoopFor(I->getParent()); 12289 if (!L) 12290 continue; 12291 12292 // Check if it has a preheader. 12293 BasicBlock *PreHeader = L->getLoopPreheader(); 12294 if (!PreHeader) 12295 continue; 12296 12297 // If the vector or the element that we insert into it are 12298 // instructions that are defined in this basic block then we can't 12299 // hoist this instruction. 12300 if (any_of(I->operands(), [L](Value *V) { 12301 auto *OpI = dyn_cast<Instruction>(V); 12302 return OpI && L->contains(OpI); 12303 })) 12304 continue; 12305 12306 // We can hoist this instruction. Move it to the pre-header. 12307 I->moveBefore(PreHeader->getTerminator()); 12308 CSEBlocks.insert(PreHeader); 12309 } 12310 12311 // Make a list of all reachable blocks in our CSE queue. 12312 SmallVector<const DomTreeNode *, 8> CSEWorkList; 12313 CSEWorkList.reserve(CSEBlocks.size()); 12314 for (BasicBlock *BB : CSEBlocks) 12315 if (DomTreeNode *N = DT->getNode(BB)) { 12316 assert(DT->isReachableFromEntry(N)); 12317 CSEWorkList.push_back(N); 12318 } 12319 12320 // Sort blocks by domination. This ensures we visit a block after all blocks 12321 // dominating it are visited. 
12322 llvm::sort(CSEWorkList, [](const DomTreeNode *A, const DomTreeNode *B) {
12323 assert((A == B) == (A->getDFSNumIn() == B->getDFSNumIn()) &&
12324 "Different nodes should have different DFS numbers");
12325 return A->getDFSNumIn() < B->getDFSNumIn();
12326 });
12327 
12328 // Less defined shuffles can be replaced by more defined copies.
12329 // Between two shuffles, one is less defined if it has the same vector operands
12330 // and its mask indices are either the same as in the other one or undefs. E.g.
12331 // shuffle %0, poison, <0, 0, 0, undef> is less defined than shuffle %0,
12332 // poison, <0, 0, 0, 0>.
12333 auto &&IsIdenticalOrLessDefined = [this](Instruction *I1, Instruction *I2,
12334 SmallVectorImpl<int> &NewMask) {
12335 if (I1->getType() != I2->getType())
12336 return false;
12337 auto *SI1 = dyn_cast<ShuffleVectorInst>(I1);
12338 auto *SI2 = dyn_cast<ShuffleVectorInst>(I2);
12339 if (!SI1 || !SI2)
12340 return I1->isIdenticalTo(I2);
12341 if (SI1->isIdenticalTo(SI2))
12342 return true;
12343 for (int I = 0, E = SI1->getNumOperands(); I < E; ++I)
12344 if (SI1->getOperand(I) != SI2->getOperand(I))
12345 return false;
12346 // Check if the second instruction is more defined than the first one.
12347 NewMask.assign(SI2->getShuffleMask().begin(), SI2->getShuffleMask().end());
12348 ArrayRef<int> SM1 = SI1->getShuffleMask();
12349 // Count trailing undefs in the mask to check the final number of used
12350 // registers.
12351 unsigned LastUndefsCnt = 0;
12352 for (int I = 0, E = NewMask.size(); I < E; ++I) {
12353 if (SM1[I] == PoisonMaskElem)
12354 ++LastUndefsCnt;
12355 else
12356 LastUndefsCnt = 0;
12357 if (NewMask[I] != PoisonMaskElem && SM1[I] != PoisonMaskElem &&
12358 NewMask[I] != SM1[I])
12359 return false;
12360 if (NewMask[I] == PoisonMaskElem)
12361 NewMask[I] = SM1[I];
12362 }
12363 // Check if the last undefs actually change the final number of used vector
12364 // registers.
12365 return SM1.size() - LastUndefsCnt > 1 &&
12366 TTI->getNumberOfParts(SI1->getType()) ==
12367 TTI->getNumberOfParts(
12368 FixedVectorType::get(SI1->getType()->getElementType(),
12369 SM1.size() - LastUndefsCnt));
12370 };
12371 // Perform O(N^2) search over the gather/shuffle sequences and merge identical
12372 // instructions. TODO: We can further optimize this scan if we split the
12373 // instructions into different buckets based on the insert lane.
12374 SmallVector<Instruction *, 16> Visited;
12375 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
12376 assert(*I &&
12377 (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
12378 "Worklist not sorted properly!");
12379 BasicBlock *BB = (*I)->getBlock();
12380 // For all instructions in blocks containing gather sequences:
12381 for (Instruction &In : llvm::make_early_inc_range(*BB)) {
12382 if (isDeleted(&In))
12383 continue;
12384 if (!isa<InsertElementInst, ExtractElementInst, ShuffleVectorInst>(&In) &&
12385 !GatherShuffleExtractSeq.contains(&In))
12386 continue;
12387 
12388 // Check if we can replace this instruction with any of the
12389 // visited instructions.
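// Two directions are tried below: either In is replaced by an equivalent,
// already-visited instruction V whose block dominates In's block, or, when In
// is the more defined shuffle and its block dominates V's, V is replaced by In
// (and In is moved next to V).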
12390 bool Replaced = false; 12391 for (Instruction *&V : Visited) { 12392 SmallVector<int> NewMask; 12393 if (IsIdenticalOrLessDefined(&In, V, NewMask) && 12394 DT->dominates(V->getParent(), In.getParent())) { 12395 In.replaceAllUsesWith(V); 12396 eraseInstruction(&In); 12397 if (auto *SI = dyn_cast<ShuffleVectorInst>(V)) 12398 if (!NewMask.empty()) 12399 SI->setShuffleMask(NewMask); 12400 Replaced = true; 12401 break; 12402 } 12403 if (isa<ShuffleVectorInst>(In) && isa<ShuffleVectorInst>(V) && 12404 GatherShuffleExtractSeq.contains(V) && 12405 IsIdenticalOrLessDefined(V, &In, NewMask) && 12406 DT->dominates(In.getParent(), V->getParent())) { 12407 In.moveAfter(V); 12408 V->replaceAllUsesWith(&In); 12409 eraseInstruction(V); 12410 if (auto *SI = dyn_cast<ShuffleVectorInst>(&In)) 12411 if (!NewMask.empty()) 12412 SI->setShuffleMask(NewMask); 12413 V = &In; 12414 Replaced = true; 12415 break; 12416 } 12417 } 12418 if (!Replaced) { 12419 assert(!is_contained(Visited, &In)); 12420 Visited.push_back(&In); 12421 } 12422 } 12423 } 12424 CSEBlocks.clear(); 12425 GatherShuffleExtractSeq.clear(); 12426 } 12427 12428 BoUpSLP::ScheduleData * 12429 BoUpSLP::BlockScheduling::buildBundle(ArrayRef<Value *> VL) { 12430 ScheduleData *Bundle = nullptr; 12431 ScheduleData *PrevInBundle = nullptr; 12432 for (Value *V : VL) { 12433 if (doesNotNeedToBeScheduled(V)) 12434 continue; 12435 ScheduleData *BundleMember = getScheduleData(V); 12436 assert(BundleMember && 12437 "no ScheduleData for bundle member " 12438 "(maybe not in same basic block)"); 12439 assert(BundleMember->isSchedulingEntity() && 12440 "bundle member already part of other bundle"); 12441 if (PrevInBundle) { 12442 PrevInBundle->NextInBundle = BundleMember; 12443 } else { 12444 Bundle = BundleMember; 12445 } 12446 12447 // Group the instructions to a bundle. 12448 BundleMember->FirstInBundle = Bundle; 12449 PrevInBundle = BundleMember; 12450 } 12451 assert(Bundle && "Failed to find schedule bundle"); 12452 return Bundle; 12453 } 12454 12455 // Groups the instructions to a bundle (which is then a single scheduling entity) 12456 // and schedules instructions until the bundle gets ready. 12457 std::optional<BoUpSLP::ScheduleData *> 12458 BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 12459 const InstructionsState &S) { 12460 // No need to schedule PHIs, insertelement, extractelement and extractvalue 12461 // instructions. 12462 if (isa<PHINode>(S.OpValue) || isVectorLikeInstWithConstOps(S.OpValue) || 12463 doesNotNeedToSchedule(VL)) 12464 return nullptr; 12465 12466 // Initialize the instruction bundle. 12467 Instruction *OldScheduleEnd = ScheduleEnd; 12468 LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n"); 12469 12470 auto TryScheduleBundleImpl = [this, OldScheduleEnd, SLP](bool ReSchedule, 12471 ScheduleData *Bundle) { 12472 // The scheduling region got new instructions at the lower end (or it is a 12473 // new region for the first bundle). This makes it necessary to 12474 // recalculate all dependencies. 12475 // It is seldom that this needs to be done a second time after adding the 12476 // initial bundle to the region. 
12477 if (ScheduleEnd != OldScheduleEnd) { 12478 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) 12479 doForAllOpcodes(I, [](ScheduleData *SD) { SD->clearDependencies(); }); 12480 ReSchedule = true; 12481 } 12482 if (Bundle) { 12483 LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle 12484 << " in block " << BB->getName() << "\n"); 12485 calculateDependencies(Bundle, /*InsertInReadyList=*/true, SLP); 12486 } 12487 12488 if (ReSchedule) { 12489 resetSchedule(); 12490 initialFillReadyList(ReadyInsts); 12491 } 12492 12493 // Now try to schedule the new bundle or (if no bundle) just calculate 12494 // dependencies. As soon as the bundle is "ready" it means that there are no 12495 // cyclic dependencies and we can schedule it. Note that's important that we 12496 // don't "schedule" the bundle yet (see cancelScheduling). 12497 while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) && 12498 !ReadyInsts.empty()) { 12499 ScheduleData *Picked = ReadyInsts.pop_back_val(); 12500 assert(Picked->isSchedulingEntity() && Picked->isReady() && 12501 "must be ready to schedule"); 12502 schedule(Picked, ReadyInsts); 12503 } 12504 }; 12505 12506 // Make sure that the scheduling region contains all 12507 // instructions of the bundle. 12508 for (Value *V : VL) { 12509 if (doesNotNeedToBeScheduled(V)) 12510 continue; 12511 if (!extendSchedulingRegion(V, S)) { 12512 // If the scheduling region got new instructions at the lower end (or it 12513 // is a new region for the first bundle). This makes it necessary to 12514 // recalculate all dependencies. 12515 // Otherwise the compiler may crash trying to incorrectly calculate 12516 // dependencies and emit instruction in the wrong order at the actual 12517 // scheduling. 12518 TryScheduleBundleImpl(/*ReSchedule=*/false, nullptr); 12519 return std::nullopt; 12520 } 12521 } 12522 12523 bool ReSchedule = false; 12524 for (Value *V : VL) { 12525 if (doesNotNeedToBeScheduled(V)) 12526 continue; 12527 ScheduleData *BundleMember = getScheduleData(V); 12528 assert(BundleMember && 12529 "no ScheduleData for bundle member (maybe not in same basic block)"); 12530 12531 // Make sure we don't leave the pieces of the bundle in the ready list when 12532 // whole bundle might not be ready. 12533 ReadyInsts.remove(BundleMember); 12534 12535 if (!BundleMember->IsScheduled) 12536 continue; 12537 // A bundle member was scheduled as single instruction before and now 12538 // needs to be scheduled as part of the bundle. We just get rid of the 12539 // existing schedule. 
12540 LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember 12541 << " was already scheduled\n"); 12542 ReSchedule = true; 12543 } 12544 12545 auto *Bundle = buildBundle(VL); 12546 TryScheduleBundleImpl(ReSchedule, Bundle); 12547 if (!Bundle->isReady()) { 12548 cancelScheduling(VL, S.OpValue); 12549 return std::nullopt; 12550 } 12551 return Bundle; 12552 } 12553 12554 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL, 12555 Value *OpValue) { 12556 if (isa<PHINode>(OpValue) || isVectorLikeInstWithConstOps(OpValue) || 12557 doesNotNeedToSchedule(VL)) 12558 return; 12559 12560 if (doesNotNeedToBeScheduled(OpValue)) 12561 OpValue = *find_if_not(VL, doesNotNeedToBeScheduled); 12562 ScheduleData *Bundle = getScheduleData(OpValue); 12563 LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n"); 12564 assert(!Bundle->IsScheduled && 12565 "Can't cancel bundle which is already scheduled"); 12566 assert(Bundle->isSchedulingEntity() && 12567 (Bundle->isPartOfBundle() || needToScheduleSingleInstruction(VL)) && 12568 "tried to unbundle something which is not a bundle"); 12569 12570 // Remove the bundle from the ready list. 12571 if (Bundle->isReady()) 12572 ReadyInsts.remove(Bundle); 12573 12574 // Un-bundle: make single instructions out of the bundle. 12575 ScheduleData *BundleMember = Bundle; 12576 while (BundleMember) { 12577 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links"); 12578 BundleMember->FirstInBundle = BundleMember; 12579 ScheduleData *Next = BundleMember->NextInBundle; 12580 BundleMember->NextInBundle = nullptr; 12581 BundleMember->TE = nullptr; 12582 if (BundleMember->unscheduledDepsInBundle() == 0) { 12583 ReadyInsts.insert(BundleMember); 12584 } 12585 BundleMember = Next; 12586 } 12587 } 12588 12589 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() { 12590 // Allocate a new ScheduleData for the instruction. 12591 if (ChunkPos >= ChunkSize) { 12592 ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize)); 12593 ChunkPos = 0; 12594 } 12595 return &(ScheduleDataChunks.back()[ChunkPos++]); 12596 } 12597 12598 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V, 12599 const InstructionsState &S) { 12600 if (getScheduleData(V, isOneOf(S, V))) 12601 return true; 12602 Instruction *I = dyn_cast<Instruction>(V); 12603 assert(I && "bundle member must be an instruction"); 12604 assert(!isa<PHINode>(I) && !isVectorLikeInstWithConstOps(I) && 12605 !doesNotNeedToBeScheduled(I) && 12606 "phi nodes/insertelements/extractelements/extractvalues don't need to " 12607 "be scheduled"); 12608 auto &&CheckScheduleForI = [this, &S](Instruction *I) -> bool { 12609 ScheduleData *ISD = getScheduleData(I); 12610 if (!ISD) 12611 return false; 12612 assert(isInSchedulingRegion(ISD) && 12613 "ScheduleData not in scheduling region"); 12614 ScheduleData *SD = allocateScheduleDataChunks(); 12615 SD->Inst = I; 12616 SD->init(SchedulingRegionID, S.OpValue); 12617 ExtraScheduleDataMap[I][S.OpValue] = SD; 12618 return true; 12619 }; 12620 if (CheckScheduleForI(I)) 12621 return true; 12622 if (!ScheduleStart) { 12623 // It's the first instruction in the new region. 
12624 initScheduleData(I, I->getNextNode(), nullptr, nullptr); 12625 ScheduleStart = I; 12626 ScheduleEnd = I->getNextNode(); 12627 if (isOneOf(S, I) != I) 12628 CheckScheduleForI(I); 12629 assert(ScheduleEnd && "tried to vectorize a terminator?"); 12630 LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); 12631 return true; 12632 } 12633 // Search up and down at the same time, because we don't know if the new 12634 // instruction is above or below the existing scheduling region. 12635 // Ignore debug info (and other "AssumeLike" intrinsics) so that's not counted 12636 // against the budget. Otherwise debug info could affect codegen. 12637 BasicBlock::reverse_iterator UpIter = 12638 ++ScheduleStart->getIterator().getReverse(); 12639 BasicBlock::reverse_iterator UpperEnd = BB->rend(); 12640 BasicBlock::iterator DownIter = ScheduleEnd->getIterator(); 12641 BasicBlock::iterator LowerEnd = BB->end(); 12642 auto IsAssumeLikeIntr = [](const Instruction &I) { 12643 if (auto *II = dyn_cast<IntrinsicInst>(&I)) 12644 return II->isAssumeLikeIntrinsic(); 12645 return false; 12646 }; 12647 UpIter = std::find_if_not(UpIter, UpperEnd, IsAssumeLikeIntr); 12648 DownIter = std::find_if_not(DownIter, LowerEnd, IsAssumeLikeIntr); 12649 while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I && 12650 &*DownIter != I) { 12651 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) { 12652 LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); 12653 return false; 12654 } 12655 12656 ++UpIter; 12657 ++DownIter; 12658 12659 UpIter = std::find_if_not(UpIter, UpperEnd, IsAssumeLikeIntr); 12660 DownIter = std::find_if_not(DownIter, LowerEnd, IsAssumeLikeIntr); 12661 } 12662 if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) { 12663 assert(I->getParent() == ScheduleStart->getParent() && 12664 "Instruction is in wrong basic block."); 12665 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); 12666 ScheduleStart = I; 12667 if (isOneOf(S, I) != I) 12668 CheckScheduleForI(I); 12669 LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I 12670 << "\n"); 12671 return true; 12672 } 12673 assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) && 12674 "Expected to reach top of the basic block or instruction down the " 12675 "lower end."); 12676 assert(I->getParent() == ScheduleEnd->getParent() && 12677 "Instruction is in wrong basic block."); 12678 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, 12679 nullptr); 12680 ScheduleEnd = I->getNextNode(); 12681 if (isOneOf(S, I) != I) 12682 CheckScheduleForI(I); 12683 assert(ScheduleEnd && "tried to vectorize a terminator?"); 12684 LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n"); 12685 return true; 12686 } 12687 12688 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, 12689 Instruction *ToI, 12690 ScheduleData *PrevLoadStore, 12691 ScheduleData *NextLoadStore) { 12692 ScheduleData *CurrentLoadStore = PrevLoadStore; 12693 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { 12694 // No need to allocate data for non-schedulable instructions. 
12695 if (doesNotNeedToBeScheduled(I))
12696 continue;
12697 ScheduleData *SD = ScheduleDataMap.lookup(I);
12698 if (!SD) {
12699 SD = allocateScheduleDataChunks();
12700 ScheduleDataMap[I] = SD;
12701 SD->Inst = I;
12702 }
12703 assert(!isInSchedulingRegion(SD) &&
12704 "new ScheduleData already in scheduling region");
12705 SD->init(SchedulingRegionID, I);
12706 
12707 if (I->mayReadOrWriteMemory() &&
12708 (!isa<IntrinsicInst>(I) ||
12709 (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect &&
12710 cast<IntrinsicInst>(I)->getIntrinsicID() !=
12711 Intrinsic::pseudoprobe))) {
12712 // Update the linked list of memory accessing instructions.
12713 if (CurrentLoadStore) {
12714 CurrentLoadStore->NextLoadStore = SD;
12715 } else {
12716 FirstLoadStoreInRegion = SD;
12717 }
12718 CurrentLoadStore = SD;
12719 }
12720 
12721 if (match(I, m_Intrinsic<Intrinsic::stacksave>()) ||
12722 match(I, m_Intrinsic<Intrinsic::stackrestore>()))
12723 RegionHasStackSave = true;
12724 }
12725 if (NextLoadStore) {
12726 if (CurrentLoadStore)
12727 CurrentLoadStore->NextLoadStore = NextLoadStore;
12728 } else {
12729 LastLoadStoreInRegion = CurrentLoadStore;
12730 }
12731 }
12732 
12733 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
12734 bool InsertInReadyList,
12735 BoUpSLP *SLP) {
12736 assert(SD->isSchedulingEntity());
12737 
12738 SmallVector<ScheduleData *, 10> WorkList;
12739 WorkList.push_back(SD);
12740 
12741 while (!WorkList.empty()) {
12742 ScheduleData *SD = WorkList.pop_back_val();
12743 for (ScheduleData *BundleMember = SD; BundleMember;
12744 BundleMember = BundleMember->NextInBundle) {
12745 assert(isInSchedulingRegion(BundleMember));
12746 if (BundleMember->hasValidDependencies())
12747 continue;
12748 
12749 LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember
12750 << "\n");
12751 BundleMember->Dependencies = 0;
12752 BundleMember->resetUnscheduledDeps();
12753 
12754 // Handle def-use chain dependencies.
12755 if (BundleMember->OpValue != BundleMember->Inst) {
12756 if (ScheduleData *UseSD = getScheduleData(BundleMember->Inst)) {
12757 BundleMember->Dependencies++;
12758 ScheduleData *DestBundle = UseSD->FirstInBundle;
12759 if (!DestBundle->IsScheduled)
12760 BundleMember->incrementUnscheduledDeps(1);
12761 if (!DestBundle->hasValidDependencies())
12762 WorkList.push_back(DestBundle);
12763 }
12764 } else {
12765 for (User *U : BundleMember->Inst->users()) {
12766 if (ScheduleData *UseSD = getScheduleData(cast<Instruction>(U))) {
12767 BundleMember->Dependencies++;
12768 ScheduleData *DestBundle = UseSD->FirstInBundle;
12769 if (!DestBundle->IsScheduled)
12770 BundleMember->incrementUnscheduledDeps(1);
12771 if (!DestBundle->hasValidDependencies())
12772 WorkList.push_back(DestBundle);
12773 }
12774 }
12775 }
12776 
12777 auto MakeControlDependent = [&](Instruction *I) {
12778 auto *DepDest = getScheduleData(I);
12779 assert(DepDest && "must be in schedule window");
12780 DepDest->ControlDependencies.push_back(BundleMember);
12781 BundleMember->Dependencies++;
12782 ScheduleData *DestBundle = DepDest->FirstInBundle;
12783 if (!DestBundle->IsScheduled)
12784 BundleMember->incrementUnscheduledDeps(1);
12785 if (!DestBundle->hasValidDependencies())
12786 WorkList.push_back(DestBundle);
12787 };
12788 
12789 // Any instruction which isn't safe to speculate at the beginning of the
12790 // block is control dependent on any early exit or non-willreturn call
12791 // which precedes it.
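// For example, a store or a potentially faulting load that follows a call
// which might not return must not be scheduled above that call; the control
// dependence recorded here prevents such reordering.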
12792 if (!isGuaranteedToTransferExecutionToSuccessor(BundleMember->Inst)) {
12793 for (Instruction *I = BundleMember->Inst->getNextNode();
12794 I != ScheduleEnd; I = I->getNextNode()) {
12795 if (isSafeToSpeculativelyExecute(I, &*BB->begin(), SLP->AC))
12796 continue;
12797 
12798 // Add the dependency
12799 MakeControlDependent(I);
12800 
12801 if (!isGuaranteedToTransferExecutionToSuccessor(I))
12802 // Everything past here must be control dependent on I.
12803 break;
12804 }
12805 }
12806 
12807 if (RegionHasStackSave) {
12808 // If we have an inalloca alloca instruction, it needs to be scheduled
12809 // after any preceding stacksave. We also need to prevent any alloca
12810 // from reordering above a preceding stackrestore.
12811 if (match(BundleMember->Inst, m_Intrinsic<Intrinsic::stacksave>()) ||
12812 match(BundleMember->Inst, m_Intrinsic<Intrinsic::stackrestore>())) {
12813 for (Instruction *I = BundleMember->Inst->getNextNode();
12814 I != ScheduleEnd; I = I->getNextNode()) {
12815 if (match(I, m_Intrinsic<Intrinsic::stacksave>()) ||
12816 match(I, m_Intrinsic<Intrinsic::stackrestore>()))
12817 // Any allocas past here must be control dependent on I, and I
12818 // must be memory dependent on BundleMember->Inst.
12819 break;
12820 
12821 if (!isa<AllocaInst>(I))
12822 continue;
12823 
12824 // Add the dependency
12825 MakeControlDependent(I);
12826 }
12827 }
12828 
12829 // In addition to the cases handled just above, we need to prevent
12830 // allocas and loads/stores from moving below a stacksave or a
12831 // stackrestore. Avoiding moving allocas below a stackrestore is currently
12832 // done out of conservatism. Moving loads/stores below a stackrestore
12833 // can lead to incorrect code.
12834 if (isa<AllocaInst>(BundleMember->Inst) ||
12835 BundleMember->Inst->mayReadOrWriteMemory()) {
12836 for (Instruction *I = BundleMember->Inst->getNextNode();
12837 I != ScheduleEnd; I = I->getNextNode()) {
12838 if (!match(I, m_Intrinsic<Intrinsic::stacksave>()) &&
12839 !match(I, m_Intrinsic<Intrinsic::stackrestore>()))
12840 continue;
12841 
12842 // Add the dependency
12843 MakeControlDependent(I);
12844 break;
12845 }
12846 }
12847 }
12848 
12849 // Handle the memory dependencies (if any).
12850 ScheduleData *DepDest = BundleMember->NextLoadStore;
12851 if (!DepDest)
12852 continue;
12853 Instruction *SrcInst = BundleMember->Inst;
12854 assert(SrcInst->mayReadOrWriteMemory() &&
12855 "NextLoadStore list for non memory effecting bundle?");
12856 MemoryLocation SrcLoc = getLocation(SrcInst);
12857 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
12858 unsigned NumAliased = 0;
12859 unsigned DistToSrc = 1;
12860 
12861 for (; DepDest; DepDest = DepDest->NextLoadStore) {
12862 assert(isInSchedulingRegion(DepDest));
12863 
12864 // We have two limits to reduce the complexity:
12865 // 1) AliasedCheckLimit: It's a small limit to reduce calls to
12866 // SLP->isAliased (which is the expensive part in this loop).
12867 // 2) MaxMemDepDistance: It's for very large blocks and it aborts
12868 // the whole loop (even if the loop is fast, it's quadratic).
12869 // It's important for the loop break condition (see below) to
12870 // check this limit even between two read-only instructions.
12871 if (DistToSrc >= MaxMemDepDistance || 12872 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) && 12873 (NumAliased >= AliasedCheckLimit || 12874 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) { 12875 12876 // We increment the counter only if the locations are aliased 12877 // (instead of counting all alias checks). This gives a better 12878 // balance between reduced runtime and accurate dependencies. 12879 NumAliased++; 12880 12881 DepDest->MemoryDependencies.push_back(BundleMember); 12882 BundleMember->Dependencies++; 12883 ScheduleData *DestBundle = DepDest->FirstInBundle; 12884 if (!DestBundle->IsScheduled) { 12885 BundleMember->incrementUnscheduledDeps(1); 12886 } 12887 if (!DestBundle->hasValidDependencies()) { 12888 WorkList.push_back(DestBundle); 12889 } 12890 } 12891 12892 // Example, explaining the loop break condition: Let's assume our 12893 // starting instruction is i0 and MaxMemDepDistance = 3. 12894 // 12895 // +--------v--v--v 12896 // i0,i1,i2,i3,i4,i5,i6,i7,i8 12897 // +--------^--^--^ 12898 // 12899 // MaxMemDepDistance let us stop alias-checking at i3 and we add 12900 // dependencies from i0 to i3,i4,.. (even if they are not aliased). 12901 // Previously we already added dependencies from i3 to i6,i7,i8 12902 // (because of MaxMemDepDistance). As we added a dependency from 12903 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8 12904 // and we can abort this loop at i6. 12905 if (DistToSrc >= 2 * MaxMemDepDistance) 12906 break; 12907 DistToSrc++; 12908 } 12909 } 12910 if (InsertInReadyList && SD->isReady()) { 12911 ReadyInsts.insert(SD); 12912 LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst 12913 << "\n"); 12914 } 12915 } 12916 } 12917 12918 void BoUpSLP::BlockScheduling::resetSchedule() { 12919 assert(ScheduleStart && 12920 "tried to reset schedule on block which has not been scheduled"); 12921 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 12922 doForAllOpcodes(I, [&](ScheduleData *SD) { 12923 assert(isInSchedulingRegion(SD) && 12924 "ScheduleData not in scheduling region"); 12925 SD->IsScheduled = false; 12926 SD->resetUnscheduledDeps(); 12927 }); 12928 } 12929 ReadyInsts.clear(); 12930 } 12931 12932 void BoUpSLP::scheduleBlock(BlockScheduling *BS) { 12933 if (!BS->ScheduleStart) 12934 return; 12935 12936 LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n"); 12937 12938 // A key point - if we got here, pre-scheduling was able to find a valid 12939 // scheduling of the sub-graph of the scheduling window which consists 12940 // of all vector bundles and their transitive users. As such, we do not 12941 // need to reschedule anything *outside of* that subgraph. 12942 12943 BS->resetSchedule(); 12944 12945 // For the real scheduling we use a more sophisticated ready-list: it is 12946 // sorted by the original instruction location. This lets the final schedule 12947 // be as close as possible to the original instruction order. 12948 // WARNING: If changing this order causes a correctness issue, that means 12949 // there is some missing dependence edge in the schedule data graph. 12950 struct ScheduleDataCompare { 12951 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const { 12952 return SD2->SchedulingPriority < SD1->SchedulingPriority; 12953 } 12954 }; 12955 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts; 12956 12957 // Ensure that all dependency data is updated (for nodes in the sub-graph) 12958 // and fill the ready-list with initial instructions. 
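// SchedulingPriority is assigned from the original instruction order here;
// the ScheduleDataCompare ready-list above uses it to keep the final schedule
// as close as possible to the source order.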
12959 int Idx = 0; 12960 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; 12961 I = I->getNextNode()) { 12962 BS->doForAllOpcodes(I, [this, &Idx, BS](ScheduleData *SD) { 12963 TreeEntry *SDTE = getTreeEntry(SD->Inst); 12964 (void)SDTE; 12965 assert((isVectorLikeInstWithConstOps(SD->Inst) || 12966 SD->isPartOfBundle() == 12967 (SDTE && !doesNotNeedToSchedule(SDTE->Scalars))) && 12968 "scheduler and vectorizer bundle mismatch"); 12969 SD->FirstInBundle->SchedulingPriority = Idx++; 12970 12971 if (SD->isSchedulingEntity() && SD->isPartOfBundle()) 12972 BS->calculateDependencies(SD, false, this); 12973 }); 12974 } 12975 BS->initialFillReadyList(ReadyInsts); 12976 12977 Instruction *LastScheduledInst = BS->ScheduleEnd; 12978 12979 // Do the "real" scheduling. 12980 while (!ReadyInsts.empty()) { 12981 ScheduleData *Picked = *ReadyInsts.begin(); 12982 ReadyInsts.erase(ReadyInsts.begin()); 12983 12984 // Move the scheduled instruction(s) to their dedicated places, if not 12985 // there yet. 12986 for (ScheduleData *BundleMember = Picked; BundleMember; 12987 BundleMember = BundleMember->NextInBundle) { 12988 Instruction *PickedInst = BundleMember->Inst; 12989 if (PickedInst->getNextNode() != LastScheduledInst) 12990 PickedInst->moveBefore(LastScheduledInst); 12991 LastScheduledInst = PickedInst; 12992 } 12993 12994 BS->schedule(Picked, ReadyInsts); 12995 } 12996 12997 // Check that we didn't break any of our invariants. 12998 #ifdef EXPENSIVE_CHECKS 12999 BS->verify(); 13000 #endif 13001 13002 #if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS) 13003 // Check that all schedulable entities got scheduled 13004 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; I = I->getNextNode()) { 13005 BS->doForAllOpcodes(I, [&](ScheduleData *SD) { 13006 if (SD->isSchedulingEntity() && SD->hasValidDependencies()) { 13007 assert(SD->IsScheduled && "must be scheduled at this point"); 13008 } 13009 }); 13010 } 13011 #endif 13012 13013 // Avoid duplicate scheduling of the block. 13014 BS->ScheduleStart = nullptr; 13015 } 13016 13017 unsigned BoUpSLP::getVectorElementSize(Value *V) { 13018 // If V is a store, just return the width of the stored value (or value 13019 // truncated just before storing) without traversing the expression tree. 13020 // This is the common case. 13021 if (auto *Store = dyn_cast<StoreInst>(V)) 13022 return DL->getTypeSizeInBits(Store->getValueOperand()->getType()); 13023 13024 if (auto *IEI = dyn_cast<InsertElementInst>(V)) 13025 return getVectorElementSize(IEI->getOperand(1)); 13026 13027 auto E = InstrElementSize.find(V); 13028 if (E != InstrElementSize.end()) 13029 return E->second; 13030 13031 // If V is not a store, we can traverse the expression tree to find loads 13032 // that feed it. The type of the loaded value may indicate a more suitable 13033 // width than V's type. We want to base the vector element size on the width 13034 // of memory operations where possible. 13035 SmallVector<std::pair<Instruction *, BasicBlock *>, 16> Worklist; 13036 SmallPtrSet<Instruction *, 16> Visited; 13037 if (auto *I = dyn_cast<Instruction>(V)) { 13038 Worklist.emplace_back(I, I->getParent()); 13039 Visited.insert(I); 13040 } 13041 13042 // Traverse the expression tree in bottom-up order looking for loads. If we 13043 // encounter an instruction we don't yet handle, we give up. 
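// For example, if V is an i32 value computed entirely from i8 loads, the
// width returned here is 8 rather than 32, which is a better basis for
// choosing the vector element size.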
13044 auto Width = 0u; 13045 while (!Worklist.empty()) { 13046 Instruction *I; 13047 BasicBlock *Parent; 13048 std::tie(I, Parent) = Worklist.pop_back_val(); 13049 13050 // We should only be looking at scalar instructions here. If the current 13051 // instruction has a vector type, skip. 13052 auto *Ty = I->getType(); 13053 if (isa<VectorType>(Ty)) 13054 continue; 13055 13056 // If the current instruction is a load, update MaxWidth to reflect the 13057 // width of the loaded value. 13058 if (isa<LoadInst, ExtractElementInst, ExtractValueInst>(I)) 13059 Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty)); 13060 13061 // Otherwise, we need to visit the operands of the instruction. We only 13062 // handle the interesting cases from buildTree here. If an operand is an 13063 // instruction we haven't yet visited and from the same basic block as the 13064 // user or the use is a PHI node, we add it to the worklist. 13065 else if (isa<PHINode, CastInst, GetElementPtrInst, CmpInst, SelectInst, 13066 BinaryOperator, UnaryOperator>(I)) { 13067 for (Use &U : I->operands()) 13068 if (auto *J = dyn_cast<Instruction>(U.get())) 13069 if (Visited.insert(J).second && 13070 (isa<PHINode>(I) || J->getParent() == Parent)) 13071 Worklist.emplace_back(J, J->getParent()); 13072 } else { 13073 break; 13074 } 13075 } 13076 13077 // If we didn't encounter a memory access in the expression tree, or if we 13078 // gave up for some reason, just return the width of V. Otherwise, return the 13079 // maximum width we found. 13080 if (!Width) { 13081 if (auto *CI = dyn_cast<CmpInst>(V)) 13082 V = CI->getOperand(0); 13083 Width = DL->getTypeSizeInBits(V->getType()); 13084 } 13085 13086 for (Instruction *I : Visited) 13087 InstrElementSize[I] = Width; 13088 13089 return Width; 13090 } 13091 13092 // Determine if a value V in a vectorizable expression Expr can be demoted to a 13093 // smaller type with a truncation. We collect the values that will be demoted 13094 // in ToDemote and additional roots that require investigating in Roots. 13095 bool BoUpSLP::collectValuesToDemote( 13096 Value *V, SmallVectorImpl<Value *> &ToDemote, 13097 DenseMap<Instruction *, SmallVector<unsigned>> &DemotedConsts, 13098 SmallVectorImpl<Value *> &Roots, DenseSet<Value *> &Visited) const { 13099 // We can always demote constants. 13100 if (isa<Constant>(V)) 13101 return true; 13102 13103 // If the value is not a vectorized instruction in the expression and not used 13104 // by the insertelement instruction and not used in multiple vector nodes, it 13105 // cannot be demoted. 13106 auto *I = dyn_cast<Instruction>(V); 13107 if (!I || !getTreeEntry(I) || MultiNodeScalars.contains(I) || 13108 !Visited.insert(I).second || all_of(I->users(), [&](User *U) { 13109 return isa<InsertElementInst>(U) && !getTreeEntry(U); 13110 })) 13111 return false; 13112 13113 unsigned Start = 0; 13114 unsigned End = I->getNumOperands(); 13115 switch (I->getOpcode()) { 13116 13117 // We can always demote truncations and extensions. Since truncations can 13118 // seed additional demotion, we save the truncated value. 13119 case Instruction::Trunc: 13120 Roots.push_back(I->getOperand(0)); 13121 break; 13122 case Instruction::ZExt: 13123 case Instruction::SExt: 13124 if (isa<ExtractElementInst, InsertElementInst>(I->getOperand(0))) 13125 return false; 13126 break; 13127 13128 // We can demote certain binary operations if we can demote both of their 13129 // operands. 
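// (Only add/sub/mul and the bitwise and/or/xor cases below are handled;
// anything else, e.g. shifts or divisions, falls through to the conservative
// default and blocks demotion.)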
13130 case Instruction::Add: 13131 case Instruction::Sub: 13132 case Instruction::Mul: 13133 case Instruction::And: 13134 case Instruction::Or: 13135 case Instruction::Xor: 13136 if (!collectValuesToDemote(I->getOperand(0), ToDemote, DemotedConsts, Roots, 13137 Visited) || 13138 !collectValuesToDemote(I->getOperand(1), ToDemote, DemotedConsts, Roots, 13139 Visited)) 13140 return false; 13141 break; 13142 13143 // We can demote selects if we can demote their true and false values. 13144 case Instruction::Select: { 13145 Start = 1; 13146 SelectInst *SI = cast<SelectInst>(I); 13147 if (!collectValuesToDemote(SI->getTrueValue(), ToDemote, DemotedConsts, 13148 Roots, Visited) || 13149 !collectValuesToDemote(SI->getFalseValue(), ToDemote, DemotedConsts, 13150 Roots, Visited)) 13151 return false; 13152 break; 13153 } 13154 13155 // We can demote phis if we can demote all their incoming operands. Note that 13156 // we don't need to worry about cycles since we ensure single use above. 13157 case Instruction::PHI: { 13158 PHINode *PN = cast<PHINode>(I); 13159 for (Value *IncValue : PN->incoming_values()) 13160 if (!collectValuesToDemote(IncValue, ToDemote, DemotedConsts, Roots, 13161 Visited)) 13162 return false; 13163 break; 13164 } 13165 13166 // Otherwise, conservatively give up. 13167 default: 13168 return false; 13169 } 13170 13171 // Gather demoted constant operands. 13172 for (unsigned Idx : seq<unsigned>(Start, End)) 13173 if (isa<Constant>(I->getOperand(Idx))) 13174 DemotedConsts.try_emplace(I).first->getSecond().push_back(Idx); 13175 // Record the value that we can demote. 13176 ToDemote.push_back(V); 13177 return true; 13178 } 13179 13180 void BoUpSLP::computeMinimumValueSizes() { 13181 // We only attempt to truncate integer expressions. 13182 auto &TreeRoot = VectorizableTree[0]->Scalars; 13183 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType()); 13184 if (!TreeRootIT) 13185 return; 13186 13187 // Ensure the roots of the vectorizable tree don't form a cycle. 13188 if (!VectorizableTree.front()->UserTreeIndices.empty()) 13189 return; 13190 13191 // Conservatively determine if we can actually truncate the roots of the 13192 // expression. Collect the values that can be demoted in ToDemote and 13193 // additional roots that require investigating in Roots. 13194 SmallVector<Value *, 32> ToDemote; 13195 DenseMap<Instruction *, SmallVector<unsigned>> DemotedConsts; 13196 SmallVector<Value *, 4> Roots; 13197 for (auto *Root : TreeRoot) { 13198 DenseSet<Value *> Visited; 13199 if (!collectValuesToDemote(Root, ToDemote, DemotedConsts, Roots, Visited)) 13200 return; 13201 } 13202 13203 // The maximum bit width required to represent all the values that can be 13204 // demoted without loss of precision. It would be safe to truncate the roots 13205 // of the expression to this width. 13206 auto MaxBitWidth = 1u; 13207 13208 // We first check if all the bits of the roots are demanded. If they're not, 13209 // we can truncate the roots to this narrower type. 13210 for (auto *Root : TreeRoot) { 13211 auto Mask = DB->getDemandedBits(cast<Instruction>(Root)); 13212 MaxBitWidth = std::max<unsigned>(Mask.getBitWidth() - Mask.countl_zero(), 13213 MaxBitWidth); 13214 } 13215 13216 // True if the roots can be zero-extended back to their original type, rather 13217 // than sign-extended. We know that if the leading bits are not demanded, we 13218 // can safely zero-extend. So we initialize IsKnownPositive to True. 
13219 bool IsKnownPositive = true;
13220
13221 // If all the bits of the roots are demanded, we can try a little harder to
13222 // compute a narrower type. This can happen, for example, if the roots are
13223 // getelementptr indices. InstCombine promotes these indices to the pointer
13224 // width. Thus, all their bits are technically demanded even though the
13225 // address computation might be vectorized in a smaller type.
13226 //
13227 // We start by looking at each entry that can be demoted. We compute the
13228 // maximum bit width required to store the scalar by using ValueTracking to
13229 // compute the number of high-order bits we can truncate.
13230 if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
13231 all_of(TreeRoot, [](Value *V) {
13232 return all_of(V->users(),
13233 [](User *U) { return isa<GetElementPtrInst>(U); });
13234 })) {
13235 MaxBitWidth = 8u;
13236
13237 // Determine if the sign bit of all the roots is known to be zero. If not,
13238 // IsKnownPositive is set to False.
13239 IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
13240 KnownBits Known = computeKnownBits(R, *DL);
13241 return Known.isNonNegative();
13242 });
13243
13244 // Determine the maximum number of bits required to store the scalar
13245 // values.
13246 for (auto *Scalar : ToDemote) {
13247 auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
13248 auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
13249 MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
13250 }
13251
13252 // If we can't prove that the sign bit is zero, we must add one to the
13253 // maximum bit width to account for the unknown sign bit. This preserves
13254 // the existing sign bit so we can safely sign-extend the root back to the
13255 // original type. Otherwise, if we know the sign bit is zero, we will
13256 // zero-extend the root instead.
13257 //
13258 // FIXME: This is somewhat suboptimal, as there will be cases where adding
13259 // one to the maximum bit width will yield a larger-than-necessary
13260 // type. In general, we need to add an extra bit only if we can't
13261 // prove that the upper bit of the original type is equal to the
13262 // upper bit of the proposed smaller type. If these two bits are the
13263 // same (either zero or one) we know that sign-extending from the
13264 // smaller type will result in the same value. Here, since we can't
13265 // yet prove this, we are just making the proposed smaller type
13266 // larger to ensure correctness.
13267 if (!IsKnownPositive)
13268 ++MaxBitWidth;
13269 }
13270
13271 // Round MaxBitWidth up to the next power-of-two.
13272 MaxBitWidth = llvm::bit_ceil(MaxBitWidth);
13273
13274 // If the maximum bit width we compute is less than the width of the roots'
13275 // type, we can proceed with the narrowing. Otherwise, do nothing.
13276 if (MaxBitWidth >= TreeRootIT->getBitWidth())
13277 return;
13278
13279 // If we can truncate the root, we must collect additional values that might
13280 // be demoted as a result. That is, those seeded by truncations we will
13281 // modify.
13282 while (!Roots.empty()) {
13283 DenseSet<Value *> Visited;
13284 collectValuesToDemote(Roots.pop_back_val(), ToDemote, DemotedConsts, Roots,
13285 Visited);
13286 }
13287
13288 // Finally, map the values we can demote to the maximum bit width we computed.
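// Illustrative example (assuming 32-bit roots): if every demoted scalar
// needs at most 7 significant bits and the sign bit cannot be proven zero,
// MaxBitWidth becomes 7 + 1 = 8 after rounding up to a power-of-two, and the
// loop below records a MinBWs entry of {8, /*IsSigned=*/true} for each
// demoted tree entry.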
13289 for (auto *Scalar : ToDemote) { 13290 auto *TE = getTreeEntry(Scalar); 13291 assert(TE && "Expected vectorized scalar."); 13292 if (MinBWs.contains(TE)) 13293 continue; 13294 bool IsSigned = any_of(TE->Scalars, [&](Value *R) { 13295 KnownBits Known = computeKnownBits(R, *DL); 13296 return !Known.isNonNegative(); 13297 }); 13298 MinBWs.try_emplace(TE, MaxBitWidth, IsSigned); 13299 const auto *I = cast<Instruction>(Scalar); 13300 auto DCIt = DemotedConsts.find(I); 13301 if (DCIt != DemotedConsts.end()) { 13302 for (unsigned Idx : DCIt->getSecond()) { 13303 // Check that all instructions operands are demoted. 13304 if (all_of(TE->Scalars, [&](Value *V) { 13305 auto SIt = DemotedConsts.find(cast<Instruction>(V)); 13306 return SIt != DemotedConsts.end() && 13307 is_contained(SIt->getSecond(), Idx); 13308 })) { 13309 const TreeEntry *CTE = getOperandEntry(TE, Idx); 13310 MinBWs.try_emplace(CTE, MaxBitWidth, IsSigned); 13311 } 13312 } 13313 } 13314 } 13315 } 13316 13317 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) { 13318 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F); 13319 auto *TTI = &AM.getResult<TargetIRAnalysis>(F); 13320 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F); 13321 auto *AA = &AM.getResult<AAManager>(F); 13322 auto *LI = &AM.getResult<LoopAnalysis>(F); 13323 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); 13324 auto *AC = &AM.getResult<AssumptionAnalysis>(F); 13325 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F); 13326 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 13327 13328 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 13329 if (!Changed) 13330 return PreservedAnalyses::all(); 13331 13332 PreservedAnalyses PA; 13333 PA.preserveSet<CFGAnalyses>(); 13334 return PA; 13335 } 13336 13337 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_, 13338 TargetTransformInfo *TTI_, 13339 TargetLibraryInfo *TLI_, AAResults *AA_, 13340 LoopInfo *LI_, DominatorTree *DT_, 13341 AssumptionCache *AC_, DemandedBits *DB_, 13342 OptimizationRemarkEmitter *ORE_) { 13343 if (!RunSLPVectorization) 13344 return false; 13345 SE = SE_; 13346 TTI = TTI_; 13347 TLI = TLI_; 13348 AA = AA_; 13349 LI = LI_; 13350 DT = DT_; 13351 AC = AC_; 13352 DB = DB_; 13353 DL = &F.getParent()->getDataLayout(); 13354 13355 Stores.clear(); 13356 GEPs.clear(); 13357 bool Changed = false; 13358 13359 // If the target claims to have no vector registers don't attempt 13360 // vectorization. 13361 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true))) { 13362 LLVM_DEBUG( 13363 dbgs() << "SLP: Didn't find any vector registers for target, abort.\n"); 13364 return false; 13365 } 13366 13367 // Don't vectorize when the attribute NoImplicitFloat is used. 13368 if (F.hasFnAttribute(Attribute::NoImplicitFloat)) 13369 return false; 13370 13371 LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); 13372 13373 // Use the bottom up slp vectorizer to construct chains that start with 13374 // store instructions. 13375 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_); 13376 13377 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to 13378 // delete instructions. 13379 13380 // Update DFS numbers now so that we can use them for ordering. 13381 DT->updateDFSNumbers(); 13382 13383 // Scan the blocks in the function in post order. 13384 for (auto *BB : post_order(&F.getEntryBlock())) { 13385 // Start new block - clear the list of reduction roots. 
13386 R.clearReductionData(); 13387 collectSeedInstructions(BB); 13388 13389 // Vectorize trees that end at stores. 13390 if (!Stores.empty()) { 13391 LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size() 13392 << " underlying objects.\n"); 13393 Changed |= vectorizeStoreChains(R); 13394 } 13395 13396 // Vectorize trees that end at reductions. 13397 Changed |= vectorizeChainsInBlock(BB, R); 13398 13399 // Vectorize the index computations of getelementptr instructions. This 13400 // is primarily intended to catch gather-like idioms ending at 13401 // non-consecutive loads. 13402 if (!GEPs.empty()) { 13403 LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size() 13404 << " underlying objects.\n"); 13405 Changed |= vectorizeGEPIndices(BB, R); 13406 } 13407 } 13408 13409 if (Changed) { 13410 R.optimizeGatherSequence(); 13411 LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n"); 13412 } 13413 return Changed; 13414 } 13415 13416 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R, 13417 unsigned Idx, unsigned MinVF) { 13418 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size() 13419 << "\n"); 13420 const unsigned Sz = R.getVectorElementSize(Chain[0]); 13421 unsigned VF = Chain.size(); 13422 13423 if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF) 13424 return false; 13425 13426 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx 13427 << "\n"); 13428 13429 R.buildTree(Chain); 13430 if (R.isTreeTinyAndNotFullyVectorizable()) 13431 return false; 13432 if (R.isLoadCombineCandidate()) 13433 return false; 13434 R.reorderTopToBottom(); 13435 R.reorderBottomToTop(); 13436 R.buildExternalUses(); 13437 13438 R.computeMinimumValueSizes(); 13439 13440 InstructionCost Cost = R.getTreeCost(); 13441 13442 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF=" << VF << "\n"); 13443 if (Cost < -SLPCostThreshold) { 13444 LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n"); 13445 13446 using namespace ore; 13447 13448 R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized", 13449 cast<StoreInst>(Chain[0])) 13450 << "Stores SLP vectorized with cost " << NV("Cost", Cost) 13451 << " and with tree size " 13452 << NV("TreeSize", R.getTreeSize())); 13453 13454 R.vectorizeTree(); 13455 return true; 13456 } 13457 13458 return false; 13459 } 13460 13461 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores, 13462 BoUpSLP &R) { 13463 // We may run into multiple chains that merge into a single chain. We mark the 13464 // stores that we vectorized so that we don't visit the same store twice. 13465 BoUpSLP::ValueSet VectorizedStores; 13466 bool Changed = false; 13467 13468 // Stores the pair of stores (first_store, last_store) in a range, that were 13469 // already tried to be vectorized. Allows to skip the store ranges that were 13470 // already tried to be vectorized but the attempts were unsuccessful. 13471 DenseSet<std::pair<Value *, Value *>> TriedSequences; 13472 struct StoreDistCompare { 13473 bool operator()(const std::pair<unsigned, int> &Op1, 13474 const std::pair<unsigned, int> &Op2) const { 13475 return Op1.second < Op2.second; 13476 } 13477 }; 13478 // A set of pairs (index of store in Stores array ref, Distance of the store 13479 // address relative to base store address in units). 
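// For example (distances are in store-element units, not bytes): stores to
// %p, %p + 2 and %p + 1, inserted in that order, are kept sorted by distance
// as {0, 0}, {2, 1}, {1, 2}.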
13480 using StoreIndexToDistSet = 13481 std::set<std::pair<unsigned, int>, StoreDistCompare>; 13482 auto TryToVectorize = [&](const StoreIndexToDistSet &Set) { 13483 int PrevDist = -1; 13484 BoUpSLP::ValueList Operands; 13485 // Collect the chain into a list. 13486 for (auto [Idx, Data] : enumerate(Set)) { 13487 if (Operands.empty() || Data.second - PrevDist == 1) { 13488 Operands.push_back(Stores[Data.first]); 13489 PrevDist = Data.second; 13490 if (Idx != Set.size() - 1) 13491 continue; 13492 } 13493 if (Operands.size() <= 1) { 13494 Operands.clear(); 13495 Operands.push_back(Stores[Data.first]); 13496 PrevDist = Data.second; 13497 continue; 13498 } 13499 13500 unsigned MaxVecRegSize = R.getMaxVecRegSize(); 13501 unsigned EltSize = R.getVectorElementSize(Operands[0]); 13502 unsigned MaxElts = llvm::bit_floor(MaxVecRegSize / EltSize); 13503 13504 unsigned MaxVF = 13505 std::min(R.getMaximumVF(EltSize, Instruction::Store), MaxElts); 13506 auto *Store = cast<StoreInst>(Operands[0]); 13507 Type *StoreTy = Store->getValueOperand()->getType(); 13508 Type *ValueTy = StoreTy; 13509 if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand())) 13510 ValueTy = Trunc->getSrcTy(); 13511 unsigned MinVF = TTI->getStoreMinimumVF( 13512 R.getMinVF(DL->getTypeSizeInBits(ValueTy)), StoreTy, ValueTy); 13513 13514 if (MaxVF <= MinVF) { 13515 LLVM_DEBUG(dbgs() << "SLP: Vectorization infeasible as MaxVF (" << MaxVF 13516 << ") <= " 13517 << "MinVF (" << MinVF << ")\n"); 13518 } 13519 13520 // FIXME: Is division-by-2 the correct step? Should we assert that the 13521 // register size is a power-of-2? 13522 unsigned StartIdx = 0; 13523 for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) { 13524 for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) { 13525 ArrayRef<Value *> Slice = ArrayRef(Operands).slice(Cnt, Size); 13526 assert( 13527 all_of( 13528 Slice, 13529 [&](Value *V) { 13530 return cast<StoreInst>(V)->getValueOperand()->getType() == 13531 cast<StoreInst>(Slice.front()) 13532 ->getValueOperand() 13533 ->getType(); 13534 }) && 13535 "Expected all operands of same type."); 13536 if (!VectorizedStores.count(Slice.front()) && 13537 !VectorizedStores.count(Slice.back()) && 13538 TriedSequences.insert(std::make_pair(Slice.front(), Slice.back())) 13539 .second && 13540 vectorizeStoreChain(Slice, R, Cnt, MinVF)) { 13541 // Mark the vectorized stores so that we don't vectorize them again. 13542 VectorizedStores.insert(Slice.begin(), Slice.end()); 13543 Changed = true; 13544 // If we vectorized initial block, no need to try to vectorize it 13545 // again. 13546 if (Cnt == StartIdx) 13547 StartIdx += Size; 13548 Cnt += Size; 13549 continue; 13550 } 13551 ++Cnt; 13552 } 13553 // Check if the whole array was vectorized already - exit. 13554 if (StartIdx >= Operands.size()) 13555 break; 13556 } 13557 Operands.clear(); 13558 Operands.push_back(Stores[Data.first]); 13559 PrevDist = Data.second; 13560 } 13561 }; 13562 13563 // Stores pair (first: index of the store into Stores array ref, address of 13564 // which taken as base, second: sorted set of pairs {index, dist}, which are 13565 // indices of stores in the set and their store location distances relative to 13566 // the base address). 13567 13568 // Need to store the index of the very first store separately, since the set 13569 // may be reordered after the insertion and the first store may be moved. This 13570 // container allows to reduce number of calls of getPointersDiff() function. 
13571 SmallVector<std::pair<unsigned, StoreIndexToDistSet>> SortedStores; 13572 // Inserts the specified store SI with the given index Idx to the set of the 13573 // stores. If the store with the same distance is found already - stop 13574 // insertion, try to vectorize already found stores. If some stores from this 13575 // sequence were not vectorized - try to vectorize them with the new store 13576 // later. But this logic is applied only to the stores, that come before the 13577 // previous store with the same distance. 13578 // Example: 13579 // 1. store x, %p 13580 // 2. store y, %p+1 13581 // 3. store z, %p+2 13582 // 4. store a, %p 13583 // 5. store b, %p+3 13584 // - Scan this from the last to first store. The very first bunch of stores is 13585 // {5, {{4, -3}, {2, -2}, {3, -1}, {5, 0}}} (the element in SortedStores 13586 // vector). 13587 // - The next store in the list - #1 - has the same distance from store #5 as 13588 // the store #4. 13589 // - Try to vectorize sequence of stores 4,2,3,5. 13590 // - If all these stores are vectorized - just drop them. 13591 // - If some of them are not vectorized (say, #3 and #5), do extra analysis. 13592 // - Start new stores sequence. 13593 // The new bunch of stores is {1, {1, 0}}. 13594 // - Add the stores from previous sequence, that were not vectorized. 13595 // Here we consider the stores in the reversed order, rather they are used in 13596 // the IR (Stores are reversed already, see vectorizeStoreChains() function). 13597 // Store #3 can be added -> comes after store #4 with the same distance as 13598 // store #1. 13599 // Store #5 cannot be added - comes before store #4. 13600 // This logic allows to improve the compile time, we assume that the stores 13601 // after previous store with the same distance most likely have memory 13602 // dependencies and no need to waste compile time to try to vectorize them. 13603 // - Try to vectorize the sequence {1, {1, 0}, {3, 2}}. 13604 auto FillStoresSet = [&](unsigned Idx, StoreInst *SI) { 13605 for (std::pair<unsigned, StoreIndexToDistSet> &Set : SortedStores) { 13606 std::optional<int> Diff = getPointersDiff( 13607 Stores[Set.first]->getValueOperand()->getType(), 13608 Stores[Set.first]->getPointerOperand(), 13609 SI->getValueOperand()->getType(), SI->getPointerOperand(), *DL, *SE, 13610 /*StrictCheck=*/true); 13611 if (!Diff) 13612 continue; 13613 auto It = Set.second.find(std::make_pair(Idx, *Diff)); 13614 if (It == Set.second.end()) { 13615 Set.second.emplace(Idx, *Diff); 13616 return; 13617 } 13618 // Try to vectorize the first found set to avoid duplicate analysis. 13619 TryToVectorize(Set.second); 13620 StoreIndexToDistSet PrevSet; 13621 PrevSet.swap(Set.second); 13622 Set.first = Idx; 13623 Set.second.emplace(Idx, 0); 13624 // Insert stores that followed previous match to try to vectorize them 13625 // with this store. 13626 unsigned StartIdx = It->first + 1; 13627 SmallBitVector UsedStores(Idx - StartIdx); 13628 // Distances to previously found dup store (or this store, since they 13629 // store to the same addresses). 13630 SmallVector<int> Dists(Idx - StartIdx, 0); 13631 for (const std::pair<unsigned, int> &Pair : reverse(PrevSet)) { 13632 // Do not try to vectorize sequences, we already tried. 
13633 if (Pair.first <= It->first || 13634 VectorizedStores.contains(Stores[Pair.first])) 13635 break; 13636 unsigned BI = Pair.first - StartIdx; 13637 UsedStores.set(BI); 13638 Dists[BI] = Pair.second - It->second; 13639 } 13640 for (unsigned I = StartIdx; I < Idx; ++I) { 13641 unsigned BI = I - StartIdx; 13642 if (UsedStores.test(BI)) 13643 Set.second.emplace(I, Dists[BI]); 13644 } 13645 return; 13646 } 13647 auto &Res = SortedStores.emplace_back(); 13648 Res.first = Idx; 13649 Res.second.emplace(Idx, 0); 13650 }; 13651 StoreInst *PrevStore = Stores.front(); 13652 for (auto [I, SI] : enumerate(Stores)) { 13653 // Check that we do not try to vectorize stores of different types. 13654 if (PrevStore->getValueOperand()->getType() != 13655 SI->getValueOperand()->getType()) { 13656 for (auto &Set : SortedStores) 13657 TryToVectorize(Set.second); 13658 SortedStores.clear(); 13659 PrevStore = SI; 13660 } 13661 FillStoresSet(I, SI); 13662 } 13663 13664 // Final vectorization attempt. 13665 for (auto &Set : SortedStores) 13666 TryToVectorize(Set.second); 13667 13668 return Changed; 13669 } 13670 13671 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) { 13672 // Initialize the collections. We will make a single pass over the block. 13673 Stores.clear(); 13674 GEPs.clear(); 13675 13676 // Visit the store and getelementptr instructions in BB and organize them in 13677 // Stores and GEPs according to the underlying objects of their pointer 13678 // operands. 13679 for (Instruction &I : *BB) { 13680 // Ignore store instructions that are volatile or have a pointer operand 13681 // that doesn't point to a scalar type. 13682 if (auto *SI = dyn_cast<StoreInst>(&I)) { 13683 if (!SI->isSimple()) 13684 continue; 13685 if (!isValidElementType(SI->getValueOperand()->getType())) 13686 continue; 13687 Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI); 13688 } 13689 13690 // Ignore getelementptr instructions that have more than one index, a 13691 // constant index, or a pointer operand that doesn't point to a scalar 13692 // type. 13693 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { 13694 if (GEP->getNumIndices() != 1) 13695 continue; 13696 Value *Idx = GEP->idx_begin()->get(); 13697 if (isa<Constant>(Idx)) 13698 continue; 13699 if (!isValidElementType(Idx->getType())) 13700 continue; 13701 if (GEP->getType()->isVectorTy()) 13702 continue; 13703 GEPs[GEP->getPointerOperand()].push_back(GEP); 13704 } 13705 } 13706 } 13707 13708 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, 13709 bool MaxVFOnly) { 13710 if (VL.size() < 2) 13711 return false; 13712 13713 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " 13714 << VL.size() << ".\n"); 13715 13716 // Check that all of the parts are instructions of the same type, 13717 // we permit an alternate opcode via InstructionsState. 13718 InstructionsState S = getSameOpcode(VL, *TLI); 13719 if (!S.getOpcode()) 13720 return false; 13721 13722 Instruction *I0 = cast<Instruction>(S.OpValue); 13723 // Make sure invalid types (including vector type) are rejected before 13724 // determining vectorization factor for scalar instructions. 13725 for (Value *V : VL) { 13726 Type *Ty = V->getType(); 13727 if (!isa<InsertElementInst>(V) && !isValidElementType(Ty)) { 13728 // NOTE: the following will give user internal llvm type name, which may 13729 // not be useful. 
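// E.g. a list of x86_fp80 values is rejected here, and the remark below
// reports "x86_fp80" as the unsupported type.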
13730 R.getORE()->emit([&]() { 13731 std::string TypeStr; 13732 llvm::raw_string_ostream rso(TypeStr); 13733 Ty->print(rso); 13734 return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0) 13735 << "Cannot SLP vectorize list: type " 13736 << rso.str() + " is unsupported by vectorizer"; 13737 }); 13738 return false; 13739 } 13740 } 13741 13742 unsigned Sz = R.getVectorElementSize(I0); 13743 unsigned MinVF = R.getMinVF(Sz); 13744 unsigned MaxVF = std::max<unsigned>(llvm::bit_floor(VL.size()), MinVF); 13745 MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF); 13746 if (MaxVF < 2) { 13747 R.getORE()->emit([&]() { 13748 return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0) 13749 << "Cannot SLP vectorize list: vectorization factor " 13750 << "less than 2 is not supported"; 13751 }); 13752 return false; 13753 } 13754 13755 bool Changed = false; 13756 bool CandidateFound = false; 13757 InstructionCost MinCost = SLPCostThreshold.getValue(); 13758 Type *ScalarTy = VL[0]->getType(); 13759 if (auto *IE = dyn_cast<InsertElementInst>(VL[0])) 13760 ScalarTy = IE->getOperand(1)->getType(); 13761 13762 unsigned NextInst = 0, MaxInst = VL.size(); 13763 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) { 13764 // No actual vectorization should happen, if number of parts is the same as 13765 // provided vectorization factor (i.e. the scalar type is used for vector 13766 // code during codegen). 13767 auto *VecTy = FixedVectorType::get(ScalarTy, VF); 13768 if (TTI->getNumberOfParts(VecTy) == VF) 13769 continue; 13770 for (unsigned I = NextInst; I < MaxInst; ++I) { 13771 unsigned ActualVF = std::min(MaxInst - I, VF); 13772 13773 if (!isPowerOf2_32(ActualVF)) 13774 continue; 13775 13776 if (MaxVFOnly && ActualVF < MaxVF) 13777 break; 13778 if ((VF > MinVF && ActualVF <= VF / 2) || (VF == MinVF && ActualVF < 2)) 13779 break; 13780 13781 ArrayRef<Value *> Ops = VL.slice(I, ActualVF); 13782 // Check that a previous iteration of this loop did not delete the Value. 13783 if (llvm::any_of(Ops, [&R](Value *V) { 13784 auto *I = dyn_cast<Instruction>(V); 13785 return I && R.isDeleted(I); 13786 })) 13787 continue; 13788 13789 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << ActualVF << " operations " 13790 << "\n"); 13791 13792 R.buildTree(Ops); 13793 if (R.isTreeTinyAndNotFullyVectorizable()) 13794 continue; 13795 R.reorderTopToBottom(); 13796 R.reorderBottomToTop( 13797 /*IgnoreReorder=*/!isa<InsertElementInst>(Ops.front()) && 13798 !R.doesRootHaveInTreeUses()); 13799 R.buildExternalUses(); 13800 13801 R.computeMinimumValueSizes(); 13802 InstructionCost Cost = R.getTreeCost(); 13803 CandidateFound = true; 13804 MinCost = std::min(MinCost, Cost); 13805 13806 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost 13807 << " for VF=" << ActualVF << "\n"); 13808 if (Cost < -SLPCostThreshold) { 13809 LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n"); 13810 R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList", 13811 cast<Instruction>(Ops[0])) 13812 << "SLP vectorized with cost " << ore::NV("Cost", Cost) 13813 << " and with tree size " 13814 << ore::NV("TreeSize", R.getTreeSize())); 13815 13816 R.vectorizeTree(); 13817 // Move to the next bundle. 
13818 I += VF - 1; 13819 NextInst = I + 1; 13820 Changed = true; 13821 } 13822 } 13823 } 13824 13825 if (!Changed && CandidateFound) { 13826 R.getORE()->emit([&]() { 13827 return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0) 13828 << "List vectorization was possible but not beneficial with cost " 13829 << ore::NV("Cost", MinCost) << " >= " 13830 << ore::NV("Treshold", -SLPCostThreshold); 13831 }); 13832 } else if (!Changed) { 13833 R.getORE()->emit([&]() { 13834 return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0) 13835 << "Cannot SLP vectorize list: vectorization was impossible" 13836 << " with available vectorization factors"; 13837 }); 13838 } 13839 return Changed; 13840 } 13841 13842 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) { 13843 if (!I) 13844 return false; 13845 13846 if (!isa<BinaryOperator, CmpInst>(I) || isa<VectorType>(I->getType())) 13847 return false; 13848 13849 Value *P = I->getParent(); 13850 13851 // Vectorize in current basic block only. 13852 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0)); 13853 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1)); 13854 if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P) 13855 return false; 13856 13857 // First collect all possible candidates 13858 SmallVector<std::pair<Value *, Value *>, 4> Candidates; 13859 Candidates.emplace_back(Op0, Op1); 13860 13861 auto *A = dyn_cast<BinaryOperator>(Op0); 13862 auto *B = dyn_cast<BinaryOperator>(Op1); 13863 // Try to skip B. 13864 if (A && B && B->hasOneUse()) { 13865 auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0)); 13866 auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1)); 13867 if (B0 && B0->getParent() == P) 13868 Candidates.emplace_back(A, B0); 13869 if (B1 && B1->getParent() == P) 13870 Candidates.emplace_back(A, B1); 13871 } 13872 // Try to skip A. 13873 if (B && A && A->hasOneUse()) { 13874 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); 13875 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); 13876 if (A0 && A0->getParent() == P) 13877 Candidates.emplace_back(A0, B); 13878 if (A1 && A1->getParent() == P) 13879 Candidates.emplace_back(A1, B); 13880 } 13881 13882 if (Candidates.size() == 1) 13883 return tryToVectorizeList({Op0, Op1}, R); 13884 13885 // We have multiple options. Try to pick the single best. 13886 std::optional<int> BestCandidate = R.findBestRootPair(Candidates); 13887 if (!BestCandidate) 13888 return false; 13889 return tryToVectorizeList( 13890 {Candidates[*BestCandidate].first, Candidates[*BestCandidate].second}, R); 13891 } 13892 13893 namespace { 13894 13895 /// Model horizontal reductions. 13896 /// 13897 /// A horizontal reduction is a tree of reduction instructions that has values 13898 /// that can be put into a vector as its leaves. For example: 13899 /// 13900 /// mul mul mul mul 13901 /// \ / \ / 13902 /// + + 13903 /// \ / 13904 /// + 13905 /// This tree has "mul" as its leaf values and "+" as its reduction 13906 /// instructions. A reduction can feed into a store or a binary operation 13907 /// feeding a phi. 13908 /// ... 13909 /// \ / 13910 /// + 13911 /// | 13912 /// phi += 13913 /// 13914 /// Or: 13915 /// ... 13916 /// \ / 13917 /// + 13918 /// | 13919 /// *p = 13920 /// 13921 class HorizontalReduction { 13922 using ReductionOpsType = SmallVector<Value *, 16>; 13923 using ReductionOpsListType = SmallVector<ReductionOpsType, 2>; 13924 ReductionOpsListType ReductionOps; 13925 /// List of possibly reduced values. 
13926 SmallVector<SmallVector<Value *>> ReducedVals; 13927 /// Maps reduced value to the corresponding reduction operation. 13928 DenseMap<Value *, SmallVector<Instruction *>> ReducedValsToOps; 13929 // Use map vector to make stable output. 13930 MapVector<Instruction *, Value *> ExtraArgs; 13931 WeakTrackingVH ReductionRoot; 13932 /// The type of reduction operation. 13933 RecurKind RdxKind; 13934 /// Checks if the optimization of original scalar identity operations on 13935 /// matched horizontal reductions is enabled and allowed. 13936 bool IsSupportedHorRdxIdentityOp = false; 13937 13938 static bool isCmpSelMinMax(Instruction *I) { 13939 return match(I, m_Select(m_Cmp(), m_Value(), m_Value())) && 13940 RecurrenceDescriptor::isMinMaxRecurrenceKind(getRdxKind(I)); 13941 } 13942 13943 // And/or are potentially poison-safe logical patterns like: 13944 // select x, y, false 13945 // select x, true, y 13946 static bool isBoolLogicOp(Instruction *I) { 13947 return isa<SelectInst>(I) && 13948 (match(I, m_LogicalAnd()) || match(I, m_LogicalOr())); 13949 } 13950 13951 /// Checks if instruction is associative and can be vectorized. 13952 static bool isVectorizable(RecurKind Kind, Instruction *I) { 13953 if (Kind == RecurKind::None) 13954 return false; 13955 13956 // Integer ops that map to select instructions or intrinsics are fine. 13957 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind) || 13958 isBoolLogicOp(I)) 13959 return true; 13960 13961 if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) { 13962 // FP min/max are associative except for NaN and -0.0. We do not 13963 // have to rule out -0.0 here because the intrinsic semantics do not 13964 // specify a fixed result for it. 13965 return I->getFastMathFlags().noNaNs(); 13966 } 13967 13968 if (Kind == RecurKind::FMaximum || Kind == RecurKind::FMinimum) 13969 return true; 13970 13971 return I->isAssociative(); 13972 } 13973 13974 static Value *getRdxOperand(Instruction *I, unsigned Index) { 13975 // Poison-safe 'or' takes the form: select X, true, Y 13976 // To make that work with the normal operand processing, we skip the 13977 // true value operand. 13978 // TODO: Change the code and data structures to handle this without a hack. 13979 if (getRdxKind(I) == RecurKind::Or && isa<SelectInst>(I) && Index == 1) 13980 return I->getOperand(2); 13981 return I->getOperand(Index); 13982 } 13983 13984 /// Creates reduction operation with the current opcode. 
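/// For illustration: RecurKind::SMax emits either the llvm.smax intrinsic
/// or, when \p UseSelect is set, an "icmp sgt" followed by a select;
/// RecurKind::And with \p UseSelect emits the poison-safe form
/// "select %LHS, %RHS, false" for i1 values.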
13985 static Value *createOp(IRBuilder<> &Builder, RecurKind Kind, Value *LHS, 13986 Value *RHS, const Twine &Name, bool UseSelect) { 13987 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind); 13988 bool IsConstant = isConstant(LHS) && isConstant(RHS); 13989 switch (Kind) { 13990 case RecurKind::Or: 13991 if (UseSelect && 13992 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType())) 13993 return Builder.CreateSelect(LHS, Builder.getTrue(), RHS, Name); 13994 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 13995 Name); 13996 case RecurKind::And: 13997 if (UseSelect && 13998 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType())) 13999 return Builder.CreateSelect(LHS, RHS, Builder.getFalse(), Name); 14000 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 14001 Name); 14002 case RecurKind::Add: 14003 case RecurKind::Mul: 14004 case RecurKind::Xor: 14005 case RecurKind::FAdd: 14006 case RecurKind::FMul: 14007 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 14008 Name); 14009 case RecurKind::FMax: 14010 if (IsConstant) 14011 return ConstantFP::get(LHS->getType(), 14012 maxnum(cast<ConstantFP>(LHS)->getValueAPF(), 14013 cast<ConstantFP>(RHS)->getValueAPF())); 14014 return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS); 14015 case RecurKind::FMin: 14016 if (IsConstant) 14017 return ConstantFP::get(LHS->getType(), 14018 minnum(cast<ConstantFP>(LHS)->getValueAPF(), 14019 cast<ConstantFP>(RHS)->getValueAPF())); 14020 return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS); 14021 case RecurKind::FMaximum: 14022 if (IsConstant) 14023 return ConstantFP::get(LHS->getType(), 14024 maximum(cast<ConstantFP>(LHS)->getValueAPF(), 14025 cast<ConstantFP>(RHS)->getValueAPF())); 14026 return Builder.CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS); 14027 case RecurKind::FMinimum: 14028 if (IsConstant) 14029 return ConstantFP::get(LHS->getType(), 14030 minimum(cast<ConstantFP>(LHS)->getValueAPF(), 14031 cast<ConstantFP>(RHS)->getValueAPF())); 14032 return Builder.CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS); 14033 case RecurKind::SMax: 14034 if (IsConstant || UseSelect) { 14035 Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name); 14036 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 14037 } 14038 return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS); 14039 case RecurKind::SMin: 14040 if (IsConstant || UseSelect) { 14041 Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name); 14042 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 14043 } 14044 return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS); 14045 case RecurKind::UMax: 14046 if (IsConstant || UseSelect) { 14047 Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name); 14048 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 14049 } 14050 return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS); 14051 case RecurKind::UMin: 14052 if (IsConstant || UseSelect) { 14053 Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name); 14054 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 14055 } 14056 return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS); 14057 default: 14058 llvm_unreachable("Unknown reduction operation."); 14059 } 14060 } 14061 14062 /// Creates reduction operation with the current opcode with the IR flags 14063 /// from \p ReductionOps, dropping nuw/nsw flags. 
14064 static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS, 14065 Value *RHS, const Twine &Name, 14066 const ReductionOpsListType &ReductionOps) { 14067 bool UseSelect = 14068 ReductionOps.size() == 2 || 14069 // Logical or/and. 14070 (ReductionOps.size() == 1 && any_of(ReductionOps.front(), [](Value *V) { 14071 return isa<SelectInst>(V); 14072 })); 14073 assert((!UseSelect || ReductionOps.size() != 2 || 14074 isa<SelectInst>(ReductionOps[1][0])) && 14075 "Expected cmp + select pairs for reduction"); 14076 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect); 14077 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) { 14078 if (auto *Sel = dyn_cast<SelectInst>(Op)) { 14079 propagateIRFlags(Sel->getCondition(), ReductionOps[0], nullptr, 14080 /*IncludeWrapFlags=*/false); 14081 propagateIRFlags(Op, ReductionOps[1], nullptr, 14082 /*IncludeWrapFlags=*/false); 14083 return Op; 14084 } 14085 } 14086 propagateIRFlags(Op, ReductionOps[0], nullptr, /*IncludeWrapFlags=*/false); 14087 return Op; 14088 } 14089 14090 public: 14091 static RecurKind getRdxKind(Value *V) { 14092 auto *I = dyn_cast<Instruction>(V); 14093 if (!I) 14094 return RecurKind::None; 14095 if (match(I, m_Add(m_Value(), m_Value()))) 14096 return RecurKind::Add; 14097 if (match(I, m_Mul(m_Value(), m_Value()))) 14098 return RecurKind::Mul; 14099 if (match(I, m_And(m_Value(), m_Value())) || 14100 match(I, m_LogicalAnd(m_Value(), m_Value()))) 14101 return RecurKind::And; 14102 if (match(I, m_Or(m_Value(), m_Value())) || 14103 match(I, m_LogicalOr(m_Value(), m_Value()))) 14104 return RecurKind::Or; 14105 if (match(I, m_Xor(m_Value(), m_Value()))) 14106 return RecurKind::Xor; 14107 if (match(I, m_FAdd(m_Value(), m_Value()))) 14108 return RecurKind::FAdd; 14109 if (match(I, m_FMul(m_Value(), m_Value()))) 14110 return RecurKind::FMul; 14111 14112 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value()))) 14113 return RecurKind::FMax; 14114 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value()))) 14115 return RecurKind::FMin; 14116 14117 if (match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(), m_Value()))) 14118 return RecurKind::FMaximum; 14119 if (match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(), m_Value()))) 14120 return RecurKind::FMinimum; 14121 // This matches either cmp+select or intrinsics. SLP is expected to handle 14122 // either form. 14123 // TODO: If we are canonicalizing to intrinsics, we can remove several 14124 // special-case paths that deal with selects. 14125 if (match(I, m_SMax(m_Value(), m_Value()))) 14126 return RecurKind::SMax; 14127 if (match(I, m_SMin(m_Value(), m_Value()))) 14128 return RecurKind::SMin; 14129 if (match(I, m_UMax(m_Value(), m_Value()))) 14130 return RecurKind::UMax; 14131 if (match(I, m_UMin(m_Value(), m_Value()))) 14132 return RecurKind::UMin; 14133 14134 if (auto *Select = dyn_cast<SelectInst>(I)) { 14135 // Try harder: look for min/max pattern based on instructions producing 14136 // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2). 
14137 // During the intermediate stages of SLP, it's very common to have 14138 // pattern like this (since optimizeGatherSequence is run only once 14139 // at the end): 14140 // %1 = extractelement <2 x i32> %a, i32 0 14141 // %2 = extractelement <2 x i32> %a, i32 1 14142 // %cond = icmp sgt i32 %1, %2 14143 // %3 = extractelement <2 x i32> %a, i32 0 14144 // %4 = extractelement <2 x i32> %a, i32 1 14145 // %select = select i1 %cond, i32 %3, i32 %4 14146 CmpInst::Predicate Pred; 14147 Instruction *L1; 14148 Instruction *L2; 14149 14150 Value *LHS = Select->getTrueValue(); 14151 Value *RHS = Select->getFalseValue(); 14152 Value *Cond = Select->getCondition(); 14153 14154 // TODO: Support inverse predicates. 14155 if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) { 14156 if (!isa<ExtractElementInst>(RHS) || 14157 !L2->isIdenticalTo(cast<Instruction>(RHS))) 14158 return RecurKind::None; 14159 } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) { 14160 if (!isa<ExtractElementInst>(LHS) || 14161 !L1->isIdenticalTo(cast<Instruction>(LHS))) 14162 return RecurKind::None; 14163 } else { 14164 if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS)) 14165 return RecurKind::None; 14166 if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) || 14167 !L1->isIdenticalTo(cast<Instruction>(LHS)) || 14168 !L2->isIdenticalTo(cast<Instruction>(RHS))) 14169 return RecurKind::None; 14170 } 14171 14172 switch (Pred) { 14173 default: 14174 return RecurKind::None; 14175 case CmpInst::ICMP_SGT: 14176 case CmpInst::ICMP_SGE: 14177 return RecurKind::SMax; 14178 case CmpInst::ICMP_SLT: 14179 case CmpInst::ICMP_SLE: 14180 return RecurKind::SMin; 14181 case CmpInst::ICMP_UGT: 14182 case CmpInst::ICMP_UGE: 14183 return RecurKind::UMax; 14184 case CmpInst::ICMP_ULT: 14185 case CmpInst::ICMP_ULE: 14186 return RecurKind::UMin; 14187 } 14188 } 14189 return RecurKind::None; 14190 } 14191 14192 /// Get the index of the first operand. 14193 static unsigned getFirstOperandIndex(Instruction *I) { 14194 return isCmpSelMinMax(I) ? 1 : 0; 14195 } 14196 14197 private: 14198 /// Total number of operands in the reduction operation. 14199 static unsigned getNumberOfOperands(Instruction *I) { 14200 return isCmpSelMinMax(I) ? 3 : 2; 14201 } 14202 14203 /// Checks if the instruction is in basic block \p BB. 14204 /// For a cmp+sel min/max reduction check that both ops are in \p BB. 14205 static bool hasSameParent(Instruction *I, BasicBlock *BB) { 14206 if (isCmpSelMinMax(I) || isBoolLogicOp(I)) { 14207 auto *Sel = cast<SelectInst>(I); 14208 auto *Cmp = dyn_cast<Instruction>(Sel->getCondition()); 14209 return Sel->getParent() == BB && Cmp && Cmp->getParent() == BB; 14210 } 14211 return I->getParent() == BB; 14212 } 14213 14214 /// Expected number of uses for reduction operations/reduced values. 14215 static bool hasRequiredNumberOfUses(bool IsCmpSelMinMax, Instruction *I) { 14216 if (IsCmpSelMinMax) { 14217 // SelectInst must be used twice while the condition op must have single 14218 // use only. 14219 if (auto *Sel = dyn_cast<SelectInst>(I)) 14220 return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse(); 14221 return I->hasNUses(2); 14222 } 14223 14224 // Arithmetic reduction operation must be used once only. 14225 return I->hasOneUse(); 14226 } 14227 14228 /// Initializes the list of reduction operations. 
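/// For a cmp + select min/max reduction two operation lists are kept (one
/// for the compare instructions and one for the selects); all other
/// reductions keep a single list.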
14229 void initReductionOps(Instruction *I) { 14230 if (isCmpSelMinMax(I)) 14231 ReductionOps.assign(2, ReductionOpsType()); 14232 else 14233 ReductionOps.assign(1, ReductionOpsType()); 14234 } 14235 14236 /// Add all reduction operations for the reduction instruction \p I. 14237 void addReductionOps(Instruction *I) { 14238 if (isCmpSelMinMax(I)) { 14239 ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition()); 14240 ReductionOps[1].emplace_back(I); 14241 } else { 14242 ReductionOps[0].emplace_back(I); 14243 } 14244 } 14245 14246 static bool isGoodForReduction(ArrayRef<Value *> Data) { 14247 int Sz = Data.size(); 14248 auto *I = dyn_cast<Instruction>(Data.front()); 14249 return Sz > 1 || isConstant(Data.front()) || 14250 (I && !isa<LoadInst>(I) && isValidForAlternation(I->getOpcode())); 14251 } 14252 14253 public: 14254 HorizontalReduction() = default; 14255 14256 /// Try to find a reduction tree. 14257 bool matchAssociativeReduction(BoUpSLP &R, Instruction *Root, 14258 ScalarEvolution &SE, const DataLayout &DL, 14259 const TargetLibraryInfo &TLI) { 14260 RdxKind = HorizontalReduction::getRdxKind(Root); 14261 if (!isVectorizable(RdxKind, Root)) 14262 return false; 14263 14264 // Analyze "regular" integer/FP types for reductions - no target-specific 14265 // types or pointers. 14266 Type *Ty = Root->getType(); 14267 if (!isValidElementType(Ty) || Ty->isPointerTy()) 14268 return false; 14269 14270 // Though the ultimate reduction may have multiple uses, its condition must 14271 // have only single use. 14272 if (auto *Sel = dyn_cast<SelectInst>(Root)) 14273 if (!Sel->getCondition()->hasOneUse()) 14274 return false; 14275 14276 ReductionRoot = Root; 14277 14278 // Iterate through all the operands of the possible reduction tree and 14279 // gather all the reduced values, sorting them by their value id. 14280 BasicBlock *BB = Root->getParent(); 14281 bool IsCmpSelMinMax = isCmpSelMinMax(Root); 14282 SmallVector<Instruction *> Worklist(1, Root); 14283 // Checks if the operands of the \p TreeN instruction are also reduction 14284 // operations or should be treated as reduced values or an extra argument, 14285 // which is not part of the reduction. 14286 auto CheckOperands = [&](Instruction *TreeN, 14287 SmallVectorImpl<Value *> &ExtraArgs, 14288 SmallVectorImpl<Value *> &PossibleReducedVals, 14289 SmallVectorImpl<Instruction *> &ReductionOps) { 14290 for (int I = getFirstOperandIndex(TreeN), 14291 End = getNumberOfOperands(TreeN); 14292 I < End; ++I) { 14293 Value *EdgeVal = getRdxOperand(TreeN, I); 14294 ReducedValsToOps[EdgeVal].push_back(TreeN); 14295 auto *EdgeInst = dyn_cast<Instruction>(EdgeVal); 14296 // Edge has wrong parent - mark as an extra argument. 14297 if (EdgeInst && !isVectorLikeInstWithConstOps(EdgeInst) && 14298 !hasSameParent(EdgeInst, BB)) { 14299 ExtraArgs.push_back(EdgeVal); 14300 continue; 14301 } 14302 // If the edge is not an instruction, or it is different from the main 14303 // reduction opcode or has too many uses - possible reduced value. 14304 // Also, do not try to reduce const values, if the operation is not 14305 // foldable. 
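// For example, in ((a * b) + (c * d)) + e the adds are collected as
// reduction operations, while the multiplication results and 'e' become
// possible reduced values.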
14306 if (!EdgeInst || getRdxKind(EdgeInst) != RdxKind ||
14307 IsCmpSelMinMax != isCmpSelMinMax(EdgeInst) ||
14308 !hasRequiredNumberOfUses(IsCmpSelMinMax, EdgeInst) ||
14309 !isVectorizable(RdxKind, EdgeInst) ||
14310 (R.isAnalyzedReductionRoot(EdgeInst) &&
14311 all_of(EdgeInst->operands(), Constant::classof))) {
14312 PossibleReducedVals.push_back(EdgeVal);
14313 continue;
14314 }
14315 ReductionOps.push_back(EdgeInst);
14316 }
14317 };
14318 // Try to regroup reduced values so that it becomes more profitable to
14319 // reduce them. Values are grouped by their value ids, instructions - by
14320 // instruction op id and/or alternate op id, plus do extra analysis for
14321 // loads (grouping them by the distance between pointers) and cmp
14322 // instructions (grouping them by the predicate).
14323 MapVector<size_t, MapVector<size_t, MapVector<Value *, unsigned>>>
14324 PossibleReducedVals;
14325 initReductionOps(Root);
14326 DenseMap<Value *, SmallVector<LoadInst *>> LoadsMap;
14327 SmallSet<size_t, 2> LoadKeyUsed;
14328 SmallPtrSet<Value *, 4> DoNotReverseVals;
14329
14330 auto GenerateLoadsSubkey = [&](size_t Key, LoadInst *LI) {
14331 Value *Ptr = getUnderlyingObject(LI->getPointerOperand());
14332 if (LoadKeyUsed.contains(Key)) {
14333 auto LIt = LoadsMap.find(Ptr);
14334 if (LIt != LoadsMap.end()) {
14335 for (LoadInst *RLI : LIt->second) {
14336 if (getPointersDiff(RLI->getType(), RLI->getPointerOperand(),
14337 LI->getType(), LI->getPointerOperand(), DL, SE,
14338 /*StrictCheck=*/true))
14339 return hash_value(RLI->getPointerOperand());
14340 }
14341 for (LoadInst *RLI : LIt->second) {
14342 if (arePointersCompatible(RLI->getPointerOperand(),
14343 LI->getPointerOperand(), TLI)) {
14344 hash_code SubKey = hash_value(RLI->getPointerOperand());
14345 DoNotReverseVals.insert(RLI);
14346 return SubKey;
14347 }
14348 }
14349 if (LIt->second.size() > 2) {
14350 hash_code SubKey =
14351 hash_value(LIt->second.back()->getPointerOperand());
14352 DoNotReverseVals.insert(LIt->second.back());
14353 return SubKey;
14354 }
14355 }
14356 }
14357 LoadKeyUsed.insert(Key);
14358 LoadsMap.try_emplace(Ptr).first->second.push_back(LI);
14359 return hash_value(LI->getPointerOperand());
14360 };
14361
14362 while (!Worklist.empty()) {
14363 Instruction *TreeN = Worklist.pop_back_val();
14364 SmallVector<Value *> Args;
14365 SmallVector<Value *> PossibleRedVals;
14366 SmallVector<Instruction *> PossibleReductionOps;
14367 CheckOperands(TreeN, Args, PossibleRedVals, PossibleReductionOps);
14368 // If too many extra args - mark the instruction itself as a reduction
14369 // value, not a reduction operation.
14370 if (Args.size() < 2) {
14371 addReductionOps(TreeN);
14372 // Add extra args.
14373 if (!Args.empty()) {
14374 assert(Args.size() == 1 && "Expected only single argument.");
14375 ExtraArgs[TreeN] = Args.front();
14376 }
14377 // Add reduction values. The values are sorted for better vectorization
14378 // results.
14379 for (Value *V : PossibleRedVals) { 14380 size_t Key, Idx; 14381 std::tie(Key, Idx) = generateKeySubkey(V, &TLI, GenerateLoadsSubkey, 14382 /*AllowAlternate=*/false); 14383 ++PossibleReducedVals[Key][Idx] 14384 .insert(std::make_pair(V, 0)) 14385 .first->second; 14386 } 14387 Worklist.append(PossibleReductionOps.rbegin(), 14388 PossibleReductionOps.rend()); 14389 } else { 14390 size_t Key, Idx; 14391 std::tie(Key, Idx) = generateKeySubkey(TreeN, &TLI, GenerateLoadsSubkey, 14392 /*AllowAlternate=*/false); 14393 ++PossibleReducedVals[Key][Idx] 14394 .insert(std::make_pair(TreeN, 0)) 14395 .first->second; 14396 } 14397 } 14398 auto PossibleReducedValsVect = PossibleReducedVals.takeVector(); 14399 // Sort values by the total number of values kinds to start the reduction 14400 // from the longest possible reduced values sequences. 14401 for (auto &PossibleReducedVals : PossibleReducedValsVect) { 14402 auto PossibleRedVals = PossibleReducedVals.second.takeVector(); 14403 SmallVector<SmallVector<Value *>> PossibleRedValsVect; 14404 for (auto It = PossibleRedVals.begin(), E = PossibleRedVals.end(); 14405 It != E; ++It) { 14406 PossibleRedValsVect.emplace_back(); 14407 auto RedValsVect = It->second.takeVector(); 14408 stable_sort(RedValsVect, llvm::less_second()); 14409 for (const std::pair<Value *, unsigned> &Data : RedValsVect) 14410 PossibleRedValsVect.back().append(Data.second, Data.first); 14411 } 14412 stable_sort(PossibleRedValsVect, [](const auto &P1, const auto &P2) { 14413 return P1.size() > P2.size(); 14414 }); 14415 int NewIdx = -1; 14416 for (ArrayRef<Value *> Data : PossibleRedValsVect) { 14417 if (isGoodForReduction(Data) || 14418 (isa<LoadInst>(Data.front()) && NewIdx >= 0 && 14419 isa<LoadInst>(ReducedVals[NewIdx].front()) && 14420 getUnderlyingObject( 14421 cast<LoadInst>(Data.front())->getPointerOperand()) == 14422 getUnderlyingObject(cast<LoadInst>(ReducedVals[NewIdx].front()) 14423 ->getPointerOperand()))) { 14424 if (NewIdx < 0) { 14425 NewIdx = ReducedVals.size(); 14426 ReducedVals.emplace_back(); 14427 } 14428 if (DoNotReverseVals.contains(Data.front())) 14429 ReducedVals[NewIdx].append(Data.begin(), Data.end()); 14430 else 14431 ReducedVals[NewIdx].append(Data.rbegin(), Data.rend()); 14432 } else { 14433 ReducedVals.emplace_back().append(Data.rbegin(), Data.rend()); 14434 } 14435 } 14436 } 14437 // Sort the reduced values by number of same/alternate opcode and/or pointer 14438 // operand. 14439 stable_sort(ReducedVals, [](ArrayRef<Value *> P1, ArrayRef<Value *> P2) { 14440 return P1.size() > P2.size(); 14441 }); 14442 return true; 14443 } 14444 14445 /// Attempt to vectorize the tree found by matchAssociativeReduction. 14446 Value *tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI, 14447 const TargetLibraryInfo &TLI) { 14448 constexpr int ReductionLimit = 4; 14449 constexpr unsigned RegMaxNumber = 4; 14450 constexpr unsigned RedValsMaxNumber = 128; 14451 // If there are a sufficient number of reduction values, reduce 14452 // to a nearby power-of-2. We can safely generate oversized 14453 // vectors and rely on the backend to split them to legal sizes. 
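// For instance (illustrative), 24 profitable reduced values start with a
// reduction width of 16 below; on failure the width is halved and retried
// until it drops under the ReductionLimit of 4.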
14454 unsigned NumReducedVals = 14455 std::accumulate(ReducedVals.begin(), ReducedVals.end(), 0, 14456 [](unsigned Num, ArrayRef<Value *> Vals) -> unsigned { 14457 if (!isGoodForReduction(Vals)) 14458 return Num; 14459 return Num + Vals.size(); 14460 }); 14461 if (NumReducedVals < ReductionLimit && 14462 (!AllowHorRdxIdenityOptimization || 14463 all_of(ReducedVals, [](ArrayRef<Value *> RedV) { 14464 return RedV.size() < 2 || !allConstant(RedV) || !isSplat(RedV); 14465 }))) { 14466 for (ReductionOpsType &RdxOps : ReductionOps) 14467 for (Value *RdxOp : RdxOps) 14468 V.analyzedReductionRoot(cast<Instruction>(RdxOp)); 14469 return nullptr; 14470 } 14471 14472 IRBuilder<> Builder(cast<Instruction>(ReductionRoot)); 14473 14474 // Track the reduced values in case if they are replaced by extractelement 14475 // because of the vectorization. 14476 DenseMap<Value *, WeakTrackingVH> TrackedVals( 14477 ReducedVals.size() * ReducedVals.front().size() + ExtraArgs.size()); 14478 BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues; 14479 SmallVector<std::pair<Value *, Value *>> ReplacedExternals; 14480 ExternallyUsedValues.reserve(ExtraArgs.size() + 1); 14481 // The same extra argument may be used several times, so log each attempt 14482 // to use it. 14483 for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) { 14484 assert(Pair.first && "DebugLoc must be set."); 14485 ExternallyUsedValues[Pair.second].push_back(Pair.first); 14486 TrackedVals.try_emplace(Pair.second, Pair.second); 14487 } 14488 14489 // The compare instruction of a min/max is the insertion point for new 14490 // instructions and may be replaced with a new compare instruction. 14491 auto &&GetCmpForMinMaxReduction = [](Instruction *RdxRootInst) { 14492 assert(isa<SelectInst>(RdxRootInst) && 14493 "Expected min/max reduction to have select root instruction"); 14494 Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition(); 14495 assert(isa<Instruction>(ScalarCond) && 14496 "Expected min/max reduction to have compare condition"); 14497 return cast<Instruction>(ScalarCond); 14498 }; 14499 14500 // Return new VectorizedTree, based on previous value. 14501 auto GetNewVectorizedTree = [&](Value *VectorizedTree, Value *Res) { 14502 if (VectorizedTree) { 14503 // Update the final value in the reduction. 14504 Builder.SetCurrentDebugLocation( 14505 cast<Instruction>(ReductionOps.front().front())->getDebugLoc()); 14506 if ((isa<PoisonValue>(VectorizedTree) && !isa<PoisonValue>(Res)) || 14507 (isGuaranteedNotToBePoison(Res) && 14508 !isGuaranteedNotToBePoison(VectorizedTree))) { 14509 auto It = ReducedValsToOps.find(Res); 14510 if (It != ReducedValsToOps.end() && 14511 any_of(It->getSecond(), 14512 [](Instruction *I) { return isBoolLogicOp(I); })) 14513 std::swap(VectorizedTree, Res); 14514 } 14515 14516 return createOp(Builder, RdxKind, VectorizedTree, Res, "op.rdx", 14517 ReductionOps); 14518 } 14519 // Initialize the final value in the reduction. 14520 return Res; 14521 }; 14522 bool AnyBoolLogicOp = 14523 any_of(ReductionOps.back(), [](Value *V) { 14524 return isBoolLogicOp(cast<Instruction>(V)); 14525 }); 14526 // The reduction root is used as the insertion point for new instructions, 14527 // so set it as externally used to prevent it from being deleted. 
14528 ExternallyUsedValues[ReductionRoot];
14529 SmallDenseSet<Value *> IgnoreList(ReductionOps.size() *
14530 ReductionOps.front().size());
14531 for (ReductionOpsType &RdxOps : ReductionOps)
14532 for (Value *RdxOp : RdxOps) {
14533 if (!RdxOp)
14534 continue;
14535 IgnoreList.insert(RdxOp);
14536 }
14537 // Intersect the fast-math-flags from all reduction operations.
14538 FastMathFlags RdxFMF;
14539 RdxFMF.set();
14540 for (Value *U : IgnoreList)
14541 if (auto *FPMO = dyn_cast<FPMathOperator>(U))
14542 RdxFMF &= FPMO->getFastMathFlags();
14543 bool IsCmpSelMinMax = isCmpSelMinMax(cast<Instruction>(ReductionRoot));
14544
14545 // Need to track reduced vals, they may be changed during vectorization of
14546 // subvectors.
14547 for (ArrayRef<Value *> Candidates : ReducedVals)
14548 for (Value *V : Candidates)
14549 TrackedVals.try_emplace(V, V);
14550
14551 DenseMap<Value *, unsigned> VectorizedVals(ReducedVals.size());
14552 // List of the values that were reduced in other trees as part of gather
14553 // nodes and thus requiring extract if fully vectorized in other trees.
14554 SmallPtrSet<Value *, 4> RequiredExtract;
14555 Value *VectorizedTree = nullptr;
14556 bool CheckForReusedReductionOps = false;
14557 // Try to vectorize elements based on their type.
14558 for (unsigned I = 0, E = ReducedVals.size(); I < E; ++I) {
14559 ArrayRef<Value *> OrigReducedVals = ReducedVals[I];
14560 InstructionsState S = getSameOpcode(OrigReducedVals, TLI);
14561 SmallVector<Value *> Candidates;
14562 Candidates.reserve(2 * OrigReducedVals.size());
14563 DenseMap<Value *, Value *> TrackedToOrig(2 * OrigReducedVals.size());
14564 for (unsigned Cnt = 0, Sz = OrigReducedVals.size(); Cnt < Sz; ++Cnt) {
14565 Value *RdxVal = TrackedVals.find(OrigReducedVals[Cnt])->second;
14566 // Check if the reduction value was not overridden by the extractelement
14567 // instruction because of the vectorization and exclude it, if it is not
14568 // compatible with other values.
14569 // Also check if the instruction was folded to constant/other value.
14570 auto *Inst = dyn_cast<Instruction>(RdxVal);
14571 if ((Inst && isVectorLikeInstWithConstOps(Inst) &&
14572 (!S.getOpcode() || !S.isOpcodeOrAlt(Inst))) ||
14573 (S.getOpcode() && !Inst))
14574 continue;
14575 Candidates.push_back(RdxVal);
14576 TrackedToOrig.try_emplace(RdxVal, OrigReducedVals[Cnt]);
14577 }
14578 bool ShuffledExtracts = false;
14579 // Try to handle shuffled extractelements.
14580 if (S.getOpcode() == Instruction::ExtractElement && !S.isAltShuffle() &&
14581 I + 1 < E) {
14582 InstructionsState NextS = getSameOpcode(ReducedVals[I + 1], TLI);
14583 if (NextS.getOpcode() == Instruction::ExtractElement &&
14584 !NextS.isAltShuffle()) {
14585 SmallVector<Value *> CommonCandidates(Candidates);
14586 for (Value *RV : ReducedVals[I + 1]) {
14587 Value *RdxVal = TrackedVals.find(RV)->second;
14588 // Check if the reduction value was not overridden by the
14589 // extractelement instruction because of the vectorization and
14590 // exclude it, if it is not compatible with other values.
14591 if (auto *Inst = dyn_cast<Instruction>(RdxVal))
14592 if (!NextS.getOpcode() || !NextS.isOpcodeOrAlt(Inst))
14593 continue;
14594 CommonCandidates.push_back(RdxVal);
14595 TrackedToOrig.try_emplace(RdxVal, RV);
14596 }
14597 SmallVector<int> Mask;
14598 if (isFixedVectorShuffle(CommonCandidates, Mask)) {
14599 ++I;
14600 Candidates.swap(CommonCandidates);
14601 ShuffledExtracts = true;
14602 }
14603 }
14604 }
14605
14606 // Emit code for constant values.
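// E.g. an add reduction over the all-constant candidates 3, 5 and 7 is
// folded here into the scalar chain (3 + 5) + 7 instead of building a
// vectorization tree.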
14607 if (AllowHorRdxIdenityOptimization && Candidates.size() > 1 && 14608 allConstant(Candidates)) { 14609 Value *Res = Candidates.front(); 14610 ++VectorizedVals.try_emplace(Candidates.front(), 0).first->getSecond(); 14611 for (Value *VC : ArrayRef(Candidates).drop_front()) { 14612 Res = createOp(Builder, RdxKind, Res, VC, "const.rdx", ReductionOps); 14613 ++VectorizedVals.try_emplace(VC, 0).first->getSecond(); 14614 if (auto *ResI = dyn_cast<Instruction>(Res)) 14615 V.analyzedReductionRoot(ResI); 14616 } 14617 VectorizedTree = GetNewVectorizedTree(VectorizedTree, Res); 14618 continue; 14619 } 14620 14621 unsigned NumReducedVals = Candidates.size(); 14622 if (NumReducedVals < ReductionLimit && 14623 (NumReducedVals < 2 || !AllowHorRdxIdenityOptimization || 14624 !isSplat(Candidates))) 14625 continue; 14626 14627 // Check if we support repeated scalar values processing (optimization of 14628 // original scalar identity operations on matched horizontal reductions). 14629 IsSupportedHorRdxIdentityOp = 14630 AllowHorRdxIdenityOptimization && RdxKind != RecurKind::Mul && 14631 RdxKind != RecurKind::FMul && RdxKind != RecurKind::FMulAdd; 14632 // Gather same values. 14633 MapVector<Value *, unsigned> SameValuesCounter; 14634 if (IsSupportedHorRdxIdentityOp) 14635 for (Value *V : Candidates) 14636 ++SameValuesCounter.insert(std::make_pair(V, 0)).first->second; 14637 // Used to check if the reduced values used same number of times. In this 14638 // case the compiler may produce better code. E.g. if reduced values are 14639 // aabbccdd (8 x values), then the first node of the tree will have a node 14640 // for 4 x abcd + shuffle <4 x abcd>, <0, 0, 1, 1, 2, 2, 3, 3>. 14641 // Plus, the final reduction will be performed on <8 x aabbccdd>. 14642 // Instead compiler may build <4 x abcd> tree immediately, + reduction (4 14643 // x abcd) * 2. 14644 // Currently it only handles add/fadd/xor. and/or/min/max do not require 14645 // this analysis, other operations may require an extra estimation of 14646 // the profitability. 14647 bool SameScaleFactor = false; 14648 bool OptReusedScalars = IsSupportedHorRdxIdentityOp && 14649 SameValuesCounter.size() != Candidates.size(); 14650 if (OptReusedScalars) { 14651 SameScaleFactor = 14652 (RdxKind == RecurKind::Add || RdxKind == RecurKind::FAdd || 14653 RdxKind == RecurKind::Xor) && 14654 all_of(drop_begin(SameValuesCounter), 14655 [&SameValuesCounter](const std::pair<Value *, unsigned> &P) { 14656 return P.second == SameValuesCounter.front().second; 14657 }); 14658 Candidates.resize(SameValuesCounter.size()); 14659 transform(SameValuesCounter, Candidates.begin(), 14660 [](const auto &P) { return P.first; }); 14661 NumReducedVals = Candidates.size(); 14662 // Have a reduction of the same element. 
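// E.g. for an add reduction whose only remaining candidate %x was originally
// used four times, emitScaleForReusedOps emits "%x * 4" instead of a vector
// reduction.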
14663 if (NumReducedVals == 1) { 14664 Value *OrigV = TrackedToOrig.find(Candidates.front())->second; 14665 unsigned Cnt = SameValuesCounter.lookup(OrigV); 14666 Value *RedVal = 14667 emitScaleForReusedOps(Candidates.front(), Builder, Cnt); 14668 VectorizedTree = GetNewVectorizedTree(VectorizedTree, RedVal); 14669 VectorizedVals.try_emplace(OrigV, Cnt); 14670 continue; 14671 } 14672 } 14673 14674 unsigned MaxVecRegSize = V.getMaxVecRegSize(); 14675 unsigned EltSize = V.getVectorElementSize(Candidates[0]); 14676 unsigned MaxElts = 14677 RegMaxNumber * llvm::bit_floor(MaxVecRegSize / EltSize); 14678 14679 unsigned ReduxWidth = std::min<unsigned>( 14680 llvm::bit_floor(NumReducedVals), std::max(RedValsMaxNumber, MaxElts)); 14681 unsigned Start = 0; 14682 unsigned Pos = Start; 14683 // Restarts vectorization attempt with lower vector factor. 14684 unsigned PrevReduxWidth = ReduxWidth; 14685 bool CheckForReusedReductionOpsLocal = false; 14686 auto &&AdjustReducedVals = [&Pos, &Start, &ReduxWidth, NumReducedVals, 14687 &CheckForReusedReductionOpsLocal, 14688 &PrevReduxWidth, &V, 14689 &IgnoreList](bool IgnoreVL = false) { 14690 bool IsAnyRedOpGathered = !IgnoreVL && V.isAnyGathered(IgnoreList); 14691 if (!CheckForReusedReductionOpsLocal && PrevReduxWidth == ReduxWidth) { 14692 // Check if any of the reduction ops are gathered. If so, worth 14693 // trying again with less number of reduction ops. 14694 CheckForReusedReductionOpsLocal |= IsAnyRedOpGathered; 14695 } 14696 ++Pos; 14697 if (Pos < NumReducedVals - ReduxWidth + 1) 14698 return IsAnyRedOpGathered; 14699 Pos = Start; 14700 ReduxWidth /= 2; 14701 return IsAnyRedOpGathered; 14702 }; 14703 bool AnyVectorized = false; 14704 while (Pos < NumReducedVals - ReduxWidth + 1 && 14705 ReduxWidth >= ReductionLimit) { 14706 // Dependency in tree of the reduction ops - drop this attempt, try 14707 // later. 14708 if (CheckForReusedReductionOpsLocal && PrevReduxWidth != ReduxWidth && 14709 Start == 0) { 14710 CheckForReusedReductionOps = true; 14711 break; 14712 } 14713 PrevReduxWidth = ReduxWidth; 14714 ArrayRef<Value *> VL(std::next(Candidates.begin(), Pos), ReduxWidth); 14715 // Beeing analyzed already - skip. 14716 if (V.areAnalyzedReductionVals(VL)) { 14717 (void)AdjustReducedVals(/*IgnoreVL=*/true); 14718 continue; 14719 } 14720 // Early exit if any of the reduction values were deleted during 14721 // previous vectorization attempts. 14722 if (any_of(VL, [&V](Value *RedVal) { 14723 auto *RedValI = dyn_cast<Instruction>(RedVal); 14724 if (!RedValI) 14725 return false; 14726 return V.isDeleted(RedValI); 14727 })) 14728 break; 14729 V.buildTree(VL, IgnoreList); 14730 if (V.isTreeTinyAndNotFullyVectorizable(/*ForReduction=*/true)) { 14731 if (!AdjustReducedVals()) 14732 V.analyzedReductionVals(VL); 14733 continue; 14734 } 14735 if (V.isLoadCombineReductionCandidate(RdxKind)) { 14736 if (!AdjustReducedVals()) 14737 V.analyzedReductionVals(VL); 14738 continue; 14739 } 14740 V.reorderTopToBottom(); 14741 // No need to reorder the root node at all. 14742 V.reorderBottomToTop(/*IgnoreReorder=*/true); 14743 // Keep extracted other reduction values, if they are used in the 14744 // vectorization trees. 
14745 BoUpSLP::ExtraValueToDebugLocsMap LocalExternallyUsedValues( 14746 ExternallyUsedValues); 14747 for (unsigned Cnt = 0, Sz = ReducedVals.size(); Cnt < Sz; ++Cnt) { 14748 if (Cnt == I || (ShuffledExtracts && Cnt == I - 1)) 14749 continue; 14750 for (Value *V : ReducedVals[Cnt]) 14751 if (isa<Instruction>(V)) 14752 LocalExternallyUsedValues[TrackedVals[V]]; 14753 } 14754 if (!IsSupportedHorRdxIdentityOp) { 14755 // Number of uses of the candidates in the vector of values. 14756 assert(SameValuesCounter.empty() && 14757 "Reused values counter map is not empty"); 14758 for (unsigned Cnt = 0; Cnt < NumReducedVals; ++Cnt) { 14759 if (Cnt >= Pos && Cnt < Pos + ReduxWidth) 14760 continue; 14761 Value *V = Candidates[Cnt]; 14762 Value *OrigV = TrackedToOrig.find(V)->second; 14763 ++SameValuesCounter[OrigV]; 14764 } 14765 } 14766 SmallPtrSet<Value *, 4> VLScalars(VL.begin(), VL.end()); 14767 // Gather externally used values. 14768 SmallPtrSet<Value *, 4> Visited; 14769 for (unsigned Cnt = 0; Cnt < NumReducedVals; ++Cnt) { 14770 if (Cnt >= Pos && Cnt < Pos + ReduxWidth) 14771 continue; 14772 Value *RdxVal = Candidates[Cnt]; 14773 if (!Visited.insert(RdxVal).second) 14774 continue; 14775 // Check if the scalar was vectorized as part of the vectorization 14776 // tree but not the top node. 14777 if (!VLScalars.contains(RdxVal) && V.isVectorized(RdxVal)) { 14778 LocalExternallyUsedValues[RdxVal]; 14779 continue; 14780 } 14781 Value *OrigV = TrackedToOrig.find(RdxVal)->second; 14782 unsigned NumOps = 14783 VectorizedVals.lookup(RdxVal) + SameValuesCounter[OrigV]; 14784 if (NumOps != ReducedValsToOps.find(OrigV)->second.size()) 14785 LocalExternallyUsedValues[RdxVal]; 14786 } 14787 // Do not need the list of reused scalars in regular mode anymore. 14788 if (!IsSupportedHorRdxIdentityOp) 14789 SameValuesCounter.clear(); 14790 for (Value *RdxVal : VL) 14791 if (RequiredExtract.contains(RdxVal)) 14792 LocalExternallyUsedValues[RdxVal]; 14793 // Update LocalExternallyUsedValues for the scalar, replaced by 14794 // extractelement instructions. 14795 for (const std::pair<Value *, Value *> &Pair : ReplacedExternals) { 14796 auto *It = ExternallyUsedValues.find(Pair.first); 14797 if (It == ExternallyUsedValues.end()) 14798 continue; 14799 LocalExternallyUsedValues[Pair.second].append(It->second); 14800 } 14801 V.buildExternalUses(LocalExternallyUsedValues); 14802 14803 V.computeMinimumValueSizes(); 14804 14805 // Estimate cost. 14806 InstructionCost TreeCost = V.getTreeCost(VL); 14807 InstructionCost ReductionCost = 14808 getReductionCost(TTI, VL, IsCmpSelMinMax, ReduxWidth, RdxFMF); 14809 InstructionCost Cost = TreeCost + ReductionCost; 14810 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost 14811 << " for reduction\n"); 14812 if (!Cost.isValid()) 14813 return nullptr; 14814 if (Cost >= -SLPCostThreshold) { 14815 V.getORE()->emit([&]() { 14816 return OptimizationRemarkMissed( 14817 SV_NAME, "HorSLPNotBeneficial", 14818 ReducedValsToOps.find(VL[0])->second.front()) 14819 << "Vectorizing horizontal reduction is possible " 14820 << "but not beneficial with cost " << ore::NV("Cost", Cost) 14821 << " and threshold " 14822 << ore::NV("Threshold", -SLPCostThreshold); 14823 }); 14824 if (!AdjustReducedVals()) 14825 V.analyzedReductionVals(VL); 14826 continue; 14827 } 14828 14829 LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" 14830 << Cost << ". 
(HorRdx)\n"); 14831 V.getORE()->emit([&]() { 14832 return OptimizationRemark( 14833 SV_NAME, "VectorizedHorizontalReduction", 14834 ReducedValsToOps.find(VL[0])->second.front()) 14835 << "Vectorized horizontal reduction with cost " 14836 << ore::NV("Cost", Cost) << " and with tree size " 14837 << ore::NV("TreeSize", V.getTreeSize()); 14838 }); 14839 14840 Builder.setFastMathFlags(RdxFMF); 14841 14842 // Emit a reduction. If the root is a select (min/max idiom), the insert 14843 // point is the compare condition of that select. 14844 Instruction *RdxRootInst = cast<Instruction>(ReductionRoot); 14845 Instruction *InsertPt = RdxRootInst; 14846 if (IsCmpSelMinMax) 14847 InsertPt = GetCmpForMinMaxReduction(RdxRootInst); 14848 14849 // Vectorize a tree. 14850 Value *VectorizedRoot = V.vectorizeTree(LocalExternallyUsedValues, 14851 ReplacedExternals, InsertPt); 14852 14853 Builder.SetInsertPoint(InsertPt); 14854 14855 // To prevent poison from leaking across what used to be sequential, 14856 // safe, scalar boolean logic operations, the reduction operand must be 14857 // frozen. 14858 if ((isBoolLogicOp(RdxRootInst) || 14859 (AnyBoolLogicOp && VL.size() != TrackedVals.size())) && 14860 !isGuaranteedNotToBePoison(VectorizedRoot)) 14861 VectorizedRoot = Builder.CreateFreeze(VectorizedRoot); 14862 14863 // Emit code to correctly handle reused reduced values, if required. 14864 if (OptReusedScalars && !SameScaleFactor) { 14865 VectorizedRoot = 14866 emitReusedOps(VectorizedRoot, Builder, V.getRootNodeScalars(), 14867 SameValuesCounter, TrackedToOrig); 14868 } 14869 14870 Value *ReducedSubTree = 14871 emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI); 14872 if (ReducedSubTree->getType() != VL.front()->getType()) { 14873 ReducedSubTree = Builder.CreateIntCast( 14874 ReducedSubTree, VL.front()->getType(), any_of(VL, [&](Value *R) { 14875 KnownBits Known = computeKnownBits( 14876 R, cast<Instruction>(ReductionOps.front().front()) 14877 ->getModule() 14878 ->getDataLayout()); 14879 return !Known.isNonNegative(); 14880 })); 14881 } 14882 14883 // Improved analysis for add/fadd/xor reductions with same scale factor 14884 // for all operands of reductions. We can emit scalar ops for them 14885 // instead. 14886 if (OptReusedScalars && SameScaleFactor) 14887 ReducedSubTree = emitScaleForReusedOps( 14888 ReducedSubTree, Builder, SameValuesCounter.front().second); 14889 14890 VectorizedTree = GetNewVectorizedTree(VectorizedTree, ReducedSubTree); 14891 // Count vectorized reduced values to exclude them from final reduction. 
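// Hedged sketch: if a scalar feeds three reduction operations but only two
// of its occurrences were covered by vectorized trees, the counts kept below
// leave exactly one occurrence to be folded back in by the final scalar
// remainder reduction.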
14892 for (Value *RdxVal : VL) { 14893 Value *OrigV = TrackedToOrig.find(RdxVal)->second; 14894 if (IsSupportedHorRdxIdentityOp) { 14895 VectorizedVals.try_emplace(OrigV, SameValuesCounter[RdxVal]); 14896 continue; 14897 } 14898 ++VectorizedVals.try_emplace(OrigV, 0).first->getSecond(); 14899 if (!V.isVectorized(RdxVal)) 14900 RequiredExtract.insert(RdxVal); 14901 } 14902 Pos += ReduxWidth; 14903 Start = Pos; 14904 ReduxWidth = llvm::bit_floor(NumReducedVals - Pos); 14905 AnyVectorized = true; 14906 } 14907 if (OptReusedScalars && !AnyVectorized) { 14908 for (const std::pair<Value *, unsigned> &P : SameValuesCounter) { 14909 Value *RedVal = emitScaleForReusedOps(P.first, Builder, P.second); 14910 VectorizedTree = GetNewVectorizedTree(VectorizedTree, RedVal); 14911 Value *OrigV = TrackedToOrig.find(P.first)->second; 14912 VectorizedVals.try_emplace(OrigV, P.second); 14913 } 14914 continue; 14915 } 14916 } 14917 if (VectorizedTree) { 14918 // Reorder operands of bool logical op in the natural order to avoid 14919 // possible problem with poison propagation. If not possible to reorder 14920 // (both operands are originally RHS), emit an extra freeze instruction 14921 // for the LHS operand. 14922 // I.e., if we have original code like this: 14923 // RedOp1 = select i1 ?, i1 LHS, i1 false 14924 // RedOp2 = select i1 RHS, i1 ?, i1 false 14925 14926 // Then, we swap LHS/RHS to create a new op that matches the poison 14927 // semantics of the original code. 14928 14929 // If we have original code like this and both values could be poison: 14930 // RedOp1 = select i1 ?, i1 LHS, i1 false 14931 // RedOp2 = select i1 ?, i1 RHS, i1 false 14932 14933 // Then, we must freeze LHS in the new op. 14934 auto FixBoolLogicalOps = [&, VectorizedTree](Value *&LHS, Value *&RHS, 14935 Instruction *RedOp1, 14936 Instruction *RedOp2, 14937 bool InitStep) { 14938 if (!AnyBoolLogicOp) 14939 return; 14940 if (isBoolLogicOp(RedOp1) && 14941 ((!InitStep && LHS == VectorizedTree) || 14942 getRdxOperand(RedOp1, 0) == LHS || isGuaranteedNotToBePoison(LHS))) 14943 return; 14944 if (isBoolLogicOp(RedOp2) && ((!InitStep && RHS == VectorizedTree) || 14945 getRdxOperand(RedOp2, 0) == RHS || 14946 isGuaranteedNotToBePoison(RHS))) { 14947 std::swap(LHS, RHS); 14948 return; 14949 } 14950 if (LHS != VectorizedTree) 14951 LHS = Builder.CreateFreeze(LHS); 14952 }; 14953 // Finish the reduction. 14954 // Need to add extra arguments and not vectorized possible reduction 14955 // values. 14956 // Try to avoid dependencies between the scalar remainders after 14957 // reductions. 14958 auto FinalGen = 14959 [&](ArrayRef<std::pair<Instruction *, Value *>> InstVals, 14960 bool InitStep) { 14961 unsigned Sz = InstVals.size(); 14962 SmallVector<std::pair<Instruction *, Value *>> ExtraReds(Sz / 2 + 14963 Sz % 2); 14964 for (unsigned I = 0, E = (Sz / 2) * 2; I < E; I += 2) { 14965 Instruction *RedOp = InstVals[I + 1].first; 14966 Builder.SetCurrentDebugLocation(RedOp->getDebugLoc()); 14967 Value *RdxVal1 = InstVals[I].second; 14968 Value *StableRdxVal1 = RdxVal1; 14969 auto It1 = TrackedVals.find(RdxVal1); 14970 if (It1 != TrackedVals.end()) 14971 StableRdxVal1 = It1->second; 14972 Value *RdxVal2 = InstVals[I + 1].second; 14973 Value *StableRdxVal2 = RdxVal2; 14974 auto It2 = TrackedVals.find(RdxVal2); 14975 if (It2 != TrackedVals.end()) 14976 StableRdxVal2 = It2->second; 14977 // To prevent poison from leaking across what used to be 14978 // sequential, safe, scalar boolean logic operations, the 14979 // reduction operand must be frozen. 
14980 FixBoolLogicalOps(StableRdxVal1, StableRdxVal2, InstVals[I].first, 14981 RedOp, InitStep); 14982 Value *ExtraRed = createOp(Builder, RdxKind, StableRdxVal1, 14983 StableRdxVal2, "op.rdx", ReductionOps); 14984 ExtraReds[I / 2] = std::make_pair(InstVals[I].first, ExtraRed); 14985 } 14986 if (Sz % 2 == 1) 14987 ExtraReds[Sz / 2] = InstVals.back(); 14988 return ExtraReds; 14989 }; 14990 SmallVector<std::pair<Instruction *, Value *>> ExtraReductions; 14991 ExtraReductions.emplace_back(cast<Instruction>(ReductionRoot), 14992 VectorizedTree); 14993 SmallPtrSet<Value *, 8> Visited; 14994 for (ArrayRef<Value *> Candidates : ReducedVals) { 14995 for (Value *RdxVal : Candidates) { 14996 if (!Visited.insert(RdxVal).second) 14997 continue; 14998 unsigned NumOps = VectorizedVals.lookup(RdxVal); 14999 for (Instruction *RedOp : 15000 ArrayRef(ReducedValsToOps.find(RdxVal)->second) 15001 .drop_back(NumOps)) 15002 ExtraReductions.emplace_back(RedOp, RdxVal); 15003 } 15004 } 15005 for (auto &Pair : ExternallyUsedValues) { 15006 // Add each externally used value to the final reduction. 15007 for (auto *I : Pair.second) 15008 ExtraReductions.emplace_back(I, Pair.first); 15009 } 15010 // Iterate through all not-vectorized reduction values/extra arguments. 15011 bool InitStep = true; 15012 while (ExtraReductions.size() > 1) { 15013 VectorizedTree = ExtraReductions.front().second; 15014 SmallVector<std::pair<Instruction *, Value *>> NewReds = 15015 FinalGen(ExtraReductions, InitStep); 15016 ExtraReductions.swap(NewReds); 15017 InitStep = false; 15018 } 15019 VectorizedTree = ExtraReductions.front().second; 15020 15021 ReductionRoot->replaceAllUsesWith(VectorizedTree); 15022 15023 // The original scalar reduction is expected to have no remaining 15024 // uses outside the reduction tree itself. Assert that we got this 15025 // correct, replace internal uses with undef, and mark for eventual 15026 // deletion. 15027 #ifndef NDEBUG 15028 SmallSet<Value *, 4> IgnoreSet; 15029 for (ArrayRef<Value *> RdxOps : ReductionOps) 15030 IgnoreSet.insert(RdxOps.begin(), RdxOps.end()); 15031 #endif 15032 for (ArrayRef<Value *> RdxOps : ReductionOps) { 15033 for (Value *Ignore : RdxOps) { 15034 if (!Ignore) 15035 continue; 15036 #ifndef NDEBUG 15037 for (auto *U : Ignore->users()) { 15038 assert(IgnoreSet.count(U) && 15039 "All users must be either in the reduction ops list."); 15040 } 15041 #endif 15042 if (!Ignore->use_empty()) { 15043 Value *Undef = UndefValue::get(Ignore->getType()); 15044 Ignore->replaceAllUsesWith(Undef); 15045 } 15046 V.eraseInstruction(cast<Instruction>(Ignore)); 15047 } 15048 } 15049 } else if (!CheckForReusedReductionOps) { 15050 for (ReductionOpsType &RdxOps : ReductionOps) 15051 for (Value *RdxOp : RdxOps) 15052 V.analyzedReductionRoot(cast<Instruction>(RdxOp)); 15053 } 15054 return VectorizedTree; 15055 } 15056 15057 private: 15058 /// Calculate the cost of a reduction. 15059 InstructionCost getReductionCost(TargetTransformInfo *TTI, 15060 ArrayRef<Value *> ReducedVals, 15061 bool IsCmpSelMinMax, unsigned ReduxWidth, 15062 FastMathFlags FMF) { 15063 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 15064 Type *ScalarTy = ReducedVals.front()->getType(); 15065 FixedVectorType *VectorTy = FixedVectorType::get(ScalarTy, ReduxWidth); 15066 InstructionCost VectorCost = 0, ScalarCost; 15067 // If all of the reduced values are constant, the vector cost is 0, since 15068 // the reduction value can be calculated at the compile time. 
15069 bool AllConsts = allConstant(ReducedVals); 15070 auto EvaluateScalarCost = [&](function_ref<InstructionCost()> GenCostFn) { 15071 InstructionCost Cost = 0; 15072 // Scalar cost is repeated for N-1 elements. 15073 int Cnt = ReducedVals.size(); 15074 for (Value *RdxVal : ReducedVals) { 15075 if (Cnt == 1) 15076 break; 15077 --Cnt; 15078 if (RdxVal->hasNUsesOrMore(IsCmpSelMinMax ? 3 : 2)) { 15079 Cost += GenCostFn(); 15080 continue; 15081 } 15082 InstructionCost ScalarCost = 0; 15083 for (User *U : RdxVal->users()) { 15084 auto *RdxOp = cast<Instruction>(U); 15085 if (hasRequiredNumberOfUses(IsCmpSelMinMax, RdxOp)) { 15086 ScalarCost += TTI->getInstructionCost(RdxOp, CostKind); 15087 continue; 15088 } 15089 ScalarCost = InstructionCost::getInvalid(); 15090 break; 15091 } 15092 if (ScalarCost.isValid()) 15093 Cost += ScalarCost; 15094 else 15095 Cost += GenCostFn(); 15096 } 15097 return Cost; 15098 }; 15099 switch (RdxKind) { 15100 case RecurKind::Add: 15101 case RecurKind::Mul: 15102 case RecurKind::Or: 15103 case RecurKind::And: 15104 case RecurKind::Xor: 15105 case RecurKind::FAdd: 15106 case RecurKind::FMul: { 15107 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind); 15108 if (!AllConsts) 15109 VectorCost = 15110 TTI->getArithmeticReductionCost(RdxOpcode, VectorTy, FMF, CostKind); 15111 ScalarCost = EvaluateScalarCost([&]() { 15112 return TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy, CostKind); 15113 }); 15114 break; 15115 } 15116 case RecurKind::FMax: 15117 case RecurKind::FMin: 15118 case RecurKind::FMaximum: 15119 case RecurKind::FMinimum: 15120 case RecurKind::SMax: 15121 case RecurKind::SMin: 15122 case RecurKind::UMax: 15123 case RecurKind::UMin: { 15124 Intrinsic::ID Id = getMinMaxReductionIntrinsicOp(RdxKind); 15125 if (!AllConsts) 15126 VectorCost = TTI->getMinMaxReductionCost(Id, VectorTy, FMF, CostKind); 15127 ScalarCost = EvaluateScalarCost([&]() { 15128 IntrinsicCostAttributes ICA(Id, ScalarTy, {ScalarTy, ScalarTy}, FMF); 15129 return TTI->getIntrinsicInstrCost(ICA, CostKind); 15130 }); 15131 break; 15132 } 15133 default: 15134 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 15135 } 15136 15137 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost 15138 << " for reduction of " << shortBundleName(ReducedVals) 15139 << " (It is a splitting reduction)\n"); 15140 return VectorCost - ScalarCost; 15141 } 15142 15143 /// Emit a horizontal reduction of the vectorized value. 15144 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder, 15145 unsigned ReduxWidth, const TargetTransformInfo *TTI) { 15146 assert(VectorizedValue && "Need to have a vectorized tree node"); 15147 assert(isPowerOf2_32(ReduxWidth) && 15148 "We only handle power-of-two reductions for now"); 15149 assert(RdxKind != RecurKind::FMulAdd && 15150 "A call to the llvm.fmuladd intrinsic is not handled yet"); 15151 15152 ++NumVectorInstructions; 15153 return createSimpleTargetReduction(Builder, VectorizedValue, RdxKind); 15154 } 15155 15156 /// Emits optimized code for unique scalar value reused \p Cnt times. 
15157 Value *emitScaleForReusedOps(Value *VectorizedValue, IRBuilderBase &Builder, 15158 unsigned Cnt) { 15159 assert(IsSupportedHorRdxIdentityOp && 15160 "The optimization of matched scalar identity horizontal reductions " 15161 "must be supported."); 15162 switch (RdxKind) { 15163 case RecurKind::Add: { 15164 // res = mul vv, n 15165 Value *Scale = ConstantInt::get(VectorizedValue->getType(), Cnt); 15166 LLVM_DEBUG(dbgs() << "SLP: Add (to-mul) " << Cnt << "of " 15167 << VectorizedValue << ". (HorRdx)\n"); 15168 return Builder.CreateMul(VectorizedValue, Scale); 15169 } 15170 case RecurKind::Xor: { 15171 // res = n % 2 ? 0 : vv 15172 LLVM_DEBUG(dbgs() << "SLP: Xor " << Cnt << "of " << VectorizedValue 15173 << ". (HorRdx)\n"); 15174 if (Cnt % 2 == 0) 15175 return Constant::getNullValue(VectorizedValue->getType()); 15176 return VectorizedValue; 15177 } 15178 case RecurKind::FAdd: { 15179 // res = fmul v, n 15180 Value *Scale = ConstantFP::get(VectorizedValue->getType(), Cnt); 15181 LLVM_DEBUG(dbgs() << "SLP: FAdd (to-fmul) " << Cnt << "of " 15182 << VectorizedValue << ". (HorRdx)\n"); 15183 return Builder.CreateFMul(VectorizedValue, Scale); 15184 } 15185 case RecurKind::And: 15186 case RecurKind::Or: 15187 case RecurKind::SMax: 15188 case RecurKind::SMin: 15189 case RecurKind::UMax: 15190 case RecurKind::UMin: 15191 case RecurKind::FMax: 15192 case RecurKind::FMin: 15193 case RecurKind::FMaximum: 15194 case RecurKind::FMinimum: 15195 // res = vv 15196 return VectorizedValue; 15197 case RecurKind::Mul: 15198 case RecurKind::FMul: 15199 case RecurKind::FMulAdd: 15200 case RecurKind::IAnyOf: 15201 case RecurKind::FAnyOf: 15202 case RecurKind::None: 15203 llvm_unreachable("Unexpected reduction kind for repeated scalar."); 15204 } 15205 return nullptr; 15206 } 15207 15208 /// Emits actual operation for the scalar identity values, found during 15209 /// horizontal reduction analysis. 15210 Value *emitReusedOps(Value *VectorizedValue, IRBuilderBase &Builder, 15211 ArrayRef<Value *> VL, 15212 const MapVector<Value *, unsigned> &SameValuesCounter, 15213 const DenseMap<Value *, Value *> &TrackedToOrig) { 15214 assert(IsSupportedHorRdxIdentityOp && 15215 "The optimization of matched scalar identity horizontal reductions " 15216 "must be supported."); 15217 switch (RdxKind) { 15218 case RecurKind::Add: { 15219 // root = mul prev_root, <1, 1, n, 1> 15220 SmallVector<Constant *> Vals; 15221 for (Value *V : VL) { 15222 unsigned Cnt = SameValuesCounter.lookup(TrackedToOrig.find(V)->second); 15223 Vals.push_back(ConstantInt::get(V->getType(), Cnt, /*IsSigned=*/false)); 15224 } 15225 auto *Scale = ConstantVector::get(Vals); 15226 LLVM_DEBUG(dbgs() << "SLP: Add (to-mul) " << Scale << "of " 15227 << VectorizedValue << ". (HorRdx)\n"); 15228 return Builder.CreateMul(VectorizedValue, Scale); 15229 } 15230 case RecurKind::And: 15231 case RecurKind::Or: 15232 // No need for multiple or/and(s). 15233 LLVM_DEBUG(dbgs() << "SLP: And/or of same " << VectorizedValue 15234 << ". (HorRdx)\n"); 15235 return VectorizedValue; 15236 case RecurKind::SMax: 15237 case RecurKind::SMin: 15238 case RecurKind::UMax: 15239 case RecurKind::UMin: 15240 case RecurKind::FMax: 15241 case RecurKind::FMin: 15242 case RecurKind::FMaximum: 15243 case RecurKind::FMinimum: 15244 // No need for multiple min/max(s) of the same value. 15245 LLVM_DEBUG(dbgs() << "SLP: Max/min of same " << VectorizedValue 15246 << ". 
(HorRdx)\n"); 15247 return VectorizedValue; 15248 case RecurKind::Xor: { 15249 // Replace values with even number of repeats with 0, since 15250 // x xor x = 0. 15251 // root = shuffle prev_root, zeroinitalizer, <0, 1, 2, vf, 4, vf, 5, 6, 15252 // 7>, if elements 4th and 6th elements have even number of repeats. 15253 SmallVector<int> Mask( 15254 cast<FixedVectorType>(VectorizedValue->getType())->getNumElements(), 15255 PoisonMaskElem); 15256 std::iota(Mask.begin(), Mask.end(), 0); 15257 bool NeedShuffle = false; 15258 for (unsigned I = 0, VF = VL.size(); I < VF; ++I) { 15259 Value *V = VL[I]; 15260 unsigned Cnt = SameValuesCounter.lookup(TrackedToOrig.find(V)->second); 15261 if (Cnt % 2 == 0) { 15262 Mask[I] = VF; 15263 NeedShuffle = true; 15264 } 15265 } 15266 LLVM_DEBUG(dbgs() << "SLP: Xor <"; for (int I 15267 : Mask) dbgs() 15268 << I << " "; 15269 dbgs() << "> of " << VectorizedValue << ". (HorRdx)\n"); 15270 if (NeedShuffle) 15271 VectorizedValue = Builder.CreateShuffleVector( 15272 VectorizedValue, 15273 ConstantVector::getNullValue(VectorizedValue->getType()), Mask); 15274 return VectorizedValue; 15275 } 15276 case RecurKind::FAdd: { 15277 // root = fmul prev_root, <1.0, 1.0, n.0, 1.0> 15278 SmallVector<Constant *> Vals; 15279 for (Value *V : VL) { 15280 unsigned Cnt = SameValuesCounter.lookup(TrackedToOrig.find(V)->second); 15281 Vals.push_back(ConstantFP::get(V->getType(), Cnt)); 15282 } 15283 auto *Scale = ConstantVector::get(Vals); 15284 return Builder.CreateFMul(VectorizedValue, Scale); 15285 } 15286 case RecurKind::Mul: 15287 case RecurKind::FMul: 15288 case RecurKind::FMulAdd: 15289 case RecurKind::IAnyOf: 15290 case RecurKind::FAnyOf: 15291 case RecurKind::None: 15292 llvm_unreachable("Unexpected reduction kind for reused scalars."); 15293 } 15294 return nullptr; 15295 } 15296 }; 15297 } // end anonymous namespace 15298 15299 static std::optional<unsigned> getAggregateSize(Instruction *InsertInst) { 15300 if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) 15301 return cast<FixedVectorType>(IE->getType())->getNumElements(); 15302 15303 unsigned AggregateSize = 1; 15304 auto *IV = cast<InsertValueInst>(InsertInst); 15305 Type *CurrentType = IV->getType(); 15306 do { 15307 if (auto *ST = dyn_cast<StructType>(CurrentType)) { 15308 for (auto *Elt : ST->elements()) 15309 if (Elt != ST->getElementType(0)) // check homogeneity 15310 return std::nullopt; 15311 AggregateSize *= ST->getNumElements(); 15312 CurrentType = ST->getElementType(0); 15313 } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) { 15314 AggregateSize *= AT->getNumElements(); 15315 CurrentType = AT->getElementType(); 15316 } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) { 15317 AggregateSize *= VT->getNumElements(); 15318 return AggregateSize; 15319 } else if (CurrentType->isSingleValueType()) { 15320 return AggregateSize; 15321 } else { 15322 return std::nullopt; 15323 } 15324 } while (true); 15325 } 15326 15327 static void findBuildAggregate_rec(Instruction *LastInsertInst, 15328 TargetTransformInfo *TTI, 15329 SmallVectorImpl<Value *> &BuildVectorOpds, 15330 SmallVectorImpl<Value *> &InsertElts, 15331 unsigned OperandOffset) { 15332 do { 15333 Value *InsertedOperand = LastInsertInst->getOperand(1); 15334 std::optional<unsigned> OperandIndex = 15335 getInsertIndex(LastInsertInst, OperandOffset); 15336 if (!OperandIndex) 15337 return; 15338 if (isa<InsertElementInst, InsertValueInst>(InsertedOperand)) { 15339 findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI, 15340 
BuildVectorOpds, InsertElts, *OperandIndex); 15341 15342 } else { 15343 BuildVectorOpds[*OperandIndex] = InsertedOperand; 15344 InsertElts[*OperandIndex] = LastInsertInst; 15345 } 15346 LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0)); 15347 } while (LastInsertInst != nullptr && 15348 isa<InsertValueInst, InsertElementInst>(LastInsertInst) && 15349 LastInsertInst->hasOneUse()); 15350 } 15351 15352 /// Recognize construction of vectors like 15353 /// %ra = insertelement <4 x float> poison, float %s0, i32 0 15354 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 15355 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 15356 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 15357 /// starting from the last insertelement or insertvalue instruction. 15358 /// 15359 /// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>}, 15360 /// {{float, float}, {float, float}}, [2 x {float, float}] and so on. 15361 /// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples. 15362 /// 15363 /// Assume LastInsertInst is of InsertElementInst or InsertValueInst type. 15364 /// 15365 /// \return true if it matches. 15366 static bool findBuildAggregate(Instruction *LastInsertInst, 15367 TargetTransformInfo *TTI, 15368 SmallVectorImpl<Value *> &BuildVectorOpds, 15369 SmallVectorImpl<Value *> &InsertElts) { 15370 15371 assert((isa<InsertElementInst>(LastInsertInst) || 15372 isa<InsertValueInst>(LastInsertInst)) && 15373 "Expected insertelement or insertvalue instruction!"); 15374 15375 assert((BuildVectorOpds.empty() && InsertElts.empty()) && 15376 "Expected empty result vectors!"); 15377 15378 std::optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst); 15379 if (!AggregateSize) 15380 return false; 15381 BuildVectorOpds.resize(*AggregateSize); 15382 InsertElts.resize(*AggregateSize); 15383 15384 findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts, 0); 15385 llvm::erase(BuildVectorOpds, nullptr); 15386 llvm::erase(InsertElts, nullptr); 15387 if (BuildVectorOpds.size() >= 2) 15388 return true; 15389 15390 return false; 15391 } 15392 15393 /// Try and get a reduction instruction from a phi node. 15394 /// 15395 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions 15396 /// if they come from either \p ParentBB or a containing loop latch. 15397 /// 15398 /// \returns A candidate reduction value if possible, or \code nullptr \endcode 15399 /// if not possible. 15400 static Instruction *getReductionInstr(const DominatorTree *DT, PHINode *P, 15401 BasicBlock *ParentBB, LoopInfo *LI) { 15402 // There are situations where the reduction value is not dominated by the 15403 // reduction phi. Vectorizing such cases has been reported to cause 15404 // miscompiles. See PR25787. 15405 auto DominatedReduxValue = [&](Value *R) { 15406 return isa<Instruction>(R) && 15407 DT->dominates(P->getParent(), cast<Instruction>(R)->getParent()); 15408 }; 15409 15410 Instruction *Rdx = nullptr; 15411 15412 // Return the incoming value if it comes from the same BB as the phi node. 15413 if (P->getIncomingBlock(0) == ParentBB) { 15414 Rdx = dyn_cast<Instruction>(P->getIncomingValue(0)); 15415 } else if (P->getIncomingBlock(1) == ParentBB) { 15416 Rdx = dyn_cast<Instruction>(P->getIncomingValue(1)); 15417 } 15418 15419 if (Rdx && DominatedReduxValue(Rdx)) 15420 return Rdx; 15421 15422 // Otherwise, check whether we have a loop latch to look at. 
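// Hedged sketch of the shape handled below:
//   header:
//     %p = phi i32 [ 0, %entry ], [ %rdx, %latch ]
//     ...
//   latch:
//     %rdx = add i32 %p, %x
//     br label %header
// The incoming value from the latch (%rdx) is the candidate, provided
// DominatedReduxValue also holds for it (checked below).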
15423 Loop *BBL = LI->getLoopFor(ParentBB); 15424 if (!BBL) 15425 return nullptr; 15426 BasicBlock *BBLatch = BBL->getLoopLatch(); 15427 if (!BBLatch) 15428 return nullptr; 15429 15430 // There is a loop latch, return the incoming value if it comes from 15431 // that. This reduction pattern occasionally turns up. 15432 if (P->getIncomingBlock(0) == BBLatch) { 15433 Rdx = dyn_cast<Instruction>(P->getIncomingValue(0)); 15434 } else if (P->getIncomingBlock(1) == BBLatch) { 15435 Rdx = dyn_cast<Instruction>(P->getIncomingValue(1)); 15436 } 15437 15438 if (Rdx && DominatedReduxValue(Rdx)) 15439 return Rdx; 15440 15441 return nullptr; 15442 } 15443 15444 static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) { 15445 if (match(I, m_BinOp(m_Value(V0), m_Value(V1)))) 15446 return true; 15447 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1)))) 15448 return true; 15449 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1)))) 15450 return true; 15451 if (match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(V0), m_Value(V1)))) 15452 return true; 15453 if (match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(V0), m_Value(V1)))) 15454 return true; 15455 if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1)))) 15456 return true; 15457 if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1)))) 15458 return true; 15459 if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1)))) 15460 return true; 15461 if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1)))) 15462 return true; 15463 return false; 15464 } 15465 15466 /// We could have an initial reduction that is not an add. 15467 /// r *= v1 + v2 + v3 + v4 15468 /// In such a case start looking for a tree rooted in the first '+'. 15469 /// \Returns the new root if found, which may be nullptr if not an instruction. 15470 static Instruction *tryGetSecondaryReductionRoot(PHINode *Phi, 15471 Instruction *Root) { 15472 assert((isa<BinaryOperator>(Root) || isa<SelectInst>(Root) || 15473 isa<IntrinsicInst>(Root)) && 15474 "Expected binop, select, or intrinsic for reduction matching"); 15475 Value *LHS = 15476 Root->getOperand(HorizontalReduction::getFirstOperandIndex(Root)); 15477 Value *RHS = 15478 Root->getOperand(HorizontalReduction::getFirstOperandIndex(Root) + 1); 15479 if (LHS == Phi) 15480 return dyn_cast<Instruction>(RHS); 15481 if (RHS == Phi) 15482 return dyn_cast<Instruction>(LHS); 15483 return nullptr; 15484 } 15485 15486 /// \p Returns the first operand of \p I that does not match \p Phi. If 15487 /// operand is not an instruction it returns nullptr. 15488 static Instruction *getNonPhiOperand(Instruction *I, PHINode *Phi) { 15489 Value *Op0 = nullptr; 15490 Value *Op1 = nullptr; 15491 if (!matchRdxBop(I, Op0, Op1)) 15492 return nullptr; 15493 return dyn_cast<Instruction>(Op0 == Phi ? Op1 : Op0); 15494 } 15495 15496 /// \Returns true if \p I is a candidate instruction for reduction vectorization. 
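/// Illustrative cases: a binary operator or a min/max intrinsic matched by
/// matchRdxBop (add, fadd, smax, minnum, ...), or a select encoding a
/// select-based min/max idiom.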
15497 static bool isReductionCandidate(Instruction *I) { 15498 bool IsSelect = match(I, m_Select(m_Value(), m_Value(), m_Value())); 15499 Value *B0 = nullptr, *B1 = nullptr; 15500 bool IsBinop = matchRdxBop(I, B0, B1); 15501 return IsBinop || IsSelect; 15502 } 15503 15504 bool SLPVectorizerPass::vectorizeHorReduction( 15505 PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R, TargetTransformInfo *TTI, 15506 SmallVectorImpl<WeakTrackingVH> &PostponedInsts) { 15507 if (!ShouldVectorizeHor) 15508 return false; 15509 bool TryOperandsAsNewSeeds = P && isa<BinaryOperator>(Root); 15510 15511 if (Root->getParent() != BB || isa<PHINode>(Root)) 15512 return false; 15513 15514 // If we can find a secondary reduction root, use that instead. 15515 auto SelectRoot = [&]() { 15516 if (TryOperandsAsNewSeeds && isReductionCandidate(Root) && 15517 HorizontalReduction::getRdxKind(Root) != RecurKind::None) 15518 if (Instruction *NewRoot = tryGetSecondaryReductionRoot(P, Root)) 15519 return NewRoot; 15520 return Root; 15521 }; 15522 15523 // Start analysis starting from Root instruction. If horizontal reduction is 15524 // found, try to vectorize it. If it is not a horizontal reduction or 15525 // vectorization is not possible or not effective, and currently analyzed 15526 // instruction is a binary operation, try to vectorize the operands, using 15527 // pre-order DFS traversal order. If the operands were not vectorized, repeat 15528 // the same procedure considering each operand as a possible root of the 15529 // horizontal reduction. 15530 // Interrupt the process if the Root instruction itself was vectorized or all 15531 // sub-trees not higher that RecursionMaxDepth were analyzed/vectorized. 15532 // If a horizintal reduction was not matched or vectorized we collect 15533 // instructions for possible later attempts for vectorization. 15534 std::queue<std::pair<Instruction *, unsigned>> Stack; 15535 Stack.emplace(SelectRoot(), 0); 15536 SmallPtrSet<Value *, 8> VisitedInstrs; 15537 bool Res = false; 15538 auto &&TryToReduce = [this, TTI, &R](Instruction *Inst) -> Value * { 15539 if (R.isAnalyzedReductionRoot(Inst)) 15540 return nullptr; 15541 if (!isReductionCandidate(Inst)) 15542 return nullptr; 15543 HorizontalReduction HorRdx; 15544 if (!HorRdx.matchAssociativeReduction(R, Inst, *SE, *DL, *TLI)) 15545 return nullptr; 15546 return HorRdx.tryToReduce(R, TTI, *TLI); 15547 }; 15548 auto TryAppendToPostponedInsts = [&](Instruction *FutureSeed) { 15549 if (TryOperandsAsNewSeeds && FutureSeed == Root) { 15550 FutureSeed = getNonPhiOperand(Root, P); 15551 if (!FutureSeed) 15552 return false; 15553 } 15554 // Do not collect CmpInst or InsertElementInst/InsertValueInst as their 15555 // analysis is done separately. 15556 if (!isa<CmpInst, InsertElementInst, InsertValueInst>(FutureSeed)) 15557 PostponedInsts.push_back(FutureSeed); 15558 return true; 15559 }; 15560 15561 while (!Stack.empty()) { 15562 Instruction *Inst; 15563 unsigned Level; 15564 std::tie(Inst, Level) = Stack.front(); 15565 Stack.pop(); 15566 // Do not try to analyze instruction that has already been vectorized. 15567 // This may happen when we vectorize instruction operands on a previous 15568 // iteration while stack was populated before that happened. 15569 if (R.isDeleted(Inst)) 15570 continue; 15571 if (Value *VectorizedV = TryToReduce(Inst)) { 15572 Res = true; 15573 if (auto *I = dyn_cast<Instruction>(VectorizedV)) { 15574 // Try to find another reduction. 
15575 Stack.emplace(I, Level); 15576 continue; 15577 } 15578 } else { 15579 // We could not vectorize `Inst` so try to use it as a future seed. 15580 if (!TryAppendToPostponedInsts(Inst)) { 15581 assert(Stack.empty() && "Expected empty stack"); 15582 break; 15583 } 15584 } 15585 15586 // Try to vectorize operands. 15587 // Continue analysis for the instruction from the same basic block only to 15588 // save compile time. 15589 if (++Level < RecursionMaxDepth) 15590 for (auto *Op : Inst->operand_values()) 15591 if (VisitedInstrs.insert(Op).second) 15592 if (auto *I = dyn_cast<Instruction>(Op)) 15593 // Do not try to vectorize CmpInst operands, this is done 15594 // separately. 15595 if (!isa<PHINode, CmpInst, InsertElementInst, InsertValueInst>(I) && 15596 !R.isDeleted(I) && I->getParent() == BB) 15597 Stack.emplace(I, Level); 15598 } 15599 return Res; 15600 } 15601 15602 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Instruction *Root, 15603 BasicBlock *BB, BoUpSLP &R, 15604 TargetTransformInfo *TTI) { 15605 SmallVector<WeakTrackingVH> PostponedInsts; 15606 bool Res = vectorizeHorReduction(P, Root, BB, R, TTI, PostponedInsts); 15607 Res |= tryToVectorize(PostponedInsts, R); 15608 return Res; 15609 } 15610 15611 bool SLPVectorizerPass::tryToVectorize(ArrayRef<WeakTrackingVH> Insts, 15612 BoUpSLP &R) { 15613 bool Res = false; 15614 for (Value *V : Insts) 15615 if (auto *Inst = dyn_cast<Instruction>(V); Inst && !R.isDeleted(Inst)) 15616 Res |= tryToVectorize(Inst, R); 15617 return Res; 15618 } 15619 15620 bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI, 15621 BasicBlock *BB, BoUpSLP &R) { 15622 if (!R.canMapToVector(IVI->getType())) 15623 return false; 15624 15625 SmallVector<Value *, 16> BuildVectorOpds; 15626 SmallVector<Value *, 16> BuildVectorInsts; 15627 if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts)) 15628 return false; 15629 15630 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n"); 15631 // Aggregate value is unlikely to be processed in vector register. 15632 return tryToVectorizeList(BuildVectorOpds, R); 15633 } 15634 15635 bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI, 15636 BasicBlock *BB, BoUpSLP &R) { 15637 SmallVector<Value *, 16> BuildVectorInsts; 15638 SmallVector<Value *, 16> BuildVectorOpds; 15639 SmallVector<int> Mask; 15640 if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) || 15641 (llvm::all_of( 15642 BuildVectorOpds, 15643 [](Value *V) { return isa<ExtractElementInst, UndefValue>(V); }) && 15644 isFixedVectorShuffle(BuildVectorOpds, Mask))) 15645 return false; 15646 15647 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n"); 15648 return tryToVectorizeList(BuildVectorInsts, R); 15649 } 15650 15651 template <typename T> 15652 static bool tryToVectorizeSequence( 15653 SmallVectorImpl<T *> &Incoming, function_ref<bool(T *, T *)> Comparator, 15654 function_ref<bool(T *, T *)> AreCompatible, 15655 function_ref<bool(ArrayRef<T *>, bool)> TryToVectorizeHelper, 15656 bool MaxVFOnly, BoUpSLP &R) { 15657 bool Changed = false; 15658 // Sort by type, parent, operands. 15659 stable_sort(Incoming, Comparator); 15660 15661 // Try to vectorize elements base on their type. 15662 SmallVector<T *> Candidates; 15663 for (auto *IncIt = Incoming.begin(), *E = Incoming.end(); IncIt != E;) { 15664 // Look for the next elements with the same type, parent and operand 15665 // kinds. 
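// For example (hedged): with Incoming sorted as
//   [ i32 %p0, i32 %p1, float %q0 ]
// the first run groups the two i32 entries into one vectorization attempt;
// the float entry then starts the next run.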
15666 auto *SameTypeIt = IncIt; 15667 while (SameTypeIt != E && AreCompatible(*SameTypeIt, *IncIt)) 15668 ++SameTypeIt; 15669 15670 // Try to vectorize them. 15671 unsigned NumElts = (SameTypeIt - IncIt); 15672 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at nodes (" 15673 << NumElts << ")\n"); 15674 // The vectorization is a 3-state attempt: 15675 // 1. Try to vectorize instructions with the same/alternate opcodes with the 15676 // size of maximal register at first. 15677 // 2. Try to vectorize remaining instructions with the same type, if 15678 // possible. This may result in the better vectorization results rather than 15679 // if we try just to vectorize instructions with the same/alternate opcodes. 15680 // 3. Final attempt to try to vectorize all instructions with the 15681 // same/alternate ops only, this may result in some extra final 15682 // vectorization. 15683 if (NumElts > 1 && 15684 TryToVectorizeHelper(ArrayRef(IncIt, NumElts), MaxVFOnly)) { 15685 // Success start over because instructions might have been changed. 15686 Changed = true; 15687 } else { 15688 /// \Returns the minimum number of elements that we will attempt to 15689 /// vectorize. 15690 auto GetMinNumElements = [&R](Value *V) { 15691 unsigned EltSize = R.getVectorElementSize(V); 15692 return std::max(2U, R.getMaxVecRegSize() / EltSize); 15693 }; 15694 if (NumElts < GetMinNumElements(*IncIt) && 15695 (Candidates.empty() || 15696 Candidates.front()->getType() == (*IncIt)->getType())) { 15697 Candidates.append(IncIt, std::next(IncIt, NumElts)); 15698 } 15699 } 15700 // Final attempt to vectorize instructions with the same types. 15701 if (Candidates.size() > 1 && 15702 (SameTypeIt == E || (*SameTypeIt)->getType() != (*IncIt)->getType())) { 15703 if (TryToVectorizeHelper(Candidates, /*MaxVFOnly=*/false)) { 15704 // Success start over because instructions might have been changed. 15705 Changed = true; 15706 } else if (MaxVFOnly) { 15707 // Try to vectorize using small vectors. 15708 for (auto *It = Candidates.begin(), *End = Candidates.end(); 15709 It != End;) { 15710 auto *SameTypeIt = It; 15711 while (SameTypeIt != End && AreCompatible(*SameTypeIt, *It)) 15712 ++SameTypeIt; 15713 unsigned NumElts = (SameTypeIt - It); 15714 if (NumElts > 1 && TryToVectorizeHelper(ArrayRef(It, NumElts), 15715 /*MaxVFOnly=*/false)) 15716 Changed = true; 15717 It = SameTypeIt; 15718 } 15719 } 15720 Candidates.clear(); 15721 } 15722 15723 // Start over at the next instruction of a different type (or the end). 15724 IncIt = SameTypeIt; 15725 } 15726 return Changed; 15727 } 15728 15729 /// Compare two cmp instructions. If IsCompatibility is true, function returns 15730 /// true if 2 cmps have same/swapped predicates and mos compatible corresponding 15731 /// operands. If IsCompatibility is false, function implements strict weak 15732 /// ordering relation between two cmp instructions, returning true if the first 15733 /// instruction is "less" than the second, i.e. its predicate is less than the 15734 /// predicate of the second or the operands IDs are less than the operands IDs 15735 /// of the second cmp instruction. 
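/// Illustrative example (hedged): 'icmp slt i32 %a, %b' and
/// 'icmp sgt i32 %b, %a' share the same canonical (swapped) predicate and
/// their operands line up once one side is swapped, so in compatibility mode
/// they compare as compatible; in ordering mode they are ranked by predicate
/// and operand value IDs instead.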
15736 template <bool IsCompatibility> 15737 static bool compareCmp(Value *V, Value *V2, TargetLibraryInfo &TLI, 15738 const DominatorTree &DT) { 15739 assert(isValidElementType(V->getType()) && 15740 isValidElementType(V2->getType()) && 15741 "Expected valid element types only."); 15742 if (V == V2) 15743 return IsCompatibility; 15744 auto *CI1 = cast<CmpInst>(V); 15745 auto *CI2 = cast<CmpInst>(V2); 15746 if (CI1->getOperand(0)->getType()->getTypeID() < 15747 CI2->getOperand(0)->getType()->getTypeID()) 15748 return !IsCompatibility; 15749 if (CI1->getOperand(0)->getType()->getTypeID() > 15750 CI2->getOperand(0)->getType()->getTypeID()) 15751 return false; 15752 CmpInst::Predicate Pred1 = CI1->getPredicate(); 15753 CmpInst::Predicate Pred2 = CI2->getPredicate(); 15754 CmpInst::Predicate SwapPred1 = CmpInst::getSwappedPredicate(Pred1); 15755 CmpInst::Predicate SwapPred2 = CmpInst::getSwappedPredicate(Pred2); 15756 CmpInst::Predicate BasePred1 = std::min(Pred1, SwapPred1); 15757 CmpInst::Predicate BasePred2 = std::min(Pred2, SwapPred2); 15758 if (BasePred1 < BasePred2) 15759 return !IsCompatibility; 15760 if (BasePred1 > BasePred2) 15761 return false; 15762 // Compare operands. 15763 bool CI1Preds = Pred1 == BasePred1; 15764 bool CI2Preds = Pred2 == BasePred1; 15765 for (int I = 0, E = CI1->getNumOperands(); I < E; ++I) { 15766 auto *Op1 = CI1->getOperand(CI1Preds ? I : E - I - 1); 15767 auto *Op2 = CI2->getOperand(CI2Preds ? I : E - I - 1); 15768 if (Op1 == Op2) 15769 continue; 15770 if (Op1->getValueID() < Op2->getValueID()) 15771 return !IsCompatibility; 15772 if (Op1->getValueID() > Op2->getValueID()) 15773 return false; 15774 if (auto *I1 = dyn_cast<Instruction>(Op1)) 15775 if (auto *I2 = dyn_cast<Instruction>(Op2)) { 15776 if (IsCompatibility) { 15777 if (I1->getParent() != I2->getParent()) 15778 return false; 15779 } else { 15780 // Try to compare nodes with same parent. 15781 DomTreeNodeBase<BasicBlock> *NodeI1 = DT.getNode(I1->getParent()); 15782 DomTreeNodeBase<BasicBlock> *NodeI2 = DT.getNode(I2->getParent()); 15783 if (!NodeI1) 15784 return NodeI2 != nullptr; 15785 if (!NodeI2) 15786 return false; 15787 assert((NodeI1 == NodeI2) == 15788 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) && 15789 "Different nodes should have different DFS numbers"); 15790 if (NodeI1 != NodeI2) 15791 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn(); 15792 } 15793 InstructionsState S = getSameOpcode({I1, I2}, TLI); 15794 if (S.getOpcode() && (IsCompatibility || !S.isAltShuffle())) 15795 continue; 15796 if (IsCompatibility) 15797 return false; 15798 if (I1->getOpcode() != I2->getOpcode()) 15799 return I1->getOpcode() < I2->getOpcode(); 15800 } 15801 } 15802 return IsCompatibility; 15803 } 15804 15805 template <typename ItT> 15806 bool SLPVectorizerPass::vectorizeCmpInsts(iterator_range<ItT> CmpInsts, 15807 BasicBlock *BB, BoUpSLP &R) { 15808 bool Changed = false; 15809 // Try to find reductions first. 15810 for (CmpInst *I : CmpInsts) { 15811 if (R.isDeleted(I)) 15812 continue; 15813 for (Value *Op : I->operands()) 15814 if (auto *RootOp = dyn_cast<Instruction>(Op)) 15815 Changed |= vectorizeRootInstruction(nullptr, RootOp, BB, R, TTI); 15816 } 15817 // Try to vectorize operands as vector bundles. 15818 for (CmpInst *I : CmpInsts) { 15819 if (R.isDeleted(I)) 15820 continue; 15821 Changed |= tryToVectorize(I, R); 15822 } 15823 // Try to vectorize list of compares. 15824 // Sort by type, compare predicate, etc. 
15825 auto CompareSorter = [&](Value *V, Value *V2) { 15826 if (V == V2) 15827 return false; 15828 return compareCmp<false>(V, V2, *TLI, *DT); 15829 }; 15830 15831 auto AreCompatibleCompares = [&](Value *V1, Value *V2) { 15832 if (V1 == V2) 15833 return true; 15834 return compareCmp<true>(V1, V2, *TLI, *DT); 15835 }; 15836 15837 SmallVector<Value *> Vals; 15838 for (Instruction *V : CmpInsts) 15839 if (!R.isDeleted(V) && isValidElementType(V->getType())) 15840 Vals.push_back(V); 15841 if (Vals.size() <= 1) 15842 return Changed; 15843 Changed |= tryToVectorizeSequence<Value>( 15844 Vals, CompareSorter, AreCompatibleCompares, 15845 [this, &R](ArrayRef<Value *> Candidates, bool MaxVFOnly) { 15846 // Exclude possible reductions from other blocks. 15847 bool ArePossiblyReducedInOtherBlock = any_of(Candidates, [](Value *V) { 15848 return any_of(V->users(), [V](User *U) { 15849 auto *Select = dyn_cast<SelectInst>(U); 15850 return Select && 15851 Select->getParent() != cast<Instruction>(V)->getParent(); 15852 }); 15853 }); 15854 if (ArePossiblyReducedInOtherBlock) 15855 return false; 15856 return tryToVectorizeList(Candidates, R, MaxVFOnly); 15857 }, 15858 /*MaxVFOnly=*/true, R); 15859 return Changed; 15860 } 15861 15862 bool SLPVectorizerPass::vectorizeInserts(InstSetVector &Instructions, 15863 BasicBlock *BB, BoUpSLP &R) { 15864 assert(all_of(Instructions, 15865 [](auto *I) { 15866 return isa<InsertElementInst, InsertValueInst>(I); 15867 }) && 15868 "This function only accepts Insert instructions"); 15869 bool OpsChanged = false; 15870 SmallVector<WeakTrackingVH> PostponedInsts; 15871 // pass1 - try to vectorize reductions only 15872 for (auto *I : reverse(Instructions)) { 15873 if (R.isDeleted(I)) 15874 continue; 15875 OpsChanged |= vectorizeHorReduction(nullptr, I, BB, R, TTI, PostponedInsts); 15876 } 15877 // pass2 - try to match and vectorize a buildvector sequence. 15878 for (auto *I : reverse(Instructions)) { 15879 if (R.isDeleted(I) || isa<CmpInst>(I)) 15880 continue; 15881 if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) { 15882 OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R); 15883 } else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) { 15884 OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R); 15885 } 15886 } 15887 // Now try to vectorize postponed instructions. 15888 OpsChanged |= tryToVectorize(PostponedInsts, R); 15889 15890 Instructions.clear(); 15891 return OpsChanged; 15892 } 15893 15894 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { 15895 bool Changed = false; 15896 SmallVector<Value *, 4> Incoming; 15897 SmallPtrSet<Value *, 16> VisitedInstrs; 15898 // Maps phi nodes to the non-phi nodes found in the use tree for each phi 15899 // node. Allows better to identify the chains that can be vectorized in the 15900 // better way. 15901 DenseMap<Value *, SmallVector<Value *, 4>> PHIToOpcodes; 15902 auto PHICompare = [this, &PHIToOpcodes](Value *V1, Value *V2) { 15903 assert(isValidElementType(V1->getType()) && 15904 isValidElementType(V2->getType()) && 15905 "Expected vectorizable types only."); 15906 // It is fine to compare type IDs here, since we expect only vectorizable 15907 // types, like ints, floats and pointers, we don't care about other type. 
15908 if (V1->getType()->getTypeID() < V2->getType()->getTypeID()) 15909 return true; 15910 if (V1->getType()->getTypeID() > V2->getType()->getTypeID()) 15911 return false; 15912 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1]; 15913 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2]; 15914 if (Opcodes1.size() < Opcodes2.size()) 15915 return true; 15916 if (Opcodes1.size() > Opcodes2.size()) 15917 return false; 15918 for (int I = 0, E = Opcodes1.size(); I < E; ++I) { 15919 // Undefs are compatible with any other value. 15920 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I])) { 15921 if (isa<Instruction>(Opcodes1[I])) 15922 return true; 15923 if (isa<Instruction>(Opcodes2[I])) 15924 return false; 15925 if (isa<Constant>(Opcodes1[I]) && !isa<UndefValue>(Opcodes1[I])) 15926 return true; 15927 if (isa<Constant>(Opcodes2[I]) && !isa<UndefValue>(Opcodes2[I])) 15928 return false; 15929 if (isa<UndefValue>(Opcodes1[I]) && isa<UndefValue>(Opcodes2[I])) 15930 continue; 15931 return isa<UndefValue>(Opcodes2[I]); 15932 } 15933 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I])) 15934 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) { 15935 DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent()); 15936 DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent()); 15937 if (!NodeI1) 15938 return NodeI2 != nullptr; 15939 if (!NodeI2) 15940 return false; 15941 assert((NodeI1 == NodeI2) == 15942 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) && 15943 "Different nodes should have different DFS numbers"); 15944 if (NodeI1 != NodeI2) 15945 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn(); 15946 InstructionsState S = getSameOpcode({I1, I2}, *TLI); 15947 if (S.getOpcode() && !S.isAltShuffle()) 15948 continue; 15949 return I1->getOpcode() < I2->getOpcode(); 15950 } 15951 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I])) 15952 return Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID(); 15953 if (isa<Instruction>(Opcodes1[I])) 15954 return true; 15955 if (isa<Instruction>(Opcodes2[I])) 15956 return false; 15957 if (isa<Constant>(Opcodes1[I])) 15958 return true; 15959 if (isa<Constant>(Opcodes2[I])) 15960 return false; 15961 if (Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID()) 15962 return true; 15963 if (Opcodes1[I]->getValueID() > Opcodes2[I]->getValueID()) 15964 return false; 15965 } 15966 return false; 15967 }; 15968 auto AreCompatiblePHIs = [&PHIToOpcodes, this](Value *V1, Value *V2) { 15969 if (V1 == V2) 15970 return true; 15971 if (V1->getType() != V2->getType()) 15972 return false; 15973 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1]; 15974 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2]; 15975 if (Opcodes1.size() != Opcodes2.size()) 15976 return false; 15977 for (int I = 0, E = Opcodes1.size(); I < E; ++I) { 15978 // Undefs are compatible with any other value. 
15979 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I])) 15980 continue; 15981 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I])) 15982 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) { 15983 if (I1->getParent() != I2->getParent()) 15984 return false; 15985 InstructionsState S = getSameOpcode({I1, I2}, *TLI); 15986 if (S.getOpcode()) 15987 continue; 15988 return false; 15989 } 15990 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I])) 15991 continue; 15992 if (Opcodes1[I]->getValueID() != Opcodes2[I]->getValueID()) 15993 return false; 15994 } 15995 return true; 15996 }; 15997 15998 bool HaveVectorizedPhiNodes = false; 15999 do { 16000 // Collect the incoming values from the PHIs. 16001 Incoming.clear(); 16002 for (Instruction &I : *BB) { 16003 PHINode *P = dyn_cast<PHINode>(&I); 16004 if (!P) 16005 break; 16006 16007 // No need to analyze deleted, vectorized and non-vectorizable 16008 // instructions. 16009 if (!VisitedInstrs.count(P) && !R.isDeleted(P) && 16010 isValidElementType(P->getType())) 16011 Incoming.push_back(P); 16012 } 16013 16014 if (Incoming.size() <= 1) 16015 break; 16016 16017 // Find the corresponding non-phi nodes for better matching when trying to 16018 // build the tree. 16019 for (Value *V : Incoming) { 16020 SmallVectorImpl<Value *> &Opcodes = 16021 PHIToOpcodes.try_emplace(V).first->getSecond(); 16022 if (!Opcodes.empty()) 16023 continue; 16024 SmallVector<Value *, 4> Nodes(1, V); 16025 SmallPtrSet<Value *, 4> Visited; 16026 while (!Nodes.empty()) { 16027 auto *PHI = cast<PHINode>(Nodes.pop_back_val()); 16028 if (!Visited.insert(PHI).second) 16029 continue; 16030 for (Value *V : PHI->incoming_values()) { 16031 if (auto *PHI1 = dyn_cast<PHINode>((V))) { 16032 Nodes.push_back(PHI1); 16033 continue; 16034 } 16035 Opcodes.emplace_back(V); 16036 } 16037 } 16038 } 16039 16040 HaveVectorizedPhiNodes = tryToVectorizeSequence<Value>( 16041 Incoming, PHICompare, AreCompatiblePHIs, 16042 [this, &R](ArrayRef<Value *> Candidates, bool MaxVFOnly) { 16043 return tryToVectorizeList(Candidates, R, MaxVFOnly); 16044 }, 16045 /*MaxVFOnly=*/true, R); 16046 Changed |= HaveVectorizedPhiNodes; 16047 VisitedInstrs.insert(Incoming.begin(), Incoming.end()); 16048 } while (HaveVectorizedPhiNodes); 16049 16050 VisitedInstrs.clear(); 16051 16052 InstSetVector PostProcessInserts; 16053 SmallSetVector<CmpInst *, 8> PostProcessCmps; 16054 // Vectorizes Inserts in `PostProcessInserts` and if `VecctorizeCmps` is true 16055 // also vectorizes `PostProcessCmps`. 16056 auto VectorizeInsertsAndCmps = [&](bool VectorizeCmps) { 16057 bool Changed = vectorizeInserts(PostProcessInserts, BB, R); 16058 if (VectorizeCmps) { 16059 Changed |= vectorizeCmpInsts(reverse(PostProcessCmps), BB, R); 16060 PostProcessCmps.clear(); 16061 } 16062 PostProcessInserts.clear(); 16063 return Changed; 16064 }; 16065 // Returns true if `I` is in `PostProcessInserts` or `PostProcessCmps`. 16066 auto IsInPostProcessInstrs = [&](Instruction *I) { 16067 if (auto *Cmp = dyn_cast<CmpInst>(I)) 16068 return PostProcessCmps.contains(Cmp); 16069 return isa<InsertElementInst, InsertValueInst>(I) && 16070 PostProcessInserts.contains(I); 16071 }; 16072 // Returns true if `I` is an instruction without users, like terminator, or 16073 // function call with ignored return value, store. Ignore unused instructions 16074 // (basing on instruction type, except for CallInst and InvokeInst). 
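// E.g. (illustrative): 'store i32 %x, ptr %p', a 'ret', or a call whose
// non-void result is never read all qualify as seeds here, while a dead
// 'add' does not, because only calls/invokes are accepted among unused
// value-producing instructions.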
16075 auto HasNoUsers = [](Instruction *I) { 16076 return I->use_empty() && 16077 (I->getType()->isVoidTy() || isa<CallInst, InvokeInst>(I)); 16078 }; 16079 for (BasicBlock::iterator It = BB->begin(), E = BB->end(); It != E; ++It) { 16080 // Skip instructions with scalable type. The num of elements is unknown at 16081 // compile-time for scalable type. 16082 if (isa<ScalableVectorType>(It->getType())) 16083 continue; 16084 16085 // Skip instructions marked for the deletion. 16086 if (R.isDeleted(&*It)) 16087 continue; 16088 // We may go through BB multiple times so skip the one we have checked. 16089 if (!VisitedInstrs.insert(&*It).second) { 16090 if (HasNoUsers(&*It) && 16091 VectorizeInsertsAndCmps(/*VectorizeCmps=*/It->isTerminator())) { 16092 // We would like to start over since some instructions are deleted 16093 // and the iterator may become invalid value. 16094 Changed = true; 16095 It = BB->begin(); 16096 E = BB->end(); 16097 } 16098 continue; 16099 } 16100 16101 if (isa<DbgInfoIntrinsic>(It)) 16102 continue; 16103 16104 // Try to vectorize reductions that use PHINodes. 16105 if (PHINode *P = dyn_cast<PHINode>(It)) { 16106 // Check that the PHI is a reduction PHI. 16107 if (P->getNumIncomingValues() == 2) { 16108 // Try to match and vectorize a horizontal reduction. 16109 Instruction *Root = getReductionInstr(DT, P, BB, LI); 16110 if (Root && vectorizeRootInstruction(P, Root, BB, R, TTI)) { 16111 Changed = true; 16112 It = BB->begin(); 16113 E = BB->end(); 16114 continue; 16115 } 16116 } 16117 // Try to vectorize the incoming values of the PHI, to catch reductions 16118 // that feed into PHIs. 16119 for (unsigned I = 0, E = P->getNumIncomingValues(); I != E; I++) { 16120 // Skip if the incoming block is the current BB for now. Also, bypass 16121 // unreachable IR for efficiency and to avoid crashing. 16122 // TODO: Collect the skipped incoming values and try to vectorize them 16123 // after processing BB. 16124 if (BB == P->getIncomingBlock(I) || 16125 !DT->isReachableFromEntry(P->getIncomingBlock(I))) 16126 continue; 16127 16128 // Postponed instructions should not be vectorized here, delay their 16129 // vectorization. 16130 if (auto *PI = dyn_cast<Instruction>(P->getIncomingValue(I)); 16131 PI && !IsInPostProcessInstrs(PI)) 16132 Changed |= vectorizeRootInstruction(nullptr, PI, 16133 P->getIncomingBlock(I), R, TTI); 16134 } 16135 continue; 16136 } 16137 16138 if (HasNoUsers(&*It)) { 16139 bool OpsChanged = false; 16140 auto *SI = dyn_cast<StoreInst>(It); 16141 bool TryToVectorizeRoot = ShouldStartVectorizeHorAtStore || !SI; 16142 if (SI) { 16143 auto *I = Stores.find(getUnderlyingObject(SI->getPointerOperand())); 16144 // Try to vectorize chain in store, if this is the only store to the 16145 // address in the block. 16146 // TODO: This is just a temporarily solution to save compile time. Need 16147 // to investigate if we can safely turn on slp-vectorize-hor-store 16148 // instead to allow lookup for reduction chains in all non-vectorized 16149 // stores (need to check side effects and compile time). 16150 TryToVectorizeRoot |= (I == Stores.end() || I->second.size() == 1) && 16151 SI->getValueOperand()->hasOneUse(); 16152 } 16153 if (TryToVectorizeRoot) { 16154 for (auto *V : It->operand_values()) { 16155 // Postponed instructions should not be vectorized here, delay their 16156 // vectorization. 16157 if (auto *VI = dyn_cast<Instruction>(V); 16158 VI && !IsInPostProcessInstrs(VI)) 16159 // Try to match and vectorize a horizontal reduction. 
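// (Hedged example: if *It is 'store i32 %sum, ptr %p' and %sum is a chained
// add of a0..a3, then %sum is the operand handed to vectorizeRootInstruction
// below as a potential horizontal-reduction root.)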
            OpsChanged |= vectorizeRootInstruction(nullptr, VI, BB, R, TTI);
        }
      }
      // Start vectorization of the post-process list of instructions from the
      // top-tree instructions, to try to vectorize as many instructions as
      // possible.
      OpsChanged |=
          VectorizeInsertsAndCmps(/*VectorizeCmps=*/It->isTerminator());
      if (OpsChanged) {
        // We would like to start over since some instructions are deleted
        // and the iterator may have been invalidated.
        Changed = true;
        It = BB->begin();
        E = BB->end();
        continue;
      }
    }

    if (isa<InsertElementInst, InsertValueInst>(It))
      PostProcessInserts.insert(&*It);
    else if (isa<CmpInst>(It))
      PostProcessCmps.insert(cast<CmpInst>(&*It));
  }

  return Changed;
}

bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
  auto Changed = false;
  for (auto &Entry : GEPs) {
    // If the getelementptr list has fewer than two elements, there's nothing
    // to do.
    if (Entry.second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
                      << Entry.second.size() << ".\n");

    // Process the GEP list in chunks suitable for the target's supported
    // vector size. If a vector register can't hold 1 element, we are done. We
    // are trying to vectorize the index computations, so the maximum number of
    // elements is based on the size of the index expression, rather than the
    // size of the GEP itself (the target's pointer size).
    unsigned MaxVecRegSize = R.getMaxVecRegSize();
    unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin());
    if (MaxVecRegSize < EltSize)
      continue;

    unsigned MaxElts = MaxVecRegSize / EltSize;
    for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) {
      auto Len = std::min<unsigned>(BE - BI, MaxElts);
      ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len);

      // Initialize a set of candidate getelementptrs. Note that we use a
      // SetVector here to preserve program order. If the index computations
      // are vectorizable and begin with loads, we want to minimize the chance
      // of having to reorder them later.
      SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());

      // Some of the candidates may have already been vectorized after we
      // initially collected them. If so, they are marked as deleted, so remove
      // them from the set of candidates.
      Candidates.remove_if(
          [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); });

      // Remove from the set of candidates all pairs of getelementptrs with
      // constant differences. Such getelementptrs are likely not good
      // candidates for vectorization in a bottom-up phase since one can be
      // computed from the other. We also ensure all candidate getelementptr
      // indices are unique.
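      // For example (illustrative), given %g1 = getelementptr i32, ptr %p,
      // i64 %i and %g2 = getelementptr i32, ptr %p, i64 %j where
      // %j = add i64 %i, 1, SCEV folds their difference to a constant, so
      // both candidates are removed below.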
      for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
        auto *GEPI = GEPList[I];
        if (!Candidates.count(GEPI))
          continue;
        auto *SCEVI = SE->getSCEV(GEPList[I]);
        for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
          auto *GEPJ = GEPList[J];
          auto *SCEVJ = SE->getSCEV(GEPList[J]);
          if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
            Candidates.remove(GEPI);
            Candidates.remove(GEPJ);
          } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
            Candidates.remove(GEPJ);
          }
        }
      }

      // We break out of the above computation as soon as we know there are
      // fewer than two candidates remaining.
      if (Candidates.size() < 2)
        continue;

      // Add the single, non-constant index of each candidate to the bundle. We
      // ensured the indices met these constraints when we originally collected
      // the getelementptrs.
      SmallVector<Value *, 16> Bundle(Candidates.size());
      auto BundleIndex = 0u;
      for (auto *V : Candidates) {
        auto *GEP = cast<GetElementPtrInst>(V);
        auto *GEPIdx = GEP->idx_begin()->get();
        assert(GEP->getNumIndices() == 1 && !isa<Constant>(GEPIdx));
        Bundle[BundleIndex++] = GEPIdx;
      }

      // Try to vectorize the indices. We are currently only interested in
      // gather-like cases of the form:
      //
      //   ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
      //
      // where the loads of "a", the loads of "b", and the subtractions can be
      // performed in parallel. It's likely that detecting this pattern in a
      // bottom-up phase will be simpler and less costly than building a
      // full-blown top-down phase beginning at the consecutive loads.
      Changed |= tryToVectorizeList(Bundle, R);
    }
  }
  return Changed;
}

bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Sort by type, base pointer and value operand. Value operands must be
  // compatible (have the same opcode and parent), otherwise it is definitely
  // not profitable to try to vectorize them.
  auto &&StoreSorter = [this](StoreInst *V, StoreInst *V2) {
    if (V->getValueOperand()->getType()->getTypeID() <
        V2->getValueOperand()->getType()->getTypeID())
      return true;
    if (V->getValueOperand()->getType()->getTypeID() >
        V2->getValueOperand()->getType()->getTypeID())
      return false;
    if (V->getPointerOperandType()->getTypeID() <
        V2->getPointerOperandType()->getTypeID())
      return true;
    if (V->getPointerOperandType()->getTypeID() >
        V2->getPointerOperandType()->getTypeID())
      return false;
    // UndefValues are compatible with all other values.
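    // For sorting purposes they compare equal to everything, i.e. neither
    // store is ordered before the other.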
    if (isa<UndefValue>(V->getValueOperand()) ||
        isa<UndefValue>(V2->getValueOperand()))
      return false;
    if (auto *I1 = dyn_cast<Instruction>(V->getValueOperand()))
      if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
        DomTreeNodeBase<llvm::BasicBlock> *NodeI1 =
            DT->getNode(I1->getParent());
        DomTreeNodeBase<llvm::BasicBlock> *NodeI2 =
            DT->getNode(I2->getParent());
        assert(NodeI1 && "Should only process reachable instructions");
        assert(NodeI2 && "Should only process reachable instructions");
        assert((NodeI1 == NodeI2) ==
                   (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
               "Different nodes should have different DFS numbers");
        if (NodeI1 != NodeI2)
          return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
        InstructionsState S = getSameOpcode({I1, I2}, *TLI);
        if (S.getOpcode())
          return false;
        return I1->getOpcode() < I2->getOpcode();
      }
    if (isa<Constant>(V->getValueOperand()) &&
        isa<Constant>(V2->getValueOperand()))
      return false;
    return V->getValueOperand()->getValueID() <
           V2->getValueOperand()->getValueID();
  };

  auto &&AreCompatibleStores = [this](StoreInst *V1, StoreInst *V2) {
    if (V1 == V2)
      return true;
    if (V1->getValueOperand()->getType() != V2->getValueOperand()->getType())
      return false;
    if (V1->getPointerOperandType() != V2->getPointerOperandType())
      return false;
    // Undefs are compatible with any other value.
    if (isa<UndefValue>(V1->getValueOperand()) ||
        isa<UndefValue>(V2->getValueOperand()))
      return true;
    if (auto *I1 = dyn_cast<Instruction>(V1->getValueOperand()))
      if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
        if (I1->getParent() != I2->getParent())
          return false;
        InstructionsState S = getSameOpcode({I1, I2}, *TLI);
        return S.getOpcode() > 0;
      }
    if (isa<Constant>(V1->getValueOperand()) &&
        isa<Constant>(V2->getValueOperand()))
      return true;
    return V1->getValueOperand()->getValueID() ==
           V2->getValueOperand()->getValueID();
  };

  // Attempt to sort and vectorize each of the store-groups.
  for (auto &Pair : Stores) {
    if (Pair.second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                      << Pair.second.size() << ".\n");

    if (!isValidElementType(Pair.second.front()->getValueOperand()->getType()))
      continue;

    // Reverse the stores to do bottom-to-top analysis. This is important when
    // the same addresses are stored to several times; in that case we need to
    // follow the order of the stores (reversed here to meet the memory
    // dependencies).
    SmallVector<StoreInst *> ReversedStores(Pair.second.rbegin(),
                                            Pair.second.rend());
    Changed |= tryToVectorizeSequence<StoreInst>(
        ReversedStores, StoreSorter, AreCompatibleStores,
        [this, &R](ArrayRef<StoreInst *> Candidates, bool) {
          return vectorizeStores(Candidates, R);
        },
        /*MaxVFOnly=*/false, R);
  }
  return Changed;
}
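
// Illustrative example for vectorizeStoreChains() above (assuming a 128-bit
// vector register): four stores to consecutive addresses whose value operands
// share the same opcode and parent block, e.g.
//
//   store i32 %a, ptr %p0
//   store i32 %b, ptr %p1
//   store i32 %c, ptr %p2
//   store i32 %d, ptr %p3
//
// are grouped together by StoreSorter/AreCompatibleStores, and
// vectorizeStores() may then replace them with a single `store <4 x i32>`.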