//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#ifdef EXPENSIVE_CHECKS
#include "llvm/IR/Verifier.h"
#endif
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;
using namespace slpvectorizer;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

static cl::opt<bool>
    RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden,
                        cl::desc("Run the SLP vectorization passes"));

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));

static cl::opt<bool>
    ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                       cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

// NOTE: If AllowHorRdxIdenityOptimization is true, the optimization will run
// even if we match a reduction but do not vectorize in the end.
static cl::opt<bool> AllowHorRdxIdenityOptimization(
    "slp-optimize-identity-hor-reduction-ops", cl::init(true), cl::Hidden,
    cl::desc("Allow optimization of original scalar identity operations on "
             "matched horizontal reductions."));

static cl::opt<int> MaxVectorRegSizeOption(
    "slp-max-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned>
    MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden,
                cl::desc("Maximum SLP vectorization factor (0=unlimited)"));

/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int> ScheduleRegionSizeBudget(
    "slp-schedule-budget", cl::init(100000), cl::Hidden,
    cl::desc("Limit the size of the SLP scheduling region per block"));

static cl::opt<int> MinVectorRegSizeOption(
    "slp-min-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned> RecursionMaxDepth(
    "slp-recursion-max-depth", cl::init(12), cl::Hidden,
    cl::desc("Limit the recursion depth when building a vectorizable tree"));

static cl::opt<unsigned> MinTreeSize(
    "slp-min-tree-size", cl::init(3), cl::Hidden,
    cl::desc("Only vectorize small trees if they are fully vectorizable"));

// The maximum depth that the look-ahead score heuristic will explore.
// The higher this value, the higher the compilation time overhead.
static cl::opt<int> LookAheadMaxDepth(
    "slp-max-look-ahead-depth", cl::init(2), cl::Hidden,
    cl::desc("The maximum look-ahead depth for operand reordering scores"));

// The maximum depth that the look-ahead score heuristic will explore
// when it is probing among candidates for vectorization tree roots.
// The higher this value, the higher the compilation time overhead, but, unlike
// the similar limit for operand reordering, this is used less frequently, so
// the impact of a higher value is less noticeable.
static cl::opt<int> RootLookAheadMaxDepth(
    "slp-max-root-look-ahead-depth", cl::init(2), cl::Hidden,
    cl::desc("The maximum look-ahead depth for searching best rooting option"));

static cl::opt<bool>
    ViewSLPTree("view-slp-tree", cl::Hidden,
                cl::desc("Display the SLP trees with Graphviz"));

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// Predicate for the element types that the SLP vectorizer supports.
///
/// The most important thing to filter here are types which are invalid in LLVM
/// vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns True if the value is a constant (but not globals/constant
/// expressions).
static bool isConstant(Value *V) {
  return isa<Constant>(V) && !isa<ConstantExpr, GlobalValue>(V);
}

/// Checks if \p V is one of the vector-like instructions, i.e. undef,
/// insertelement/extractelement with constant indices for a fixed vector type,
/// or an extractvalue instruction.
static bool isVectorLikeInstWithConstOps(Value *V) {
  if (!isa<InsertElementInst, ExtractElementInst>(V) &&
      !isa<ExtractValueInst, UndefValue>(V))
    return false;
  auto *I = dyn_cast<Instruction>(V);
  if (!I || isa<ExtractValueInst>(I))
    return true;
  if (!isa<FixedVectorType>(I->getOperand(0)->getType()))
    return false;
  if (isa<ExtractElementInst>(I))
    return isConstant(I->getOperand(1));
  assert(isa<InsertElementInst>(V) && "Expected only insertelement.");
  return isConstant(I->getOperand(2));
}

#if !defined(NDEBUG)
/// Print a short descriptor of the instruction bundle suitable for debug output.
static std::string shortBundleName(ArrayRef<Value *> VL) {
  std::string Result;
  raw_string_ostream OS(Result);
  OS << "n=" << VL.size() << " [" << *VL.front() << ", ..]";
  OS.flush();
  return Result;
}
#endif

/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  if (all_of(VL, isVectorLikeInstWithConstOps))
    return true;

  BasicBlock *BB = I0->getParent();
  for (int I = 1, E = VL.size(); I < E; I++) {
    auto *II = dyn_cast<Instruction>(VL[I]);
    if (!II)
      return false;

    if (BB != II->getParent())
      return false;
  }
  return true;
}

/// \returns True if all of the values in \p VL are constants (but not
/// globals/constant expressions).
static bool allConstant(ArrayRef<Value *> VL) {
  // Constant expressions and globals can't be vectorized like normal integer/FP
  // constants.
  return all_of(VL, isConstant);
}

/// \returns True if all of the values in \p VL are identical or some of them
/// are UndefValue.
static bool isSplat(ArrayRef<Value *> VL) {
  Value *FirstNonUndef = nullptr;
  for (Value *V : VL) {
    if (isa<UndefValue>(V))
      continue;
    if (!FirstNonUndef) {
      FirstNonUndef = V;
      continue;
    }
    if (V != FirstNonUndef)
      return false;
  }
  return FirstNonUndef != nullptr;
}

/// \returns True if \p I is commutative; handles CmpInst and BinaryOperator.
static bool isCommutative(Instruction *I) {
  if (auto *Cmp = dyn_cast<CmpInst>(I))
    return Cmp->isCommutative();
  if (auto *BO = dyn_cast<BinaryOperator>(I))
    return BO->isCommutative();
  // TODO: This should check for generic Instruction::isCommutative(), but
  // we need to confirm that the caller code correctly handles Intrinsics
  // for example (does not have 2 operands).
  return false;
}

/// \returns the insertion index of an InsertElement or InsertValue instruction,
/// using Offset as the base offset for the index.
static std::optional<unsigned> getInsertIndex(const Value *InsertInst,
                                              unsigned Offset = 0) {
  int Index = Offset;
  if (const auto *IE = dyn_cast<InsertElementInst>(InsertInst)) {
    const auto *VT = dyn_cast<FixedVectorType>(IE->getType());
    if (!VT)
      return std::nullopt;
    const auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2));
    if (!CI)
      return std::nullopt;
    if (CI->getValue().uge(VT->getNumElements()))
      return std::nullopt;
    Index *= VT->getNumElements();
    Index += CI->getZExtValue();
    return Index;
  }

  const auto *IV = cast<InsertValueInst>(InsertInst);
  Type *CurrentType = IV->getType();
  for (unsigned I : IV->indices()) {
    if (const auto *ST = dyn_cast<StructType>(CurrentType)) {
      Index *= ST->getNumElements();
      CurrentType = ST->getElementType(I);
    } else if (const auto *AT = dyn_cast<ArrayType>(CurrentType)) {
      Index *= AT->getNumElements();
      CurrentType = AT->getElementType();
    } else {
      return std::nullopt;
    }
    Index += I;
  }
  return Index;
}

namespace {
/// Specifies the way the mask should be analyzed for undefs/poisonous elements
/// in the shuffle mask.
enum class UseMask {
  FirstArg,    ///< The mask is expected to be for permutation of 1-2 vectors,
               ///< check for the mask elements for the first argument (mask
               ///< indices are in range [0:VF)).
  SecondArg,   ///< The mask is expected to be for permutation of 2 vectors,
               ///< check for the mask elements for the second argument (mask
               ///< indices are in range [VF:2*VF)).
  UndefsAsMask ///< Consider undef mask elements (-1) as placeholders for
               ///< future shuffle elements and mark them as ones, i.e. as being
               ///< used in the future. Non-undef elements are considered unused
               ///< since they're already marked as used in the mask.
};
} // namespace

/// Prepares a use bitset for the given mask, either for the first argument or
/// for the second.
static SmallBitVector buildUseMask(int VF, ArrayRef<int> Mask,
                                   UseMask MaskArg) {
  SmallBitVector UseMask(VF, true);
  for (auto [Idx, Value] : enumerate(Mask)) {
    if (Value == PoisonMaskElem) {
      if (MaskArg == UseMask::UndefsAsMask)
        UseMask.reset(Idx);
      continue;
    }
    if (MaskArg == UseMask::FirstArg && Value < VF)
      UseMask.reset(Value);
    else if (MaskArg == UseMask::SecondArg && Value >= VF)
      UseMask.reset(Value - VF);
  }
  return UseMask;
}

/// Checks if the given value is actually an undefined constant vector.
/// Also, if the \p UseMask is not empty, tries to check if the non-masked
/// elements actually mask the insertelement buildvector, if any.
template <bool IsPoisonOnly = false>
static SmallBitVector isUndefVector(const Value *V,
                                    const SmallBitVector &UseMask = {}) {
  SmallBitVector Res(UseMask.empty() ? 1 : UseMask.size(), true);
  using T = std::conditional_t<IsPoisonOnly, PoisonValue, UndefValue>;
  if (isa<T>(V))
    return Res;
  auto *VecTy = dyn_cast<FixedVectorType>(V->getType());
  if (!VecTy)
    return Res.reset();
  auto *C = dyn_cast<Constant>(V);
  if (!C) {
    if (!UseMask.empty()) {
      const Value *Base = V;
      while (auto *II = dyn_cast<InsertElementInst>(Base)) {
        Base = II->getOperand(0);
        if (isa<T>(II->getOperand(1)))
          continue;
        std::optional<unsigned> Idx = getInsertIndex(II);
        if (!Idx) {
          Res.reset();
          return Res;
        }
        if (*Idx < UseMask.size() && !UseMask.test(*Idx))
          Res.reset(*Idx);
      }
      // TODO: Add analysis for shuffles here too.
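      // If no insertelement was peeled off above (V == Base), nothing can be
      // proven undef, so clear the whole result. Otherwise intersect with the
      // analysis of the buildvector base, queried with an all-false submask so
      // that every lane of the base is inspected.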
      if (V == Base) {
        Res.reset();
      } else {
        SmallBitVector SubMask(UseMask.size(), false);
        Res &= isUndefVector<IsPoisonOnly>(Base, SubMask);
      }
    } else {
      Res.reset();
    }
    return Res;
  }
  for (unsigned I = 0, E = VecTy->getNumElements(); I != E; ++I) {
    if (Constant *Elem = C->getAggregateElement(I))
      if (!isa<T>(Elem) &&
          (UseMask.empty() || (I < UseMask.size() && !UseMask.test(I))))
        Res.reset(I);
  }
  return Res;
}

/// Checks if the vector of instructions can be represented as a shuffle, like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %x0x0 = mul i8 %x0, %x0
/// %x3x3 = mul i8 %x3, %x3
/// %y1y1 = mul i8 %y1, %y1
/// %y2y2 = mul i8 %y2, %y2
/// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0
/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
/// ret <4 x i8> %ins4
/// can be transformed into:
/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
///                                                         i32 6>
/// %2 = mul <4 x i8> %1, %1
/// ret <4 x i8> %2
/// Mask will return the Shuffle Mask equivalent to the extracted elements.
/// TODO: Can we split off and reuse the shuffle mask detection from
/// ShuffleVectorInst/getShuffleCost?
static std::optional<TargetTransformInfo::ShuffleKind>
isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
  const auto *It =
      find_if(VL, [](Value *V) { return isa<ExtractElementInst>(V); });
  if (It == VL.end())
    return std::nullopt;
  auto *EI0 = cast<ExtractElementInst>(*It);
  if (isa<ScalableVectorType>(EI0->getVectorOperandType()))
    return std::nullopt;
  unsigned Size =
      cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements();
  Value *Vec1 = nullptr;
  Value *Vec2 = nullptr;
  enum ShuffleMode { Unknown, Select, Permute };
  ShuffleMode CommonShuffleMode = Unknown;
  Mask.assign(VL.size(), PoisonMaskElem);
  for (unsigned I = 0, E = VL.size(); I < E; ++I) {
    // Undef can be represented as an undef element in a vector.
    if (isa<UndefValue>(VL[I]))
      continue;
    auto *EI = cast<ExtractElementInst>(VL[I]);
    if (isa<ScalableVectorType>(EI->getVectorOperandType()))
      return std::nullopt;
    auto *Vec = EI->getVectorOperand();
    // We can extractelement from undef or poison vector.
    if (isUndefVector(Vec).all())
      continue;
    // All vector operands must have the same number of vector elements.
    if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size)
      return std::nullopt;
    if (isa<UndefValue>(EI->getIndexOperand()))
      continue;
    auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
    if (!Idx)
      return std::nullopt;
    // Undefined behavior if Idx is negative or >= Size.
    if (Idx->getValue().uge(Size))
      continue;
    unsigned IntIdx = Idx->getValue().getZExtValue();
    Mask[I] = IntIdx;
    // For correct shuffling we have to have at most 2 different vector operands
    // in all extractelement instructions.
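    // Track the (at most) two distinct source vectors. Extracts from the
    // second source get Size added to their mask index, so the final mask
    // addresses the concatenation of Vec1 and Vec2, matching shufflevector
    // semantics.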
    if (!Vec1 || Vec1 == Vec) {
      Vec1 = Vec;
    } else if (!Vec2 || Vec2 == Vec) {
      Vec2 = Vec;
      Mask[I] += Size;
    } else {
      return std::nullopt;
    }
    if (CommonShuffleMode == Permute)
      continue;
    // If the extract index is not the same as the operation number, it is a
    // permutation.
    if (IntIdx != I) {
      CommonShuffleMode = Permute;
      continue;
    }
    CommonShuffleMode = Select;
  }
  // If we're not crossing lanes in different vectors, consider it as blending.
  if (CommonShuffleMode == Select && Vec2)
    return TargetTransformInfo::SK_Select;
  // If Vec2 was never used, we have a permutation of a single vector; otherwise
  // we have a permutation of 2 vectors.
  return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
              : TargetTransformInfo::SK_PermuteSingleSrc;
}

/// \returns True if Extract{Value,Element} instruction extracts element Idx.
static std::optional<unsigned> getExtractIndex(Instruction *E) {
  unsigned Opcode = E->getOpcode();
  assert((Opcode == Instruction::ExtractElement ||
          Opcode == Instruction::ExtractValue) &&
         "Expected extractelement or extractvalue instruction.");
  if (Opcode == Instruction::ExtractElement) {
    auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    if (!CI)
      return std::nullopt;
    return CI->getZExtValue();
  }
  auto *EI = cast<ExtractValueInst>(E);
  if (EI->getNumIndices() != 1)
    return std::nullopt;
  return *EI->idx_begin();
}

namespace {

/// Main data required for vectorization of instructions.
struct InstructionsState {
  /// The very first instruction in the list with the main opcode.
  Value *OpValue = nullptr;

  /// The main/alternate instruction.
  Instruction *MainOp = nullptr;
  Instruction *AltOp = nullptr;

  /// The main/alternate opcodes for the list of instructions.
  unsigned getOpcode() const {
    return MainOp ? MainOp->getOpcode() : 0;
  }

  unsigned getAltOpcode() const {
    return AltOp ? AltOp->getOpcode() : 0;
  }

  /// Some of the instructions in the list have alternate opcodes.
  bool isAltShuffle() const { return AltOp != MainOp; }

  bool isOpcodeOrAlt(Instruction *I) const {
    unsigned CheckedOpcode = I->getOpcode();
    return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
  }

  InstructionsState() = delete;
  InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
      : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
};

} // end anonymous namespace

/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
/// OpValue.
static Value *isOneOf(const InstructionsState &S, Value *Op) {
  auto *I = dyn_cast<Instruction>(Op);
  if (I && S.isOpcodeOrAlt(I))
    return Op;
  return S.OpValue;
}

/// \returns true if \p Opcode is allowed as part of the main/alternate
/// instruction for SLP vectorization.
///
/// An example of an unsupported opcode is SDIV, which can potentially cause UB
/// if the "shuffled out" lane would result in division by zero.
static bool isValidForAlternation(unsigned Opcode) {
  if (Instruction::isIntDivRem(Opcode))
    return false;

  return true;
}

static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       const TargetLibraryInfo &TLI,
                                       unsigned BaseIndex = 0);

/// Checks if the provided operands of 2 cmp instructions are compatible, i.e.
/// compatible instructions or constants, or just some other regular values.
static bool areCompatibleCmpOps(Value *BaseOp0, Value *BaseOp1, Value *Op0,
                                Value *Op1, const TargetLibraryInfo &TLI) {
  return (isConstant(BaseOp0) && isConstant(Op0)) ||
         (isConstant(BaseOp1) && isConstant(Op1)) ||
         (!isa<Instruction>(BaseOp0) && !isa<Instruction>(Op0) &&
          !isa<Instruction>(BaseOp1) && !isa<Instruction>(Op1)) ||
         BaseOp0 == Op0 || BaseOp1 == Op1 ||
         getSameOpcode({BaseOp0, Op0}, TLI).getOpcode() ||
         getSameOpcode({BaseOp1, Op1}, TLI).getOpcode();
}

/// \returns true if a compare instruction \p CI has similar "look" and
/// same predicate as \p BaseCI, "as is" or with its operands and predicate
/// swapped, false otherwise.
static bool isCmpSameOrSwapped(const CmpInst *BaseCI, const CmpInst *CI,
                               const TargetLibraryInfo &TLI) {
  assert(BaseCI->getOperand(0)->getType() == CI->getOperand(0)->getType() &&
         "Assessing comparisons of different types?");
  CmpInst::Predicate BasePred = BaseCI->getPredicate();
  CmpInst::Predicate Pred = CI->getPredicate();
  CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(Pred);

  Value *BaseOp0 = BaseCI->getOperand(0);
  Value *BaseOp1 = BaseCI->getOperand(1);
  Value *Op0 = CI->getOperand(0);
  Value *Op1 = CI->getOperand(1);

  return (BasePred == Pred &&
          areCompatibleCmpOps(BaseOp0, BaseOp1, Op0, Op1, TLI)) ||
         (BasePred == SwappedPred &&
          areCompatibleCmpOps(BaseOp0, BaseOp1, Op1, Op0, TLI));
}

/// \returns an analysis of the instructions in \p VL, described as an
/// InstructionsState: the opcode with which we suppose the whole list could be
/// vectorized, even if its structure is diverse.
static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       const TargetLibraryInfo &TLI,
                                       unsigned BaseIndex) {
  // Make sure these are all Instructions.
  if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);

  bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
  bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
  bool IsCmpOp = isa<CmpInst>(VL[BaseIndex]);
  CmpInst::Predicate BasePred =
      IsCmpOp ? cast<CmpInst>(VL[BaseIndex])->getPredicate()
              : CmpInst::BAD_ICMP_PREDICATE;
  unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
  unsigned AltOpcode = Opcode;
  unsigned AltIndex = BaseIndex;

  // Check for one alternate opcode from another BinaryOperator.
  // TODO - generalize to support all operators (types, calls etc.).
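  // For calls, remember the base call's vector intrinsic ID and its
  // vector-function ABI mappings; the other calls in the bundle are later
  // checked against them for compatibility.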
  auto *IBase = cast<Instruction>(VL[BaseIndex]);
  Intrinsic::ID BaseID = 0;
  SmallVector<VFInfo> BaseMappings;
  if (auto *CallBase = dyn_cast<CallInst>(IBase)) {
    BaseID = getVectorIntrinsicIDForCall(CallBase, &TLI);
    BaseMappings = VFDatabase(*CallBase).getMappings(*CallBase);
    if (!isTriviallyVectorizable(BaseID) && BaseMappings.empty())
      return InstructionsState(VL[BaseIndex], nullptr, nullptr);
  }
  for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
    auto *I = cast<Instruction>(VL[Cnt]);
    unsigned InstOpcode = I->getOpcode();
    if (IsBinOp && isa<BinaryOperator>(I)) {
      if (InstOpcode == Opcode || InstOpcode == AltOpcode)
        continue;
      if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) &&
          isValidForAlternation(Opcode)) {
        AltOpcode = InstOpcode;
        AltIndex = Cnt;
        continue;
      }
    } else if (IsCastOp && isa<CastInst>(I)) {
      Value *Op0 = IBase->getOperand(0);
      Type *Ty0 = Op0->getType();
      Value *Op1 = I->getOperand(0);
      Type *Ty1 = Op1->getType();
      if (Ty0 == Ty1) {
        if (InstOpcode == Opcode || InstOpcode == AltOpcode)
          continue;
        if (Opcode == AltOpcode) {
          assert(isValidForAlternation(Opcode) &&
                 isValidForAlternation(InstOpcode) &&
                 "Cast isn't safe for alternation, logic needs to be updated!");
          AltOpcode = InstOpcode;
          AltIndex = Cnt;
          continue;
        }
      }
    } else if (auto *Inst = dyn_cast<CmpInst>(VL[Cnt]); Inst && IsCmpOp) {
      auto *BaseInst = cast<CmpInst>(VL[BaseIndex]);
      Type *Ty0 = BaseInst->getOperand(0)->getType();
      Type *Ty1 = Inst->getOperand(0)->getType();
      if (Ty0 == Ty1) {
        assert(InstOpcode == Opcode && "Expected same CmpInst opcode.");
        // Check for compatible operands. If the corresponding operands are not
        // compatible - need to perform alternate vectorization.
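        // A compare is compatible if its predicate matches the base (or the
        // alternate) predicate directly or as the swapped predicate; for a
        // two-element bundle the predicate check alone is sufficient.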
        CmpInst::Predicate CurrentPred = Inst->getPredicate();
        CmpInst::Predicate SwappedCurrentPred =
            CmpInst::getSwappedPredicate(CurrentPred);

        if (E == 2 &&
            (BasePred == CurrentPred || BasePred == SwappedCurrentPred))
          continue;

        if (isCmpSameOrSwapped(BaseInst, Inst, TLI))
          continue;
        auto *AltInst = cast<CmpInst>(VL[AltIndex]);
        if (AltIndex != BaseIndex) {
          if (isCmpSameOrSwapped(AltInst, Inst, TLI))
            continue;
        } else if (BasePred != CurrentPred) {
          assert(
              isValidForAlternation(InstOpcode) &&
              "CmpInst isn't safe for alternation, logic needs to be updated!");
          AltIndex = Cnt;
          continue;
        }
        CmpInst::Predicate AltPred = AltInst->getPredicate();
        if (BasePred == CurrentPred || BasePred == SwappedCurrentPred ||
            AltPred == CurrentPred || AltPred == SwappedCurrentPred)
          continue;
      }
    } else if (InstOpcode == Opcode || InstOpcode == AltOpcode) {
      if (auto *Gep = dyn_cast<GetElementPtrInst>(I)) {
        if (Gep->getNumOperands() != 2 ||
            Gep->getOperand(0)->getType() != IBase->getOperand(0)->getType())
          return InstructionsState(VL[BaseIndex], nullptr, nullptr);
      } else if (auto *EI = dyn_cast<ExtractElementInst>(I)) {
        if (!isVectorLikeInstWithConstOps(EI))
          return InstructionsState(VL[BaseIndex], nullptr, nullptr);
      } else if (auto *LI = dyn_cast<LoadInst>(I)) {
        auto *BaseLI = cast<LoadInst>(IBase);
        if (!LI->isSimple() || !BaseLI->isSimple())
          return InstructionsState(VL[BaseIndex], nullptr, nullptr);
      } else if (auto *Call = dyn_cast<CallInst>(I)) {
        auto *CallBase = cast<CallInst>(IBase);
        if (Call->getCalledFunction() != CallBase->getCalledFunction())
          return InstructionsState(VL[BaseIndex], nullptr, nullptr);
        if (Call->hasOperandBundles() &&
            !std::equal(Call->op_begin() + Call->getBundleOperandsStartIndex(),
                        Call->op_begin() + Call->getBundleOperandsEndIndex(),
                        CallBase->op_begin() +
                            CallBase->getBundleOperandsStartIndex()))
          return InstructionsState(VL[BaseIndex], nullptr, nullptr);
        Intrinsic::ID ID = getVectorIntrinsicIDForCall(Call, &TLI);
        if (ID != BaseID)
          return InstructionsState(VL[BaseIndex], nullptr, nullptr);
        if (!ID) {
          SmallVector<VFInfo> Mappings = VFDatabase(*Call).getMappings(*Call);
          if (Mappings.size() != BaseMappings.size() ||
              Mappings.front().ISA != BaseMappings.front().ISA ||
              Mappings.front().ScalarName != BaseMappings.front().ScalarName ||
              Mappings.front().VectorName != BaseMappings.front().VectorName ||
              Mappings.front().Shape.VF != BaseMappings.front().Shape.VF ||
              Mappings.front().Shape.Parameters !=
                  BaseMappings.front().Shape.Parameters)
            return InstructionsState(VL[BaseIndex], nullptr, nullptr);
        }
      }
      continue;
    }
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);
  }

  return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
                           cast<Instruction>(VL[AltIndex]));
}

/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL.front()->getType();
  return all_of(VL.drop_front(), [&](Value *V) { return V->getType() == Ty; });
}

/// \returns True if an in-tree use also needs an extract. This refers to a
/// possible scalar operand in a vectorized instruction.
static bool doesInTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                        TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    return any_of(enumerate(CI->args()), [&](auto &&Arg) {
      return isVectorIntrinsicWithScalarOpAtArg(ID, Arg.index()) &&
             Arg.value().get() == Scalar;
    });
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

/// Shuffles \p Mask in accordance with the given \p SubMask.
/// \param ExtendingManyInputs Supports reshuffling of the mask with not only
/// one but two input vectors.
static void addMask(SmallVectorImpl<int> &Mask, ArrayRef<int> SubMask,
                    bool ExtendingManyInputs = false) {
  if (SubMask.empty())
    return;
  assert(
      (!ExtendingManyInputs || SubMask.size() > Mask.size() ||
       // Check if input scalars were extended to match the size of other node.
       (SubMask.size() == Mask.size() &&
        std::all_of(std::next(Mask.begin(), Mask.size() / 2), Mask.end(),
                    [](int Idx) { return Idx == PoisonMaskElem; }))) &&
      "SubMask with many inputs support must be larger than the mask.");
  if (Mask.empty()) {
    Mask.append(SubMask.begin(), SubMask.end());
    return;
  }
  SmallVector<int> NewMask(SubMask.size(), PoisonMaskElem);
  int TermValue = std::min(Mask.size(), SubMask.size());
  for (int I = 0, E = SubMask.size(); I < E; ++I) {
    if (SubMask[I] == PoisonMaskElem ||
        (!ExtendingManyInputs &&
         (SubMask[I] >= TermValue || Mask[SubMask[I]] >= TermValue)))
      continue;
    NewMask[I] = Mask[SubMask[I]];
  }
  Mask.swap(NewMask);
}

/// Order may have elements assigned a special value (size) which is out of
/// bounds. Such indices only appear in places which correspond to undef values
/// (see canReuseExtract for details) and are used to avoid undef values having
/// an effect on operand ordering.
/// The first loop below simply finds all unused indices and then the next loop
/// nest assigns these indices to the positions of undef values.
/// As an example, below Order has two undef positions, and they are assigned
/// the values 3 and 7 respectively:
/// before:  6 9 5 4 9 2 1 0
/// after:   6 3 5 4 7 2 1 0
static void fixupOrderingIndices(SmallVectorImpl<unsigned> &Order) {
  const unsigned Sz = Order.size();
  SmallBitVector UnusedIndices(Sz, /*t=*/true);
  SmallBitVector MaskedIndices(Sz);
  for (unsigned I = 0; I < Sz; ++I) {
    if (Order[I] < Sz)
      UnusedIndices.reset(Order[I]);
    else
      MaskedIndices.set(I);
  }
  if (MaskedIndices.none())
    return;
  assert(UnusedIndices.count() == MaskedIndices.count() &&
         "Non-synced masked/available indices.");
  int Idx = UnusedIndices.find_first();
  int MIdx = MaskedIndices.find_first();
  while (MIdx >= 0) {
    assert(Idx >= 0 && "Indices must be synced.");
    Order[MIdx] = Idx;
    Idx = UnusedIndices.find_next(Idx);
    MIdx = MaskedIndices.find_next(MIdx);
  }
}

namespace llvm {

static void inversePermutation(ArrayRef<unsigned> Indices,
                               SmallVectorImpl<int> &Mask) {
  Mask.clear();
  const unsigned E = Indices.size();
  Mask.resize(E, PoisonMaskElem);
  for (unsigned I = 0; I < E; ++I)
    Mask[Indices[I]] = I;
}

/// Reorders the list of scalars in accordance with the given \p Mask.
static void reorderScalars(SmallVectorImpl<Value *> &Scalars,
                           ArrayRef<int> Mask) {
  assert(!Mask.empty() && "Expected non-empty mask.");
  SmallVector<Value *> Prev(Scalars.size(),
                            UndefValue::get(Scalars.front()->getType()));
  Prev.swap(Scalars);
  for (unsigned I = 0, E = Prev.size(); I < E; ++I)
    if (Mask[I] != PoisonMaskElem)
      Scalars[Mask[I]] = Prev[I];
}

/// Checks if the provided value does not require scheduling. It does not
/// require scheduling if this is not an instruction or it is an instruction
/// that does not read/write memory and all of its operands are either not
/// instructions, or are phi nodes, or are instructions from different blocks.
static bool areAllOperandsNonInsts(Value *V) {
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return true;
  return !mayHaveNonDefUseDependency(*I) &&
         all_of(I->operands(), [I](Value *V) {
           auto *IO = dyn_cast<Instruction>(V);
           if (!IO)
             return true;
           return isa<PHINode>(IO) || IO->getParent() != I->getParent();
         });
}

/// Checks if the provided value does not require scheduling. It does not
/// require scheduling if this is not an instruction or it is an instruction
/// that does not read/write memory and all of its users are phi nodes or
/// instructions from different blocks.
static bool isUsedOutsideBlock(Value *V) {
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return true;
  // Limits the number of uses to save compile time.
  constexpr int UsesLimit = 8;
  return !I->mayReadOrWriteMemory() && !I->hasNUsesOrMore(UsesLimit) &&
         all_of(I->users(), [I](User *U) {
           auto *IU = dyn_cast<Instruction>(U);
           if (!IU)
             return true;
           return IU->getParent() != I->getParent() || isa<PHINode>(IU);
         });
}

/// Checks if the specified value does not require scheduling. It does not
/// require scheduling if all operands and all users do not need to be scheduled
/// in the current basic block.
static bool doesNotNeedToBeScheduled(Value *V) {
  return areAllOperandsNonInsts(V) && isUsedOutsideBlock(V);
}

/// Checks if the specified array of instructions does not require scheduling.
/// This is the case if, for every instruction, either its operands do not
/// require scheduling or its users do not require scheduling (because they are
/// phis or live in other basic blocks).
static bool doesNotNeedToSchedule(ArrayRef<Value *> VL) {
  return !VL.empty() &&
         (all_of(VL, isUsedOutsideBlock) || all_of(VL, areAllOperandsNonInsts));
}

namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
  struct TreeEntry;
  struct ScheduleData;
  class ShuffleCostEstimator;
  class ShuffleInstructionBuilder;

public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;
  using OrdersType = SmallVector<unsigned, 4>;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : BatchAA(*Aa), F(Func), SE(Se), TTI(Tti), TLI(TLi), LI(Li),
        DT(Dt), AC(AC), DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    // data type rather than just register size. For example, x86 AVX has
    // 256-bit registers, but it does not support integer operations
    // at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize =
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedValue();

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractelement instructions.
  /// \param ReplacedExternals contains the list of replaced external values
  /// {scalar, replace} after emitting extractelement for external uses.
  Value *
  vectorizeTree(const ExtraValueToDebugLocsMap &ExternallyUsedValues,
                SmallVectorImpl<std::pair<Value *, Value *>> &ReplacedExternals,
                Instruction *ReductionRoot = nullptr);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  InstructionCost getSpillCost() const;

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  InstructionCost getTreeCost(ArrayRef<Value *> VectorizedVals = std::nullopt);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 const SmallDenseSet<Value *> &UserIgnoreLst);

  /// Construct a vectorizable tree that starts at \p Roots.
  void buildTree(ArrayRef<Value *> Roots);

  /// Returns whether the root node has in-tree uses.
  bool doesRootHaveInTreeUses() const {
    return !VectorizableTree.empty() &&
           !VectorizableTree.front()->UserTreeIndices.empty();
  }

  /// Return the scalars of the root node.
  ArrayRef<Value *> getRootNodeScalars() const {
    assert(!VectorizableTree.empty() && "No graph to get the first node from");
    return VectorizableTree.front()->Scalars;
  }

  /// Builds external uses of the vectorized scalars, i.e. the list of
  /// vectorized scalars to be extracted, their lanes and their scalar users. \p
  /// ExternallyUsedValues contains an additional list of external uses to
  /// handle vectorization of reductions.
  void
  buildExternalUses(const ExtraValueToDebugLocsMap &ExternallyUsedValues = {});

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MultiNodeScalars.clear();
    MustGather.clear();
    EntryToLastInstruction.clear();
    ExternalUses.clear();
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
    InstrElementSize.clear();
    UserIgnoreList = nullptr;
    PostponedGathers.clear();
    ValueToGatherNodes.clear();
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }

  /// Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// Checks if the specified gather tree entry \p TE can be represented as a
  /// shuffled vector entry + (possibly) permutation with other gathers. It
  /// implements the checks only for possibly ordered scalars (Loads,
  /// ExtractElement, ExtractValue), which can be part of the graph.
  std::optional<OrdersType> findReusedOrderedScalars(const TreeEntry &TE);

  /// Sort loads into increasing pointer offsets to allow greater clustering.
  std::optional<OrdersType> findPartiallyOrderedLoads(const TreeEntry &TE);

  /// Gets reordering data for the given tree entry. If the entry is vectorized
  /// - just return ReorderIndices, otherwise check if the scalars can be
  /// reordered and return the most optimal order.
  /// \return std::nullopt if ordering is not important, empty order, if
  /// identity order is important, or the actual order.
  /// \param TopToBottom If true, include the order of vectorized stores and
  /// insertelement nodes, otherwise skip them.
  std::optional<OrdersType> getReorderingData(const TreeEntry &TE,
                                              bool TopToBottom);

  /// Reorders the current graph to the most profitable order starting from the
  /// root node to the leaf nodes. The best order is chosen only from the nodes
  /// of the same size (vectorization factor). Smaller nodes are considered
  /// parts of a subgraph with a smaller VF and they are reordered
  /// independently. We can do this because we still need to extend smaller
  /// nodes to the wider VF and we can merge the reordering shuffles with the
  /// widening shuffles.
  void reorderTopToBottom();

  /// Reorders the current graph to the most profitable order starting from the
  /// leaves to the root.
  /// It allows rotating small subgraphs and reducing the number of reshuffles
  /// if the leaf nodes use the same order. In this case we can merge the
  /// orders and just shuffle the user node instead of shuffling its operands.
  /// Plus, even if the leaf nodes have different orders, it allows sinking the
  /// reordering in the graph closer to the root node and merging it later
  /// during analysis.
  void reorderBottomToTop(bool IgnoreReorder = false);

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  unsigned getMinVF(unsigned Sz) const {
    return std::max(2U, getMinVecRegSize() / Sz);
  }

  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
    unsigned MaxVF = MaxVFOption.getNumOccurrences() ?
      MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode);
    return MaxVF ? MaxVF : UINT_MAX;
  }

  /// Check if a homogeneous aggregate is isomorphic to some VectorType.
  /// Accepts a homogeneous multidimensional aggregate of scalars/vectors like
  /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> },
  /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable(bool ForReduction = false) const;

  /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  /// may not be necessary.
  bool isLoadCombineReductionCandidate(RecurKind RdxKind) const;

  /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  /// may not be necessary.
  bool isLoadCombineCandidate() const;

  OptimizationRemarkEmitter *getORE() { return ORE; }

  /// This structure holds any data we need about the edges being traversed
  /// during buildTree_rec(). We keep track of:
  /// (i) the user TreeEntry index, and
  /// (ii) the index of the edge.
  struct EdgeInfo {
    EdgeInfo() = default;
    EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
        : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
    /// The user TreeEntry.
    TreeEntry *UserTE = nullptr;
    /// The operand index of the use.
    unsigned EdgeIdx = UINT_MAX;
#ifndef NDEBUG
    friend inline raw_ostream &operator<<(raw_ostream &OS,
                                          const BoUpSLP::EdgeInfo &EI) {
      EI.dump(OS);
      return OS;
    }
    /// Debug print.
    void dump(raw_ostream &OS) const {
      OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
         << " EdgeIdx:" << EdgeIdx << "}";
    }
    LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
#endif
    bool operator == (const EdgeInfo &Other) const {
      return UserTE == Other.UserTE && EdgeIdx == Other.EdgeIdx;
    }
  };

  /// A helper class used for scoring candidates for two consecutive lanes.
  class LookAheadHeuristics {
    const TargetLibraryInfo &TLI;
    const DataLayout &DL;
    ScalarEvolution &SE;
    const BoUpSLP &R;
    int NumLanes; // Total number of lanes (aka vectorization factor).
    int MaxLevel; // The maximum recursion depth for accumulating score.

  public:
    LookAheadHeuristics(const TargetLibraryInfo &TLI, const DataLayout &DL,
                        ScalarEvolution &SE, const BoUpSLP &R, int NumLanes,
                        int MaxLevel)
        : TLI(TLI), DL(DL), SE(SE), R(R), NumLanes(NumLanes),
          MaxLevel(MaxLevel) {}

    // The hard-coded scores listed here are not very important, though they
    // should be higher for better matches to improve the resulting cost. When
    // computing the scores of matching one sub-tree with another, we are
    // basically counting the number of values that are matching. So even if all
    // scores are set to 1, we would still get a decent matching result.
    // However, sometimes we have to break ties. For example we may have to
    // choose between matching loads vs matching opcodes. This is what these
    // scores are helping us with: they provide the order of preference. Also,
    // this is important if the scalar is externally used or used in another
    // tree entry node in a different lane.

    /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
    static const int ScoreConsecutiveLoads = 4;
    /// The same load multiple times. This should have a better score than
    /// `ScoreSplat` because on x86, for a 2-lane vector, we can represent it
    /// with `movddup (%reg), xmm0`, which has a throughput of 0.5 versus 0.5
    /// for a vector load and 1.0 for a broadcast.
    static const int ScoreSplatLoads = 3;
    /// Loads from reversed memory addresses, e.g. load(A[i+1]), load(A[i]).
    static const int ScoreReversedLoads = 3;
    /// A load candidate for masked gather.
    static const int ScoreMaskedGatherCandidate = 1;
    /// ExtractElementInst from same vector and consecutive indexes.
    static const int ScoreConsecutiveExtracts = 4;
    /// ExtractElementInst from same vector and reversed indices.
    static const int ScoreReversedExtracts = 3;
    /// Constants.
    static const int ScoreConstants = 2;
    /// Instructions with the same opcode.
    static const int ScoreSameOpcode = 2;
    /// Instructions with alt opcodes (e.g., add + sub).
    static const int ScoreAltOpcodes = 1;
    /// Identical instructions (a.k.a. splat or broadcast).
    static const int ScoreSplat = 1;
    /// Matching with an undef is preferable to failing.
    static const int ScoreUndef = 1;
    /// Score for failing to find a decent match.
    static const int ScoreFail = 0;
    /// Score if all users are vectorized.
    static const int ScoreAllUserVectorized = 1;

    /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
    /// \p U1 and \p U2 are the users of \p V1 and \p V2.
    /// Also, checks if \p V1 and \p V2 are compatible with the instructions in
    /// \p MainAltOps.
    int getShallowScore(Value *V1, Value *V2, Instruction *U1, Instruction *U2,
                        ArrayRef<Value *> MainAltOps) const {
      if (!isValidElementType(V1->getType()) ||
          !isValidElementType(V2->getType()))
        return LookAheadHeuristics::ScoreFail;

      if (V1 == V2) {
        if (isa<LoadInst>(V1)) {
          // Returns true if the users of V1 and V2 won't need to be extracted.
          auto AllUsersAreInternal = [U1, U2, this](Value *V1, Value *V2) {
            // Bail out if we have too many uses to save compilation time.
            static constexpr unsigned Limit = 8;
            if (V1->hasNUsesOrMore(Limit) || V2->hasNUsesOrMore(Limit))
              return false;

            auto AllUsersVectorized = [U1, U2, this](Value *V) {
              return llvm::all_of(V->users(), [U1, U2, this](Value *U) {
                return U == U1 || U == U2 || R.getTreeEntry(U) != nullptr;
              });
            };
            return AllUsersVectorized(V1) && AllUsersVectorized(V2);
          };
          // A broadcast of a load can be cheaper on some targets.
          if (R.TTI->isLegalBroadcastLoad(V1->getType(),
                                          ElementCount::getFixed(NumLanes)) &&
              ((int)V1->getNumUses() == NumLanes ||
               AllUsersAreInternal(V1, V2)))
            return LookAheadHeuristics::ScoreSplatLoads;
        }
        return LookAheadHeuristics::ScoreSplat;
      }

      auto *LI1 = dyn_cast<LoadInst>(V1);
      auto *LI2 = dyn_cast<LoadInst>(V2);
      if (LI1 && LI2) {
        if (LI1->getParent() != LI2->getParent() || !LI1->isSimple() ||
            !LI2->isSimple())
          return LookAheadHeuristics::ScoreFail;

        std::optional<int> Dist = getPointersDiff(
            LI1->getType(), LI1->getPointerOperand(), LI2->getType(),
            LI2->getPointerOperand(), DL, SE, /*StrictCheck=*/true);
        if (!Dist || *Dist == 0) {
          if (getUnderlyingObject(LI1->getPointerOperand()) ==
                  getUnderlyingObject(LI2->getPointerOperand()) &&
              R.TTI->isLegalMaskedGather(
                  FixedVectorType::get(LI1->getType(), NumLanes),
                  LI1->getAlign()))
            return LookAheadHeuristics::ScoreMaskedGatherCandidate;
          return LookAheadHeuristics::ScoreFail;
        }
        // The distance is too large - still may be profitable to use masked
        // loads/gathers.
        if (std::abs(*Dist) > NumLanes / 2)
          return LookAheadHeuristics::ScoreMaskedGatherCandidate;
        // This still will detect consecutive loads, but we might have "holes"
        // in some cases. It is ok for non-power-2 vectorization and may produce
        // better results. It should not affect current vectorization.
        return (*Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveLoads
                           : LookAheadHeuristics::ScoreReversedLoads;
      }

      auto *C1 = dyn_cast<Constant>(V1);
      auto *C2 = dyn_cast<Constant>(V2);
      if (C1 && C2)
        return LookAheadHeuristics::ScoreConstants;

      // Extracts from consecutive indexes of the same vector get a better score
      // as the extracts could be optimized away.
      Value *EV1;
      ConstantInt *Ex1Idx;
      if (match(V1, m_ExtractElt(m_Value(EV1), m_ConstantInt(Ex1Idx)))) {
        // Undefs are always profitable for extractelements.
        // Compiler can easily combine poison and extractelement <non-poison> or
        // undef and extractelement <poison>. But combining undef +
        // extractelement <non-poison-but-may-produce-poison> requires some
        // extra operations.
        if (isa<UndefValue>(V2))
          return (isa<PoisonValue>(V2) || isUndefVector(EV1).all())
                     ? LookAheadHeuristics::ScoreConsecutiveExtracts
                     : LookAheadHeuristics::ScoreSameOpcode;
        Value *EV2 = nullptr;
        ConstantInt *Ex2Idx = nullptr;
        if (match(V2,
                  m_ExtractElt(m_Value(EV2), m_CombineOr(m_ConstantInt(Ex2Idx),
                                                         m_Undef())))) {
          // Undefs are always profitable for extractelements.
          if (!Ex2Idx)
            return LookAheadHeuristics::ScoreConsecutiveExtracts;
          if (isUndefVector(EV2).all() && EV2->getType() == EV1->getType())
            return LookAheadHeuristics::ScoreConsecutiveExtracts;
          if (EV2 == EV1) {
            int Idx1 = Ex1Idx->getZExtValue();
            int Idx2 = Ex2Idx->getZExtValue();
            int Dist = Idx2 - Idx1;
            // The distance is too large - still may be profitable to use
            // shuffles.
            if (std::abs(Dist) == 0)
              return LookAheadHeuristics::ScoreSplat;
            if (std::abs(Dist) > NumLanes / 2)
              return LookAheadHeuristics::ScoreSameOpcode;
            return (Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveExtracts
                              : LookAheadHeuristics::ScoreReversedExtracts;
          }
          return LookAheadHeuristics::ScoreAltOpcodes;
        }
        return LookAheadHeuristics::ScoreFail;
      }

      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (I1 && I2) {
        if (I1->getParent() != I2->getParent())
          return LookAheadHeuristics::ScoreFail;
        SmallVector<Value *, 4> Ops(MainAltOps.begin(), MainAltOps.end());
        Ops.push_back(I1);
        Ops.push_back(I2);
        InstructionsState S = getSameOpcode(Ops, TLI);
        // Note: Only consider instructions with <= 2 operands to avoid
        // complexity explosion.
        if (S.getOpcode() &&
            (S.MainOp->getNumOperands() <= 2 || !MainAltOps.empty() ||
             !S.isAltShuffle()) &&
            all_of(Ops, [&S](Value *V) {
              return cast<Instruction>(V)->getNumOperands() ==
                     S.MainOp->getNumOperands();
            }))
          return S.isAltShuffle() ? LookAheadHeuristics::ScoreAltOpcodes
                                  : LookAheadHeuristics::ScoreSameOpcode;
      }

      if (isa<UndefValue>(V2))
        return LookAheadHeuristics::ScoreUndef;

      return LookAheadHeuristics::ScoreFail;
    }

    /// Go through the operands of \p LHS and \p RHS recursively until
    /// MaxLevel, and return the cumulative score. \p U1 and \p U2 are
    /// the users of \p LHS and \p RHS (that is, \p LHS and \p RHS are operands
    /// of \p U1 and \p U2), except at the beginning of the recursion where
    /// these are set to nullptr.
1409 /// 1410 /// For example: 1411 /// \verbatim 1412 /// A[0] B[0] A[1] B[1] C[0] D[0] B[1] A[1] 1413 /// \ / \ / \ / \ / 1414 /// + + + + 1415 /// G1 G2 G3 G4 1416 /// \endverbatim 1417 /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at 1418 /// each level recursively, accumulating the score. It starts from matching 1419 /// the additions at level 0, then moves on to the loads (level 1). The 1420 /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and 1421 /// {B[0],B[1]} match with LookAheadHeuristics::ScoreConsecutiveLoads, while 1422 /// {A[0],C[0]} has a score of LookAheadHeuristics::ScoreFail. 1423 /// Please note that the order of the operands does not matter, as we 1424 /// evaluate the score of all profitable combinations of operands. In 1425 /// other words the score of G1 and G4 is the same as G1 and G2. This 1426 /// heuristic is based on ideas described in: 1427 /// Look-ahead SLP: Auto-vectorization in the presence of commutative 1428 /// operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha, 1429 /// Luís F. W. Góes 1430 int getScoreAtLevelRec(Value *LHS, Value *RHS, Instruction *U1, 1431 Instruction *U2, int CurrLevel, 1432 ArrayRef<Value *> MainAltOps) const { 1433 1434 // Get the shallow score of V1 and V2. 1435 int ShallowScoreAtThisLevel = 1436 getShallowScore(LHS, RHS, U1, U2, MainAltOps); 1437 1438 // If reached MaxLevel, 1439 // or if V1 and V2 are not instructions, 1440 // or if they are SPLAT, 1441 // or if they are not consecutive, 1442 // or if profitable to vectorize loads or extractelements, early return 1443 // the current cost. 1444 auto *I1 = dyn_cast<Instruction>(LHS); 1445 auto *I2 = dyn_cast<Instruction>(RHS); 1446 if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 || 1447 ShallowScoreAtThisLevel == LookAheadHeuristics::ScoreFail || 1448 (((isa<LoadInst>(I1) && isa<LoadInst>(I2)) || 1449 (I1->getNumOperands() > 2 && I2->getNumOperands() > 2) || 1450 (isa<ExtractElementInst>(I1) && isa<ExtractElementInst>(I2))) && 1451 ShallowScoreAtThisLevel)) 1452 return ShallowScoreAtThisLevel; 1453 assert(I1 && I2 && "Should have early exited."); 1454 1455 // Contains the I2 operand indexes that got matched with I1 operands. 1456 SmallSet<unsigned, 4> Op2Used; 1457 1458 // Recursion towards the operands of I1 and I2. We are trying all possible 1459 // operand pairs, and keeping track of the best score. 1460 for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands(); 1461 OpIdx1 != NumOperands1; ++OpIdx1) { 1462 // Try to pair op1I with the best operand of I2. 1463 int MaxTmpScore = 0; 1464 unsigned MaxOpIdx2 = 0; 1465 bool FoundBest = false; 1466 // If I2 is commutative try all combinations. 1467 unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1; 1468 unsigned ToIdx = isCommutative(I2) 1469 ? I2->getNumOperands() 1470 : std::min(I2->getNumOperands(), OpIdx1 + 1); 1471 assert(FromIdx <= ToIdx && "Bad index"); 1472 for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) { 1473 // Skip operands already paired with OpIdx1. 1474 if (Op2Used.count(OpIdx2)) 1475 continue; 1476 // Recursively calculate the cost at each level 1477 int TmpScore = 1478 getScoreAtLevelRec(I1->getOperand(OpIdx1), I2->getOperand(OpIdx2), 1479 I1, I2, CurrLevel + 1, std::nullopt); 1480 // Look for the best score. 
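// Note that a LookAheadHeuristics::ScoreFail result is never recorded as
// the best, so FoundBest stays false if no operand of I2 pairs well with
// the current operand of I1.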
1481 if (TmpScore > LookAheadHeuristics::ScoreFail && 1482 TmpScore > MaxTmpScore) { 1483 MaxTmpScore = TmpScore; 1484 MaxOpIdx2 = OpIdx2; 1485 FoundBest = true; 1486 } 1487 } 1488 if (FoundBest) { 1489 // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it. 1490 Op2Used.insert(MaxOpIdx2); 1491 ShallowScoreAtThisLevel += MaxTmpScore; 1492 } 1493 } 1494 return ShallowScoreAtThisLevel; 1495 } 1496 }; 1497 /// A helper data structure to hold the operands of a vector of instructions. 1498 /// This supports a fixed vector length for all operand vectors. 1499 class VLOperands { 1500 /// For each operand we need (i) the value, and (ii) the opcode that it 1501 /// would be attached to if the expression was in a left-linearized form. 1502 /// This is required to avoid illegal operand reordering. 1503 /// For example: 1504 /// \verbatim 1505 /// 0 Op1 1506 /// |/ 1507 /// Op1 Op2 Linearized + Op2 1508 /// \ / ----------> |/ 1509 /// - - 1510 /// 1511 /// Op1 - Op2 (0 + Op1) - Op2 1512 /// \endverbatim 1513 /// 1514 /// Value Op1 is attached to a '+' operation, and Op2 to a '-'. 1515 /// 1516 /// Another way to think of this is to track all the operations across the 1517 /// path from the operand all the way to the root of the tree and to 1518 /// calculate the operation that corresponds to this path. For example, the 1519 /// path from Op2 to the root crosses the RHS of the '-', therefore the 1520 /// corresponding operation is a '-' (which matches the one in the 1521 /// linearized tree, as shown above). 1522 /// 1523 /// For lack of a better term, we refer to this operation as Accumulated 1524 /// Path Operation (APO). 1525 struct OperandData { 1526 OperandData() = default; 1527 OperandData(Value *V, bool APO, bool IsUsed) 1528 : V(V), APO(APO), IsUsed(IsUsed) {} 1529 /// The operand value. 1530 Value *V = nullptr; 1531 /// TreeEntries only allow a single opcode, or an alternate sequence of 1532 /// them (e.g, +, -). Therefore, we can safely use a boolean value for the 1533 /// APO. It is set to 'true' if 'V' is attached to an inverse operation 1534 /// in the left-linearized form (e.g., Sub/Div), and 'false' otherwise 1535 /// (e.g., Add/Mul) 1536 bool APO = false; 1537 /// Helper data for the reordering function. 1538 bool IsUsed = false; 1539 }; 1540 1541 /// During operand reordering, we are trying to select the operand at lane 1542 /// that matches best with the operand at the neighboring lane. Our 1543 /// selection is based on the type of value we are looking for. For example, 1544 /// if the neighboring lane has a load, we need to look for a load that is 1545 /// accessing a consecutive address. These strategies are summarized in the 1546 /// 'ReorderingMode' enumerator. 1547 enum class ReorderingMode { 1548 Load, ///< Matching loads to consecutive memory addresses 1549 Opcode, ///< Matching instructions based on opcode (same or alternate) 1550 Constant, ///< Matching constants 1551 Splat, ///< Matching the same instruction multiple times (broadcast) 1552 Failed, ///< We failed to create a vectorizable group 1553 }; 1554 1555 using OperandDataVec = SmallVector<OperandData, 2>; 1556 1557 /// A vector of operand vectors. 1558 SmallVector<OperandDataVec, 4> OpsVec; 1559 1560 const TargetLibraryInfo &TLI; 1561 const DataLayout &DL; 1562 ScalarEvolution &SE; 1563 const BoUpSLP &R; 1564 1565 /// \returns the operand data at \p OpIdx and \p Lane. 
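/// For example, getData(1, 3) is the OperandData (value plus APO flag)
/// recorded for operand index 1 in lane 3.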
1566 OperandData &getData(unsigned OpIdx, unsigned Lane) { 1567 return OpsVec[OpIdx][Lane]; 1568 } 1569 1570 /// \returns the operand data at \p OpIdx and \p Lane. Const version. 1571 const OperandData &getData(unsigned OpIdx, unsigned Lane) const { 1572 return OpsVec[OpIdx][Lane]; 1573 } 1574 1575 /// Clears the used flag for all entries. 1576 void clearUsed() { 1577 for (unsigned OpIdx = 0, NumOperands = getNumOperands(); 1578 OpIdx != NumOperands; ++OpIdx) 1579 for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes; 1580 ++Lane) 1581 OpsVec[OpIdx][Lane].IsUsed = false; 1582 } 1583 1584 /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2. 1585 void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) { 1586 std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]); 1587 } 1588 1589 /// \param Lane lane of the operands under analysis. 1590 /// \param OpIdx operand index in \p Lane lane we're looking the best 1591 /// candidate for. 1592 /// \param Idx operand index of the current candidate value. 1593 /// \returns The additional score due to possible broadcasting of the 1594 /// elements in the lane. It is more profitable to have power-of-2 unique 1595 /// elements in the lane, it will be vectorized with higher probability 1596 /// after removing duplicates. Currently the SLP vectorizer supports only 1597 /// vectorization of the power-of-2 number of unique scalars. 1598 int getSplatScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const { 1599 Value *IdxLaneV = getData(Idx, Lane).V; 1600 if (!isa<Instruction>(IdxLaneV) || IdxLaneV == getData(OpIdx, Lane).V) 1601 return 0; 1602 SmallPtrSet<Value *, 4> Uniques; 1603 for (unsigned Ln = 0, E = getNumLanes(); Ln < E; ++Ln) { 1604 if (Ln == Lane) 1605 continue; 1606 Value *OpIdxLnV = getData(OpIdx, Ln).V; 1607 if (!isa<Instruction>(OpIdxLnV)) 1608 return 0; 1609 Uniques.insert(OpIdxLnV); 1610 } 1611 int UniquesCount = Uniques.size(); 1612 int UniquesCntWithIdxLaneV = 1613 Uniques.contains(IdxLaneV) ? UniquesCount : UniquesCount + 1; 1614 Value *OpIdxLaneV = getData(OpIdx, Lane).V; 1615 int UniquesCntWithOpIdxLaneV = 1616 Uniques.contains(OpIdxLaneV) ? UniquesCount : UniquesCount + 1; 1617 if (UniquesCntWithIdxLaneV == UniquesCntWithOpIdxLaneV) 1618 return 0; 1619 return (PowerOf2Ceil(UniquesCntWithOpIdxLaneV) - 1620 UniquesCntWithOpIdxLaneV) - 1621 (PowerOf2Ceil(UniquesCntWithIdxLaneV) - UniquesCntWithIdxLaneV); 1622 } 1623 1624 /// \param Lane lane of the operands under analysis. 1625 /// \param OpIdx operand index in \p Lane lane we're looking the best 1626 /// candidate for. 1627 /// \param Idx operand index of the current candidate value. 1628 /// \returns The additional score for the scalar which users are all 1629 /// vectorized. 1630 int getExternalUseScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const { 1631 Value *IdxLaneV = getData(Idx, Lane).V; 1632 Value *OpIdxLaneV = getData(OpIdx, Lane).V; 1633 // Do not care about number of uses for vector-like instructions 1634 // (extractelement/extractvalue with constant indices), they are extracts 1635 // themselves and already externally used. Vectorization of such 1636 // instructions does not add extra extractelement instruction, just may 1637 // remove it. 
1638 if (isVectorLikeInstWithConstOps(IdxLaneV) &&
1639 isVectorLikeInstWithConstOps(OpIdxLaneV))
1640 return LookAheadHeuristics::ScoreAllUserVectorized;
1641 auto *IdxLaneI = dyn_cast<Instruction>(IdxLaneV);
1642 if (!IdxLaneI || !isa<Instruction>(OpIdxLaneV))
1643 return 0;
1644 return R.areAllUsersVectorized(IdxLaneI)
1645 ? LookAheadHeuristics::ScoreAllUserVectorized
1646 : 0;
1647 }
1648 
1649 /// Score scaling factor for fully compatible instructions but with
1650 /// different number of external uses. Allows better selection of the
1651 /// instructions with fewer external uses.
1652 static const int ScoreScaleFactor = 10;
1653 
1654 /// \returns the look-ahead score, which tells us how much the sub-trees
1655 /// rooted at \p LHS and \p RHS match; the more they match, the higher the
1656 /// score. This helps break ties in an informed way when we cannot decide on
1657 /// the order of the operands by just considering the immediate
1658 /// predecessors.
1659 int getLookAheadScore(Value *LHS, Value *RHS, ArrayRef<Value *> MainAltOps,
1660 int Lane, unsigned OpIdx, unsigned Idx,
1661 bool &IsUsed) {
1662 LookAheadHeuristics LookAhead(TLI, DL, SE, R, getNumLanes(),
1663 LookAheadMaxDepth);
1664 // Keep track of the instruction stack as we recurse into the operands
1665 // during the look-ahead score exploration.
1666 int Score =
1667 LookAhead.getScoreAtLevelRec(LHS, RHS, /*U1=*/nullptr, /*U2=*/nullptr,
1668 /*CurrLevel=*/1, MainAltOps);
1669 if (Score) {
1670 int SplatScore = getSplatScore(Lane, OpIdx, Idx);
1671 if (Score <= -SplatScore) {
1672 // Set the minimum score for splat-like sequence to avoid setting
1673 // failed state.
1674 Score = 1;
1675 } else {
1676 Score += SplatScore;
1677 // Scale the score so that otherwise similar operands can still be
1678 // distinguished by whether all of their uses are vectorized. It does
1679 // not affect the actual selection of the best
1680 // compatible operand in general, it just allows selecting the
1681 // operand with all vectorized uses.
1682 Score *= ScoreScaleFactor;
1683 Score += getExternalUseScore(Lane, OpIdx, Idx);
1684 IsUsed = true;
1685 }
1686 }
1687 return Score;
1688 }
1689 
1690 /// Best defined scores per lane between the passes. Used to choose the
1691 /// best operand (with the highest score) between the passes.
1692 /// The key - {Operand Index, Lane}.
1693 /// The value - the best score between the passes for the lane and the
1694 /// operand.
1695 SmallDenseMap<std::pair<unsigned, unsigned>, unsigned, 8>
1696 BestScoresPerLanes;
1697 
1698 // Search all operands in Ops[*][Lane] for the one that matches best
1699 // Ops[OpIdx][LastLane] and return its operand index.
1700 // If no good match can be found, return std::nullopt.
1701 std::optional<unsigned>
1702 getBestOperand(unsigned OpIdx, int Lane, int LastLane,
1703 ArrayRef<ReorderingMode> ReorderingModes,
1704 ArrayRef<Value *> MainAltOps) {
1705 unsigned NumOperands = getNumOperands();
1706 
1707 // The operand of the previous lane at OpIdx.
1708 Value *OpLastLane = getData(OpIdx, LastLane).V;
1709 
1710 // Our strategy mode for OpIdx.
1711 ReorderingMode RMode = ReorderingModes[OpIdx];
1712 if (RMode == ReorderingMode::Failed)
1713 return std::nullopt;
1714 
1715 // The linearized opcode of the operand at OpIdx, Lane.
1716 bool OpIdxAPO = getData(OpIdx, Lane).APO;
1717 
1718 // The best operand index and its score.
1719 // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
1720 // are using the score to differentiate between the two.
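// BestOp.Score is seeded below from BestScoresPerLanes, so a candidate
// remembered from an earlier pass is only displaced by a strictly better
// score.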
1721 struct BestOpData { 1722 std::optional<unsigned> Idx; 1723 unsigned Score = 0; 1724 } BestOp; 1725 BestOp.Score = 1726 BestScoresPerLanes.try_emplace(std::make_pair(OpIdx, Lane), 0) 1727 .first->second; 1728 1729 // Track if the operand must be marked as used. If the operand is set to 1730 // Score 1 explicitly (because of non power-of-2 unique scalars, we may 1731 // want to reestimate the operands again on the following iterations). 1732 bool IsUsed = 1733 RMode == ReorderingMode::Splat || RMode == ReorderingMode::Constant; 1734 // Iterate through all unused operands and look for the best. 1735 for (unsigned Idx = 0; Idx != NumOperands; ++Idx) { 1736 // Get the operand at Idx and Lane. 1737 OperandData &OpData = getData(Idx, Lane); 1738 Value *Op = OpData.V; 1739 bool OpAPO = OpData.APO; 1740 1741 // Skip already selected operands. 1742 if (OpData.IsUsed) 1743 continue; 1744 1745 // Skip if we are trying to move the operand to a position with a 1746 // different opcode in the linearized tree form. This would break the 1747 // semantics. 1748 if (OpAPO != OpIdxAPO) 1749 continue; 1750 1751 // Look for an operand that matches the current mode. 1752 switch (RMode) { 1753 case ReorderingMode::Load: 1754 case ReorderingMode::Constant: 1755 case ReorderingMode::Opcode: { 1756 bool LeftToRight = Lane > LastLane; 1757 Value *OpLeft = (LeftToRight) ? OpLastLane : Op; 1758 Value *OpRight = (LeftToRight) ? Op : OpLastLane; 1759 int Score = getLookAheadScore(OpLeft, OpRight, MainAltOps, Lane, 1760 OpIdx, Idx, IsUsed); 1761 if (Score > static_cast<int>(BestOp.Score)) { 1762 BestOp.Idx = Idx; 1763 BestOp.Score = Score; 1764 BestScoresPerLanes[std::make_pair(OpIdx, Lane)] = Score; 1765 } 1766 break; 1767 } 1768 case ReorderingMode::Splat: 1769 if (Op == OpLastLane) 1770 BestOp.Idx = Idx; 1771 break; 1772 case ReorderingMode::Failed: 1773 llvm_unreachable("Not expected Failed reordering mode."); 1774 } 1775 } 1776 1777 if (BestOp.Idx) { 1778 getData(*BestOp.Idx, Lane).IsUsed = IsUsed; 1779 return BestOp.Idx; 1780 } 1781 // If we could not find a good match return std::nullopt. 1782 return std::nullopt; 1783 } 1784 1785 /// Helper for reorderOperandVecs. 1786 /// \returns the lane that we should start reordering from. This is the one 1787 /// which has the least number of operands that can freely move about or 1788 /// less profitable because it already has the most optimal set of operands. 1789 unsigned getBestLaneToStartReordering() const { 1790 unsigned Min = UINT_MAX; 1791 unsigned SameOpNumber = 0; 1792 // std::pair<unsigned, unsigned> is used to implement a simple voting 1793 // algorithm and choose the lane with the least number of operands that 1794 // can freely move about or less profitable because it already has the 1795 // most optimal set of operands. The first unsigned is a counter for 1796 // voting, the second unsigned is the counter of lanes with instructions 1797 // with same/alternate opcodes and same parent basic block. 1798 MapVector<unsigned, std::pair<unsigned, unsigned>> HashMap; 1799 // Try to be closer to the original results, if we have multiple lanes 1800 // with same cost. If 2 lanes have the same cost, use the one with the 1801 // lowest index. 1802 for (int I = getNumLanes(); I > 0; --I) { 1803 unsigned Lane = I - 1; 1804 OperandsOrderData NumFreeOpsHash = 1805 getMaxNumOperandsThatCanBeReordered(Lane); 1806 // Compare the number of operands that can move and choose the one with 1807 // the least number. 
1808 if (NumFreeOpsHash.NumOfAPOs < Min) { 1809 Min = NumFreeOpsHash.NumOfAPOs; 1810 SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent; 1811 HashMap.clear(); 1812 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane); 1813 } else if (NumFreeOpsHash.NumOfAPOs == Min && 1814 NumFreeOpsHash.NumOpsWithSameOpcodeParent < SameOpNumber) { 1815 // Select the most optimal lane in terms of number of operands that 1816 // should be moved around. 1817 SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent; 1818 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane); 1819 } else if (NumFreeOpsHash.NumOfAPOs == Min && 1820 NumFreeOpsHash.NumOpsWithSameOpcodeParent == SameOpNumber) { 1821 auto *It = HashMap.find(NumFreeOpsHash.Hash); 1822 if (It == HashMap.end()) 1823 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane); 1824 else 1825 ++It->second.first; 1826 } 1827 } 1828 // Select the lane with the minimum counter. 1829 unsigned BestLane = 0; 1830 unsigned CntMin = UINT_MAX; 1831 for (const auto &Data : reverse(HashMap)) { 1832 if (Data.second.first < CntMin) { 1833 CntMin = Data.second.first; 1834 BestLane = Data.second.second; 1835 } 1836 } 1837 return BestLane; 1838 } 1839 1840 /// Data structure that helps to reorder operands. 1841 struct OperandsOrderData { 1842 /// The best number of operands with the same APOs, which can be 1843 /// reordered. 1844 unsigned NumOfAPOs = UINT_MAX; 1845 /// Number of operands with the same/alternate instruction opcode and 1846 /// parent. 1847 unsigned NumOpsWithSameOpcodeParent = 0; 1848 /// Hash for the actual operands ordering. 1849 /// Used to count operands, actually their position id and opcode 1850 /// value. It is used in the voting mechanism to find the lane with the 1851 /// least number of operands that can freely move about or less profitable 1852 /// because it already has the most optimal set of operands. Can be 1853 /// replaced with SmallVector<unsigned> instead but hash code is faster 1854 /// and requires less memory. 1855 unsigned Hash = 0; 1856 }; 1857 /// \returns the maximum number of operands that are allowed to be reordered 1858 /// for \p Lane and the number of compatible instructions(with the same 1859 /// parent/opcode). This is used as a heuristic for selecting the first lane 1860 /// to start operand reordering. 1861 OperandsOrderData getMaxNumOperandsThatCanBeReordered(unsigned Lane) const { 1862 unsigned CntTrue = 0; 1863 unsigned NumOperands = getNumOperands(); 1864 // Operands with the same APO can be reordered. We therefore need to count 1865 // how many of them we have for each APO, like this: Cnt[APO] = x. 1866 // Since we only have two APOs, namely true and false, we can avoid using 1867 // a map. Instead we can simply count the number of operands that 1868 // correspond to one of them (in this case the 'true' APO), and calculate 1869 // the other by subtracting it from the total number of operands. 1870 // Operands with the same instruction opcode and parent are more 1871 // profitable since we don't need to move them in many cases, with a high 1872 // probability such lane already can be vectorized effectively. 
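// For example, in a lane with four operands where only one has
// APO == true, CntTrue is 1 and NumOfAPOs becomes max(1, 4 - 1) = 3:
// the three operands with APO == false form the larger group that can be
// freely reordered.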
1873 bool AllUndefs = true; 1874 unsigned NumOpsWithSameOpcodeParent = 0; 1875 Instruction *OpcodeI = nullptr; 1876 BasicBlock *Parent = nullptr; 1877 unsigned Hash = 0; 1878 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 1879 const OperandData &OpData = getData(OpIdx, Lane); 1880 if (OpData.APO) 1881 ++CntTrue; 1882 // Use Boyer-Moore majority voting for finding the majority opcode and 1883 // the number of times it occurs. 1884 if (auto *I = dyn_cast<Instruction>(OpData.V)) { 1885 if (!OpcodeI || !getSameOpcode({OpcodeI, I}, TLI).getOpcode() || 1886 I->getParent() != Parent) { 1887 if (NumOpsWithSameOpcodeParent == 0) { 1888 NumOpsWithSameOpcodeParent = 1; 1889 OpcodeI = I; 1890 Parent = I->getParent(); 1891 } else { 1892 --NumOpsWithSameOpcodeParent; 1893 } 1894 } else { 1895 ++NumOpsWithSameOpcodeParent; 1896 } 1897 } 1898 Hash = hash_combine( 1899 Hash, hash_value((OpIdx + 1) * (OpData.V->getValueID() + 1))); 1900 AllUndefs = AllUndefs && isa<UndefValue>(OpData.V); 1901 } 1902 if (AllUndefs) 1903 return {}; 1904 OperandsOrderData Data; 1905 Data.NumOfAPOs = std::max(CntTrue, NumOperands - CntTrue); 1906 Data.NumOpsWithSameOpcodeParent = NumOpsWithSameOpcodeParent; 1907 Data.Hash = Hash; 1908 return Data; 1909 } 1910 1911 /// Go through the instructions in VL and append their operands. 1912 void appendOperandsOfVL(ArrayRef<Value *> VL) { 1913 assert(!VL.empty() && "Bad VL"); 1914 assert((empty() || VL.size() == getNumLanes()) && 1915 "Expected same number of lanes"); 1916 assert(isa<Instruction>(VL[0]) && "Expected instruction"); 1917 unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands(); 1918 OpsVec.resize(NumOperands); 1919 unsigned NumLanes = VL.size(); 1920 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 1921 OpsVec[OpIdx].resize(NumLanes); 1922 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) { 1923 assert(isa<Instruction>(VL[Lane]) && "Expected instruction"); 1924 // Our tree has just 3 nodes: the root and two operands. 1925 // It is therefore trivial to get the APO. We only need to check the 1926 // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or 1927 // RHS operand. The LHS operand of both add and sub is never attached 1928 // to an inversese operation in the linearized form, therefore its APO 1929 // is false. The RHS is true only if VL[Lane] is an inverse operation. 1930 1931 // Since operand reordering is performed on groups of commutative 1932 // operations or alternating sequences (e.g., +, -), we can safely 1933 // tell the inverse operations by checking commutativity. 1934 bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane])); 1935 bool APO = (OpIdx == 0) ? false : IsInverseOperation; 1936 OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx), 1937 APO, false}; 1938 } 1939 } 1940 } 1941 1942 /// \returns the number of operands. 1943 unsigned getNumOperands() const { return OpsVec.size(); } 1944 1945 /// \returns the number of lanes. 1946 unsigned getNumLanes() const { return OpsVec[0].size(); } 1947 1948 /// \returns the operand value at \p OpIdx and \p Lane. 1949 Value *getValue(unsigned OpIdx, unsigned Lane) const { 1950 return getData(OpIdx, Lane).V; 1951 } 1952 1953 /// \returns true if the data structure is empty. 1954 bool empty() const { return OpsVec.empty(); } 1955 1956 /// Clears the data. 1957 void clear() { OpsVec.clear(); } 1958 1959 /// \Returns true if there are enough operands identical to \p Op to fill 1960 /// the whole vector. 
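/// (That is, a copy of \p Op with a matching APO is available in every
/// other lane.)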
1961 /// Note: This modifies the 'IsUsed' flag, so a clearUsed() must follow.
1962 bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
1963 bool OpAPO = getData(OpIdx, Lane).APO;
1964 for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
1965 if (Ln == Lane)
1966 continue;
1967 // This is set to true if we found a candidate for broadcast at Lane.
1968 bool FoundCandidate = false;
1969 for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
1970 OperandData &Data = getData(OpI, Ln);
1971 if (Data.APO != OpAPO || Data.IsUsed)
1972 continue;
1973 if (Data.V == Op) {
1974 FoundCandidate = true;
1975 Data.IsUsed = true;
1976 break;
1977 }
1978 }
1979 if (!FoundCandidate)
1980 return false;
1981 }
1982 return true;
1983 }
1984 
1985 public:
1986 /// Initialize with all the operands of the instruction vector \p RootVL.
1987 VLOperands(ArrayRef<Value *> RootVL, const TargetLibraryInfo &TLI,
1988 const DataLayout &DL, ScalarEvolution &SE, const BoUpSLP &R)
1989 : TLI(TLI), DL(DL), SE(SE), R(R) {
1990 // Append all the operands of RootVL.
1991 appendOperandsOfVL(RootVL);
1992 }
1993 
1994 /// \returns a value vector with the operands across all lanes for the
1995 /// operand at \p OpIdx.
1996 ValueList getVL(unsigned OpIdx) const {
1997 ValueList OpVL(OpsVec[OpIdx].size());
1998 assert(OpsVec[OpIdx].size() == getNumLanes() &&
1999 "Expected same num of lanes across all operands");
2000 for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
2001 OpVL[Lane] = OpsVec[OpIdx][Lane].V;
2002 return OpVL;
2003 }
2004 
2005 // Performs operand reordering for 2 or more operands.
2006 // The operands are stored in OpsVec[OpIdx][Lane] and are reordered
2007 // in place.
2008 void reorder() {
2009 unsigned NumOperands = getNumOperands();
2010 unsigned NumLanes = getNumLanes();
2011 // Each operand has its own mode. We are using this mode to help us select
2012 // the instructions for each lane, so that they match best with the ones
2013 // we have selected so far.
2014 SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);
2015 
2016 // This is a greedy single-pass algorithm. We are going over each lane
2017 // once and deciding on the best order right away with no back-tracking.
2018 // However, in order to increase its effectiveness, we start with the lane
2019 // that has operands that can move the least. For example, given the
2020 // following lanes:
2021 // Lane 0 : A[0] = B[0] + C[0] // Visited 3rd
2022 // Lane 1 : A[1] = C[1] - B[1] // Visited 1st
2023 // Lane 2 : A[2] = B[2] + C[2] // Visited 2nd
2024 // Lane 3 : A[3] = C[3] - B[3] // Visited 4th
2025 // we will start at Lane 1, since the operands of the subtraction cannot
2026 // be reordered. Then we will visit the rest of the lanes in a circular
2027 // fashion. That is, Lanes 2, then Lane 0, and finally Lane 3.
2028 
2029 // Find the first lane that we will start our search from.
2030 unsigned FirstLane = getBestLaneToStartReordering();
2031 
2032 // Initialize the modes.
2033 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
2034 Value *OpLane0 = getValue(OpIdx, FirstLane);
2035 // Keep track if we have instructions with all the same opcode on one
2036 // side.
2037 if (isa<LoadInst>(OpLane0))
2038 ReorderingModes[OpIdx] = ReorderingMode::Load;
2039 else if (isa<Instruction>(OpLane0)) {
2040 // Check if OpLane0 should be broadcast.
2041 if (shouldBroadcast(OpLane0, OpIdx, FirstLane)) 2042 ReorderingModes[OpIdx] = ReorderingMode::Splat; 2043 else 2044 ReorderingModes[OpIdx] = ReorderingMode::Opcode; 2045 } 2046 else if (isa<Constant>(OpLane0)) 2047 ReorderingModes[OpIdx] = ReorderingMode::Constant; 2048 else if (isa<Argument>(OpLane0)) 2049 // Our best hope is a Splat. It may save some cost in some cases. 2050 ReorderingModes[OpIdx] = ReorderingMode::Splat; 2051 else 2052 // NOTE: This should be unreachable. 2053 ReorderingModes[OpIdx] = ReorderingMode::Failed; 2054 } 2055 2056 // Check that we don't have same operands. No need to reorder if operands 2057 // are just perfect diamond or shuffled diamond match. Do not do it only 2058 // for possible broadcasts or non-power of 2 number of scalars (just for 2059 // now). 2060 auto &&SkipReordering = [this]() { 2061 SmallPtrSet<Value *, 4> UniqueValues; 2062 ArrayRef<OperandData> Op0 = OpsVec.front(); 2063 for (const OperandData &Data : Op0) 2064 UniqueValues.insert(Data.V); 2065 for (ArrayRef<OperandData> Op : drop_begin(OpsVec, 1)) { 2066 if (any_of(Op, [&UniqueValues](const OperandData &Data) { 2067 return !UniqueValues.contains(Data.V); 2068 })) 2069 return false; 2070 } 2071 // TODO: Check if we can remove a check for non-power-2 number of 2072 // scalars after full support of non-power-2 vectorization. 2073 return UniqueValues.size() != 2 && isPowerOf2_32(UniqueValues.size()); 2074 }; 2075 2076 // If the initial strategy fails for any of the operand indexes, then we 2077 // perform reordering again in a second pass. This helps avoid assigning 2078 // high priority to the failed strategy, and should improve reordering for 2079 // the non-failed operand indexes. 2080 for (int Pass = 0; Pass != 2; ++Pass) { 2081 // Check if no need to reorder operands since they're are perfect or 2082 // shuffled diamond match. 2083 // Need to do it to avoid extra external use cost counting for 2084 // shuffled matches, which may cause regressions. 2085 if (SkipReordering()) 2086 break; 2087 // Skip the second pass if the first pass did not fail. 2088 bool StrategyFailed = false; 2089 // Mark all operand data as free to use. 2090 clearUsed(); 2091 // We keep the original operand order for the FirstLane, so reorder the 2092 // rest of the lanes. We are visiting the nodes in a circular fashion, 2093 // using FirstLane as the center point and increasing the radius 2094 // distance. 2095 SmallVector<SmallVector<Value *, 2>> MainAltOps(NumOperands); 2096 for (unsigned I = 0; I < NumOperands; ++I) 2097 MainAltOps[I].push_back(getData(I, FirstLane).V); 2098 2099 for (unsigned Distance = 1; Distance != NumLanes; ++Distance) { 2100 // Visit the lane on the right and then the lane on the left. 2101 for (int Direction : {+1, -1}) { 2102 int Lane = FirstLane + Direction * Distance; 2103 if (Lane < 0 || Lane >= (int)NumLanes) 2104 continue; 2105 int LastLane = Lane - Direction; 2106 assert(LastLane >= 0 && LastLane < (int)NumLanes && 2107 "Out of bounds"); 2108 // Look for a good match for each operand. 2109 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 2110 // Search for the operand that matches SortedOps[OpIdx][Lane-1]. 2111 std::optional<unsigned> BestIdx = getBestOperand( 2112 OpIdx, Lane, LastLane, ReorderingModes, MainAltOps[OpIdx]); 2113 // By not selecting a value, we allow the operands that follow to 2114 // select a better matching value. We will get a non-null value in 2115 // the next run of getBestOperand(). 
2116 if (BestIdx) { 2117 // Swap the current operand with the one returned by 2118 // getBestOperand(). 2119 swap(OpIdx, *BestIdx, Lane); 2120 } else { 2121 // We failed to find a best operand, set mode to 'Failed'. 2122 ReorderingModes[OpIdx] = ReorderingMode::Failed; 2123 // Enable the second pass. 2124 StrategyFailed = true; 2125 } 2126 // Try to get the alternate opcode and follow it during analysis. 2127 if (MainAltOps[OpIdx].size() != 2) { 2128 OperandData &AltOp = getData(OpIdx, Lane); 2129 InstructionsState OpS = 2130 getSameOpcode({MainAltOps[OpIdx].front(), AltOp.V}, TLI); 2131 if (OpS.getOpcode() && OpS.isAltShuffle()) 2132 MainAltOps[OpIdx].push_back(AltOp.V); 2133 } 2134 } 2135 } 2136 } 2137 // Skip second pass if the strategy did not fail. 2138 if (!StrategyFailed) 2139 break; 2140 } 2141 } 2142 2143 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2144 LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) { 2145 switch (RMode) { 2146 case ReorderingMode::Load: 2147 return "Load"; 2148 case ReorderingMode::Opcode: 2149 return "Opcode"; 2150 case ReorderingMode::Constant: 2151 return "Constant"; 2152 case ReorderingMode::Splat: 2153 return "Splat"; 2154 case ReorderingMode::Failed: 2155 return "Failed"; 2156 } 2157 llvm_unreachable("Unimplemented Reordering Type"); 2158 } 2159 2160 LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode, 2161 raw_ostream &OS) { 2162 return OS << getModeStr(RMode); 2163 } 2164 2165 /// Debug print. 2166 LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) { 2167 printMode(RMode, dbgs()); 2168 } 2169 2170 friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) { 2171 return printMode(RMode, OS); 2172 } 2173 2174 LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const { 2175 const unsigned Indent = 2; 2176 unsigned Cnt = 0; 2177 for (const OperandDataVec &OpDataVec : OpsVec) { 2178 OS << "Operand " << Cnt++ << "\n"; 2179 for (const OperandData &OpData : OpDataVec) { 2180 OS.indent(Indent) << "{"; 2181 if (Value *V = OpData.V) 2182 OS << *V; 2183 else 2184 OS << "null"; 2185 OS << ", APO:" << OpData.APO << "}\n"; 2186 } 2187 OS << "\n"; 2188 } 2189 return OS; 2190 } 2191 2192 /// Debug print. 2193 LLVM_DUMP_METHOD void dump() const { print(dbgs()); } 2194 #endif 2195 }; 2196 2197 /// Evaluate each pair in \p Candidates and return index into \p Candidates 2198 /// for a pair which have highest score deemed to have best chance to form 2199 /// root of profitable tree to vectorize. Return std::nullopt if no candidate 2200 /// scored above the LookAheadHeuristics::ScoreFail. \param Limit Lower limit 2201 /// of the cost, considered to be good enough score. 2202 std::optional<int> 2203 findBestRootPair(ArrayRef<std::pair<Value *, Value *>> Candidates, 2204 int Limit = LookAheadHeuristics::ScoreFail) { 2205 LookAheadHeuristics LookAhead(*TLI, *DL, *SE, *this, /*NumLanes=*/2, 2206 RootLookAheadMaxDepth); 2207 int BestScore = Limit; 2208 std::optional<int> Index; 2209 for (int I : seq<int>(0, Candidates.size())) { 2210 int Score = LookAhead.getScoreAtLevelRec(Candidates[I].first, 2211 Candidates[I].second, 2212 /*U1=*/nullptr, /*U2=*/nullptr, 2213 /*Level=*/1, std::nullopt); 2214 if (Score > BestScore) { 2215 BestScore = Score; 2216 Index = I; 2217 } 2218 } 2219 return Index; 2220 } 2221 2222 /// Checks if the instruction is marked for deletion. 2223 bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); } 2224 2225 /// Removes an instruction from its block and eventually deletes it. 
2226 /// It's like Instruction::eraseFromParent() except that the actual deletion
2227 /// is delayed until BoUpSLP is destructed.
2228 void eraseInstruction(Instruction *I) {
2229 DeletedInstructions.insert(I);
2230 }
2231 
2232 /// Checks if the instruction was already analyzed for being a possible
2233 /// reduction root.
2234 bool isAnalyzedReductionRoot(Instruction *I) const {
2235 return AnalyzedReductionsRoots.count(I);
2236 }
2237 /// Register the given instruction as already analyzed for being a possible
2238 /// reduction root.
2239 void analyzedReductionRoot(Instruction *I) {
2240 AnalyzedReductionsRoots.insert(I);
2241 }
2242 /// Checks if the provided list of reduced values was already checked for
2243 /// vectorization.
2244 bool areAnalyzedReductionVals(ArrayRef<Value *> VL) const {
2245 return AnalyzedReductionVals.contains(hash_value(VL));
2246 }
2247 /// Adds the list of reduced values to the list of values already checked
2248 /// for vectorization.
2249 void analyzedReductionVals(ArrayRef<Value *> VL) {
2250 AnalyzedReductionVals.insert(hash_value(VL));
2251 }
2252 /// Clear the list of the analyzed reduction root instructions.
2253 void clearReductionData() {
2254 AnalyzedReductionsRoots.clear();
2255 AnalyzedReductionVals.clear();
2256 }
2257 /// Checks if the given value is gathered in one of the nodes.
2258 bool isAnyGathered(const SmallDenseSet<Value *> &Vals) const {
2259 return any_of(MustGather, [&](Value *V) { return Vals.contains(V); });
2260 }
2261 
2262 /// Check if the value is vectorized in the tree.
2263 bool isVectorized(Value *V) const { return getTreeEntry(V); }
2264 
2265 ~BoUpSLP();
2266 
2267 private:
2268 /// Determine if a vectorized value \p V can be demoted to
2269 /// a smaller type with a truncation. We collect the values that will be
2270 /// demoted in ToDemote and additional roots that require investigation in
2271 /// Roots.
2272 /// \param DemotedConsts list of Instruction/OperandIndex pairs that are
2273 /// constant and to be demoted. Required to correctly identify constant nodes
2274 /// to be demoted.
2275 bool collectValuesToDemote(
2276 Value *V, SmallVectorImpl<Value *> &ToDemote,
2277 DenseMap<Instruction *, SmallVector<unsigned>> &DemotedConsts,
2278 SmallVectorImpl<Value *> &Roots, DenseSet<Value *> &Visited) const;
2279 
2280 /// Check if the operands on the edges \p Edges of the \p UserTE allow
2281 /// reordering (i.e. the operands can be reordered because they have only one
2282 /// user and are reorderable).
2283 /// \param ReorderableGathers List of all gather nodes that require reordering
2284 /// (e.g., gather of extractelements or partially vectorizable loads).
2285 /// \param GatherOps List of gather operand nodes for \p UserTE that require
2286 /// reordering, subset of \p NonVectorized.
2287 bool
2288 canReorderOperands(TreeEntry *UserTE,
2289 SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges,
2290 ArrayRef<TreeEntry *> ReorderableGathers,
2291 SmallVectorImpl<TreeEntry *> &GatherOps);
2292 
2293 /// Checks if the given \p TE is a gather node with clustered reused scalars
2294 /// and reorders it per given \p Mask.
2295 void reorderNodeWithReuses(TreeEntry &TE, ArrayRef<int> Mask) const;
2296 
2297 /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
2298 /// if any. If it is not vectorized (gather node), returns nullptr.
2299 TreeEntry *getVectorizedOperand(TreeEntry *UserTE, unsigned OpIdx) { 2300 ArrayRef<Value *> VL = UserTE->getOperand(OpIdx); 2301 TreeEntry *TE = nullptr; 2302 const auto *It = find_if(VL, [&](Value *V) { 2303 TE = getTreeEntry(V); 2304 if (TE && is_contained(TE->UserTreeIndices, EdgeInfo(UserTE, OpIdx))) 2305 return true; 2306 auto It = MultiNodeScalars.find(V); 2307 if (It != MultiNodeScalars.end()) { 2308 for (TreeEntry *E : It->second) { 2309 if (is_contained(E->UserTreeIndices, EdgeInfo(UserTE, OpIdx))) { 2310 TE = E; 2311 return true; 2312 } 2313 } 2314 } 2315 return false; 2316 }); 2317 if (It != VL.end()) { 2318 assert(TE->isSame(VL) && "Expected same scalars."); 2319 return TE; 2320 } 2321 return nullptr; 2322 } 2323 2324 /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph, 2325 /// if any. If it is not vectorized (gather node), returns nullptr. 2326 const TreeEntry *getVectorizedOperand(const TreeEntry *UserTE, 2327 unsigned OpIdx) const { 2328 return const_cast<BoUpSLP *>(this)->getVectorizedOperand( 2329 const_cast<TreeEntry *>(UserTE), OpIdx); 2330 } 2331 2332 /// Checks if all users of \p I are the part of the vectorization tree. 2333 bool areAllUsersVectorized( 2334 Instruction *I, 2335 const SmallDenseSet<Value *> *VectorizedVals = nullptr) const; 2336 2337 /// Return information about the vector formed for the specified index 2338 /// of a vector of (the same) instruction. 2339 TargetTransformInfo::OperandValueInfo getOperandInfo(ArrayRef<Value *> Ops); 2340 2341 /// \ returns the graph entry for the \p Idx operand of the \p E entry. 2342 const TreeEntry *getOperandEntry(const TreeEntry *E, unsigned Idx) const; 2343 2344 /// \returns the cost of the vectorizable entry. 2345 InstructionCost getEntryCost(const TreeEntry *E, 2346 ArrayRef<Value *> VectorizedVals, 2347 SmallPtrSetImpl<Value *> &CheckedExtracts); 2348 2349 /// This is the recursive part of buildTree. 2350 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth, 2351 const EdgeInfo &EI); 2352 2353 /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can 2354 /// be vectorized to use the original vector (or aggregate "bitcast" to a 2355 /// vector) and sets \p CurrentOrder to the identity permutation; otherwise 2356 /// returns false, setting \p CurrentOrder to either an empty vector or a 2357 /// non-identity permutation that allows to reuse extract instructions. 2358 /// \param ResizeAllowed indicates whether it is allowed to handle subvector 2359 /// extract order. 2360 bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 2361 SmallVectorImpl<unsigned> &CurrentOrder, 2362 bool ResizeAllowed = false) const; 2363 2364 /// Vectorize a single entry in the tree. 2365 /// \param PostponedPHIs true, if need to postpone emission of phi nodes to 2366 /// avoid issues with def-use order. 2367 Value *vectorizeTree(TreeEntry *E, bool PostponedPHIs); 2368 2369 /// Vectorize a single entry in the tree, the \p Idx-th operand of the entry 2370 /// \p E. 2371 /// \param PostponedPHIs true, if need to postpone emission of phi nodes to 2372 /// avoid issues with def-use order. 2373 Value *vectorizeOperand(TreeEntry *E, unsigned NodeIdx, bool PostponedPHIs); 2374 2375 /// Create a new vector from a list of scalar values. Produces a sequence 2376 /// which exploits values reused across lanes, and arranges the inserts 2377 /// for ease of later optimization. 2378 template <typename BVTy, typename ResTy, typename... 
Args> 2379 ResTy processBuildVector(const TreeEntry *E, Args &...Params); 2380 2381 /// Create a new vector from a list of scalar values. Produces a sequence 2382 /// which exploits values reused across lanes, and arranges the inserts 2383 /// for ease of later optimization. 2384 Value *createBuildVector(const TreeEntry *E); 2385 2386 /// Returns the instruction in the bundle, which can be used as a base point 2387 /// for scheduling. Usually it is the last instruction in the bundle, except 2388 /// for the case when all operands are external (in this case, it is the first 2389 /// instruction in the list). 2390 Instruction &getLastInstructionInBundle(const TreeEntry *E); 2391 2392 /// Tries to find extractelement instructions with constant indices from fixed 2393 /// vector type and gather such instructions into a bunch, which highly likely 2394 /// might be detected as a shuffle of 1 or 2 input vectors. If this attempt 2395 /// was successful, the matched scalars are replaced by poison values in \p VL 2396 /// for future analysis. 2397 std::optional<TargetTransformInfo::ShuffleKind> 2398 tryToGatherSingleRegisterExtractElements(MutableArrayRef<Value *> VL, 2399 SmallVectorImpl<int> &Mask) const; 2400 2401 /// Tries to find extractelement instructions with constant indices from fixed 2402 /// vector type and gather such instructions into a bunch, which highly likely 2403 /// might be detected as a shuffle of 1 or 2 input vectors. If this attempt 2404 /// was successful, the matched scalars are replaced by poison values in \p VL 2405 /// for future analysis. 2406 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> 2407 tryToGatherExtractElements(SmallVectorImpl<Value *> &VL, 2408 SmallVectorImpl<int> &Mask, 2409 unsigned NumParts) const; 2410 2411 /// Checks if the gathered \p VL can be represented as a single register 2412 /// shuffle(s) of previous tree entries. 2413 /// \param TE Tree entry checked for permutation. 2414 /// \param VL List of scalars (a subset of the TE scalar), checked for 2415 /// permutations. Must form single-register vector. 2416 /// \returns ShuffleKind, if gathered values can be represented as shuffles of 2417 /// previous tree entries. \p Part of \p Mask is filled with the shuffle mask. 2418 std::optional<TargetTransformInfo::ShuffleKind> 2419 isGatherShuffledSingleRegisterEntry( 2420 const TreeEntry *TE, ArrayRef<Value *> VL, MutableArrayRef<int> Mask, 2421 SmallVectorImpl<const TreeEntry *> &Entries, unsigned Part); 2422 2423 /// Checks if the gathered \p VL can be represented as multi-register 2424 /// shuffle(s) of previous tree entries. 2425 /// \param TE Tree entry checked for permutation. 2426 /// \param VL List of scalars (a subset of the TE scalar), checked for 2427 /// permutations. 2428 /// \returns per-register series of ShuffleKind, if gathered values can be 2429 /// represented as shuffles of previous tree entries. \p Mask is filled with 2430 /// the shuffle mask (also on per-register base). 2431 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> 2432 isGatherShuffledEntry( 2433 const TreeEntry *TE, ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask, 2434 SmallVectorImpl<SmallVector<const TreeEntry *>> &Entries, 2435 unsigned NumParts); 2436 2437 /// \returns the scalarization cost for this list of values. Assuming that 2438 /// this subtree gets vectorized, we may need to extract the values from the 2439 /// roots. This method calculates the cost of extracting the values. 
2440 /// \param ForPoisonSrc true if initial vector is poison, false otherwise. 2441 InstructionCost getGatherCost(ArrayRef<Value *> VL, bool ForPoisonSrc) const; 2442 2443 /// Set the Builder insert point to one after the last instruction in 2444 /// the bundle 2445 void setInsertPointAfterBundle(const TreeEntry *E); 2446 2447 /// \returns a vector from a collection of scalars in \p VL. if \p Root is not 2448 /// specified, the starting vector value is poison. 2449 Value *gather(ArrayRef<Value *> VL, Value *Root); 2450 2451 /// \returns whether the VectorizableTree is fully vectorizable and will 2452 /// be beneficial even the tree height is tiny. 2453 bool isFullyVectorizableTinyTree(bool ForReduction) const; 2454 2455 /// Reorder commutative or alt operands to get better probability of 2456 /// generating vectorized code. 2457 static void reorderInputsAccordingToOpcode( 2458 ArrayRef<Value *> VL, SmallVectorImpl<Value *> &Left, 2459 SmallVectorImpl<Value *> &Right, const TargetLibraryInfo &TLI, 2460 const DataLayout &DL, ScalarEvolution &SE, const BoUpSLP &R); 2461 2462 /// Helper for `findExternalStoreUsersReorderIndices()`. It iterates over the 2463 /// users of \p TE and collects the stores. It returns the map from the store 2464 /// pointers to the collected stores. 2465 DenseMap<Value *, SmallVector<StoreInst *>> 2466 collectUserStores(const BoUpSLP::TreeEntry *TE) const; 2467 2468 /// Helper for `findExternalStoreUsersReorderIndices()`. It checks if the 2469 /// stores in \p StoresVec can form a vector instruction. If so it returns 2470 /// true and populates \p ReorderIndices with the shuffle indices of the 2471 /// stores when compared to the sorted vector. 2472 bool canFormVector(ArrayRef<StoreInst *> StoresVec, 2473 OrdersType &ReorderIndices) const; 2474 2475 /// Iterates through the users of \p TE, looking for scalar stores that can be 2476 /// potentially vectorized in a future SLP-tree. If found, it keeps track of 2477 /// their order and builds an order index vector for each store bundle. It 2478 /// returns all these order vectors found. 2479 /// We run this after the tree has formed, otherwise we may come across user 2480 /// instructions that are not yet in the tree. 2481 SmallVector<OrdersType, 1> 2482 findExternalStoreUsersReorderIndices(TreeEntry *TE) const; 2483 2484 struct TreeEntry { 2485 using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>; 2486 TreeEntry(VecTreeTy &Container) : Container(Container) {} 2487 2488 /// \returns Common mask for reorder indices and reused scalars. 2489 SmallVector<int> getCommonMask() const { 2490 SmallVector<int> Mask; 2491 inversePermutation(ReorderIndices, Mask); 2492 ::addMask(Mask, ReuseShuffleIndices); 2493 return Mask; 2494 } 2495 2496 /// \returns true if the scalars in VL are equal to this entry. 2497 bool isSame(ArrayRef<Value *> VL) const { 2498 auto &&IsSame = [VL](ArrayRef<Value *> Scalars, ArrayRef<int> Mask) { 2499 if (Mask.size() != VL.size() && VL.size() == Scalars.size()) 2500 return std::equal(VL.begin(), VL.end(), Scalars.begin()); 2501 return VL.size() == Mask.size() && 2502 std::equal(VL.begin(), VL.end(), Mask.begin(), 2503 [Scalars](Value *V, int Idx) { 2504 return (isa<UndefValue>(V) && 2505 Idx == PoisonMaskElem) || 2506 (Idx != PoisonMaskElem && V == Scalars[Idx]); 2507 }); 2508 }; 2509 if (!ReorderIndices.empty()) { 2510 // TODO: implement matching if the nodes are just reordered, still can 2511 // treat the vector as the same if the list of scalars matches VL 2512 // directly, without reordering. 
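// The mask built below maps each position in VL to the index of the
// matching scalar in Scalars, as checked by the IsSame lambda above.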
2513 SmallVector<int> Mask; 2514 inversePermutation(ReorderIndices, Mask); 2515 if (VL.size() == Scalars.size()) 2516 return IsSame(Scalars, Mask); 2517 if (VL.size() == ReuseShuffleIndices.size()) { 2518 ::addMask(Mask, ReuseShuffleIndices); 2519 return IsSame(Scalars, Mask); 2520 } 2521 return false; 2522 } 2523 return IsSame(Scalars, ReuseShuffleIndices); 2524 } 2525 2526 bool isOperandGatherNode(const EdgeInfo &UserEI) const { 2527 return State == TreeEntry::NeedToGather && 2528 UserTreeIndices.front().EdgeIdx == UserEI.EdgeIdx && 2529 UserTreeIndices.front().UserTE == UserEI.UserTE; 2530 } 2531 2532 /// \returns true if current entry has same operands as \p TE. 2533 bool hasEqualOperands(const TreeEntry &TE) const { 2534 if (TE.getNumOperands() != getNumOperands()) 2535 return false; 2536 SmallBitVector Used(getNumOperands()); 2537 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) { 2538 unsigned PrevCount = Used.count(); 2539 for (unsigned K = 0; K < E; ++K) { 2540 if (Used.test(K)) 2541 continue; 2542 if (getOperand(K) == TE.getOperand(I)) { 2543 Used.set(K); 2544 break; 2545 } 2546 } 2547 // Check if we actually found the matching operand. 2548 if (PrevCount == Used.count()) 2549 return false; 2550 } 2551 return true; 2552 } 2553 2554 /// \return Final vectorization factor for the node. Defined by the total 2555 /// number of vectorized scalars, including those, used several times in the 2556 /// entry and counted in the \a ReuseShuffleIndices, if any. 2557 unsigned getVectorFactor() const { 2558 if (!ReuseShuffleIndices.empty()) 2559 return ReuseShuffleIndices.size(); 2560 return Scalars.size(); 2561 }; 2562 2563 /// A vector of scalars. 2564 ValueList Scalars; 2565 2566 /// The Scalars are vectorized into this value. It is initialized to Null. 2567 WeakTrackingVH VectorizedValue = nullptr; 2568 2569 /// New vector phi instructions emitted for the vectorized phi nodes. 2570 PHINode *PHI = nullptr; 2571 2572 /// Do we need to gather this sequence or vectorize it 2573 /// (either with vector instruction or with scatter/gather 2574 /// intrinsics for store/load)? 2575 enum EntryState { 2576 Vectorize, 2577 ScatterVectorize, 2578 PossibleStridedVectorize, 2579 NeedToGather 2580 }; 2581 EntryState State; 2582 2583 /// Does this sequence require some shuffling? 2584 SmallVector<int, 4> ReuseShuffleIndices; 2585 2586 /// Does this entry require reordering? 2587 SmallVector<unsigned, 4> ReorderIndices; 2588 2589 /// Points back to the VectorizableTree. 2590 /// 2591 /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has 2592 /// to be a pointer and needs to be able to initialize the child iterator. 2593 /// Thus we need a reference back to the container to translate the indices 2594 /// to entries. 2595 VecTreeTy &Container; 2596 2597 /// The TreeEntry index containing the user of this entry. We can actually 2598 /// have multiple users so the data structure is not truly a tree. 2599 SmallVector<EdgeInfo, 1> UserTreeIndices; 2600 2601 /// The index of this treeEntry in VectorizableTree. 2602 int Idx = -1; 2603 2604 private: 2605 /// The operands of each instruction in each lane Operands[op_index][lane]. 2606 /// Note: This helps avoid the replication of the code that performs the 2607 /// reordering of operands during buildTree_rec() and vectorizeTree(). 2608 SmallVector<ValueList, 2> Operands; 2609 2610 /// The main/alternate instruction. 
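/// For an alternating bundle such as {add, sub, add, sub}, for example,
/// MainOp is one of the adds and AltOp is one of the subs; for a
/// single-opcode bundle the two are the same instruction.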
2611 Instruction *MainOp = nullptr; 2612 Instruction *AltOp = nullptr; 2613 2614 public: 2615 /// Set this bundle's \p OpIdx'th operand to \p OpVL. 2616 void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) { 2617 if (Operands.size() < OpIdx + 1) 2618 Operands.resize(OpIdx + 1); 2619 assert(Operands[OpIdx].empty() && "Already resized?"); 2620 assert(OpVL.size() <= Scalars.size() && 2621 "Number of operands is greater than the number of scalars."); 2622 Operands[OpIdx].resize(OpVL.size()); 2623 copy(OpVL, Operands[OpIdx].begin()); 2624 } 2625 2626 /// Set the operands of this bundle in their original order. 2627 void setOperandsInOrder() { 2628 assert(Operands.empty() && "Already initialized?"); 2629 auto *I0 = cast<Instruction>(Scalars[0]); 2630 Operands.resize(I0->getNumOperands()); 2631 unsigned NumLanes = Scalars.size(); 2632 for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands(); 2633 OpIdx != NumOperands; ++OpIdx) { 2634 Operands[OpIdx].resize(NumLanes); 2635 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) { 2636 auto *I = cast<Instruction>(Scalars[Lane]); 2637 assert(I->getNumOperands() == NumOperands && 2638 "Expected same number of operands"); 2639 Operands[OpIdx][Lane] = I->getOperand(OpIdx); 2640 } 2641 } 2642 } 2643 2644 /// Reorders operands of the node to the given mask \p Mask. 2645 void reorderOperands(ArrayRef<int> Mask) { 2646 for (ValueList &Operand : Operands) 2647 reorderScalars(Operand, Mask); 2648 } 2649 2650 /// \returns the \p OpIdx operand of this TreeEntry. 2651 ValueList &getOperand(unsigned OpIdx) { 2652 assert(OpIdx < Operands.size() && "Off bounds"); 2653 return Operands[OpIdx]; 2654 } 2655 2656 /// \returns the \p OpIdx operand of this TreeEntry. 2657 ArrayRef<Value *> getOperand(unsigned OpIdx) const { 2658 assert(OpIdx < Operands.size() && "Off bounds"); 2659 return Operands[OpIdx]; 2660 } 2661 2662 /// \returns the number of operands. 2663 unsigned getNumOperands() const { return Operands.size(); } 2664 2665 /// \return the single \p OpIdx operand. 2666 Value *getSingleOperand(unsigned OpIdx) const { 2667 assert(OpIdx < Operands.size() && "Off bounds"); 2668 assert(!Operands[OpIdx].empty() && "No operand available"); 2669 return Operands[OpIdx][0]; 2670 } 2671 2672 /// Some of the instructions in the list have alternate opcodes. 2673 bool isAltShuffle() const { return MainOp != AltOp; } 2674 2675 bool isOpcodeOrAlt(Instruction *I) const { 2676 unsigned CheckedOpcode = I->getOpcode(); 2677 return (getOpcode() == CheckedOpcode || 2678 getAltOpcode() == CheckedOpcode); 2679 } 2680 2681 /// Chooses the correct key for scheduling data. If \p Op has the same (or 2682 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is 2683 /// \p OpValue. 2684 Value *isOneOf(Value *Op) const { 2685 auto *I = dyn_cast<Instruction>(Op); 2686 if (I && isOpcodeOrAlt(I)) 2687 return Op; 2688 return MainOp; 2689 } 2690 2691 void setOperations(const InstructionsState &S) { 2692 MainOp = S.MainOp; 2693 AltOp = S.AltOp; 2694 } 2695 2696 Instruction *getMainOp() const { 2697 return MainOp; 2698 } 2699 2700 Instruction *getAltOp() const { 2701 return AltOp; 2702 } 2703 2704 /// The main/alternate opcodes for the list of instructions. 2705 unsigned getOpcode() const { 2706 return MainOp ? MainOp->getOpcode() : 0; 2707 } 2708 2709 unsigned getAltOpcode() const { 2710 return AltOp ? AltOp->getOpcode() : 0; 2711 } 2712 2713 /// When ReuseReorderShuffleIndices is empty it just returns position of \p 2714 /// V within vector of Scalars. 
Otherwise, try to remap on its reuse index. 2715 int findLaneForValue(Value *V) const { 2716 unsigned FoundLane = std::distance(Scalars.begin(), find(Scalars, V)); 2717 assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); 2718 if (!ReorderIndices.empty()) 2719 FoundLane = ReorderIndices[FoundLane]; 2720 assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); 2721 if (!ReuseShuffleIndices.empty()) { 2722 FoundLane = std::distance(ReuseShuffleIndices.begin(), 2723 find(ReuseShuffleIndices, FoundLane)); 2724 } 2725 return FoundLane; 2726 } 2727 2728 /// Build a shuffle mask for graph entry which represents a merge of main 2729 /// and alternate operations. 2730 void 2731 buildAltOpShuffleMask(const function_ref<bool(Instruction *)> IsAltOp, 2732 SmallVectorImpl<int> &Mask, 2733 SmallVectorImpl<Value *> *OpScalars = nullptr, 2734 SmallVectorImpl<Value *> *AltScalars = nullptr) const; 2735 2736 #ifndef NDEBUG 2737 /// Debug printer. 2738 LLVM_DUMP_METHOD void dump() const { 2739 dbgs() << Idx << ".\n"; 2740 for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) { 2741 dbgs() << "Operand " << OpI << ":\n"; 2742 for (const Value *V : Operands[OpI]) 2743 dbgs().indent(2) << *V << "\n"; 2744 } 2745 dbgs() << "Scalars: \n"; 2746 for (Value *V : Scalars) 2747 dbgs().indent(2) << *V << "\n"; 2748 dbgs() << "State: "; 2749 switch (State) { 2750 case Vectorize: 2751 dbgs() << "Vectorize\n"; 2752 break; 2753 case ScatterVectorize: 2754 dbgs() << "ScatterVectorize\n"; 2755 break; 2756 case PossibleStridedVectorize: 2757 dbgs() << "PossibleStridedVectorize\n"; 2758 break; 2759 case NeedToGather: 2760 dbgs() << "NeedToGather\n"; 2761 break; 2762 } 2763 dbgs() << "MainOp: "; 2764 if (MainOp) 2765 dbgs() << *MainOp << "\n"; 2766 else 2767 dbgs() << "NULL\n"; 2768 dbgs() << "AltOp: "; 2769 if (AltOp) 2770 dbgs() << *AltOp << "\n"; 2771 else 2772 dbgs() << "NULL\n"; 2773 dbgs() << "VectorizedValue: "; 2774 if (VectorizedValue) 2775 dbgs() << *VectorizedValue << "\n"; 2776 else 2777 dbgs() << "NULL\n"; 2778 dbgs() << "ReuseShuffleIndices: "; 2779 if (ReuseShuffleIndices.empty()) 2780 dbgs() << "Empty"; 2781 else 2782 for (int ReuseIdx : ReuseShuffleIndices) 2783 dbgs() << ReuseIdx << ", "; 2784 dbgs() << "\n"; 2785 dbgs() << "ReorderIndices: "; 2786 for (unsigned ReorderIdx : ReorderIndices) 2787 dbgs() << ReorderIdx << ", "; 2788 dbgs() << "\n"; 2789 dbgs() << "UserTreeIndices: "; 2790 for (const auto &EInfo : UserTreeIndices) 2791 dbgs() << EInfo << ", "; 2792 dbgs() << "\n"; 2793 } 2794 #endif 2795 }; 2796 2797 #ifndef NDEBUG 2798 void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost, 2799 InstructionCost VecCost, InstructionCost ScalarCost, 2800 StringRef Banner) const { 2801 dbgs() << "SLP: " << Banner << ":\n"; 2802 E->dump(); 2803 dbgs() << "SLP: Costs:\n"; 2804 dbgs() << "SLP: ReuseShuffleCost = " << ReuseShuffleCost << "\n"; 2805 dbgs() << "SLP: VectorCost = " << VecCost << "\n"; 2806 dbgs() << "SLP: ScalarCost = " << ScalarCost << "\n"; 2807 dbgs() << "SLP: ReuseShuffleCost + VecCost - ScalarCost = " 2808 << ReuseShuffleCost + VecCost - ScalarCost << "\n"; 2809 } 2810 #endif 2811 2812 /// Create a new VectorizableTree entry. 2813 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, 2814 std::optional<ScheduleData *> Bundle, 2815 const InstructionsState &S, 2816 const EdgeInfo &UserTreeIdx, 2817 ArrayRef<int> ReuseShuffleIndices = std::nullopt, 2818 ArrayRef<unsigned> ReorderIndices = std::nullopt) { 2819 TreeEntry::EntryState EntryState = 2820 Bundle ? 
TreeEntry::Vectorize : TreeEntry::NeedToGather; 2821 return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx, 2822 ReuseShuffleIndices, ReorderIndices); 2823 } 2824 2825 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, 2826 TreeEntry::EntryState EntryState, 2827 std::optional<ScheduleData *> Bundle, 2828 const InstructionsState &S, 2829 const EdgeInfo &UserTreeIdx, 2830 ArrayRef<int> ReuseShuffleIndices = std::nullopt, 2831 ArrayRef<unsigned> ReorderIndices = std::nullopt) { 2832 assert(((!Bundle && EntryState == TreeEntry::NeedToGather) || 2833 (Bundle && EntryState != TreeEntry::NeedToGather)) && 2834 "Need to vectorize gather entry?"); 2835 VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree)); 2836 TreeEntry *Last = VectorizableTree.back().get(); 2837 Last->Idx = VectorizableTree.size() - 1; 2838 Last->State = EntryState; 2839 Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(), 2840 ReuseShuffleIndices.end()); 2841 if (ReorderIndices.empty()) { 2842 Last->Scalars.assign(VL.begin(), VL.end()); 2843 Last->setOperations(S); 2844 } else { 2845 // Reorder scalars and build final mask. 2846 Last->Scalars.assign(VL.size(), nullptr); 2847 transform(ReorderIndices, Last->Scalars.begin(), 2848 [VL](unsigned Idx) -> Value * { 2849 if (Idx >= VL.size()) 2850 return UndefValue::get(VL.front()->getType()); 2851 return VL[Idx]; 2852 }); 2853 InstructionsState S = getSameOpcode(Last->Scalars, *TLI); 2854 Last->setOperations(S); 2855 Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end()); 2856 } 2857 if (Last->State != TreeEntry::NeedToGather) { 2858 for (Value *V : VL) { 2859 const TreeEntry *TE = getTreeEntry(V); 2860 assert((!TE || TE == Last || doesNotNeedToBeScheduled(V)) && 2861 "Scalar already in tree!"); 2862 if (TE) { 2863 if (TE != Last) 2864 MultiNodeScalars.try_emplace(V).first->getSecond().push_back(Last); 2865 continue; 2866 } 2867 ScalarToTreeEntry[V] = Last; 2868 } 2869 // Update the scheduler bundle to point to this TreeEntry. 2870 ScheduleData *BundleMember = *Bundle; 2871 assert((BundleMember || isa<PHINode>(S.MainOp) || 2872 isVectorLikeInstWithConstOps(S.MainOp) || 2873 doesNotNeedToSchedule(VL)) && 2874 "Bundle and VL out of sync"); 2875 if (BundleMember) { 2876 for (Value *V : VL) { 2877 if (doesNotNeedToBeScheduled(V)) 2878 continue; 2879 if (!BundleMember) 2880 continue; 2881 BundleMember->TE = Last; 2882 BundleMember = BundleMember->NextInBundle; 2883 } 2884 } 2885 assert(!BundleMember && "Bundle and VL out of sync"); 2886 } else { 2887 MustGather.insert(VL.begin(), VL.end()); 2888 // Build a map for gathered scalars to the nodes where they are used. 2889 for (Value *V : VL) 2890 if (!isConstant(V)) 2891 ValueToGatherNodes.try_emplace(V).first->getSecond().insert(Last); 2892 } 2893 2894 if (UserTreeIdx.UserTE) 2895 Last->UserTreeIndices.push_back(UserTreeIdx); 2896 2897 return Last; 2898 } 2899 2900 /// -- Vectorization State -- 2901 /// Holds all of the tree entries. 2902 TreeEntry::VecTreeTy VectorizableTree; 2903 2904 #ifndef NDEBUG 2905 /// Debug printer. 
2906 LLVM_DUMP_METHOD void dumpVectorizableTree() const { 2907 for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) { 2908 VectorizableTree[Id]->dump(); 2909 dbgs() << "\n"; 2910 } 2911 } 2912 #endif 2913 2914 TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); } 2915 2916 const TreeEntry *getTreeEntry(Value *V) const { 2917 return ScalarToTreeEntry.lookup(V); 2918 } 2919 2920 /// Checks if the specified list of the instructions/values can be vectorized 2921 /// and fills required data before actual scheduling of the instructions. 2922 TreeEntry::EntryState getScalarsVectorizationState( 2923 InstructionsState &S, ArrayRef<Value *> VL, bool IsScatterVectorizeUserTE, 2924 OrdersType &CurrentOrder, SmallVectorImpl<Value *> &PointerOps) const; 2925 2926 /// Maps a specific scalar to its tree entry. 2927 SmallDenseMap<Value *, TreeEntry *> ScalarToTreeEntry; 2928 2929 /// List of scalars, used in several vectorize nodes, and the list of the 2930 /// nodes. 2931 SmallDenseMap<Value *, SmallVector<TreeEntry *>> MultiNodeScalars; 2932 2933 /// Maps a value to the proposed vectorizable size. 2934 SmallDenseMap<Value *, unsigned> InstrElementSize; 2935 2936 /// A list of scalars that we found that we need to keep as scalars. 2937 ValueSet MustGather; 2938 2939 /// A map between the vectorized entries and the last instructions in the 2940 /// bundles. The bundles are built in use order, not in the def order of the 2941 /// instructions. So, we cannot rely directly on the last instruction in the 2942 /// bundle being the last instruction in the program order during 2943 /// vectorization process since the basic blocks are affected, need to 2944 /// pre-gather them before. 2945 DenseMap<const TreeEntry *, Instruction *> EntryToLastInstruction; 2946 2947 /// List of gather nodes, depending on other gather/vector nodes, which should 2948 /// be emitted after the vector instruction emission process to correctly 2949 /// handle order of the vector instructions and shuffles. 2950 SetVector<const TreeEntry *> PostponedGathers; 2951 2952 using ValueToGatherNodesMap = 2953 DenseMap<Value *, SmallPtrSet<const TreeEntry *, 4>>; 2954 ValueToGatherNodesMap ValueToGatherNodes; 2955 2956 /// This POD struct describes one external user in the vectorized tree. 2957 struct ExternalUser { 2958 ExternalUser(Value *S, llvm::User *U, int L) 2959 : Scalar(S), User(U), Lane(L) {} 2960 2961 // Which scalar in our function. 2962 Value *Scalar; 2963 2964 // Which user that uses the scalar. 2965 llvm::User *User; 2966 2967 // Which lane does the scalar belong to. 2968 int Lane; 2969 }; 2970 using UserList = SmallVector<ExternalUser, 16>; 2971 2972 /// Checks if two instructions may access the same memory. 2973 /// 2974 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it 2975 /// is invariant in the calling loop. 2976 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1, 2977 Instruction *Inst2) { 2978 if (!Loc1.Ptr || !isSimple(Inst1) || !isSimple(Inst2)) 2979 return true; 2980 // First check if the result is already in the cache. 2981 AliasCacheKey Key = std::make_pair(Inst1, Inst2); 2982 auto It = AliasCache.find(Key); 2983 if (It != AliasCache.end()) 2984 return It->second; 2985 bool Aliased = isModOrRefSet(BatchAA.getModRefInfo(Inst2, Loc1)); 2986 // Store the result in the cache. 
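// The cached answer is reused for both operand orders, so record it under
// (Inst1, Inst2) as well as (Inst2, Inst1).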
2987 AliasCache.try_emplace(Key, Aliased); 2988 AliasCache.try_emplace(std::make_pair(Inst2, Inst1), Aliased); 2989 return Aliased; 2990 } 2991 2992 using AliasCacheKey = std::pair<Instruction *, Instruction *>; 2993 2994 /// Cache for alias results. 2995 /// TODO: consider moving this to the AliasAnalysis itself. 2996 DenseMap<AliasCacheKey, bool> AliasCache; 2997 2998 // Cache for pointerMayBeCaptured calls inside AA. This is preserved 2999 // globally through SLP because we don't perform any action which 3000 // invalidates capture results. 3001 BatchAAResults BatchAA; 3002 3003 /// Temporary store for deleted instructions. Instructions will be deleted 3004 /// eventually when the BoUpSLP is destructed. The deferral is required to 3005 /// ensure that there are no incorrect collisions in the AliasCache, which 3006 /// can happen if a new instruction is allocated at the same address as a 3007 /// previously deleted instruction. 3008 DenseSet<Instruction *> DeletedInstructions; 3009 3010 /// Set of the instruction, being analyzed already for reductions. 3011 SmallPtrSet<Instruction *, 16> AnalyzedReductionsRoots; 3012 3013 /// Set of hashes for the list of reduction values already being analyzed. 3014 DenseSet<size_t> AnalyzedReductionVals; 3015 3016 /// A list of values that need to extracted out of the tree. 3017 /// This list holds pairs of (Internal Scalar : External User). External User 3018 /// can be nullptr, it means that this Internal Scalar will be used later, 3019 /// after vectorization. 3020 UserList ExternalUses; 3021 3022 /// Values used only by @llvm.assume calls. 3023 SmallPtrSet<const Value *, 32> EphValues; 3024 3025 /// Holds all of the instructions that we gathered, shuffle instructions and 3026 /// extractelements. 3027 SetVector<Instruction *> GatherShuffleExtractSeq; 3028 3029 /// A list of blocks that we are going to CSE. 3030 DenseSet<BasicBlock *> CSEBlocks; 3031 3032 /// Contains all scheduling relevant data for an instruction. 3033 /// A ScheduleData either represents a single instruction or a member of an 3034 /// instruction bundle (= a group of instructions which is combined into a 3035 /// vector instruction). 3036 struct ScheduleData { 3037 // The initial value for the dependency counters. It means that the 3038 // dependencies are not calculated yet. 
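// InvalidDeps (-1) therefore doubles as a "not computed yet" marker for both
// Dependencies and UnscheduledDeps.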
3039 enum { InvalidDeps = -1 }; 3040 3041 ScheduleData() = default; 3042 3043 void init(int BlockSchedulingRegionID, Value *OpVal) { 3044 FirstInBundle = this; 3045 NextInBundle = nullptr; 3046 NextLoadStore = nullptr; 3047 IsScheduled = false; 3048 SchedulingRegionID = BlockSchedulingRegionID; 3049 clearDependencies(); 3050 OpValue = OpVal; 3051 TE = nullptr; 3052 } 3053 3054 /// Verify basic self consistency properties 3055 void verify() { 3056 if (hasValidDependencies()) { 3057 assert(UnscheduledDeps <= Dependencies && "invariant"); 3058 } else { 3059 assert(UnscheduledDeps == Dependencies && "invariant"); 3060 } 3061 3062 if (IsScheduled) { 3063 assert(isSchedulingEntity() && 3064 "unexpected scheduled state"); 3065 for (const ScheduleData *BundleMember = this; BundleMember; 3066 BundleMember = BundleMember->NextInBundle) { 3067 assert(BundleMember->hasValidDependencies() && 3068 BundleMember->UnscheduledDeps == 0 && 3069 "unexpected scheduled state"); 3070 assert((BundleMember == this || !BundleMember->IsScheduled) && 3071 "only bundle is marked scheduled"); 3072 } 3073 } 3074 3075 assert(Inst->getParent() == FirstInBundle->Inst->getParent() && 3076 "all bundle members must be in same basic block"); 3077 } 3078 3079 /// Returns true if the dependency information has been calculated. 3080 /// Note that depenendency validity can vary between instructions within 3081 /// a single bundle. 3082 bool hasValidDependencies() const { return Dependencies != InvalidDeps; } 3083 3084 /// Returns true for single instructions and for bundle representatives 3085 /// (= the head of a bundle). 3086 bool isSchedulingEntity() const { return FirstInBundle == this; } 3087 3088 /// Returns true if it represents an instruction bundle and not only a 3089 /// single instruction. 3090 bool isPartOfBundle() const { 3091 return NextInBundle != nullptr || FirstInBundle != this || TE; 3092 } 3093 3094 /// Returns true if it is ready for scheduling, i.e. it has no more 3095 /// unscheduled depending instructions/bundles. 3096 bool isReady() const { 3097 assert(isSchedulingEntity() && 3098 "can't consider non-scheduling entity for ready list"); 3099 return unscheduledDepsInBundle() == 0 && !IsScheduled; 3100 } 3101 3102 /// Modifies the number of unscheduled dependencies for this instruction, 3103 /// and returns the number of remaining dependencies for the containing 3104 /// bundle. 3105 int incrementUnscheduledDeps(int Incr) { 3106 assert(hasValidDependencies() && 3107 "increment of unscheduled deps would be meaningless"); 3108 UnscheduledDeps += Incr; 3109 return FirstInBundle->unscheduledDepsInBundle(); 3110 } 3111 3112 /// Sets the number of unscheduled dependencies to the number of 3113 /// dependencies. 3114 void resetUnscheduledDeps() { 3115 UnscheduledDeps = Dependencies; 3116 } 3117 3118 /// Clears all dependency information. 
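/// Resets the dependency counters back to InvalidDeps and drops the collected
/// memory and control dependency lists.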
3119 void clearDependencies() { 3120 Dependencies = InvalidDeps; 3121 resetUnscheduledDeps(); 3122 MemoryDependencies.clear(); 3123 ControlDependencies.clear(); 3124 } 3125 3126 int unscheduledDepsInBundle() const { 3127 assert(isSchedulingEntity() && "only meaningful on the bundle"); 3128 int Sum = 0; 3129 for (const ScheduleData *BundleMember = this; BundleMember; 3130 BundleMember = BundleMember->NextInBundle) { 3131 if (BundleMember->UnscheduledDeps == InvalidDeps) 3132 return InvalidDeps; 3133 Sum += BundleMember->UnscheduledDeps; 3134 } 3135 return Sum; 3136 } 3137 3138 void dump(raw_ostream &os) const { 3139 if (!isSchedulingEntity()) { 3140 os << "/ " << *Inst; 3141 } else if (NextInBundle) { 3142 os << '[' << *Inst; 3143 ScheduleData *SD = NextInBundle; 3144 while (SD) { 3145 os << ';' << *SD->Inst; 3146 SD = SD->NextInBundle; 3147 } 3148 os << ']'; 3149 } else { 3150 os << *Inst; 3151 } 3152 } 3153 3154 Instruction *Inst = nullptr; 3155 3156 /// Opcode of the current instruction in the schedule data. 3157 Value *OpValue = nullptr; 3158 3159 /// The TreeEntry that this instruction corresponds to. 3160 TreeEntry *TE = nullptr; 3161 3162 /// Points to the head in an instruction bundle (and always to this for 3163 /// single instructions). 3164 ScheduleData *FirstInBundle = nullptr; 3165 3166 /// Single linked list of all instructions in a bundle. Null if it is a 3167 /// single instruction. 3168 ScheduleData *NextInBundle = nullptr; 3169 3170 /// Single linked list of all memory instructions (e.g. load, store, call) 3171 /// in the block - until the end of the scheduling region. 3172 ScheduleData *NextLoadStore = nullptr; 3173 3174 /// The dependent memory instructions. 3175 /// This list is derived on demand in calculateDependencies(). 3176 SmallVector<ScheduleData *, 4> MemoryDependencies; 3177 3178 /// List of instructions which this instruction could be control dependent 3179 /// on. Allowing such nodes to be scheduled below this one could introduce 3180 /// a runtime fault which didn't exist in the original program. 3181 /// ex: this is a load or udiv following a readonly call which inf loops 3182 SmallVector<ScheduleData *, 4> ControlDependencies; 3183 3184 /// This ScheduleData is in the current scheduling region if this matches 3185 /// the current SchedulingRegionID of BlockScheduling. 3186 int SchedulingRegionID = 0; 3187 3188 /// Used for getting a "good" final ordering of instructions. 3189 int SchedulingPriority = 0; 3190 3191 /// The number of dependencies. Constitutes of the number of users of the 3192 /// instruction plus the number of dependent memory instructions (if any). 3193 /// This value is calculated on demand. 3194 /// If InvalidDeps, the number of dependencies is not calculated yet. 3195 int Dependencies = InvalidDeps; 3196 3197 /// The number of dependencies minus the number of dependencies of scheduled 3198 /// instructions. As soon as this is zero, the instruction/bundle gets ready 3199 /// for scheduling. 3200 /// Note that this is negative as long as Dependencies is not calculated. 3201 int UnscheduledDeps = InvalidDeps; 3202 3203 /// True if this instruction is scheduled (or considered as scheduled in the 3204 /// dry-run). 
3205 bool IsScheduled = false; 3206 }; 3207 3208 #ifndef NDEBUG 3209 friend inline raw_ostream &operator<<(raw_ostream &os, 3210 const BoUpSLP::ScheduleData &SD) { 3211 SD.dump(os); 3212 return os; 3213 } 3214 #endif 3215 3216 friend struct GraphTraits<BoUpSLP *>; 3217 friend struct DOTGraphTraits<BoUpSLP *>; 3218 3219 /// Contains all scheduling data for a basic block. 3220 /// It does not schedules instructions, which are not memory read/write 3221 /// instructions and their operands are either constants, or arguments, or 3222 /// phis, or instructions from others blocks, or their users are phis or from 3223 /// the other blocks. The resulting vector instructions can be placed at the 3224 /// beginning of the basic block without scheduling (if operands does not need 3225 /// to be scheduled) or at the end of the block (if users are outside of the 3226 /// block). It allows to save some compile time and memory used by the 3227 /// compiler. 3228 /// ScheduleData is assigned for each instruction in between the boundaries of 3229 /// the tree entry, even for those, which are not part of the graph. It is 3230 /// required to correctly follow the dependencies between the instructions and 3231 /// their correct scheduling. The ScheduleData is not allocated for the 3232 /// instructions, which do not require scheduling, like phis, nodes with 3233 /// extractelements/insertelements only or nodes with instructions, with 3234 /// uses/operands outside of the block. 3235 struct BlockScheduling { 3236 BlockScheduling(BasicBlock *BB) 3237 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {} 3238 3239 void clear() { 3240 ReadyInsts.clear(); 3241 ScheduleStart = nullptr; 3242 ScheduleEnd = nullptr; 3243 FirstLoadStoreInRegion = nullptr; 3244 LastLoadStoreInRegion = nullptr; 3245 RegionHasStackSave = false; 3246 3247 // Reduce the maximum schedule region size by the size of the 3248 // previous scheduling run. 3249 ScheduleRegionSizeLimit -= ScheduleRegionSize; 3250 if (ScheduleRegionSizeLimit < MinScheduleRegionSize) 3251 ScheduleRegionSizeLimit = MinScheduleRegionSize; 3252 ScheduleRegionSize = 0; 3253 3254 // Make a new scheduling region, i.e. all existing ScheduleData is not 3255 // in the new region yet. 3256 ++SchedulingRegionID; 3257 } 3258 3259 ScheduleData *getScheduleData(Instruction *I) { 3260 if (BB != I->getParent()) 3261 // Avoid lookup if can't possibly be in map. 3262 return nullptr; 3263 ScheduleData *SD = ScheduleDataMap.lookup(I); 3264 if (SD && isInSchedulingRegion(SD)) 3265 return SD; 3266 return nullptr; 3267 } 3268 3269 ScheduleData *getScheduleData(Value *V) { 3270 if (auto *I = dyn_cast<Instruction>(V)) 3271 return getScheduleData(I); 3272 return nullptr; 3273 } 3274 3275 ScheduleData *getScheduleData(Value *V, Value *Key) { 3276 if (V == Key) 3277 return getScheduleData(V); 3278 auto I = ExtraScheduleDataMap.find(V); 3279 if (I != ExtraScheduleDataMap.end()) { 3280 ScheduleData *SD = I->second.lookup(Key); 3281 if (SD && isInSchedulingRegion(SD)) 3282 return SD; 3283 } 3284 return nullptr; 3285 } 3286 3287 bool isInSchedulingRegion(ScheduleData *SD) const { 3288 return SD->SchedulingRegionID == SchedulingRegionID; 3289 } 3290 3291 /// Marks an instruction as scheduled and puts all dependent ready 3292 /// instructions into the ready-list. 
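/// Walks every member of the scheduled bundle and decrements the unscheduled
/// dependency counters of its def-use, memory and control dependents; each
/// dependent bundle whose counter reaches zero is inserted into \p ReadyList.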
3293 template <typename ReadyListType> 3294 void schedule(ScheduleData *SD, ReadyListType &ReadyList) { 3295 SD->IsScheduled = true; 3296 LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n"); 3297 3298 for (ScheduleData *BundleMember = SD; BundleMember; 3299 BundleMember = BundleMember->NextInBundle) { 3300 if (BundleMember->Inst != BundleMember->OpValue) 3301 continue; 3302 3303 // Handle the def-use chain dependencies. 3304 3305 // Decrement the unscheduled counter and insert to ready list if ready. 3306 auto &&DecrUnsched = [this, &ReadyList](Instruction *I) { 3307 doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) { 3308 if (OpDef && OpDef->hasValidDependencies() && 3309 OpDef->incrementUnscheduledDeps(-1) == 0) { 3310 // There are no more unscheduled dependencies after 3311 // decrementing, so we can put the dependent instruction 3312 // into the ready list. 3313 ScheduleData *DepBundle = OpDef->FirstInBundle; 3314 assert(!DepBundle->IsScheduled && 3315 "already scheduled bundle gets ready"); 3316 ReadyList.insert(DepBundle); 3317 LLVM_DEBUG(dbgs() 3318 << "SLP: gets ready (def): " << *DepBundle << "\n"); 3319 } 3320 }); 3321 }; 3322 3323 // If BundleMember is a vector bundle, its operands may have been 3324 // reordered during buildTree(). We therefore need to get its operands 3325 // through the TreeEntry. 3326 if (TreeEntry *TE = BundleMember->TE) { 3327 // Need to search for the lane since the tree entry can be reordered. 3328 int Lane = std::distance(TE->Scalars.begin(), 3329 find(TE->Scalars, BundleMember->Inst)); 3330 assert(Lane >= 0 && "Lane not set"); 3331 3332 // Since vectorization tree is being built recursively this assertion 3333 // ensures that the tree entry has all operands set before reaching 3334 // this code. Couple of exceptions known at the moment are extracts 3335 // where their second (immediate) operand is not added. Since 3336 // immediates do not affect scheduler behavior this is considered 3337 // okay. 3338 auto *In = BundleMember->Inst; 3339 assert(In && 3340 (isa<ExtractValueInst, ExtractElementInst>(In) || 3341 In->getNumOperands() == TE->getNumOperands()) && 3342 "Missed TreeEntry operands?"); 3343 (void)In; // fake use to avoid build failure when assertions disabled 3344 3345 for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands(); 3346 OpIdx != NumOperands; ++OpIdx) 3347 if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane])) 3348 DecrUnsched(I); 3349 } else { 3350 // If BundleMember is a stand-alone instruction, no operand reordering 3351 // has taken place, so we directly access its operands. 3352 for (Use &U : BundleMember->Inst->operands()) 3353 if (auto *I = dyn_cast<Instruction>(U.get())) 3354 DecrUnsched(I); 3355 } 3356 // Handle the memory dependencies. 3357 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) { 3358 if (MemoryDepSD->hasValidDependencies() && 3359 MemoryDepSD->incrementUnscheduledDeps(-1) == 0) { 3360 // There are no more unscheduled dependencies after decrementing, 3361 // so we can put the dependent instruction into the ready list. 3362 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle; 3363 assert(!DepBundle->IsScheduled && 3364 "already scheduled bundle gets ready"); 3365 ReadyList.insert(DepBundle); 3366 LLVM_DEBUG(dbgs() 3367 << "SLP: gets ready (mem): " << *DepBundle << "\n"); 3368 } 3369 } 3370 // Handle the control dependencies. 
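// Control dependencies are released the same way as the memory dependencies
// above: once the last unscheduled dependency is gone, the dependent bundle
// becomes ready.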
3371 for (ScheduleData *DepSD : BundleMember->ControlDependencies) { 3372 if (DepSD->incrementUnscheduledDeps(-1) == 0) { 3373 // There are no more unscheduled dependencies after decrementing, 3374 // so we can put the dependent instruction into the ready list. 3375 ScheduleData *DepBundle = DepSD->FirstInBundle; 3376 assert(!DepBundle->IsScheduled && 3377 "already scheduled bundle gets ready"); 3378 ReadyList.insert(DepBundle); 3379 LLVM_DEBUG(dbgs() 3380 << "SLP: gets ready (ctl): " << *DepBundle << "\n"); 3381 } 3382 } 3383 } 3384 } 3385 3386 /// Verify basic self consistency properties of the data structure. 3387 void verify() { 3388 if (!ScheduleStart) 3389 return; 3390 3391 assert(ScheduleStart->getParent() == ScheduleEnd->getParent() && 3392 ScheduleStart->comesBefore(ScheduleEnd) && 3393 "Not a valid scheduling region?"); 3394 3395 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 3396 auto *SD = getScheduleData(I); 3397 if (!SD) 3398 continue; 3399 assert(isInSchedulingRegion(SD) && 3400 "primary schedule data not in window?"); 3401 assert(isInSchedulingRegion(SD->FirstInBundle) && 3402 "entire bundle in window!"); 3403 (void)SD; 3404 doForAllOpcodes(I, [](ScheduleData *SD) { SD->verify(); }); 3405 } 3406 3407 for (auto *SD : ReadyInsts) { 3408 assert(SD->isSchedulingEntity() && SD->isReady() && 3409 "item in ready list not ready?"); 3410 (void)SD; 3411 } 3412 } 3413 3414 void doForAllOpcodes(Value *V, 3415 function_ref<void(ScheduleData *SD)> Action) { 3416 if (ScheduleData *SD = getScheduleData(V)) 3417 Action(SD); 3418 auto I = ExtraScheduleDataMap.find(V); 3419 if (I != ExtraScheduleDataMap.end()) 3420 for (auto &P : I->second) 3421 if (isInSchedulingRegion(P.second)) 3422 Action(P.second); 3423 } 3424 3425 /// Put all instructions into the ReadyList which are ready for scheduling. 3426 template <typename ReadyListType> 3427 void initialFillReadyList(ReadyListType &ReadyList) { 3428 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 3429 doForAllOpcodes(I, [&](ScheduleData *SD) { 3430 if (SD->isSchedulingEntity() && SD->hasValidDependencies() && 3431 SD->isReady()) { 3432 ReadyList.insert(SD); 3433 LLVM_DEBUG(dbgs() 3434 << "SLP: initially in ready list: " << *SD << "\n"); 3435 } 3436 }); 3437 } 3438 } 3439 3440 /// Build a bundle from the ScheduleData nodes corresponding to the 3441 /// scalar instruction for each lane. 3442 ScheduleData *buildBundle(ArrayRef<Value *> VL); 3443 3444 /// Checks if a bundle of instructions can be scheduled, i.e. has no 3445 /// cyclic dependencies. This is only a dry-run, no instructions are 3446 /// actually moved at this stage. 3447 /// \returns the scheduling bundle. The returned Optional value is not 3448 /// std::nullopt if \p VL is allowed to be scheduled. 3449 std::optional<ScheduleData *> 3450 tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 3451 const InstructionsState &S); 3452 3453 /// Un-bundles a group of instructions. 3454 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue); 3455 3456 /// Allocates schedule data chunk. 3457 ScheduleData *allocateScheduleDataChunks(); 3458 3459 /// Extends the scheduling region so that V is inside the region. 3460 /// \returns true if the region size is within the limit. 3461 bool extendSchedulingRegion(Value *V, const InstructionsState &S); 3462 3463 /// Initialize the ScheduleData structures for new instructions in the 3464 /// scheduling region. 
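/// \p PrevLoadStore and \p NextLoadStore describe the memory-accessing
/// instructions adjacent to the newly covered range, so that the region's
/// load/store chain can stay linked (see ScheduleData::NextLoadStore).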
3465 void initScheduleData(Instruction *FromI, Instruction *ToI, 3466 ScheduleData *PrevLoadStore, 3467 ScheduleData *NextLoadStore); 3468 3469 /// Updates the dependency information of a bundle and of all instructions/ 3470 /// bundles which depend on the original bundle. 3471 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList, 3472 BoUpSLP *SLP); 3473 3474 /// Sets all instruction in the scheduling region to un-scheduled. 3475 void resetSchedule(); 3476 3477 BasicBlock *BB; 3478 3479 /// Simple memory allocation for ScheduleData. 3480 SmallVector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks; 3481 3482 /// The size of a ScheduleData array in ScheduleDataChunks. 3483 int ChunkSize; 3484 3485 /// The allocator position in the current chunk, which is the last entry 3486 /// of ScheduleDataChunks. 3487 int ChunkPos; 3488 3489 /// Attaches ScheduleData to Instruction. 3490 /// Note that the mapping survives during all vectorization iterations, i.e. 3491 /// ScheduleData structures are recycled. 3492 DenseMap<Instruction *, ScheduleData *> ScheduleDataMap; 3493 3494 /// Attaches ScheduleData to Instruction with the leading key. 3495 DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>> 3496 ExtraScheduleDataMap; 3497 3498 /// The ready-list for scheduling (only used for the dry-run). 3499 SetVector<ScheduleData *> ReadyInsts; 3500 3501 /// The first instruction of the scheduling region. 3502 Instruction *ScheduleStart = nullptr; 3503 3504 /// The first instruction _after_ the scheduling region. 3505 Instruction *ScheduleEnd = nullptr; 3506 3507 /// The first memory accessing instruction in the scheduling region 3508 /// (can be null). 3509 ScheduleData *FirstLoadStoreInRegion = nullptr; 3510 3511 /// The last memory accessing instruction in the scheduling region 3512 /// (can be null). 3513 ScheduleData *LastLoadStoreInRegion = nullptr; 3514 3515 /// Is there an llvm.stacksave or llvm.stackrestore in the scheduling 3516 /// region? Used to optimize the dependence calculation for the 3517 /// common case where there isn't. 3518 bool RegionHasStackSave = false; 3519 3520 /// The current size of the scheduling region. 3521 int ScheduleRegionSize = 0; 3522 3523 /// The maximum size allowed for the scheduling region. 3524 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget; 3525 3526 /// The ID of the scheduling region. For a new vectorization iteration this 3527 /// is incremented which "removes" all ScheduleData from the region. 3528 /// Make sure that the initial SchedulingRegionID is greater than the 3529 /// initial SchedulingRegionID in ScheduleData (which is 0). 3530 int SchedulingRegionID = 1; 3531 }; 3532 3533 /// Attaches the BlockScheduling structures to basic blocks. 3534 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules; 3535 3536 /// Performs the "real" scheduling. Done before vectorization is actually 3537 /// performed in a basic block. 3538 void scheduleBlock(BlockScheduling *BS); 3539 3540 /// List of users to ignore during scheduling and that don't need extracting. 3541 const SmallDenseSet<Value *> *UserIgnoreList = nullptr; 3542 3543 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of 3544 /// sorted SmallVectors of unsigned. 
3545 struct OrdersTypeDenseMapInfo { 3546 static OrdersType getEmptyKey() { 3547 OrdersType V; 3548 V.push_back(~1U); 3549 return V; 3550 } 3551 3552 static OrdersType getTombstoneKey() { 3553 OrdersType V; 3554 V.push_back(~2U); 3555 return V; 3556 } 3557 3558 static unsigned getHashValue(const OrdersType &V) { 3559 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end())); 3560 } 3561 3562 static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) { 3563 return LHS == RHS; 3564 } 3565 }; 3566 3567 // Analysis and block reference. 3568 Function *F; 3569 ScalarEvolution *SE; 3570 TargetTransformInfo *TTI; 3571 TargetLibraryInfo *TLI; 3572 LoopInfo *LI; 3573 DominatorTree *DT; 3574 AssumptionCache *AC; 3575 DemandedBits *DB; 3576 const DataLayout *DL; 3577 OptimizationRemarkEmitter *ORE; 3578 3579 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt. 3580 unsigned MinVecRegSize; // Set by cl::opt (default: 128). 3581 3582 /// Instruction builder to construct the vectorized tree. 3583 IRBuilder<> Builder; 3584 3585 /// A map of scalar integer values to the smallest bit width with which they 3586 /// can legally be represented. The values map to (width, signed) pairs, 3587 /// where "width" indicates the minimum bit width and "signed" is True if the 3588 /// value must be signed-extended, rather than zero-extended, back to its 3589 /// original width. 3590 DenseMap<const TreeEntry *, std::pair<uint64_t, bool>> MinBWs; 3591 }; 3592 3593 } // end namespace slpvectorizer 3594 3595 template <> struct GraphTraits<BoUpSLP *> { 3596 using TreeEntry = BoUpSLP::TreeEntry; 3597 3598 /// NodeRef has to be a pointer per the GraphWriter. 3599 using NodeRef = TreeEntry *; 3600 3601 using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy; 3602 3603 /// Add the VectorizableTree to the index iterator to be able to return 3604 /// TreeEntry pointers. 3605 struct ChildIteratorType 3606 : public iterator_adaptor_base< 3607 ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> { 3608 ContainerTy &VectorizableTree; 3609 3610 ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W, 3611 ContainerTy &VT) 3612 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {} 3613 3614 NodeRef operator*() { return I->UserTE; } 3615 }; 3616 3617 static NodeRef getEntryNode(BoUpSLP &R) { 3618 return R.VectorizableTree[0].get(); 3619 } 3620 3621 static ChildIteratorType child_begin(NodeRef N) { 3622 return {N->UserTreeIndices.begin(), N->Container}; 3623 } 3624 3625 static ChildIteratorType child_end(NodeRef N) { 3626 return {N->UserTreeIndices.end(), N->Container}; 3627 } 3628 3629 /// For the node iterator we just need to turn the TreeEntry iterator into a 3630 /// TreeEntry* iterator so that it dereferences to NodeRef. 
3631 class nodes_iterator { 3632 using ItTy = ContainerTy::iterator; 3633 ItTy It; 3634 3635 public: 3636 nodes_iterator(const ItTy &It2) : It(It2) {} 3637 NodeRef operator*() { return It->get(); } 3638 nodes_iterator operator++() { 3639 ++It; 3640 return *this; 3641 } 3642 bool operator!=(const nodes_iterator &N2) const { return N2.It != It; } 3643 }; 3644 3645 static nodes_iterator nodes_begin(BoUpSLP *R) { 3646 return nodes_iterator(R->VectorizableTree.begin()); 3647 } 3648 3649 static nodes_iterator nodes_end(BoUpSLP *R) { 3650 return nodes_iterator(R->VectorizableTree.end()); 3651 } 3652 3653 static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); } 3654 }; 3655 3656 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits { 3657 using TreeEntry = BoUpSLP::TreeEntry; 3658 3659 DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {} 3660 3661 std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) { 3662 std::string Str; 3663 raw_string_ostream OS(Str); 3664 OS << Entry->Idx << ".\n"; 3665 if (isSplat(Entry->Scalars)) 3666 OS << "<splat> "; 3667 for (auto *V : Entry->Scalars) { 3668 OS << *V; 3669 if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) { 3670 return EU.Scalar == V; 3671 })) 3672 OS << " <extract>"; 3673 OS << "\n"; 3674 } 3675 return Str; 3676 } 3677 3678 static std::string getNodeAttributes(const TreeEntry *Entry, 3679 const BoUpSLP *) { 3680 if (Entry->State == TreeEntry::NeedToGather) 3681 return "color=red"; 3682 if (Entry->State == TreeEntry::ScatterVectorize || 3683 Entry->State == TreeEntry::PossibleStridedVectorize) 3684 return "color=blue"; 3685 return ""; 3686 } 3687 }; 3688 3689 } // end namespace llvm 3690 3691 BoUpSLP::~BoUpSLP() { 3692 SmallVector<WeakTrackingVH> DeadInsts; 3693 for (auto *I : DeletedInstructions) { 3694 for (Use &U : I->operands()) { 3695 auto *Op = dyn_cast<Instruction>(U.get()); 3696 if (Op && !DeletedInstructions.count(Op) && Op->hasOneUser() && 3697 wouldInstructionBeTriviallyDead(Op, TLI)) 3698 DeadInsts.emplace_back(Op); 3699 } 3700 I->dropAllReferences(); 3701 } 3702 for (auto *I : DeletedInstructions) { 3703 assert(I->use_empty() && 3704 "trying to erase instruction with users."); 3705 I->eraseFromParent(); 3706 } 3707 3708 // Cleanup any dead scalar code feeding the vectorized instructions 3709 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI); 3710 3711 #ifdef EXPENSIVE_CHECKS 3712 // If we could guarantee that this call is not extremely slow, we could 3713 // remove the ifdef limitation (see PR47712). 3714 assert(!verifyFunction(*F, &dbgs())); 3715 #endif 3716 } 3717 3718 /// Reorders the given \p Reuses mask according to the given \p Mask. \p Reuses 3719 /// contains original mask for the scalars reused in the node. Procedure 3720 /// transform this mask in accordance with the given \p Mask. 3721 static void reorderReuses(SmallVectorImpl<int> &Reuses, ArrayRef<int> Mask) { 3722 assert(!Mask.empty() && Reuses.size() == Mask.size() && 3723 "Expected non-empty mask."); 3724 SmallVector<int> Prev(Reuses.begin(), Reuses.end()); 3725 Prev.swap(Reuses); 3726 for (unsigned I = 0, E = Prev.size(); I < E; ++I) 3727 if (Mask[I] != PoisonMaskElem) 3728 Reuses[Mask[I]] = Prev[I]; 3729 } 3730 3731 /// Reorders the given \p Order according to the given \p Mask. \p Order - is 3732 /// the original order of the scalars. Procedure transforms the provided order 3733 /// in accordance with the given \p Mask. 
If the resulting \p Order is just an 3734 /// identity order, \p Order is cleared. 3735 static void reorderOrder(SmallVectorImpl<unsigned> &Order, ArrayRef<int> Mask) { 3736 assert(!Mask.empty() && "Expected non-empty mask."); 3737 SmallVector<int> MaskOrder; 3738 if (Order.empty()) { 3739 MaskOrder.resize(Mask.size()); 3740 std::iota(MaskOrder.begin(), MaskOrder.end(), 0); 3741 } else { 3742 inversePermutation(Order, MaskOrder); 3743 } 3744 reorderReuses(MaskOrder, Mask); 3745 if (ShuffleVectorInst::isIdentityMask(MaskOrder, MaskOrder.size())) { 3746 Order.clear(); 3747 return; 3748 } 3749 Order.assign(Mask.size(), Mask.size()); 3750 for (unsigned I = 0, E = Mask.size(); I < E; ++I) 3751 if (MaskOrder[I] != PoisonMaskElem) 3752 Order[MaskOrder[I]] = I; 3753 fixupOrderingIndices(Order); 3754 } 3755 3756 std::optional<BoUpSLP::OrdersType> 3757 BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) { 3758 assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only."); 3759 unsigned NumScalars = TE.Scalars.size(); 3760 OrdersType CurrentOrder(NumScalars, NumScalars); 3761 SmallVector<int> Positions; 3762 SmallBitVector UsedPositions(NumScalars); 3763 const TreeEntry *STE = nullptr; 3764 // Try to find all gathered scalars that are gets vectorized in other 3765 // vectorize node. Here we can have only one single tree vector node to 3766 // correctly identify order of the gathered scalars. 3767 for (unsigned I = 0; I < NumScalars; ++I) { 3768 Value *V = TE.Scalars[I]; 3769 if (!isa<LoadInst, ExtractElementInst, ExtractValueInst>(V)) 3770 continue; 3771 if (const auto *LocalSTE = getTreeEntry(V)) { 3772 if (!STE) 3773 STE = LocalSTE; 3774 else if (STE != LocalSTE) 3775 // Take the order only from the single vector node. 3776 return std::nullopt; 3777 unsigned Lane = 3778 std::distance(STE->Scalars.begin(), find(STE->Scalars, V)); 3779 if (Lane >= NumScalars) 3780 return std::nullopt; 3781 if (CurrentOrder[Lane] != NumScalars) { 3782 if (Lane != I) 3783 continue; 3784 UsedPositions.reset(CurrentOrder[Lane]); 3785 } 3786 // The partial identity (where only some elements of the gather node are 3787 // in the identity order) is good. 3788 CurrentOrder[Lane] = I; 3789 UsedPositions.set(I); 3790 } 3791 } 3792 // Need to keep the order if we have a vector entry and at least 2 scalars or 3793 // the vectorized entry has just 2 scalars. 3794 if (STE && (UsedPositions.count() > 1 || STE->Scalars.size() == 2)) { 3795 auto &&IsIdentityOrder = [NumScalars](ArrayRef<unsigned> CurrentOrder) { 3796 for (unsigned I = 0; I < NumScalars; ++I) 3797 if (CurrentOrder[I] != I && CurrentOrder[I] != NumScalars) 3798 return false; 3799 return true; 3800 }; 3801 if (IsIdentityOrder(CurrentOrder)) 3802 return OrdersType(); 3803 auto *It = CurrentOrder.begin(); 3804 for (unsigned I = 0; I < NumScalars;) { 3805 if (UsedPositions.test(I)) { 3806 ++I; 3807 continue; 3808 } 3809 if (*It == NumScalars) { 3810 *It = I; 3811 ++I; 3812 } 3813 ++It; 3814 } 3815 return std::move(CurrentOrder); 3816 } 3817 return std::nullopt; 3818 } 3819 3820 namespace { 3821 /// Tracks the state we can represent the loads in the given sequence. 
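/// Gather means the loads have to be collected element by element; Vectorize
/// means they are consecutive and can form a single wide load;
/// ScatterVectorize means a masked gather is legal and worthwhile for them;
/// PossibleStridedVectorize additionally notes that the pointers look strided.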
3822 enum class LoadsState { 3823 Gather, 3824 Vectorize, 3825 ScatterVectorize, 3826 PossibleStridedVectorize 3827 }; 3828 } // anonymous namespace 3829 3830 static bool arePointersCompatible(Value *Ptr1, Value *Ptr2, 3831 const TargetLibraryInfo &TLI, 3832 bool CompareOpcodes = true) { 3833 if (getUnderlyingObject(Ptr1) != getUnderlyingObject(Ptr2)) 3834 return false; 3835 auto *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1); 3836 if (!GEP1) 3837 return false; 3838 auto *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2); 3839 if (!GEP2) 3840 return false; 3841 return GEP1->getNumOperands() == 2 && GEP2->getNumOperands() == 2 && 3842 ((isConstant(GEP1->getOperand(1)) && 3843 isConstant(GEP2->getOperand(1))) || 3844 !CompareOpcodes || 3845 getSameOpcode({GEP1->getOperand(1), GEP2->getOperand(1)}, TLI) 3846 .getOpcode()); 3847 } 3848 3849 /// Checks if the given array of loads can be represented as a vectorized, 3850 /// scatter or just simple gather. 3851 static LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0, 3852 const TargetTransformInfo &TTI, 3853 const DataLayout &DL, ScalarEvolution &SE, 3854 LoopInfo &LI, const TargetLibraryInfo &TLI, 3855 SmallVectorImpl<unsigned> &Order, 3856 SmallVectorImpl<Value *> &PointerOps) { 3857 // Check that a vectorized load would load the same memory as a scalar 3858 // load. For example, we don't want to vectorize loads that are smaller 3859 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 3860 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 3861 // from such a struct, we read/write packed bits disagreeing with the 3862 // unvectorized version. 3863 Type *ScalarTy = VL0->getType(); 3864 3865 if (DL.getTypeSizeInBits(ScalarTy) != DL.getTypeAllocSizeInBits(ScalarTy)) 3866 return LoadsState::Gather; 3867 3868 // Make sure all loads in the bundle are simple - we can't vectorize 3869 // atomic or volatile loads. 3870 PointerOps.clear(); 3871 PointerOps.resize(VL.size()); 3872 auto *POIter = PointerOps.begin(); 3873 for (Value *V : VL) { 3874 auto *L = cast<LoadInst>(V); 3875 if (!L->isSimple()) 3876 return LoadsState::Gather; 3877 *POIter = L->getPointerOperand(); 3878 ++POIter; 3879 } 3880 3881 Order.clear(); 3882 // Check the order of pointer operands or that all pointers are the same. 3883 bool IsSorted = sortPtrAccesses(PointerOps, ScalarTy, DL, SE, Order); 3884 if (IsSorted || all_of(PointerOps, [&](Value *P) { 3885 return arePointersCompatible(P, PointerOps.front(), TLI); 3886 })) { 3887 bool IsPossibleStrided = false; 3888 if (IsSorted) { 3889 Value *Ptr0; 3890 Value *PtrN; 3891 if (Order.empty()) { 3892 Ptr0 = PointerOps.front(); 3893 PtrN = PointerOps.back(); 3894 } else { 3895 Ptr0 = PointerOps[Order.front()]; 3896 PtrN = PointerOps[Order.back()]; 3897 } 3898 std::optional<int> Diff = 3899 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, DL, SE); 3900 // Check that the sorted loads are consecutive. 3901 if (static_cast<unsigned>(*Diff) == VL.size() - 1) 3902 return LoadsState::Vectorize; 3903 // Simple check if not a strided access - clear order. 3904 IsPossibleStrided = *Diff % (VL.size() - 1) == 0; 3905 } 3906 // TODO: need to improve analysis of the pointers, if not all of them are 3907 // GEPs or have > 2 operands, we end up with a gather node, which just 3908 // increases the cost. 
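// Heuristic used below: gathering the pointer operands themselves is only
// treated as profitable when the bundle has more than two loads and at most
// half of the pointers are loop-invariant.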
3909 Loop *L = LI.getLoopFor(cast<LoadInst>(VL0)->getParent()); 3910 bool ProfitableGatherPointers = 3911 static_cast<unsigned>(count_if(PointerOps, [L](Value *V) { 3912 return L && L->isLoopInvariant(V); 3913 })) <= VL.size() / 2 && VL.size() > 2; 3914 if (ProfitableGatherPointers || all_of(PointerOps, [IsSorted](Value *P) { 3915 auto *GEP = dyn_cast<GetElementPtrInst>(P); 3916 return (IsSorted && !GEP && doesNotNeedToBeScheduled(P)) || 3917 (GEP && GEP->getNumOperands() == 2); 3918 })) { 3919 Align CommonAlignment = cast<LoadInst>(VL0)->getAlign(); 3920 for (Value *V : VL) 3921 CommonAlignment = 3922 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign()); 3923 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 3924 if (TTI.isLegalMaskedGather(VecTy, CommonAlignment) && 3925 !TTI.forceScalarizeMaskedGather(VecTy, CommonAlignment)) 3926 return IsPossibleStrided ? LoadsState::PossibleStridedVectorize 3927 : LoadsState::ScatterVectorize; 3928 } 3929 } 3930 3931 return LoadsState::Gather; 3932 } 3933 3934 static bool clusterSortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy, 3935 const DataLayout &DL, ScalarEvolution &SE, 3936 SmallVectorImpl<unsigned> &SortedIndices) { 3937 assert(llvm::all_of( 3938 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) && 3939 "Expected list of pointer operands."); 3940 // Map from bases to a vector of (Ptr, Offset, OrigIdx), which we insert each 3941 // Ptr into, sort and return the sorted indices with values next to one 3942 // another. 3943 MapVector<Value *, SmallVector<std::tuple<Value *, int, unsigned>>> Bases; 3944 Bases[VL[0]].push_back(std::make_tuple(VL[0], 0U, 0U)); 3945 3946 unsigned Cnt = 1; 3947 for (Value *Ptr : VL.drop_front()) { 3948 bool Found = any_of(Bases, [&](auto &Base) { 3949 std::optional<int> Diff = 3950 getPointersDiff(ElemTy, Base.first, ElemTy, Ptr, DL, SE, 3951 /*StrictCheck=*/true); 3952 if (!Diff) 3953 return false; 3954 3955 Base.second.emplace_back(Ptr, *Diff, Cnt++); 3956 return true; 3957 }); 3958 3959 if (!Found) { 3960 // If we haven't found enough to usefully cluster, return early. 3961 if (Bases.size() > VL.size() / 2 - 1) 3962 return false; 3963 3964 // Not found already - add a new Base 3965 Bases[Ptr].emplace_back(Ptr, 0, Cnt++); 3966 } 3967 } 3968 3969 // For each of the bases sort the pointers by Offset and check if any of the 3970 // base become consecutively allocated. 3971 bool AnyConsecutive = false; 3972 for (auto &Base : Bases) { 3973 auto &Vec = Base.second; 3974 if (Vec.size() > 1) { 3975 llvm::stable_sort(Vec, [](const std::tuple<Value *, int, unsigned> &X, 3976 const std::tuple<Value *, int, unsigned> &Y) { 3977 return std::get<1>(X) < std::get<1>(Y); 3978 }); 3979 int InitialOffset = std::get<1>(Vec[0]); 3980 AnyConsecutive |= all_of(enumerate(Vec), [InitialOffset](const auto &P) { 3981 return std::get<1>(P.value()) == int(P.index()) + InitialOffset; 3982 }); 3983 } 3984 } 3985 3986 // Fill SortedIndices array only if it looks worth-while to sort the ptrs. 
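// Illustrative example (hypothetical pointers P and Q, offsets in elements):
// for {P, P+2, Q, P+1, Q+1} the bases become P -> [(P,0,0), (P+1,1,3),
// (P+2,2,1)] and Q -> [(Q,0,2), (Q+1,1,4)] once sorted by offset, so the
// resulting SortedIndices are {0, 3, 1, 2, 4}.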
3987 SortedIndices.clear(); 3988 if (!AnyConsecutive) 3989 return false; 3990 3991 for (auto &Base : Bases) { 3992 for (auto &T : Base.second) 3993 SortedIndices.push_back(std::get<2>(T)); 3994 } 3995 3996 assert(SortedIndices.size() == VL.size() && 3997 "Expected SortedIndices to be the size of VL"); 3998 return true; 3999 } 4000 4001 std::optional<BoUpSLP::OrdersType> 4002 BoUpSLP::findPartiallyOrderedLoads(const BoUpSLP::TreeEntry &TE) { 4003 assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only."); 4004 Type *ScalarTy = TE.Scalars[0]->getType(); 4005 4006 SmallVector<Value *> Ptrs; 4007 Ptrs.reserve(TE.Scalars.size()); 4008 for (Value *V : TE.Scalars) { 4009 auto *L = dyn_cast<LoadInst>(V); 4010 if (!L || !L->isSimple()) 4011 return std::nullopt; 4012 Ptrs.push_back(L->getPointerOperand()); 4013 } 4014 4015 BoUpSLP::OrdersType Order; 4016 if (clusterSortPtrAccesses(Ptrs, ScalarTy, *DL, *SE, Order)) 4017 return std::move(Order); 4018 return std::nullopt; 4019 } 4020 4021 /// Check if two insertelement instructions are from the same buildvector. 4022 static bool areTwoInsertFromSameBuildVector( 4023 InsertElementInst *VU, InsertElementInst *V, 4024 function_ref<Value *(InsertElementInst *)> GetBaseOperand) { 4025 // Instructions must be from the same basic blocks. 4026 if (VU->getParent() != V->getParent()) 4027 return false; 4028 // Checks if 2 insertelements are from the same buildvector. 4029 if (VU->getType() != V->getType()) 4030 return false; 4031 // Multiple used inserts are separate nodes. 4032 if (!VU->hasOneUse() && !V->hasOneUse()) 4033 return false; 4034 auto *IE1 = VU; 4035 auto *IE2 = V; 4036 std::optional<unsigned> Idx1 = getInsertIndex(IE1); 4037 std::optional<unsigned> Idx2 = getInsertIndex(IE2); 4038 if (Idx1 == std::nullopt || Idx2 == std::nullopt) 4039 return false; 4040 // Go through the vector operand of insertelement instructions trying to find 4041 // either VU as the original vector for IE2 or V as the original vector for 4042 // IE1. 4043 SmallBitVector ReusedIdx( 4044 cast<VectorType>(VU->getType())->getElementCount().getKnownMinValue()); 4045 bool IsReusedIdx = false; 4046 do { 4047 if (IE2 == VU && !IE1) 4048 return VU->hasOneUse(); 4049 if (IE1 == V && !IE2) 4050 return V->hasOneUse(); 4051 if (IE1 && IE1 != V) { 4052 unsigned Idx1 = getInsertIndex(IE1).value_or(*Idx2); 4053 IsReusedIdx |= ReusedIdx.test(Idx1); 4054 ReusedIdx.set(Idx1); 4055 if ((IE1 != VU && !IE1->hasOneUse()) || IsReusedIdx) 4056 IE1 = nullptr; 4057 else 4058 IE1 = dyn_cast_or_null<InsertElementInst>(GetBaseOperand(IE1)); 4059 } 4060 if (IE2 && IE2 != VU) { 4061 unsigned Idx2 = getInsertIndex(IE2).value_or(*Idx1); 4062 IsReusedIdx |= ReusedIdx.test(Idx2); 4063 ReusedIdx.set(Idx2); 4064 if ((IE2 != V && !IE2->hasOneUse()) || IsReusedIdx) 4065 IE2 = nullptr; 4066 else 4067 IE2 = dyn_cast_or_null<InsertElementInst>(GetBaseOperand(IE2)); 4068 } 4069 } while (!IsReusedIdx && (IE1 || IE2)); 4070 return false; 4071 } 4072 4073 std::optional<BoUpSLP::OrdersType> 4074 BoUpSLP::getReorderingData(const TreeEntry &TE, bool TopToBottom) { 4075 // No need to reorder if need to shuffle reuses, still need to shuffle the 4076 // node. 4077 if (!TE.ReuseShuffleIndices.empty()) { 4078 // Check if reuse shuffle indices can be improved by reordering. 4079 // For this, check that reuse mask is "clustered", i.e. each scalar values 4080 // is used once in each submask of size <number_of_scalars>. 4081 // Example: 4 scalar values. 
4082 // ReuseShuffleIndices mask: 0, 1, 2, 3, 3, 2, 0, 1 - clustered. 4083 // 0, 1, 2, 3, 3, 3, 1, 0 - not clustered, because 4084 // element 3 is used twice in the second submask. 4085 unsigned Sz = TE.Scalars.size(); 4086 if (!ShuffleVectorInst::isOneUseSingleSourceMask(TE.ReuseShuffleIndices, 4087 Sz)) 4088 return std::nullopt; 4089 unsigned VF = TE.getVectorFactor(); 4090 // Try build correct order for extractelement instructions. 4091 SmallVector<int> ReusedMask(TE.ReuseShuffleIndices.begin(), 4092 TE.ReuseShuffleIndices.end()); 4093 if (TE.getOpcode() == Instruction::ExtractElement && !TE.isAltShuffle() && 4094 all_of(TE.Scalars, [Sz](Value *V) { 4095 std::optional<unsigned> Idx = getExtractIndex(cast<Instruction>(V)); 4096 return Idx && *Idx < Sz; 4097 })) { 4098 SmallVector<int> ReorderMask(Sz, PoisonMaskElem); 4099 if (TE.ReorderIndices.empty()) 4100 std::iota(ReorderMask.begin(), ReorderMask.end(), 0); 4101 else 4102 inversePermutation(TE.ReorderIndices, ReorderMask); 4103 for (unsigned I = 0; I < VF; ++I) { 4104 int &Idx = ReusedMask[I]; 4105 if (Idx == PoisonMaskElem) 4106 continue; 4107 Value *V = TE.Scalars[ReorderMask[Idx]]; 4108 std::optional<unsigned> EI = getExtractIndex(cast<Instruction>(V)); 4109 Idx = std::distance(ReorderMask.begin(), find(ReorderMask, *EI)); 4110 } 4111 } 4112 // Build the order of the VF size, need to reorder reuses shuffles, they are 4113 // always of VF size. 4114 OrdersType ResOrder(VF); 4115 std::iota(ResOrder.begin(), ResOrder.end(), 0); 4116 auto *It = ResOrder.begin(); 4117 for (unsigned K = 0; K < VF; K += Sz) { 4118 OrdersType CurrentOrder(TE.ReorderIndices); 4119 SmallVector<int> SubMask{ArrayRef(ReusedMask).slice(K, Sz)}; 4120 if (SubMask.front() == PoisonMaskElem) 4121 std::iota(SubMask.begin(), SubMask.end(), 0); 4122 reorderOrder(CurrentOrder, SubMask); 4123 transform(CurrentOrder, It, [K](unsigned Pos) { return Pos + K; }); 4124 std::advance(It, Sz); 4125 } 4126 if (all_of(enumerate(ResOrder), 4127 [](const auto &Data) { return Data.index() == Data.value(); })) 4128 return std::nullopt; // No need to reorder. 
4129 return std::move(ResOrder); 4130 } 4131 if ((TE.State == TreeEntry::Vectorize || 4132 TE.State == TreeEntry::PossibleStridedVectorize) && 4133 (isa<LoadInst, ExtractElementInst, ExtractValueInst>(TE.getMainOp()) || 4134 (TopToBottom && isa<StoreInst, InsertElementInst>(TE.getMainOp()))) && 4135 !TE.isAltShuffle()) 4136 return TE.ReorderIndices; 4137 if (TE.State == TreeEntry::Vectorize && TE.getOpcode() == Instruction::PHI) { 4138 auto PHICompare = [&](unsigned I1, unsigned I2) { 4139 Value *V1 = TE.Scalars[I1]; 4140 Value *V2 = TE.Scalars[I2]; 4141 if (V1 == V2) 4142 return false; 4143 if (!V1->hasOneUse() || !V2->hasOneUse()) 4144 return false; 4145 auto *FirstUserOfPhi1 = cast<Instruction>(*V1->user_begin()); 4146 auto *FirstUserOfPhi2 = cast<Instruction>(*V2->user_begin()); 4147 if (auto *IE1 = dyn_cast<InsertElementInst>(FirstUserOfPhi1)) 4148 if (auto *IE2 = dyn_cast<InsertElementInst>(FirstUserOfPhi2)) { 4149 if (!areTwoInsertFromSameBuildVector( 4150 IE1, IE2, 4151 [](InsertElementInst *II) { return II->getOperand(0); })) 4152 return false; 4153 std::optional<unsigned> Idx1 = getInsertIndex(IE1); 4154 std::optional<unsigned> Idx2 = getInsertIndex(IE2); 4155 if (Idx1 == std::nullopt || Idx2 == std::nullopt) 4156 return false; 4157 return *Idx1 < *Idx2; 4158 } 4159 if (auto *EE1 = dyn_cast<ExtractElementInst>(FirstUserOfPhi1)) 4160 if (auto *EE2 = dyn_cast<ExtractElementInst>(FirstUserOfPhi2)) { 4161 if (EE1->getOperand(0) != EE2->getOperand(0)) 4162 return false; 4163 std::optional<unsigned> Idx1 = getExtractIndex(EE1); 4164 std::optional<unsigned> Idx2 = getExtractIndex(EE2); 4165 if (Idx1 == std::nullopt || Idx2 == std::nullopt) 4166 return false; 4167 return *Idx1 < *Idx2; 4168 } 4169 return false; 4170 }; 4171 auto IsIdentityOrder = [](const OrdersType &Order) { 4172 for (unsigned Idx : seq<unsigned>(0, Order.size())) 4173 if (Idx != Order[Idx]) 4174 return false; 4175 return true; 4176 }; 4177 if (!TE.ReorderIndices.empty()) 4178 return TE.ReorderIndices; 4179 DenseMap<unsigned, unsigned> PhiToId; 4180 SmallVector<unsigned> Phis(TE.Scalars.size()); 4181 std::iota(Phis.begin(), Phis.end(), 0); 4182 OrdersType ResOrder(TE.Scalars.size()); 4183 for (unsigned Id = 0, Sz = TE.Scalars.size(); Id < Sz; ++Id) 4184 PhiToId[Id] = Id; 4185 stable_sort(Phis, PHICompare); 4186 for (unsigned Id = 0, Sz = Phis.size(); Id < Sz; ++Id) 4187 ResOrder[Id] = PhiToId[Phis[Id]]; 4188 if (IsIdentityOrder(ResOrder)) 4189 return std::nullopt; // No need to reorder. 4190 return std::move(ResOrder); 4191 } 4192 if (TE.State == TreeEntry::NeedToGather) { 4193 // TODO: add analysis of other gather nodes with extractelement 4194 // instructions and other values/instructions, not only undefs. 4195 if (((TE.getOpcode() == Instruction::ExtractElement && 4196 !TE.isAltShuffle()) || 4197 (all_of(TE.Scalars, 4198 [](Value *V) { 4199 return isa<UndefValue, ExtractElementInst>(V); 4200 }) && 4201 any_of(TE.Scalars, 4202 [](Value *V) { return isa<ExtractElementInst>(V); }))) && 4203 all_of(TE.Scalars, 4204 [](Value *V) { 4205 auto *EE = dyn_cast<ExtractElementInst>(V); 4206 return !EE || isa<FixedVectorType>(EE->getVectorOperandType()); 4207 }) && 4208 allSameType(TE.Scalars)) { 4209 // Check that gather of extractelements can be represented as 4210 // just a shuffle of a single vector. 
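// E.g. (illustrative IR): gathering {extractelement %v, 1; extractelement
// %v, 0} from one source vector is just %v shuffled with mask <1, 0>.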
4211 OrdersType CurrentOrder; 4212 bool Reuse = canReuseExtract(TE.Scalars, TE.getMainOp(), CurrentOrder, 4213 /*ResizeAllowed=*/true); 4214 if (Reuse || !CurrentOrder.empty()) { 4215 if (!CurrentOrder.empty()) 4216 fixupOrderingIndices(CurrentOrder); 4217 return std::move(CurrentOrder); 4218 } 4219 } 4220 // If the gather node is <undef, v, .., poison> and 4221 // insertelement poison, v, 0 [+ permute] 4222 // is cheaper than 4223 // insertelement poison, v, n - try to reorder. 4224 // If rotating the whole graph, exclude the permute cost, the whole graph 4225 // might be transformed. 4226 int Sz = TE.Scalars.size(); 4227 if (isSplat(TE.Scalars) && !allConstant(TE.Scalars) && 4228 count_if(TE.Scalars, UndefValue::classof) == Sz - 1) { 4229 const auto *It = 4230 find_if(TE.Scalars, [](Value *V) { return !isConstant(V); }); 4231 if (It == TE.Scalars.begin()) 4232 return OrdersType(); 4233 auto *Ty = FixedVectorType::get(TE.Scalars.front()->getType(), Sz); 4234 if (It != TE.Scalars.end()) { 4235 OrdersType Order(Sz, Sz); 4236 unsigned Idx = std::distance(TE.Scalars.begin(), It); 4237 Order[Idx] = 0; 4238 fixupOrderingIndices(Order); 4239 SmallVector<int> Mask; 4240 inversePermutation(Order, Mask); 4241 InstructionCost PermuteCost = 4242 TopToBottom 4243 ? 0 4244 : TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty, Mask); 4245 InstructionCost InsertFirstCost = TTI->getVectorInstrCost( 4246 Instruction::InsertElement, Ty, TTI::TCK_RecipThroughput, 0, 4247 PoisonValue::get(Ty), *It); 4248 InstructionCost InsertIdxCost = TTI->getVectorInstrCost( 4249 Instruction::InsertElement, Ty, TTI::TCK_RecipThroughput, Idx, 4250 PoisonValue::get(Ty), *It); 4251 if (InsertFirstCost + PermuteCost < InsertIdxCost) 4252 return std::move(Order); 4253 } 4254 } 4255 if (std::optional<OrdersType> CurrentOrder = findReusedOrderedScalars(TE)) 4256 return CurrentOrder; 4257 if (TE.Scalars.size() >= 4) 4258 if (std::optional<OrdersType> Order = findPartiallyOrderedLoads(TE)) 4259 return Order; 4260 } 4261 return std::nullopt; 4262 } 4263 4264 /// Checks if the given mask is a "clustered" mask with the same clusters of 4265 /// size \p Sz, which are not identity submasks. 4266 static bool isRepeatedNonIdentityClusteredMask(ArrayRef<int> Mask, 4267 unsigned Sz) { 4268 ArrayRef<int> FirstCluster = Mask.slice(0, Sz); 4269 if (ShuffleVectorInst::isIdentityMask(FirstCluster, Sz)) 4270 return false; 4271 for (unsigned I = Sz, E = Mask.size(); I < E; I += Sz) { 4272 ArrayRef<int> Cluster = Mask.slice(I, Sz); 4273 if (Cluster != FirstCluster) 4274 return false; 4275 } 4276 return true; 4277 } 4278 4279 void BoUpSLP::reorderNodeWithReuses(TreeEntry &TE, ArrayRef<int> Mask) const { 4280 // Reorder reuses mask. 4281 reorderReuses(TE.ReuseShuffleIndices, Mask); 4282 const unsigned Sz = TE.Scalars.size(); 4283 // For vectorized and non-clustered reused no need to do anything else. 4284 if (TE.State != TreeEntry::NeedToGather || 4285 !ShuffleVectorInst::isOneUseSingleSourceMask(TE.ReuseShuffleIndices, 4286 Sz) || 4287 !isRepeatedNonIdentityClusteredMask(TE.ReuseShuffleIndices, Sz)) 4288 return; 4289 SmallVector<int> NewMask; 4290 inversePermutation(TE.ReorderIndices, NewMask); 4291 addMask(NewMask, TE.ReuseShuffleIndices); 4292 // Clear reorder since it is going to be applied to the new mask. 4293 TE.ReorderIndices.clear(); 4294 // Try to improve gathered nodes with clustered reuses, if possible. 
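// All clusters of the combined mask are equal (checked above), so its first
// cluster alone defines the new scalar order; after the reorder each reuse
// submask becomes an identity submask.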
4295 ArrayRef<int> Slice = ArrayRef(NewMask).slice(0, Sz); 4296 SmallVector<unsigned> NewOrder(Slice.begin(), Slice.end()); 4297 inversePermutation(NewOrder, NewMask); 4298 reorderScalars(TE.Scalars, NewMask); 4299 // Fill the reuses mask with the identity submasks. 4300 for (auto *It = TE.ReuseShuffleIndices.begin(), 4301 *End = TE.ReuseShuffleIndices.end(); 4302 It != End; std::advance(It, Sz)) 4303 std::iota(It, std::next(It, Sz), 0); 4304 } 4305 4306 void BoUpSLP::reorderTopToBottom() { 4307 // Maps VF to the graph nodes. 4308 DenseMap<unsigned, SetVector<TreeEntry *>> VFToOrderedEntries; 4309 // ExtractElement gather nodes which can be vectorized and need to handle 4310 // their ordering. 4311 DenseMap<const TreeEntry *, OrdersType> GathersToOrders; 4312 4313 // Phi nodes can have preferred ordering based on their result users 4314 DenseMap<const TreeEntry *, OrdersType> PhisToOrders; 4315 4316 // AltShuffles can also have a preferred ordering that leads to fewer 4317 // instructions, e.g., the addsub instruction in x86. 4318 DenseMap<const TreeEntry *, OrdersType> AltShufflesToOrders; 4319 4320 // Maps a TreeEntry to the reorder indices of external users. 4321 DenseMap<const TreeEntry *, SmallVector<OrdersType, 1>> 4322 ExternalUserReorderMap; 4323 // FIXME: Workaround for syntax error reported by MSVC buildbots. 4324 TargetTransformInfo &TTIRef = *TTI; 4325 // Find all reorderable nodes with the given VF. 4326 // Currently the are vectorized stores,loads,extracts + some gathering of 4327 // extracts. 4328 for_each(VectorizableTree, [this, &TTIRef, &VFToOrderedEntries, 4329 &GathersToOrders, &ExternalUserReorderMap, 4330 &AltShufflesToOrders, &PhisToOrders]( 4331 const std::unique_ptr<TreeEntry> &TE) { 4332 // Look for external users that will probably be vectorized. 4333 SmallVector<OrdersType, 1> ExternalUserReorderIndices = 4334 findExternalStoreUsersReorderIndices(TE.get()); 4335 if (!ExternalUserReorderIndices.empty()) { 4336 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get()); 4337 ExternalUserReorderMap.try_emplace(TE.get(), 4338 std::move(ExternalUserReorderIndices)); 4339 } 4340 4341 // Patterns like [fadd,fsub] can be combined into a single instruction in 4342 // x86. Reordering them into [fsub,fadd] blocks this pattern. So we need 4343 // to take into account their order when looking for the most used order. 4344 if (TE->isAltShuffle()) { 4345 VectorType *VecTy = 4346 FixedVectorType::get(TE->Scalars[0]->getType(), TE->Scalars.size()); 4347 unsigned Opcode0 = TE->getOpcode(); 4348 unsigned Opcode1 = TE->getAltOpcode(); 4349 // The opcode mask selects between the two opcodes. 4350 SmallBitVector OpcodeMask(TE->Scalars.size(), false); 4351 for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size())) 4352 if (cast<Instruction>(TE->Scalars[Lane])->getOpcode() == Opcode1) 4353 OpcodeMask.set(Lane); 4354 // If this pattern is supported by the target then we consider the order. 4355 if (TTIRef.isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask)) { 4356 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get()); 4357 AltShufflesToOrders.try_emplace(TE.get(), OrdersType()); 4358 } 4359 // TODO: Check the reverse order too. 4360 } 4361 4362 if (std::optional<OrdersType> CurrentOrder = 4363 getReorderingData(*TE, /*TopToBottom=*/true)) { 4364 // Do not include ordering for nodes used in the alt opcode vectorization, 4365 // better to reorder them during bottom-to-top stage. 
If we follow the order 4366 // here, it causes reordering of the whole graph, though actually it is 4367 // profitable just to reorder the subgraph that starts from the alternate 4368 // opcode vectorization node. Such nodes already end up with the shuffle 4369 // instruction, and it is enough to change just this shuffle rather than 4370 // rotate the scalars for the whole graph. 4371 unsigned Cnt = 0; 4372 const TreeEntry *UserTE = TE.get(); 4373 while (UserTE && Cnt < RecursionMaxDepth) { 4374 if (UserTE->UserTreeIndices.size() != 1) 4375 break; 4376 if (all_of(UserTE->UserTreeIndices, [](const EdgeInfo &EI) { 4377 return EI.UserTE->State == TreeEntry::Vectorize && 4378 EI.UserTE->isAltShuffle() && EI.UserTE->Idx != 0; 4379 })) 4380 return; 4381 UserTE = UserTE->UserTreeIndices.back().UserTE; 4382 ++Cnt; 4383 } 4384 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get()); 4385 if (!(TE->State == TreeEntry::Vectorize || 4386 TE->State == TreeEntry::PossibleStridedVectorize) || 4387 !TE->ReuseShuffleIndices.empty()) 4388 GathersToOrders.try_emplace(TE.get(), *CurrentOrder); 4389 if (TE->State == TreeEntry::Vectorize && 4390 TE->getOpcode() == Instruction::PHI) 4391 PhisToOrders.try_emplace(TE.get(), *CurrentOrder); 4392 } 4393 }); 4394 4395 // Reorder the graph nodes according to their vectorization factor. 4396 for (unsigned VF = VectorizableTree.front()->getVectorFactor(); VF > 1; 4397 VF /= 2) { 4398 auto It = VFToOrderedEntries.find(VF); 4399 if (It == VFToOrderedEntries.end()) 4400 continue; 4401 // Try to find the most profitable order. We are just looking for the most 4402 // used order and reorder the scalar elements in the nodes according to this 4403 // most used order. 4404 ArrayRef<TreeEntry *> OrderedEntries = It->second.getArrayRef(); 4405 // All operands are reordered and used only in this node - propagate the 4406 // most used order to the user node. 4407 MapVector<OrdersType, unsigned, 4408 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>> 4409 OrdersUses; 4410 // Last-chance orders - scatter vectorize. Try to use their orders if there are 4411 // no other orders or if the order is counted already. 4412 SmallVector<OrdersType> StridedVectorizeOrders; 4413 SmallPtrSet<const TreeEntry *, 4> VisitedOps; 4414 for (const TreeEntry *OpTE : OrderedEntries) { 4415 // No need to reorder these nodes; we still need to extend them and use a shuffle, 4416 // just merge the reordering shuffle and the reuse shuffle. 4417 if (!OpTE->ReuseShuffleIndices.empty() && !GathersToOrders.count(OpTE)) 4418 continue; 4419 // Count the number of order uses. 4420 const auto &Order = [OpTE, &GathersToOrders, &AltShufflesToOrders, 4421 &PhisToOrders]() -> const OrdersType & { 4422 if (OpTE->State == TreeEntry::NeedToGather || 4423 !OpTE->ReuseShuffleIndices.empty()) { 4424 auto It = GathersToOrders.find(OpTE); 4425 if (It != GathersToOrders.end()) 4426 return It->second; 4427 } 4428 if (OpTE->isAltShuffle()) { 4429 auto It = AltShufflesToOrders.find(OpTE); 4430 if (It != AltShufflesToOrders.end()) 4431 return It->second; 4432 } 4433 if (OpTE->State == TreeEntry::Vectorize && 4434 OpTE->getOpcode() == Instruction::PHI) { 4435 auto It = PhisToOrders.find(OpTE); 4436 if (It != PhisToOrders.end()) 4437 return It->second; 4438 } 4439 return OpTE->ReorderIndices; 4440 }(); 4441 // First consider the order of the external scalar users.
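// Illustrative (assumed counts): every order requested by an external store
// user adds one vote to OrdersUses; a node whose vector factor differs from
// its number of scalars instead votes for the natural (empty) order, and on a
// tie the empty order is preferred when the best order is chosen below.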
4442 auto It = ExternalUserReorderMap.find(OpTE); 4443 if (It != ExternalUserReorderMap.end()) { 4444 const auto &ExternalUserReorderIndices = It->second; 4445 // If the OpTE vector factor != number of scalars - use natural order, 4446 // it is an attempt to reorder node with reused scalars but with 4447 // external uses. 4448 if (OpTE->getVectorFactor() != OpTE->Scalars.size()) { 4449 OrdersUses.insert(std::make_pair(OrdersType(), 0)).first->second += 4450 ExternalUserReorderIndices.size(); 4451 } else { 4452 for (const OrdersType &ExtOrder : ExternalUserReorderIndices) 4453 ++OrdersUses.insert(std::make_pair(ExtOrder, 0)).first->second; 4454 } 4455 // No other useful reorder data in this entry. 4456 if (Order.empty()) 4457 continue; 4458 } 4459 // Postpone scatter orders. 4460 if (OpTE->State == TreeEntry::PossibleStridedVectorize) { 4461 StridedVectorizeOrders.push_back(Order); 4462 continue; 4463 } 4464 // Stores actually store the mask, not the order, need to invert. 4465 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() && 4466 OpTE->getOpcode() == Instruction::Store && !Order.empty()) { 4467 SmallVector<int> Mask; 4468 inversePermutation(Order, Mask); 4469 unsigned E = Order.size(); 4470 OrdersType CurrentOrder(E, E); 4471 transform(Mask, CurrentOrder.begin(), [E](int Idx) { 4472 return Idx == PoisonMaskElem ? E : static_cast<unsigned>(Idx); 4473 }); 4474 fixupOrderingIndices(CurrentOrder); 4475 ++OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second; 4476 } else { 4477 ++OrdersUses.insert(std::make_pair(Order, 0)).first->second; 4478 } 4479 } 4480 // Set order of the user node. 4481 if (OrdersUses.empty()) { 4482 if (StridedVectorizeOrders.empty()) 4483 continue; 4484 // Add (potentially!) strided vectorize orders. 4485 for (OrdersType &Order : StridedVectorizeOrders) 4486 ++OrdersUses.insert(std::make_pair(Order, 0)).first->second; 4487 } else { 4488 // Account (potentially!) strided vectorize orders only if it was used 4489 // already. 4490 for (OrdersType &Order : StridedVectorizeOrders) { 4491 auto *It = OrdersUses.find(Order); 4492 if (It != OrdersUses.end()) 4493 ++It->second; 4494 } 4495 } 4496 // Choose the most used order. 4497 ArrayRef<unsigned> BestOrder = OrdersUses.front().first; 4498 unsigned Cnt = OrdersUses.front().second; 4499 for (const auto &Pair : drop_begin(OrdersUses)) { 4500 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) { 4501 BestOrder = Pair.first; 4502 Cnt = Pair.second; 4503 } 4504 } 4505 // Set order of the user node. 4506 if (BestOrder.empty()) 4507 continue; 4508 SmallVector<int> Mask; 4509 inversePermutation(BestOrder, Mask); 4510 SmallVector<int> MaskOrder(BestOrder.size(), PoisonMaskElem); 4511 unsigned E = BestOrder.size(); 4512 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) { 4513 return I < E ? static_cast<int>(I) : PoisonMaskElem; 4514 }); 4515 // Do an actual reordering, if profitable. 4516 for (std::unique_ptr<TreeEntry> &TE : VectorizableTree) { 4517 // Just do the reordering for the nodes with the given VF. 4518 if (TE->Scalars.size() != VF) { 4519 if (TE->ReuseShuffleIndices.size() == VF) { 4520 // Need to reorder the reuses masks of the operands with smaller VF to 4521 // be able to find the match between the graph nodes and scalar 4522 // operands of the given node during vectorization/cost estimation. 
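// Sketch of the case handled here (assumed shapes): a node with 4 scalars
// reused into a vector factor of 8 keeps an 8-wide ReuseShuffleIndices;
// applying Mask to that reuse mask keeps it consistent with the reordered
// user node of the current VF.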
4523 assert(all_of(TE->UserTreeIndices, 4524 [VF, &TE](const EdgeInfo &EI) { 4525 return EI.UserTE->Scalars.size() == VF || 4526 EI.UserTE->Scalars.size() == 4527 TE->Scalars.size(); 4528 }) && 4529 "All users must be of VF size."); 4530 // Update ordering of the operands with the smaller VF than the given 4531 // one. 4532 reorderNodeWithReuses(*TE, Mask); 4533 } 4534 continue; 4535 } 4536 if ((TE->State == TreeEntry::Vectorize || 4537 TE->State == TreeEntry::PossibleStridedVectorize) && 4538 isa<ExtractElementInst, ExtractValueInst, LoadInst, StoreInst, 4539 InsertElementInst>(TE->getMainOp()) && 4540 !TE->isAltShuffle()) { 4541 // Build correct orders for extract{element,value}, loads and 4542 // stores. 4543 reorderOrder(TE->ReorderIndices, Mask); 4544 if (isa<InsertElementInst, StoreInst>(TE->getMainOp())) 4545 TE->reorderOperands(Mask); 4546 } else { 4547 // Reorder the node and its operands. 4548 TE->reorderOperands(Mask); 4549 assert(TE->ReorderIndices.empty() && 4550 "Expected empty reorder sequence."); 4551 reorderScalars(TE->Scalars, Mask); 4552 } 4553 if (!TE->ReuseShuffleIndices.empty()) { 4554 // Apply reversed order to keep the original ordering of the reused 4555 // elements to avoid extra reorder indices shuffling. 4556 OrdersType CurrentOrder; 4557 reorderOrder(CurrentOrder, MaskOrder); 4558 SmallVector<int> NewReuses; 4559 inversePermutation(CurrentOrder, NewReuses); 4560 addMask(NewReuses, TE->ReuseShuffleIndices); 4561 TE->ReuseShuffleIndices.swap(NewReuses); 4562 } 4563 } 4564 } 4565 } 4566 4567 bool BoUpSLP::canReorderOperands( 4568 TreeEntry *UserTE, SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges, 4569 ArrayRef<TreeEntry *> ReorderableGathers, 4570 SmallVectorImpl<TreeEntry *> &GatherOps) { 4571 for (unsigned I = 0, E = UserTE->getNumOperands(); I < E; ++I) { 4572 if (any_of(Edges, [I](const std::pair<unsigned, TreeEntry *> &OpData) { 4573 return OpData.first == I && 4574 OpData.second->State == TreeEntry::Vectorize; 4575 })) 4576 continue; 4577 if (TreeEntry *TE = getVectorizedOperand(UserTE, I)) { 4578 // FIXME: Do not reorder (possible!) strided vectorized nodes, they 4579 // require reordering of the operands, which is not implemented yet. 4580 if (TE->State == TreeEntry::PossibleStridedVectorize) 4581 return false; 4582 // Do not reorder if operand node is used by many user nodes. 4583 if (any_of(TE->UserTreeIndices, 4584 [UserTE](const EdgeInfo &EI) { return EI.UserTE != UserTE; })) 4585 return false; 4586 // Add the node to the list of the ordered nodes with the identity 4587 // order. 4588 Edges.emplace_back(I, TE); 4589 // Add ScatterVectorize nodes to the list of operands, where just 4590 // reordering of the scalars is required. Similar to the gathers, so 4591 // simply add to the list of gathered ops. 4592 // If there are reused scalars, process this node as a regular vectorize 4593 // node, just reorder reuses mask. 
4594 if (TE->State != TreeEntry::Vectorize && 4595 TE->ReuseShuffleIndices.empty() && TE->ReorderIndices.empty()) 4596 GatherOps.push_back(TE); 4597 continue; 4598 } 4599 TreeEntry *Gather = nullptr; 4600 if (count_if(ReorderableGathers, 4601 [&Gather, UserTE, I](TreeEntry *TE) { 4602 assert(TE->State != TreeEntry::Vectorize && 4603 "Only non-vectorized nodes are expected."); 4604 if (any_of(TE->UserTreeIndices, 4605 [UserTE, I](const EdgeInfo &EI) { 4606 return EI.UserTE == UserTE && EI.EdgeIdx == I; 4607 })) { 4608 assert(TE->isSame(UserTE->getOperand(I)) && 4609 "Operand entry does not match operands."); 4610 Gather = TE; 4611 return true; 4612 } 4613 return false; 4614 }) > 1 && 4615 !allConstant(UserTE->getOperand(I))) 4616 return false; 4617 if (Gather) 4618 GatherOps.push_back(Gather); 4619 } 4620 return true; 4621 } 4622 4623 void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) { 4624 SetVector<TreeEntry *> OrderedEntries; 4625 DenseMap<const TreeEntry *, OrdersType> GathersToOrders; 4626 // Find all reorderable leaf nodes with the given VF. 4627 // Currently the are vectorized loads,extracts without alternate operands + 4628 // some gathering of extracts. 4629 SmallVector<TreeEntry *> NonVectorized; 4630 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) { 4631 if (TE->State != TreeEntry::Vectorize && 4632 TE->State != TreeEntry::PossibleStridedVectorize) 4633 NonVectorized.push_back(TE.get()); 4634 if (std::optional<OrdersType> CurrentOrder = 4635 getReorderingData(*TE, /*TopToBottom=*/false)) { 4636 OrderedEntries.insert(TE.get()); 4637 if (!(TE->State == TreeEntry::Vectorize || 4638 TE->State == TreeEntry::PossibleStridedVectorize) || 4639 !TE->ReuseShuffleIndices.empty()) 4640 GathersToOrders.try_emplace(TE.get(), *CurrentOrder); 4641 } 4642 } 4643 4644 // 1. Propagate order to the graph nodes, which use only reordered nodes. 4645 // I.e., if the node has operands, that are reordered, try to make at least 4646 // one operand order in the natural order and reorder others + reorder the 4647 // user node itself. 4648 SmallPtrSet<const TreeEntry *, 4> Visited; 4649 while (!OrderedEntries.empty()) { 4650 // 1. Filter out only reordered nodes. 4651 // 2. If the entry has multiple uses - skip it and jump to the next node. 4652 DenseMap<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>> Users; 4653 SmallVector<TreeEntry *> Filtered; 4654 for (TreeEntry *TE : OrderedEntries) { 4655 if (!(TE->State == TreeEntry::Vectorize || 4656 TE->State == TreeEntry::PossibleStridedVectorize || 4657 (TE->State == TreeEntry::NeedToGather && 4658 GathersToOrders.count(TE))) || 4659 TE->UserTreeIndices.empty() || !TE->ReuseShuffleIndices.empty() || 4660 !all_of(drop_begin(TE->UserTreeIndices), 4661 [TE](const EdgeInfo &EI) { 4662 return EI.UserTE == TE->UserTreeIndices.front().UserTE; 4663 }) || 4664 !Visited.insert(TE).second) { 4665 Filtered.push_back(TE); 4666 continue; 4667 } 4668 // Build a map between user nodes and their operands order to speedup 4669 // search. The graph currently does not provide this dependency directly. 4670 for (EdgeInfo &EI : TE->UserTreeIndices) { 4671 TreeEntry *UserTE = EI.UserTE; 4672 auto It = Users.find(UserTE); 4673 if (It == Users.end()) 4674 It = Users.insert({UserTE, {}}).first; 4675 It->second.emplace_back(EI.EdgeIdx, TE); 4676 } 4677 } 4678 // Erase filtered entries. 
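// (Entries filtered out here were either already visited or are not
// candidates for this round; user nodes that adopt an order below are
// re-inserted into OrderedEntries so their own users can be processed on a
// later iteration.)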
4679 for (TreeEntry *TE : Filtered) 4680 OrderedEntries.remove(TE); 4681 SmallVector< 4682 std::pair<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>>> 4683 UsersVec(Users.begin(), Users.end()); 4684 sort(UsersVec, [](const auto &Data1, const auto &Data2) { 4685 return Data1.first->Idx > Data2.first->Idx; 4686 }); 4687 for (auto &Data : UsersVec) { 4688 // Check that operands are used only in the User node. 4689 SmallVector<TreeEntry *> GatherOps; 4690 if (!canReorderOperands(Data.first, Data.second, NonVectorized, 4691 GatherOps)) { 4692 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) 4693 OrderedEntries.remove(Op.second); 4694 continue; 4695 } 4696 // All operands are reordered and used only in this node - propagate the 4697 // most used order to the user node. 4698 MapVector<OrdersType, unsigned, 4699 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>> 4700 OrdersUses; 4701 // Last chance orders - scatter vectorize. Try to use their orders if no 4702 // other orders or the order is counted already. 4703 SmallVector<std::pair<OrdersType, unsigned>> StridedVectorizeOrders; 4704 // Do the analysis for each tree entry only once, otherwise the order of 4705 // the same node my be considered several times, though might be not 4706 // profitable. 4707 SmallPtrSet<const TreeEntry *, 4> VisitedOps; 4708 SmallPtrSet<const TreeEntry *, 4> VisitedUsers; 4709 for (const auto &Op : Data.second) { 4710 TreeEntry *OpTE = Op.second; 4711 if (!VisitedOps.insert(OpTE).second) 4712 continue; 4713 if (!OpTE->ReuseShuffleIndices.empty() && !GathersToOrders.count(OpTE)) 4714 continue; 4715 const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & { 4716 if (OpTE->State == TreeEntry::NeedToGather || 4717 !OpTE->ReuseShuffleIndices.empty()) 4718 return GathersToOrders.find(OpTE)->second; 4719 return OpTE->ReorderIndices; 4720 }(); 4721 unsigned NumOps = count_if( 4722 Data.second, [OpTE](const std::pair<unsigned, TreeEntry *> &P) { 4723 return P.second == OpTE; 4724 }); 4725 // Postpone scatter orders. 4726 if (OpTE->State == TreeEntry::PossibleStridedVectorize) { 4727 StridedVectorizeOrders.emplace_back(Order, NumOps); 4728 continue; 4729 } 4730 // Stores actually store the mask, not the order, need to invert. 4731 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() && 4732 OpTE->getOpcode() == Instruction::Store && !Order.empty()) { 4733 SmallVector<int> Mask; 4734 inversePermutation(Order, Mask); 4735 unsigned E = Order.size(); 4736 OrdersType CurrentOrder(E, E); 4737 transform(Mask, CurrentOrder.begin(), [E](int Idx) { 4738 return Idx == PoisonMaskElem ? 
E : static_cast<unsigned>(Idx); 4739 }); 4740 fixupOrderingIndices(CurrentOrder); 4741 OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second += 4742 NumOps; 4743 } else { 4744 OrdersUses.insert(std::make_pair(Order, 0)).first->second += NumOps; 4745 } 4746 auto Res = OrdersUses.insert(std::make_pair(OrdersType(), 0)); 4747 const auto &&AllowsReordering = [IgnoreReorder, &GathersToOrders]( 4748 const TreeEntry *TE) { 4749 if (!TE->ReorderIndices.empty() || !TE->ReuseShuffleIndices.empty() || 4750 (TE->State == TreeEntry::Vectorize && TE->isAltShuffle()) || 4751 (IgnoreReorder && TE->Idx == 0)) 4752 return true; 4753 if (TE->State == TreeEntry::NeedToGather) { 4754 auto It = GathersToOrders.find(TE); 4755 if (It != GathersToOrders.end()) 4756 return !It->second.empty(); 4757 return true; 4758 } 4759 return false; 4760 }; 4761 for (const EdgeInfo &EI : OpTE->UserTreeIndices) { 4762 TreeEntry *UserTE = EI.UserTE; 4763 if (!VisitedUsers.insert(UserTE).second) 4764 continue; 4765 // May reorder user node if it requires reordering, has reused 4766 // scalars, is an alternate op vectorize node or its op nodes require 4767 // reordering. 4768 if (AllowsReordering(UserTE)) 4769 continue; 4770 // Check if users allow reordering. 4771 // Currently look up just 1 level of operands to avoid increase of 4772 // the compile time. 4773 // Profitable to reorder if definitely more operands allow 4774 // reordering rather than those with natural order. 4775 ArrayRef<std::pair<unsigned, TreeEntry *>> Ops = Users[UserTE]; 4776 if (static_cast<unsigned>(count_if( 4777 Ops, [UserTE, &AllowsReordering]( 4778 const std::pair<unsigned, TreeEntry *> &Op) { 4779 return AllowsReordering(Op.second) && 4780 all_of(Op.second->UserTreeIndices, 4781 [UserTE](const EdgeInfo &EI) { 4782 return EI.UserTE == UserTE; 4783 }); 4784 })) <= Ops.size() / 2) 4785 ++Res.first->second; 4786 } 4787 } 4788 // If no orders - skip current nodes and jump to the next one, if any. 4789 if (OrdersUses.empty()) { 4790 if (StridedVectorizeOrders.empty() || 4791 (Data.first->ReorderIndices.empty() && 4792 Data.first->ReuseShuffleIndices.empty() && 4793 !(IgnoreReorder && 4794 Data.first == VectorizableTree.front().get()))) { 4795 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) 4796 OrderedEntries.remove(Op.second); 4797 continue; 4798 } 4799 // Add (potentially!) strided vectorize orders. 4800 for (std::pair<OrdersType, unsigned> &Pair : StridedVectorizeOrders) 4801 OrdersUses.insert(std::make_pair(Pair.first, 0)).first->second += 4802 Pair.second; 4803 } else { 4804 // Account (potentially!) strided vectorize orders only if it was used 4805 // already. 4806 for (std::pair<OrdersType, unsigned> &Pair : StridedVectorizeOrders) { 4807 auto *It = OrdersUses.find(Pair.first); 4808 if (It != OrdersUses.end()) 4809 It->second += Pair.second; 4810 } 4811 } 4812 // Choose the best order. 4813 ArrayRef<unsigned> BestOrder = OrdersUses.front().first; 4814 unsigned Cnt = OrdersUses.front().second; 4815 for (const auto &Pair : drop_begin(OrdersUses)) { 4816 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) { 4817 BestOrder = Pair.first; 4818 Cnt = Pair.second; 4819 } 4820 } 4821 // Set order of the user node (reordering of operands and user nodes). 4822 if (BestOrder.empty()) { 4823 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) 4824 OrderedEntries.remove(Op.second); 4825 continue; 4826 } 4827 // Erase operands from OrderedEntries list and adjust their orders. 
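// Worked example (illustrative): BestOrder {2,0,1,3} produces
// Mask {1,2,0,3} (Mask[BestOrder[I]] = I), while MaskOrder is simply
// {2,0,1,3} with any out-of-range entries turned into poison lanes.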
4828 VisitedOps.clear(); 4829 SmallVector<int> Mask; 4830 inversePermutation(BestOrder, Mask); 4831 SmallVector<int> MaskOrder(BestOrder.size(), PoisonMaskElem); 4832 unsigned E = BestOrder.size(); 4833 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) { 4834 return I < E ? static_cast<int>(I) : PoisonMaskElem; 4835 }); 4836 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) { 4837 TreeEntry *TE = Op.second; 4838 OrderedEntries.remove(TE); 4839 if (!VisitedOps.insert(TE).second) 4840 continue; 4841 if (TE->ReuseShuffleIndices.size() == BestOrder.size()) { 4842 reorderNodeWithReuses(*TE, Mask); 4843 continue; 4844 } 4845 // Gathers are processed separately. 4846 if (TE->State != TreeEntry::Vectorize && 4847 TE->State != TreeEntry::PossibleStridedVectorize && 4848 (TE->State != TreeEntry::ScatterVectorize || 4849 TE->ReorderIndices.empty())) 4850 continue; 4851 assert((BestOrder.size() == TE->ReorderIndices.size() || 4852 TE->ReorderIndices.empty()) && 4853 "Non-matching sizes of user/operand entries."); 4854 reorderOrder(TE->ReorderIndices, Mask); 4855 if (IgnoreReorder && TE == VectorizableTree.front().get()) 4856 IgnoreReorder = false; 4857 } 4858 // For gathers just need to reorder its scalars. 4859 for (TreeEntry *Gather : GatherOps) { 4860 assert(Gather->ReorderIndices.empty() && 4861 "Unexpected reordering of gathers."); 4862 if (!Gather->ReuseShuffleIndices.empty()) { 4863 // Just reorder reuses indices. 4864 reorderReuses(Gather->ReuseShuffleIndices, Mask); 4865 continue; 4866 } 4867 reorderScalars(Gather->Scalars, Mask); 4868 OrderedEntries.remove(Gather); 4869 } 4870 // Reorder operands of the user node and set the ordering for the user 4871 // node itself. 4872 if (Data.first->State != TreeEntry::Vectorize || 4873 !isa<ExtractElementInst, ExtractValueInst, LoadInst>( 4874 Data.first->getMainOp()) || 4875 Data.first->isAltShuffle()) 4876 Data.first->reorderOperands(Mask); 4877 if (!isa<InsertElementInst, StoreInst>(Data.first->getMainOp()) || 4878 Data.first->isAltShuffle() || 4879 Data.first->State == TreeEntry::PossibleStridedVectorize) { 4880 reorderScalars(Data.first->Scalars, Mask); 4881 reorderOrder(Data.first->ReorderIndices, MaskOrder); 4882 if (Data.first->ReuseShuffleIndices.empty() && 4883 !Data.first->ReorderIndices.empty() && 4884 !Data.first->isAltShuffle()) { 4885 // Insert user node to the list to try to sink reordering deeper in 4886 // the graph. 4887 OrderedEntries.insert(Data.first); 4888 } 4889 } else { 4890 reorderOrder(Data.first->ReorderIndices, Mask); 4891 } 4892 } 4893 } 4894 // If the reordering is unnecessary, just remove the reorder. 4895 if (IgnoreReorder && !VectorizableTree.front()->ReorderIndices.empty() && 4896 VectorizableTree.front()->ReuseShuffleIndices.empty()) 4897 VectorizableTree.front()->ReorderIndices.clear(); 4898 } 4899 4900 void BoUpSLP::buildExternalUses( 4901 const ExtraValueToDebugLocsMap &ExternallyUsedValues) { 4902 // Collect the values that we need to extract from the tree. 4903 for (auto &TEPtr : VectorizableTree) { 4904 TreeEntry *Entry = TEPtr.get(); 4905 4906 // No need to handle users of gathered values. 4907 if (Entry->State == TreeEntry::NeedToGather) 4908 continue; 4909 4910 // For each lane: 4911 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 4912 Value *Scalar = Entry->Scalars[Lane]; 4913 if (!isa<Instruction>(Scalar)) 4914 continue; 4915 int FoundLane = Entry->findLaneForValue(Scalar); 4916 4917 // Check if the scalar is externally used as an extra arg. 
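// (Values recorded in ExternallyUsedValues - e.g. the extra arguments of a
// horizontal reduction - get an external use with a null user so that an
// extract is still generated for them.)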
4918 const auto *ExtI = ExternallyUsedValues.find(Scalar); 4919 if (ExtI != ExternallyUsedValues.end()) { 4920 LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " 4921 << Lane << " from " << *Scalar << ".\n"); 4922 ExternalUses.emplace_back(Scalar, nullptr, FoundLane); 4923 } 4924 for (User *U : Scalar->users()) { 4925 LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n"); 4926 4927 Instruction *UserInst = dyn_cast<Instruction>(U); 4928 if (!UserInst) 4929 continue; 4930 4931 if (isDeleted(UserInst)) 4932 continue; 4933 4934 // Skip in-tree scalars that become vectors 4935 if (TreeEntry *UseEntry = getTreeEntry(U)) { 4936 Value *UseScalar = UseEntry->Scalars[0]; 4937 // Some in-tree scalars will remain as scalar in vectorized 4938 // instructions. If that is the case, the one in Lane 0 will 4939 // be used. 4940 if (UseScalar != U || 4941 UseEntry->State == TreeEntry::ScatterVectorize || 4942 UseEntry->State == TreeEntry::PossibleStridedVectorize || 4943 !doesInTreeUserNeedToExtract(Scalar, UserInst, TLI)) { 4944 LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U 4945 << ".\n"); 4946 assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state"); 4947 continue; 4948 } 4949 } 4950 4951 // Ignore users in the user ignore list. 4952 if (UserIgnoreList && UserIgnoreList->contains(UserInst)) 4953 continue; 4954 4955 LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " 4956 << Lane << " from " << *Scalar << ".\n"); 4957 ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane)); 4958 } 4959 } 4960 } 4961 } 4962 4963 DenseMap<Value *, SmallVector<StoreInst *>> 4964 BoUpSLP::collectUserStores(const BoUpSLP::TreeEntry *TE) const { 4965 DenseMap<Value *, SmallVector<StoreInst *>> PtrToStoresMap; 4966 for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size())) { 4967 Value *V = TE->Scalars[Lane]; 4968 // To save compilation time we don't visit if we have too many users. 4969 static constexpr unsigned UsersLimit = 4; 4970 if (V->hasNUsesOrMore(UsersLimit)) 4971 break; 4972 4973 // Collect stores per pointer object. 4974 for (User *U : V->users()) { 4975 auto *SI = dyn_cast<StoreInst>(U); 4976 if (SI == nullptr || !SI->isSimple() || 4977 !isValidElementType(SI->getValueOperand()->getType())) 4978 continue; 4979 // Skip entry if already 4980 if (getTreeEntry(U)) 4981 continue; 4982 4983 Value *Ptr = getUnderlyingObject(SI->getPointerOperand()); 4984 auto &StoresVec = PtrToStoresMap[Ptr]; 4985 // For now just keep one store per pointer object per lane. 4986 // TODO: Extend this to support multiple stores per pointer per lane 4987 if (StoresVec.size() > Lane) 4988 continue; 4989 // Skip if in different BBs. 4990 if (!StoresVec.empty() && 4991 SI->getParent() != StoresVec.back()->getParent()) 4992 continue; 4993 // Make sure that the stores are of the same type. 4994 if (!StoresVec.empty() && 4995 SI->getValueOperand()->getType() != 4996 StoresVec.back()->getValueOperand()->getType()) 4997 continue; 4998 StoresVec.push_back(SI); 4999 } 5000 } 5001 return PtrToStoresMap; 5002 } 5003 5004 bool BoUpSLP::canFormVector(ArrayRef<StoreInst *> StoresVec, 5005 OrdersType &ReorderIndices) const { 5006 // We check whether the stores in StoreVec can form a vector by sorting them 5007 // and checking whether they are consecutive. 5008 5009 // To avoid calling getPointersDiff() while sorting we create a vector of 5010 // pairs {store, offset from first} and sort this instead. 
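// Worked example (illustrative): stores S0..S3 whose offsets from S0 are
// {0, 2, 1, 3} sort into {S0, S2, S1, S3}; the sorted offsets are consecutive,
// so the check succeeds and ReorderIndices becomes {0, 2, 1, 3} (each store's
// position in the sorted sequence).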
5011 SmallVector<std::pair<StoreInst *, int>> StoreOffsetVec(StoresVec.size()); 5012 StoreInst *S0 = StoresVec[0]; 5013 StoreOffsetVec[0] = {S0, 0}; 5014 Type *S0Ty = S0->getValueOperand()->getType(); 5015 Value *S0Ptr = S0->getPointerOperand(); 5016 for (unsigned Idx : seq<unsigned>(1, StoresVec.size())) { 5017 StoreInst *SI = StoresVec[Idx]; 5018 std::optional<int> Diff = 5019 getPointersDiff(S0Ty, S0Ptr, SI->getValueOperand()->getType(), 5020 SI->getPointerOperand(), *DL, *SE, 5021 /*StrictCheck=*/true); 5022 // We failed to compare the pointers so just abandon this StoresVec. 5023 if (!Diff) 5024 return false; 5025 StoreOffsetVec[Idx] = {StoresVec[Idx], *Diff}; 5026 } 5027 5028 // Sort the vector based on the pointers. We create a copy because we may 5029 // need the original later for calculating the reorder (shuffle) indices. 5030 stable_sort(StoreOffsetVec, [](const std::pair<StoreInst *, int> &Pair1, 5031 const std::pair<StoreInst *, int> &Pair2) { 5032 int Offset1 = Pair1.second; 5033 int Offset2 = Pair2.second; 5034 return Offset1 < Offset2; 5035 }); 5036 5037 // Check if the stores are consecutive by checking if their difference is 1. 5038 for (unsigned Idx : seq<unsigned>(1, StoreOffsetVec.size())) 5039 if (StoreOffsetVec[Idx].second != StoreOffsetVec[Idx - 1].second + 1) 5040 return false; 5041 5042 // Calculate the shuffle indices according to their offset against the sorted 5043 // StoreOffsetVec. 5044 ReorderIndices.reserve(StoresVec.size()); 5045 for (StoreInst *SI : StoresVec) { 5046 unsigned Idx = find_if(StoreOffsetVec, 5047 [SI](const std::pair<StoreInst *, int> &Pair) { 5048 return Pair.first == SI; 5049 }) - 5050 StoreOffsetVec.begin(); 5051 ReorderIndices.push_back(Idx); 5052 } 5053 // Identity order (e.g., {0,1,2,3}) is modeled as an empty OrdersType in 5054 // reorderTopToBottom() and reorderBottomToTop(), so we are following the 5055 // same convention here. 5056 auto IsIdentityOrder = [](const OrdersType &Order) { 5057 for (unsigned Idx : seq<unsigned>(0, Order.size())) 5058 if (Idx != Order[Idx]) 5059 return false; 5060 return true; 5061 }; 5062 if (IsIdentityOrder(ReorderIndices)) 5063 ReorderIndices.clear(); 5064 5065 return true; 5066 } 5067 5068 #ifndef NDEBUG 5069 LLVM_DUMP_METHOD static void dumpOrder(const BoUpSLP::OrdersType &Order) { 5070 for (unsigned Idx : Order) 5071 dbgs() << Idx << ", "; 5072 dbgs() << "\n"; 5073 } 5074 #endif 5075 5076 SmallVector<BoUpSLP::OrdersType, 1> 5077 BoUpSLP::findExternalStoreUsersReorderIndices(TreeEntry *TE) const { 5078 unsigned NumLanes = TE->Scalars.size(); 5079 5080 DenseMap<Value *, SmallVector<StoreInst *>> PtrToStoresMap = 5081 collectUserStores(TE); 5082 5083 // Holds the reorder indices for each candidate store vector that is a user of 5084 // the current TreeEntry. 5085 SmallVector<OrdersType, 1> ExternalReorderIndices; 5086 5087 // Now inspect the stores collected per pointer and look for vectorization 5088 // candidates. For each candidate calculate the reorder index vector and push 5089 // it into `ExternalReorderIndices` 5090 for (const auto &Pair : PtrToStoresMap) { 5091 auto &StoresVec = Pair.second; 5092 // If we have fewer than NumLanes stores, then we can't form a vector. 5093 if (StoresVec.size() != NumLanes) 5094 continue; 5095 5096 // If the stores are not consecutive then abandon this StoresVec. 
5097 OrdersType ReorderIndices; 5098 if (!canFormVector(StoresVec, ReorderIndices)) 5099 continue; 5100 5101 // We now know that the scalars in StoresVec can form a vector instruction, 5102 // so set the reorder indices. 5103 ExternalReorderIndices.push_back(ReorderIndices); 5104 } 5105 return ExternalReorderIndices; 5106 } 5107 5108 void BoUpSLP::buildTree(ArrayRef<Value *> Roots, 5109 const SmallDenseSet<Value *> &UserIgnoreLst) { 5110 deleteTree(); 5111 UserIgnoreList = &UserIgnoreLst; 5112 if (!allSameType(Roots)) 5113 return; 5114 buildTree_rec(Roots, 0, EdgeInfo()); 5115 } 5116 5117 void BoUpSLP::buildTree(ArrayRef<Value *> Roots) { 5118 deleteTree(); 5119 if (!allSameType(Roots)) 5120 return; 5121 buildTree_rec(Roots, 0, EdgeInfo()); 5122 } 5123 5124 /// \return true if the specified list of values has only one instruction that 5125 /// requires scheduling, false otherwise. 5126 #ifndef NDEBUG 5127 static bool needToScheduleSingleInstruction(ArrayRef<Value *> VL) { 5128 Value *NeedsScheduling = nullptr; 5129 for (Value *V : VL) { 5130 if (doesNotNeedToBeScheduled(V)) 5131 continue; 5132 if (!NeedsScheduling) { 5133 NeedsScheduling = V; 5134 continue; 5135 } 5136 return false; 5137 } 5138 return NeedsScheduling; 5139 } 5140 #endif 5141 5142 /// Generates key/subkey pair for the given value to provide effective sorting 5143 /// of the values and better detection of the vectorizable values sequences. The 5144 /// keys/subkeys can be used for better sorting of the values themselves (keys) 5145 /// and in values subgroups (subkeys). 5146 static std::pair<size_t, size_t> generateKeySubkey( 5147 Value *V, const TargetLibraryInfo *TLI, 5148 function_ref<hash_code(size_t, LoadInst *)> LoadsSubkeyGenerator, 5149 bool AllowAlternate) { 5150 hash_code Key = hash_value(V->getValueID() + 2); 5151 hash_code SubKey = hash_value(0); 5152 // Sort the loads by the distance between the pointers. 5153 if (auto *LI = dyn_cast<LoadInst>(V)) { 5154 Key = hash_combine(LI->getType(), hash_value(Instruction::Load), Key); 5155 if (LI->isSimple()) 5156 SubKey = hash_value(LoadsSubkeyGenerator(Key, LI)); 5157 else 5158 Key = SubKey = hash_value(LI); 5159 } else if (isVectorLikeInstWithConstOps(V)) { 5160 // Sort extracts by the vector operands. 5161 if (isa<ExtractElementInst, UndefValue>(V)) 5162 Key = hash_value(Value::UndefValueVal + 1); 5163 if (auto *EI = dyn_cast<ExtractElementInst>(V)) { 5164 if (!isUndefVector(EI->getVectorOperand()).all() && 5165 !isa<UndefValue>(EI->getIndexOperand())) 5166 SubKey = hash_value(EI->getVectorOperand()); 5167 } 5168 } else if (auto *I = dyn_cast<Instruction>(V)) { 5169 // Sort other instructions just by the opcodes except for CMPInst. 5170 // For CMP also sort by the predicate kind. 5171 if ((isa<BinaryOperator, CastInst>(I)) && 5172 isValidForAlternation(I->getOpcode())) { 5173 if (AllowAlternate) 5174 Key = hash_value(isa<BinaryOperator>(I) ? 1 : 0); 5175 else 5176 Key = hash_combine(hash_value(I->getOpcode()), Key); 5177 SubKey = hash_combine( 5178 hash_value(I->getOpcode()), hash_value(I->getType()), 5179 hash_value(isa<BinaryOperator>(I) 5180 ? I->getType() 5181 : cast<CastInst>(I)->getOperand(0)->getType())); 5182 // For casts, look through the only operand to improve compile time. 
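// (Illustrative: two zexts whose operands are simple loads of the same type
// fold the loads' key into their own key/subkey, so such casts tend to be
// grouped together when candidates are sorted.)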
5183 if (isa<CastInst>(I)) { 5184 std::pair<size_t, size_t> OpVals = 5185 generateKeySubkey(I->getOperand(0), TLI, LoadsSubkeyGenerator, 5186 /*AllowAlternate=*/true); 5187 Key = hash_combine(OpVals.first, Key); 5188 SubKey = hash_combine(OpVals.first, SubKey); 5189 } 5190 } else if (auto *CI = dyn_cast<CmpInst>(I)) { 5191 CmpInst::Predicate Pred = CI->getPredicate(); 5192 if (CI->isCommutative()) 5193 Pred = std::min(Pred, CmpInst::getInversePredicate(Pred)); 5194 CmpInst::Predicate SwapPred = CmpInst::getSwappedPredicate(Pred); 5195 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Pred), 5196 hash_value(SwapPred), 5197 hash_value(CI->getOperand(0)->getType())); 5198 } else if (auto *Call = dyn_cast<CallInst>(I)) { 5199 Intrinsic::ID ID = getVectorIntrinsicIDForCall(Call, TLI); 5200 if (isTriviallyVectorizable(ID)) { 5201 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(ID)); 5202 } else if (!VFDatabase(*Call).getMappings(*Call).empty()) { 5203 SubKey = hash_combine(hash_value(I->getOpcode()), 5204 hash_value(Call->getCalledFunction())); 5205 } else { 5206 Key = hash_combine(hash_value(Call), Key); 5207 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Call)); 5208 } 5209 for (const CallBase::BundleOpInfo &Op : Call->bundle_op_infos()) 5210 SubKey = hash_combine(hash_value(Op.Begin), hash_value(Op.End), 5211 hash_value(Op.Tag), SubKey); 5212 } else if (auto *Gep = dyn_cast<GetElementPtrInst>(I)) { 5213 if (Gep->getNumOperands() == 2 && isa<ConstantInt>(Gep->getOperand(1))) 5214 SubKey = hash_value(Gep->getPointerOperand()); 5215 else 5216 SubKey = hash_value(Gep); 5217 } else if (BinaryOperator::isIntDivRem(I->getOpcode()) && 5218 !isa<ConstantInt>(I->getOperand(1))) { 5219 // Do not try to vectorize instructions with potentially high cost. 5220 SubKey = hash_value(I); 5221 } else { 5222 SubKey = hash_value(I->getOpcode()); 5223 } 5224 Key = hash_combine(hash_value(I->getParent()), Key); 5225 } 5226 return std::make_pair(Key, SubKey); 5227 } 5228 5229 /// Checks if the specified instruction \p I is an alternate operation for 5230 /// the given \p MainOp and \p AltOp instructions. 5231 static bool isAlternateInstruction(const Instruction *I, 5232 const Instruction *MainOp, 5233 const Instruction *AltOp, 5234 const TargetLibraryInfo &TLI); 5235 5236 BoUpSLP::TreeEntry::EntryState BoUpSLP::getScalarsVectorizationState( 5237 InstructionsState &S, ArrayRef<Value *> VL, bool IsScatterVectorizeUserTE, 5238 OrdersType &CurrentOrder, SmallVectorImpl<Value *> &PointerOps) const { 5239 assert(S.MainOp && "Expected instructions with same/alternate opcodes only."); 5240 5241 unsigned ShuffleOrOp = 5242 S.isAltShuffle() ? (unsigned)Instruction::ShuffleVector : S.getOpcode(); 5243 auto *VL0 = cast<Instruction>(S.OpValue); 5244 switch (ShuffleOrOp) { 5245 case Instruction::PHI: { 5246 // Check for terminator values (e.g. invoke). 
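// (If an incoming value is produced by a terminator such as an invoke, there
// is no room to emit the gathering/shuffle code after it in that predecessor,
// so the bundle is gathered instead.)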
5247 for (Value *V : VL) 5248 for (Value *Incoming : cast<PHINode>(V)->incoming_values()) { 5249 Instruction *Term = dyn_cast<Instruction>(Incoming); 5250 if (Term && Term->isTerminator()) { 5251 LLVM_DEBUG(dbgs() 5252 << "SLP: Need to swizzle PHINodes (terminator use).\n"); 5253 return TreeEntry::NeedToGather; 5254 } 5255 } 5256 5257 return TreeEntry::Vectorize; 5258 } 5259 case Instruction::ExtractValue: 5260 case Instruction::ExtractElement: { 5261 bool Reuse = canReuseExtract(VL, VL0, CurrentOrder); 5262 if (Reuse || !CurrentOrder.empty()) 5263 return TreeEntry::Vectorize; 5264 LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n"); 5265 return TreeEntry::NeedToGather; 5266 } 5267 case Instruction::InsertElement: { 5268 // Check that we have a buildvector and not a shuffle of 2 or more 5269 // different vectors. 5270 ValueSet SourceVectors; 5271 for (Value *V : VL) { 5272 SourceVectors.insert(cast<Instruction>(V)->getOperand(0)); 5273 assert(getInsertIndex(V) != std::nullopt && 5274 "Non-constant or undef index?"); 5275 } 5276 5277 if (count_if(VL, [&SourceVectors](Value *V) { 5278 return !SourceVectors.contains(V); 5279 }) >= 2) { 5280 // Found 2nd source vector - cancel. 5281 LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with " 5282 "different source vectors.\n"); 5283 return TreeEntry::NeedToGather; 5284 } 5285 5286 return TreeEntry::Vectorize; 5287 } 5288 case Instruction::Load: { 5289 // Check that a vectorized load would load the same memory as a scalar 5290 // load. For example, we don't want to vectorize loads that are smaller 5291 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 5292 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 5293 // from such a struct, we read/write packed bits disagreeing with the 5294 // unvectorized version. 
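// Sketch of the possible outcomes (illustrative): consecutive loads (in any
// lane order) come back as Vectorize, with a non-empty CurrentOrder when the
// lanes need shuffling; loads only reachable via llvm.masked.gather come back
// as ScatterVectorize or PossibleStridedVectorize; everything else is gathered.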
5295 switch (canVectorizeLoads(VL, VL0, *TTI, *DL, *SE, *LI, *TLI, CurrentOrder, 5296 PointerOps)) { 5297 case LoadsState::Vectorize: 5298 return TreeEntry::Vectorize; 5299 case LoadsState::ScatterVectorize: 5300 return TreeEntry::ScatterVectorize; 5301 case LoadsState::PossibleStridedVectorize: 5302 return TreeEntry::PossibleStridedVectorize; 5303 case LoadsState::Gather: 5304 #ifndef NDEBUG 5305 Type *ScalarTy = VL0->getType(); 5306 if (DL->getTypeSizeInBits(ScalarTy) != 5307 DL->getTypeAllocSizeInBits(ScalarTy)) 5308 LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n"); 5309 else if (any_of(VL, 5310 [](Value *V) { return !cast<LoadInst>(V)->isSimple(); })) 5311 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n"); 5312 else 5313 LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n"); 5314 #endif // NDEBUG 5315 return TreeEntry::NeedToGather; 5316 } 5317 llvm_unreachable("Unexpected state of loads"); 5318 } 5319 case Instruction::ZExt: 5320 case Instruction::SExt: 5321 case Instruction::FPToUI: 5322 case Instruction::FPToSI: 5323 case Instruction::FPExt: 5324 case Instruction::PtrToInt: 5325 case Instruction::IntToPtr: 5326 case Instruction::SIToFP: 5327 case Instruction::UIToFP: 5328 case Instruction::Trunc: 5329 case Instruction::FPTrunc: 5330 case Instruction::BitCast: { 5331 Type *SrcTy = VL0->getOperand(0)->getType(); 5332 for (Value *V : VL) { 5333 Type *Ty = cast<Instruction>(V)->getOperand(0)->getType(); 5334 if (Ty != SrcTy || !isValidElementType(Ty)) { 5335 LLVM_DEBUG( 5336 dbgs() << "SLP: Gathering casts with different src types.\n"); 5337 return TreeEntry::NeedToGather; 5338 } 5339 } 5340 return TreeEntry::Vectorize; 5341 } 5342 case Instruction::ICmp: 5343 case Instruction::FCmp: { 5344 // Check that all of the compares have the same predicate. 5345 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 5346 CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0); 5347 Type *ComparedTy = VL0->getOperand(0)->getType(); 5348 for (Value *V : VL) { 5349 CmpInst *Cmp = cast<CmpInst>(V); 5350 if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) || 5351 Cmp->getOperand(0)->getType() != ComparedTy) { 5352 LLVM_DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n"); 5353 return TreeEntry::NeedToGather; 5354 } 5355 } 5356 return TreeEntry::Vectorize; 5357 } 5358 case Instruction::Select: 5359 case Instruction::FNeg: 5360 case Instruction::Add: 5361 case Instruction::FAdd: 5362 case Instruction::Sub: 5363 case Instruction::FSub: 5364 case Instruction::Mul: 5365 case Instruction::FMul: 5366 case Instruction::UDiv: 5367 case Instruction::SDiv: 5368 case Instruction::FDiv: 5369 case Instruction::URem: 5370 case Instruction::SRem: 5371 case Instruction::FRem: 5372 case Instruction::Shl: 5373 case Instruction::LShr: 5374 case Instruction::AShr: 5375 case Instruction::And: 5376 case Instruction::Or: 5377 case Instruction::Xor: 5378 return TreeEntry::Vectorize; 5379 case Instruction::GetElementPtr: { 5380 // We don't combine GEPs with complicated (nested) indexing. 5381 for (Value *V : VL) { 5382 auto *I = dyn_cast<GetElementPtrInst>(V); 5383 if (!I) 5384 continue; 5385 if (I->getNumOperands() != 2) { 5386 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n"); 5387 return TreeEntry::NeedToGather; 5388 } 5389 } 5390 5391 // We can't combine several GEPs into one vector if they operate on 5392 // different types. 
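// (Illustrative: a bundle mixing `getelementptr i32, ptr %p, i64 %i` with
// `getelementptr i64, ptr %q, i64 %j` has different source element types and
// is gathered by the check below.)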
5393 Type *Ty0 = cast<GEPOperator>(VL0)->getSourceElementType(); 5394 for (Value *V : VL) { 5395 auto *GEP = dyn_cast<GEPOperator>(V); 5396 if (!GEP) 5397 continue; 5398 Type *CurTy = GEP->getSourceElementType(); 5399 if (Ty0 != CurTy) { 5400 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n"); 5401 return TreeEntry::NeedToGather; 5402 } 5403 } 5404 5405 // We don't combine GEPs with non-constant indexes. 5406 Type *Ty1 = VL0->getOperand(1)->getType(); 5407 for (Value *V : VL) { 5408 auto *I = dyn_cast<GetElementPtrInst>(V); 5409 if (!I) 5410 continue; 5411 auto *Op = I->getOperand(1); 5412 if ((!IsScatterVectorizeUserTE && !isa<ConstantInt>(Op)) || 5413 (Op->getType() != Ty1 && 5414 ((IsScatterVectorizeUserTE && !isa<ConstantInt>(Op)) || 5415 Op->getType()->getScalarSizeInBits() > 5416 DL->getIndexSizeInBits( 5417 V->getType()->getPointerAddressSpace())))) { 5418 LLVM_DEBUG( 5419 dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n"); 5420 return TreeEntry::NeedToGather; 5421 } 5422 } 5423 5424 return TreeEntry::Vectorize; 5425 } 5426 case Instruction::Store: { 5427 // Check if the stores are consecutive or if we need to swizzle them. 5428 llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType(); 5429 // Avoid types that are padded when being allocated as scalars, while 5430 // being packed together in a vector (such as i1). 5431 if (DL->getTypeSizeInBits(ScalarTy) != 5432 DL->getTypeAllocSizeInBits(ScalarTy)) { 5433 LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n"); 5434 return TreeEntry::NeedToGather; 5435 } 5436 // Make sure all stores in the bundle are simple - we can't vectorize 5437 // atomic or volatile stores. 5438 for (Value *V : VL) { 5439 auto *SI = cast<StoreInst>(V); 5440 if (!SI->isSimple()) { 5441 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n"); 5442 return TreeEntry::NeedToGather; 5443 } 5444 PointerOps.push_back(SI->getPointerOperand()); 5445 } 5446 5447 // Check the order of pointer operands. 5448 if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) { 5449 Value *Ptr0; 5450 Value *PtrN; 5451 if (CurrentOrder.empty()) { 5452 Ptr0 = PointerOps.front(); 5453 PtrN = PointerOps.back(); 5454 } else { 5455 Ptr0 = PointerOps[CurrentOrder.front()]; 5456 PtrN = PointerOps[CurrentOrder.back()]; 5457 } 5458 std::optional<int> Dist = 5459 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE); 5460 // Check that the sorted pointer operands are consecutive. 5461 if (static_cast<unsigned>(*Dist) == VL.size() - 1) 5462 return TreeEntry::Vectorize; 5463 } 5464 5465 LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n"); 5466 return TreeEntry::NeedToGather; 5467 } 5468 case Instruction::Call: { 5469 // Check if the calls are all to the same vectorizable intrinsic or 5470 // library function. 
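// (Illustrative: a bundle of llvm.fabs.f32 calls maps onto a single vector
// intrinsic, while a bundle that mixes callees, uses different operand-bundle
// schemas, or disagrees on a scalar argument such as the exponent of
// llvm.powi is gathered.)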
5471 CallInst *CI = cast<CallInst>(VL0); 5472 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 5473 5474 VFShape Shape = VFShape::get( 5475 CI->getFunctionType(), 5476 ElementCount::getFixed(static_cast<unsigned int>(VL.size())), 5477 false /*HasGlobalPred*/); 5478 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 5479 5480 if (!VecFunc && !isTriviallyVectorizable(ID)) { 5481 LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n"); 5482 return TreeEntry::NeedToGather; 5483 } 5484 Function *F = CI->getCalledFunction(); 5485 unsigned NumArgs = CI->arg_size(); 5486 SmallVector<Value *, 4> ScalarArgs(NumArgs, nullptr); 5487 for (unsigned J = 0; J != NumArgs; ++J) 5488 if (isVectorIntrinsicWithScalarOpAtArg(ID, J)) 5489 ScalarArgs[J] = CI->getArgOperand(J); 5490 for (Value *V : VL) { 5491 CallInst *CI2 = dyn_cast<CallInst>(V); 5492 if (!CI2 || CI2->getCalledFunction() != F || 5493 getVectorIntrinsicIDForCall(CI2, TLI) != ID || 5494 (VecFunc && 5495 VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) || 5496 !CI->hasIdenticalOperandBundleSchema(*CI2)) { 5497 LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V 5498 << "\n"); 5499 return TreeEntry::NeedToGather; 5500 } 5501 // Some intrinsics have scalar arguments and should be same in order for 5502 // them to be vectorized. 5503 for (unsigned J = 0; J != NumArgs; ++J) { 5504 if (isVectorIntrinsicWithScalarOpAtArg(ID, J)) { 5505 Value *A1J = CI2->getArgOperand(J); 5506 if (ScalarArgs[J] != A1J) { 5507 LLVM_DEBUG(dbgs() 5508 << "SLP: mismatched arguments in call:" << *CI 5509 << " argument " << ScalarArgs[J] << "!=" << A1J << "\n"); 5510 return TreeEntry::NeedToGather; 5511 } 5512 } 5513 } 5514 // Verify that the bundle operands are identical between the two calls. 5515 if (CI->hasOperandBundles() && 5516 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(), 5517 CI->op_begin() + CI->getBundleOperandsEndIndex(), 5518 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) { 5519 LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI 5520 << "!=" << *V << '\n'); 5521 return TreeEntry::NeedToGather; 5522 } 5523 } 5524 5525 return TreeEntry::Vectorize; 5526 } 5527 case Instruction::ShuffleVector: { 5528 // If this is not an alternate sequence of opcode like add-sub 5529 // then do not vectorize this instruction. 5530 if (!S.isAltShuffle()) { 5531 LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n"); 5532 return TreeEntry::NeedToGather; 5533 } 5534 return TreeEntry::Vectorize; 5535 } 5536 default: 5537 LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n"); 5538 return TreeEntry::NeedToGather; 5539 } 5540 } 5541 5542 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, 5543 const EdgeInfo &UserTreeIdx) { 5544 assert((allConstant(VL) || allSameType(VL)) && "Invalid types!"); 5545 5546 SmallVector<int> ReuseShuffleIndicies; 5547 SmallVector<Value *> UniqueValues; 5548 SmallVector<Value *> NonUniqueValueVL; 5549 auto TryToFindDuplicates = [&](const InstructionsState &S, 5550 bool DoNotFail = false) { 5551 // Check that every instruction appears once in this bundle. 5552 DenseMap<Value *, unsigned> UniquePositions(VL.size()); 5553 for (Value *V : VL) { 5554 if (isConstant(V)) { 5555 ReuseShuffleIndicies.emplace_back( 5556 isa<UndefValue>(V) ? 
PoisonMaskElem : UniqueValues.size()); 5557 UniqueValues.emplace_back(V); 5558 continue; 5559 } 5560 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 5561 ReuseShuffleIndicies.emplace_back(Res.first->second); 5562 if (Res.second) 5563 UniqueValues.emplace_back(V); 5564 } 5565 size_t NumUniqueScalarValues = UniqueValues.size(); 5566 if (NumUniqueScalarValues == VL.size()) { 5567 ReuseShuffleIndicies.clear(); 5568 } else { 5569 LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n"); 5570 if (NumUniqueScalarValues <= 1 || 5571 (UniquePositions.size() == 1 && all_of(UniqueValues, 5572 [](Value *V) { 5573 return isa<UndefValue>(V) || 5574 !isConstant(V); 5575 })) || 5576 !llvm::has_single_bit<uint32_t>(NumUniqueScalarValues)) { 5577 if (DoNotFail && UniquePositions.size() > 1 && 5578 NumUniqueScalarValues > 1 && S.MainOp->isSafeToRemove() && 5579 all_of(UniqueValues, [=](Value *V) { 5580 return isa<ExtractElementInst>(V) || 5581 areAllUsersVectorized(cast<Instruction>(V), 5582 UserIgnoreList); 5583 })) { 5584 unsigned PWSz = PowerOf2Ceil(UniqueValues.size()); 5585 if (PWSz == VL.size()) { 5586 ReuseShuffleIndicies.clear(); 5587 } else { 5588 NonUniqueValueVL.assign(UniqueValues.begin(), UniqueValues.end()); 5589 NonUniqueValueVL.append(PWSz - UniqueValues.size(), 5590 UniqueValues.back()); 5591 VL = NonUniqueValueVL; 5592 } 5593 return true; 5594 } 5595 LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); 5596 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5597 return false; 5598 } 5599 VL = UniqueValues; 5600 } 5601 return true; 5602 }; 5603 5604 InstructionsState S = getSameOpcode(VL, *TLI); 5605 5606 // Don't vectorize ephemeral values. 5607 if (!EphValues.empty()) { 5608 for (Value *V : VL) { 5609 if (EphValues.count(V)) { 5610 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 5611 << ") is ephemeral.\n"); 5612 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5613 return; 5614 } 5615 } 5616 } 5617 5618 // Gather if we hit the RecursionMaxDepth, unless this is a load (or z/sext of 5619 // a load), in which case peek through to include it in the tree, without 5620 // ballooning over-budget. 5621 if (Depth >= RecursionMaxDepth && 5622 !(S.MainOp && isa<Instruction>(S.MainOp) && S.MainOp == S.AltOp && 5623 VL.size() >= 4 && 5624 (match(S.MainOp, m_Load(m_Value())) || all_of(VL, [&S](const Value *I) { 5625 return match(I, 5626 m_OneUse(m_ZExtOrSExt(m_OneUse(m_Load(m_Value()))))) && 5627 cast<Instruction>(I)->getOpcode() == 5628 cast<Instruction>(S.MainOp)->getOpcode(); 5629 })))) { 5630 LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n"); 5631 if (TryToFindDuplicates(S)) 5632 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5633 ReuseShuffleIndicies); 5634 return; 5635 } 5636 5637 // Don't handle scalable vectors 5638 if (S.getOpcode() == Instruction::ExtractElement && 5639 isa<ScalableVectorType>( 5640 cast<ExtractElementInst>(S.OpValue)->getVectorOperandType())) { 5641 LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n"); 5642 if (TryToFindDuplicates(S)) 5643 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5644 ReuseShuffleIndicies); 5645 return; 5646 } 5647 5648 // Don't handle vectors. 
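// (Bundles whose scalars are themselves vector-typed are rejected; the only
// exception is insertelement, which is modelled as a buildvector.)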
5649 if (S.OpValue->getType()->isVectorTy() && 5650 !isa<InsertElementInst>(S.OpValue)) { 5651 LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n"); 5652 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5653 return; 5654 } 5655 5656 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) 5657 if (SI->getValueOperand()->getType()->isVectorTy()) { 5658 LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n"); 5659 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5660 return; 5661 } 5662 5663 // If all of the operands are identical or constant we have a simple solution. 5664 // If we deal with insert/extract instructions, they all must have constant 5665 // indices, otherwise we should gather them, not try to vectorize. 5666 // If alternate op node with 2 elements with gathered operands - do not 5667 // vectorize. 5668 auto &&NotProfitableForVectorization = [&S, this, 5669 Depth](ArrayRef<Value *> VL) { 5670 if (!S.getOpcode() || !S.isAltShuffle() || VL.size() > 2) 5671 return false; 5672 if (VectorizableTree.size() < MinTreeSize) 5673 return false; 5674 if (Depth >= RecursionMaxDepth - 1) 5675 return true; 5676 // Check if all operands are extracts, part of vector node or can build a 5677 // regular vectorize node. 5678 SmallVector<unsigned, 2> InstsCount(VL.size(), 0); 5679 for (Value *V : VL) { 5680 auto *I = cast<Instruction>(V); 5681 InstsCount.push_back(count_if(I->operand_values(), [](Value *Op) { 5682 return isa<Instruction>(Op) || isVectorLikeInstWithConstOps(Op); 5683 })); 5684 } 5685 bool IsCommutative = isCommutative(S.MainOp) || isCommutative(S.AltOp); 5686 if ((IsCommutative && 5687 std::accumulate(InstsCount.begin(), InstsCount.end(), 0) < 2) || 5688 (!IsCommutative && 5689 all_of(InstsCount, [](unsigned ICnt) { return ICnt < 2; }))) 5690 return true; 5691 assert(VL.size() == 2 && "Expected only 2 alternate op instructions."); 5692 SmallVector<SmallVector<std::pair<Value *, Value *>>> Candidates; 5693 auto *I1 = cast<Instruction>(VL.front()); 5694 auto *I2 = cast<Instruction>(VL.back()); 5695 for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op) 5696 Candidates.emplace_back().emplace_back(I1->getOperand(Op), 5697 I2->getOperand(Op)); 5698 if (static_cast<unsigned>(count_if( 5699 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) { 5700 return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat); 5701 })) >= S.MainOp->getNumOperands() / 2) 5702 return false; 5703 if (S.MainOp->getNumOperands() > 2) 5704 return true; 5705 if (IsCommutative) { 5706 // Check permuted operands. 
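// (Illustrative: for the two-element bundle {a + b, c + d} the swapped
// pairings checked here are (a, d) and (b, c); if either pair looks like a
// good root pair, the bundle is still treated as potentially profitable.)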
5707 Candidates.clear(); 5708 for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op) 5709 Candidates.emplace_back().emplace_back(I1->getOperand(Op), 5710 I2->getOperand((Op + 1) % E)); 5711 if (any_of( 5712 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) { 5713 return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat); 5714 })) 5715 return false; 5716 } 5717 return true; 5718 }; 5719 SmallVector<unsigned> SortedIndices; 5720 BasicBlock *BB = nullptr; 5721 bool IsScatterVectorizeUserTE = 5722 UserTreeIdx.UserTE && 5723 (UserTreeIdx.UserTE->State == TreeEntry::ScatterVectorize || 5724 UserTreeIdx.UserTE->State == TreeEntry::PossibleStridedVectorize); 5725 bool AreAllSameInsts = 5726 (S.getOpcode() && allSameBlock(VL)) || 5727 (S.OpValue->getType()->isPointerTy() && IsScatterVectorizeUserTE && 5728 VL.size() > 2 && 5729 all_of(VL, 5730 [&BB](Value *V) { 5731 auto *I = dyn_cast<GetElementPtrInst>(V); 5732 if (!I) 5733 return doesNotNeedToBeScheduled(V); 5734 if (!BB) 5735 BB = I->getParent(); 5736 return BB == I->getParent() && I->getNumOperands() == 2; 5737 }) && 5738 BB && 5739 sortPtrAccesses(VL, UserTreeIdx.UserTE->getMainOp()->getType(), *DL, *SE, 5740 SortedIndices)); 5741 if (!AreAllSameInsts || allConstant(VL) || isSplat(VL) || 5742 (isa<InsertElementInst, ExtractValueInst, ExtractElementInst>( 5743 S.OpValue) && 5744 !all_of(VL, isVectorLikeInstWithConstOps)) || 5745 NotProfitableForVectorization(VL)) { 5746 LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O, small shuffle. \n"); 5747 if (TryToFindDuplicates(S)) 5748 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5749 ReuseShuffleIndicies); 5750 return; 5751 } 5752 5753 // We now know that this is a vector of instructions of the same type from 5754 // the same block. 5755 5756 // Check if this is a duplicate of another entry. 5757 if (TreeEntry *E = getTreeEntry(S.OpValue)) { 5758 LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n"); 5759 if (!E->isSame(VL)) { 5760 auto It = MultiNodeScalars.find(S.OpValue); 5761 if (It != MultiNodeScalars.end()) { 5762 auto *TEIt = find_if(It->getSecond(), 5763 [&](TreeEntry *ME) { return ME->isSame(VL); }); 5764 if (TEIt != It->getSecond().end()) 5765 E = *TEIt; 5766 else 5767 E = nullptr; 5768 } else { 5769 E = nullptr; 5770 } 5771 } 5772 if (!E) { 5773 if (!doesNotNeedToBeScheduled(S.OpValue)) { 5774 LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n"); 5775 if (TryToFindDuplicates(S)) 5776 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5777 ReuseShuffleIndicies); 5778 return; 5779 } 5780 } else { 5781 // Record the reuse of the tree node. FIXME, currently this is only used 5782 // to properly draw the graph rather than for the actual vectorization. 5783 E->UserTreeIndices.push_back(UserTreeIdx); 5784 LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue 5785 << ".\n"); 5786 return; 5787 } 5788 } 5789 5790 // Check that none of the instructions in the bundle are already in the tree. 5791 for (Value *V : VL) { 5792 if ((!IsScatterVectorizeUserTE && !isa<Instruction>(V)) || 5793 doesNotNeedToBeScheduled(V)) 5794 continue; 5795 if (getTreeEntry(V)) { 5796 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 5797 << ") is already in tree.\n"); 5798 if (TryToFindDuplicates(S)) 5799 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5800 ReuseShuffleIndicies); 5801 return; 5802 } 5803 } 5804 5805 // The reduction nodes (stored in UserIgnoreList) also should stay scalar. 
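// (These values belong to a horizontal reduction that is vectorized
// separately; pulling them into this tree would conflict with the reduction's
// own vectorization.)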
5806 if (UserIgnoreList && !UserIgnoreList->empty()) { 5807 for (Value *V : VL) { 5808 if (UserIgnoreList && UserIgnoreList->contains(V)) { 5809 LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n"); 5810 if (TryToFindDuplicates(S)) 5811 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5812 ReuseShuffleIndicies); 5813 return; 5814 } 5815 } 5816 } 5817 5818 // Special processing for sorted pointers for ScatterVectorize node with 5819 // constant indeces only. 5820 if (AreAllSameInsts && UserTreeIdx.UserTE && 5821 (UserTreeIdx.UserTE->State == TreeEntry::ScatterVectorize || 5822 UserTreeIdx.UserTE->State == TreeEntry::PossibleStridedVectorize) && 5823 !(S.getOpcode() && allSameBlock(VL))) { 5824 assert(S.OpValue->getType()->isPointerTy() && 5825 count_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); }) >= 5826 2 && 5827 "Expected pointers only."); 5828 // Reset S to make it GetElementPtr kind of node. 5829 const auto *It = find_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); }); 5830 assert(It != VL.end() && "Expected at least one GEP."); 5831 S = getSameOpcode(*It, *TLI); 5832 } 5833 5834 // Check that all of the users of the scalars that we want to vectorize are 5835 // schedulable. 5836 auto *VL0 = cast<Instruction>(S.OpValue); 5837 BB = VL0->getParent(); 5838 5839 if (!DT->isReachableFromEntry(BB)) { 5840 // Don't go into unreachable blocks. They may contain instructions with 5841 // dependency cycles which confuse the final scheduling. 5842 LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n"); 5843 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5844 return; 5845 } 5846 5847 // Don't go into catchswitch blocks, which can happen with PHIs. 5848 // Such blocks can only have PHIs and the catchswitch. There is no 5849 // place to insert a shuffle if we need to, so just avoid that issue. 5850 if (isa<CatchSwitchInst>(BB->getTerminator())) { 5851 LLVM_DEBUG(dbgs() << "SLP: bundle in catchswitch block.\n"); 5852 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5853 return; 5854 } 5855 5856 // Check that every instruction appears once in this bundle. 5857 if (!TryToFindDuplicates(S, /*DoNotFail=*/true)) 5858 return; 5859 5860 // Perform specific checks for each particular instruction kind. 5861 OrdersType CurrentOrder; 5862 SmallVector<Value *> PointerOps; 5863 TreeEntry::EntryState State = getScalarsVectorizationState( 5864 S, VL, IsScatterVectorizeUserTE, CurrentOrder, PointerOps); 5865 if (State == TreeEntry::NeedToGather) { 5866 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5867 ReuseShuffleIndicies); 5868 return; 5869 } 5870 5871 auto &BSRef = BlocksSchedules[BB]; 5872 if (!BSRef) 5873 BSRef = std::make_unique<BlockScheduling>(BB); 5874 5875 BlockScheduling &BS = *BSRef; 5876 5877 std::optional<ScheduleData *> Bundle = 5878 BS.tryScheduleBundle(UniqueValues, this, S); 5879 #ifdef EXPENSIVE_CHECKS 5880 // Make sure we didn't break any internal invariants 5881 BS.verify(); 5882 #endif 5883 if (!Bundle) { 5884 LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n"); 5885 assert((!BS.getScheduleData(VL0) || 5886 !BS.getScheduleData(VL0)->isPartOfBundle()) && 5887 "tryScheduleBundle should cancelScheduling on failure"); 5888 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5889 ReuseShuffleIndicies); 5890 return; 5891 } 5892 LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); 5893 5894 unsigned ShuffleOrOp = S.isAltShuffle() ? 
5895 (unsigned) Instruction::ShuffleVector : S.getOpcode(); 5896 switch (ShuffleOrOp) { 5897 case Instruction::PHI: { 5898 auto *PH = cast<PHINode>(VL0); 5899 5900 TreeEntry *TE = 5901 newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies); 5902 LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); 5903 5904 // Keeps the reordered operands to avoid code duplication. 5905 SmallVector<ValueList, 2> OperandsVec; 5906 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) { 5907 if (!DT->isReachableFromEntry(PH->getIncomingBlock(I))) { 5908 ValueList Operands(VL.size(), PoisonValue::get(PH->getType())); 5909 TE->setOperand(I, Operands); 5910 OperandsVec.push_back(Operands); 5911 continue; 5912 } 5913 ValueList Operands; 5914 // Prepare the operand vector. 5915 for (Value *V : VL) 5916 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock( 5917 PH->getIncomingBlock(I))); 5918 TE->setOperand(I, Operands); 5919 OperandsVec.push_back(Operands); 5920 } 5921 for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx) 5922 buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx}); 5923 return; 5924 } 5925 case Instruction::ExtractValue: 5926 case Instruction::ExtractElement: { 5927 if (CurrentOrder.empty()) { 5928 LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n"); 5929 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 5930 ReuseShuffleIndicies); 5931 // This is a special case, as it does not gather, but at the same time 5932 // we are not extending buildTree_rec() towards the operands. 5933 ValueList Op0; 5934 Op0.assign(VL.size(), VL0->getOperand(0)); 5935 VectorizableTree.back()->setOperand(0, Op0); 5936 return; 5937 } 5938 LLVM_DEBUG({ 5939 dbgs() << "SLP: Reusing or shuffling of reordered extract sequence " 5940 "with order"; 5941 for (unsigned Idx : CurrentOrder) 5942 dbgs() << " " << Idx; 5943 dbgs() << "\n"; 5944 }); 5945 fixupOrderingIndices(CurrentOrder); 5946 // Insert new order with initial value 0, if it does not exist, 5947 // otherwise return the iterator to the existing one. 5948 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 5949 ReuseShuffleIndicies, CurrentOrder); 5950 // This is a special case, as it does not gather, but at the same time 5951 // we are not extending buildTree_rec() towards the operands. 
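      // Instead, the single recorded operand is VL.size() copies of the
      // common source vector, e.g. for four scalars of the form
      // `extractelement <4 x i32> %v, i32 Idx` the operand list is
      // { %v, %v, %v, %v }. (Clarifying example.)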
5952 ValueList Op0; 5953 Op0.assign(VL.size(), VL0->getOperand(0)); 5954 VectorizableTree.back()->setOperand(0, Op0); 5955 return; 5956 } 5957 case Instruction::InsertElement: { 5958 assert(ReuseShuffleIndicies.empty() && "All inserts should be unique"); 5959 5960 auto OrdCompare = [](const std::pair<int, int> &P1, 5961 const std::pair<int, int> &P2) { 5962 return P1.first > P2.first; 5963 }; 5964 PriorityQueue<std::pair<int, int>, SmallVector<std::pair<int, int>>, 5965 decltype(OrdCompare)> 5966 Indices(OrdCompare); 5967 for (int I = 0, E = VL.size(); I < E; ++I) { 5968 unsigned Idx = *getInsertIndex(VL[I]); 5969 Indices.emplace(Idx, I); 5970 } 5971 OrdersType CurrentOrder(VL.size(), VL.size()); 5972 bool IsIdentity = true; 5973 for (int I = 0, E = VL.size(); I < E; ++I) { 5974 CurrentOrder[Indices.top().second] = I; 5975 IsIdentity &= Indices.top().second == I; 5976 Indices.pop(); 5977 } 5978 if (IsIdentity) 5979 CurrentOrder.clear(); 5980 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 5981 std::nullopt, CurrentOrder); 5982 LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n"); 5983 5984 constexpr int NumOps = 2; 5985 ValueList VectorOperands[NumOps]; 5986 for (int I = 0; I < NumOps; ++I) { 5987 for (Value *V : VL) 5988 VectorOperands[I].push_back(cast<Instruction>(V)->getOperand(I)); 5989 5990 TE->setOperand(I, VectorOperands[I]); 5991 } 5992 buildTree_rec(VectorOperands[NumOps - 1], Depth + 1, {TE, NumOps - 1}); 5993 return; 5994 } 5995 case Instruction::Load: { 5996 // Check that a vectorized load would load the same memory as a scalar 5997 // load. For example, we don't want to vectorize loads that are smaller 5998 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 5999 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 6000 // from such a struct, we read/write packed bits disagreeing with the 6001 // unvectorized version. 6002 TreeEntry *TE = nullptr; 6003 fixupOrderingIndices(CurrentOrder); 6004 switch (State) { 6005 case TreeEntry::Vectorize: 6006 if (CurrentOrder.empty()) { 6007 // Original loads are consecutive and does not require reordering. 6008 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6009 ReuseShuffleIndicies); 6010 LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n"); 6011 } else { 6012 // Need to reorder. 6013 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6014 ReuseShuffleIndicies, CurrentOrder); 6015 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n"); 6016 } 6017 TE->setOperandsInOrder(); 6018 break; 6019 case TreeEntry::PossibleStridedVectorize: 6020 // Vectorizing non-consecutive loads with `llvm.masked.gather`. 6021 if (CurrentOrder.empty()) { 6022 TE = newTreeEntry(VL, TreeEntry::PossibleStridedVectorize, Bundle, S, 6023 UserTreeIdx, ReuseShuffleIndicies); 6024 } else { 6025 TE = newTreeEntry(VL, TreeEntry::PossibleStridedVectorize, Bundle, S, 6026 UserTreeIdx, ReuseShuffleIndicies, CurrentOrder); 6027 } 6028 TE->setOperandsInOrder(); 6029 buildTree_rec(PointerOps, Depth + 1, {TE, 0}); 6030 LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n"); 6031 break; 6032 case TreeEntry::ScatterVectorize: 6033 // Vectorizing non-consecutive loads with `llvm.masked.gather`. 
6034 TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S, 6035 UserTreeIdx, ReuseShuffleIndicies); 6036 TE->setOperandsInOrder(); 6037 buildTree_rec(PointerOps, Depth + 1, {TE, 0}); 6038 LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n"); 6039 break; 6040 case TreeEntry::NeedToGather: 6041 llvm_unreachable("Unexpected loads state."); 6042 } 6043 return; 6044 } 6045 case Instruction::ZExt: 6046 case Instruction::SExt: 6047 case Instruction::FPToUI: 6048 case Instruction::FPToSI: 6049 case Instruction::FPExt: 6050 case Instruction::PtrToInt: 6051 case Instruction::IntToPtr: 6052 case Instruction::SIToFP: 6053 case Instruction::UIToFP: 6054 case Instruction::Trunc: 6055 case Instruction::FPTrunc: 6056 case Instruction::BitCast: { 6057 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6058 ReuseShuffleIndicies); 6059 LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n"); 6060 6061 TE->setOperandsInOrder(); 6062 for (unsigned I : seq<unsigned>(0, VL0->getNumOperands())) { 6063 ValueList Operands; 6064 // Prepare the operand vector. 6065 for (Value *V : VL) 6066 Operands.push_back(cast<Instruction>(V)->getOperand(I)); 6067 6068 buildTree_rec(Operands, Depth + 1, {TE, I}); 6069 } 6070 return; 6071 } 6072 case Instruction::ICmp: 6073 case Instruction::FCmp: { 6074 // Check that all of the compares have the same predicate. 6075 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 6076 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6077 ReuseShuffleIndicies); 6078 LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n"); 6079 6080 ValueList Left, Right; 6081 if (cast<CmpInst>(VL0)->isCommutative()) { 6082 // Commutative predicate - collect + sort operands of the instructions 6083 // so that each side is more likely to have the same opcode. 6084 assert(P0 == CmpInst::getSwappedPredicate(P0) && 6085 "Commutative Predicate mismatch"); 6086 reorderInputsAccordingToOpcode(VL, Left, Right, *TLI, *DL, *SE, *this); 6087 } else { 6088 // Collect operands - commute if it uses the swapped predicate. 6089 for (Value *V : VL) { 6090 auto *Cmp = cast<CmpInst>(V); 6091 Value *LHS = Cmp->getOperand(0); 6092 Value *RHS = Cmp->getOperand(1); 6093 if (Cmp->getPredicate() != P0) 6094 std::swap(LHS, RHS); 6095 Left.push_back(LHS); 6096 Right.push_back(RHS); 6097 } 6098 } 6099 TE->setOperand(0, Left); 6100 TE->setOperand(1, Right); 6101 buildTree_rec(Left, Depth + 1, {TE, 0}); 6102 buildTree_rec(Right, Depth + 1, {TE, 1}); 6103 return; 6104 } 6105 case Instruction::Select: 6106 case Instruction::FNeg: 6107 case Instruction::Add: 6108 case Instruction::FAdd: 6109 case Instruction::Sub: 6110 case Instruction::FSub: 6111 case Instruction::Mul: 6112 case Instruction::FMul: 6113 case Instruction::UDiv: 6114 case Instruction::SDiv: 6115 case Instruction::FDiv: 6116 case Instruction::URem: 6117 case Instruction::SRem: 6118 case Instruction::FRem: 6119 case Instruction::Shl: 6120 case Instruction::LShr: 6121 case Instruction::AShr: 6122 case Instruction::And: 6123 case Instruction::Or: 6124 case Instruction::Xor: { 6125 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6126 ReuseShuffleIndicies); 6127 LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n"); 6128 6129 // Sort operands of the instructions so that each side is more likely to 6130 // have the same opcode. 
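      // E.g. for a commutative bundle { x0 + 1, 2 + x1, x2 + 3, 4 + x3 } the
      // per-lane operands can be commuted so that Left = { x0, x1, x2, x3 }
      // and Right = { 1, 2, 3, 4 }, yielding one vectorizable operand bundle
      // and one constant vector. (Simplified illustration; the reordering
      // heuristic below also weighs look-ahead scores and splats.)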
6131 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) { 6132 ValueList Left, Right; 6133 reorderInputsAccordingToOpcode(VL, Left, Right, *TLI, *DL, *SE, *this); 6134 TE->setOperand(0, Left); 6135 TE->setOperand(1, Right); 6136 buildTree_rec(Left, Depth + 1, {TE, 0}); 6137 buildTree_rec(Right, Depth + 1, {TE, 1}); 6138 return; 6139 } 6140 6141 TE->setOperandsInOrder(); 6142 for (unsigned I : seq<unsigned>(0, VL0->getNumOperands())) { 6143 ValueList Operands; 6144 // Prepare the operand vector. 6145 for (Value *V : VL) 6146 Operands.push_back(cast<Instruction>(V)->getOperand(I)); 6147 6148 buildTree_rec(Operands, Depth + 1, {TE, I}); 6149 } 6150 return; 6151 } 6152 case Instruction::GetElementPtr: { 6153 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6154 ReuseShuffleIndicies); 6155 LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n"); 6156 SmallVector<ValueList, 2> Operands(2); 6157 // Prepare the operand vector for pointer operands. 6158 for (Value *V : VL) { 6159 auto *GEP = dyn_cast<GetElementPtrInst>(V); 6160 if (!GEP) { 6161 Operands.front().push_back(V); 6162 continue; 6163 } 6164 Operands.front().push_back(GEP->getPointerOperand()); 6165 } 6166 TE->setOperand(0, Operands.front()); 6167 // Need to cast all indices to the same type before vectorization to 6168 // avoid crash. 6169 // Required to be able to find correct matches between different gather 6170 // nodes and reuse the vectorized values rather than trying to gather them 6171 // again. 6172 int IndexIdx = 1; 6173 Type *VL0Ty = VL0->getOperand(IndexIdx)->getType(); 6174 Type *Ty = all_of(VL, 6175 [VL0Ty, IndexIdx](Value *V) { 6176 auto *GEP = dyn_cast<GetElementPtrInst>(V); 6177 if (!GEP) 6178 return true; 6179 return VL0Ty == GEP->getOperand(IndexIdx)->getType(); 6180 }) 6181 ? VL0Ty 6182 : DL->getIndexType(cast<GetElementPtrInst>(VL0) 6183 ->getPointerOperandType() 6184 ->getScalarType()); 6185 // Prepare the operand vector. 6186 for (Value *V : VL) { 6187 auto *I = dyn_cast<GetElementPtrInst>(V); 6188 if (!I) { 6189 Operands.back().push_back( 6190 ConstantInt::get(Ty, 0, /*isSigned=*/false)); 6191 continue; 6192 } 6193 auto *Op = I->getOperand(IndexIdx); 6194 auto *CI = dyn_cast<ConstantInt>(Op); 6195 if (!CI) 6196 Operands.back().push_back(Op); 6197 else 6198 Operands.back().push_back(ConstantFoldIntegerCast( 6199 CI, Ty, CI->getValue().isSignBitSet(), *DL)); 6200 } 6201 TE->setOperand(IndexIdx, Operands.back()); 6202 6203 for (unsigned I = 0, Ops = Operands.size(); I < Ops; ++I) 6204 buildTree_rec(Operands[I], Depth + 1, {TE, I}); 6205 return; 6206 } 6207 case Instruction::Store: { 6208 // Check if the stores are consecutive or if we need to swizzle them. 6209 ValueList Operands(VL.size()); 6210 auto *OIter = Operands.begin(); 6211 for (Value *V : VL) { 6212 auto *SI = cast<StoreInst>(V); 6213 *OIter = SI->getValueOperand(); 6214 ++OIter; 6215 } 6216 // Check that the sorted pointer operands are consecutive. 6217 if (CurrentOrder.empty()) { 6218 // Original stores are consecutive and does not require reordering. 
6219 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6220 ReuseShuffleIndicies); 6221 TE->setOperandsInOrder(); 6222 buildTree_rec(Operands, Depth + 1, {TE, 0}); 6223 LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n"); 6224 } else { 6225 fixupOrderingIndices(CurrentOrder); 6226 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6227 ReuseShuffleIndicies, CurrentOrder); 6228 TE->setOperandsInOrder(); 6229 buildTree_rec(Operands, Depth + 1, {TE, 0}); 6230 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n"); 6231 } 6232 return; 6233 } 6234 case Instruction::Call: { 6235 // Check if the calls are all to the same vectorizable intrinsic or 6236 // library function. 6237 CallInst *CI = cast<CallInst>(VL0); 6238 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6239 6240 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6241 ReuseShuffleIndicies); 6242 TE->setOperandsInOrder(); 6243 for (unsigned I : seq<unsigned>(0, CI->arg_size())) { 6244 // For scalar operands no need to create an entry since no need to 6245 // vectorize it. 6246 if (isVectorIntrinsicWithScalarOpAtArg(ID, I)) 6247 continue; 6248 ValueList Operands; 6249 // Prepare the operand vector. 6250 for (Value *V : VL) { 6251 auto *CI2 = cast<CallInst>(V); 6252 Operands.push_back(CI2->getArgOperand(I)); 6253 } 6254 buildTree_rec(Operands, Depth + 1, {TE, I}); 6255 } 6256 return; 6257 } 6258 case Instruction::ShuffleVector: { 6259 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6260 ReuseShuffleIndicies); 6261 LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n"); 6262 6263 // Reorder operands if reordering would enable vectorization. 6264 auto *CI = dyn_cast<CmpInst>(VL0); 6265 if (isa<BinaryOperator>(VL0) || CI) { 6266 ValueList Left, Right; 6267 if (!CI || all_of(VL, [](Value *V) { 6268 return cast<CmpInst>(V)->isCommutative(); 6269 })) { 6270 reorderInputsAccordingToOpcode(VL, Left, Right, *TLI, *DL, *SE, 6271 *this); 6272 } else { 6273 auto *MainCI = cast<CmpInst>(S.MainOp); 6274 auto *AltCI = cast<CmpInst>(S.AltOp); 6275 CmpInst::Predicate MainP = MainCI->getPredicate(); 6276 CmpInst::Predicate AltP = AltCI->getPredicate(); 6277 assert(MainP != AltP && 6278 "Expected different main/alternate predicates."); 6279 // Collect operands - commute if it uses the swapped predicate or 6280 // alternate operation. 6281 for (Value *V : VL) { 6282 auto *Cmp = cast<CmpInst>(V); 6283 Value *LHS = Cmp->getOperand(0); 6284 Value *RHS = Cmp->getOperand(1); 6285 6286 if (isAlternateInstruction(Cmp, MainCI, AltCI, *TLI)) { 6287 if (AltP == CmpInst::getSwappedPredicate(Cmp->getPredicate())) 6288 std::swap(LHS, RHS); 6289 } else { 6290 if (MainP == CmpInst::getSwappedPredicate(Cmp->getPredicate())) 6291 std::swap(LHS, RHS); 6292 } 6293 Left.push_back(LHS); 6294 Right.push_back(RHS); 6295 } 6296 } 6297 TE->setOperand(0, Left); 6298 TE->setOperand(1, Right); 6299 buildTree_rec(Left, Depth + 1, {TE, 0}); 6300 buildTree_rec(Right, Depth + 1, {TE, 1}); 6301 return; 6302 } 6303 6304 TE->setOperandsInOrder(); 6305 for (unsigned I : seq<unsigned>(0, VL0->getNumOperands())) { 6306 ValueList Operands; 6307 // Prepare the operand vector. 
6308 for (Value *V : VL) 6309 Operands.push_back(cast<Instruction>(V)->getOperand(I)); 6310 6311 buildTree_rec(Operands, Depth + 1, {TE, I}); 6312 } 6313 return; 6314 } 6315 default: 6316 break; 6317 } 6318 llvm_unreachable("Unexpected vectorization of the instructions."); 6319 } 6320 6321 unsigned BoUpSLP::canMapToVector(Type *T) const { 6322 unsigned N = 1; 6323 Type *EltTy = T; 6324 6325 while (isa<StructType, ArrayType, FixedVectorType>(EltTy)) { 6326 if (auto *ST = dyn_cast<StructType>(EltTy)) { 6327 // Check that struct is homogeneous. 6328 for (const auto *Ty : ST->elements()) 6329 if (Ty != *ST->element_begin()) 6330 return 0; 6331 N *= ST->getNumElements(); 6332 EltTy = *ST->element_begin(); 6333 } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) { 6334 N *= AT->getNumElements(); 6335 EltTy = AT->getElementType(); 6336 } else { 6337 auto *VT = cast<FixedVectorType>(EltTy); 6338 N *= VT->getNumElements(); 6339 EltTy = VT->getElementType(); 6340 } 6341 } 6342 6343 if (!isValidElementType(EltTy)) 6344 return 0; 6345 uint64_t VTSize = DL->getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N)); 6346 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || 6347 VTSize != DL->getTypeStoreSizeInBits(T)) 6348 return 0; 6349 return N; 6350 } 6351 6352 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 6353 SmallVectorImpl<unsigned> &CurrentOrder, 6354 bool ResizeAllowed) const { 6355 const auto *It = find_if(VL, [](Value *V) { 6356 return isa<ExtractElementInst, ExtractValueInst>(V); 6357 }); 6358 assert(It != VL.end() && "Expected at least one extract instruction."); 6359 auto *E0 = cast<Instruction>(*It); 6360 assert(all_of(VL, 6361 [](Value *V) { 6362 return isa<UndefValue, ExtractElementInst, ExtractValueInst>( 6363 V); 6364 }) && 6365 "Invalid opcode"); 6366 // Check if all of the extracts come from the same vector and from the 6367 // correct offset. 6368 Value *Vec = E0->getOperand(0); 6369 6370 CurrentOrder.clear(); 6371 6372 // We have to extract from a vector/aggregate with the same number of elements. 6373 unsigned NElts; 6374 if (E0->getOpcode() == Instruction::ExtractValue) { 6375 NElts = canMapToVector(Vec->getType()); 6376 if (!NElts) 6377 return false; 6378 // Check if load can be rewritten as load of vector. 6379 LoadInst *LI = dyn_cast<LoadInst>(Vec); 6380 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size())) 6381 return false; 6382 } else { 6383 NElts = cast<FixedVectorType>(Vec->getType())->getNumElements(); 6384 } 6385 6386 unsigned E = VL.size(); 6387 if (!ResizeAllowed && NElts != E) 6388 return false; 6389 SmallVector<int> Indices(E, PoisonMaskElem); 6390 unsigned MinIdx = NElts, MaxIdx = 0; 6391 for (auto [I, V] : enumerate(VL)) { 6392 auto *Inst = dyn_cast<Instruction>(V); 6393 if (!Inst) 6394 continue; 6395 if (Inst->getOperand(0) != Vec) 6396 return false; 6397 if (auto *EE = dyn_cast<ExtractElementInst>(Inst)) 6398 if (isa<UndefValue>(EE->getIndexOperand())) 6399 continue; 6400 std::optional<unsigned> Idx = getExtractIndex(Inst); 6401 if (!Idx) 6402 return false; 6403 const unsigned ExtIdx = *Idx; 6404 if (ExtIdx >= NElts) 6405 continue; 6406 Indices[I] = ExtIdx; 6407 if (MinIdx > ExtIdx) 6408 MinIdx = ExtIdx; 6409 if (MaxIdx < ExtIdx) 6410 MaxIdx = ExtIdx; 6411 } 6412 if (MaxIdx - MinIdx + 1 > E) 6413 return false; 6414 if (MaxIdx + 1 <= E) 6415 MinIdx = 0; 6416 6417 // Check that all of the indices extract from the correct offset. 
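  // E.g. if the (MinIdx-adjusted) extract indices per lane are {1, 2, 3, 0},
  // the loop below records CurrentOrder[ExtIdx] = Lane, i.e. CurrentOrder
  // becomes {3, 0, 1, 2} and ShouldKeepOrder ends up false; for already
  // consecutive indices {0, 1, 2, 3} the order is cleared and true is
  // returned. (Worked example for illustration only.)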
6418 bool ShouldKeepOrder = true; 6419 // Assign to all items the initial value E + 1 so we can check if the extract 6420 // instruction index was used already. 6421 // Also, later we can check that all the indices are used and we have a 6422 // consecutive access in the extract instructions, by checking that no 6423 // element of CurrentOrder still has value E + 1. 6424 CurrentOrder.assign(E, E); 6425 for (unsigned I = 0; I < E; ++I) { 6426 if (Indices[I] == PoisonMaskElem) 6427 continue; 6428 const unsigned ExtIdx = Indices[I] - MinIdx; 6429 if (CurrentOrder[ExtIdx] != E) { 6430 CurrentOrder.clear(); 6431 return false; 6432 } 6433 ShouldKeepOrder &= ExtIdx == I; 6434 CurrentOrder[ExtIdx] = I; 6435 } 6436 if (ShouldKeepOrder) 6437 CurrentOrder.clear(); 6438 6439 return ShouldKeepOrder; 6440 } 6441 6442 bool BoUpSLP::areAllUsersVectorized( 6443 Instruction *I, const SmallDenseSet<Value *> *VectorizedVals) const { 6444 return (I->hasOneUse() && (!VectorizedVals || VectorizedVals->contains(I))) || 6445 all_of(I->users(), [this](User *U) { 6446 return ScalarToTreeEntry.contains(U) || 6447 isVectorLikeInstWithConstOps(U) || 6448 (isa<ExtractElementInst>(U) && MustGather.contains(U)); 6449 }); 6450 } 6451 6452 static std::pair<InstructionCost, InstructionCost> 6453 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy, 6454 TargetTransformInfo *TTI, TargetLibraryInfo *TLI) { 6455 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6456 6457 // Calculate the cost of the scalar and vector calls. 6458 SmallVector<Type *, 4> VecTys; 6459 for (Use &Arg : CI->args()) 6460 VecTys.push_back( 6461 FixedVectorType::get(Arg->getType(), VecTy->getNumElements())); 6462 FastMathFlags FMF; 6463 if (auto *FPCI = dyn_cast<FPMathOperator>(CI)) 6464 FMF = FPCI->getFastMathFlags(); 6465 SmallVector<const Value *> Arguments(CI->args()); 6466 IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, VecTys, FMF, 6467 dyn_cast<IntrinsicInst>(CI)); 6468 auto IntrinsicCost = 6469 TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput); 6470 6471 auto Shape = VFShape::get(CI->getFunctionType(), 6472 ElementCount::getFixed(VecTy->getNumElements()), 6473 false /*HasGlobalPred*/); 6474 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 6475 auto LibCost = IntrinsicCost; 6476 if (!CI->isNoBuiltin() && VecFunc) { 6477 // Calculate the cost of the vector library call. 6478 // If the corresponding vector call is cheaper, return its cost. 
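    // E.g. if the target's VFDatabase maps this call to a 4-wide vector
    // library routine, its call cost is computed here, and the caller is
    // expected to pick the cheaper of the intrinsic and library variants.
    // (Descriptive note, not an additional code path.)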
6479 LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys, 6480 TTI::TCK_RecipThroughput); 6481 } 6482 return {IntrinsicCost, LibCost}; 6483 } 6484 6485 void BoUpSLP::TreeEntry::buildAltOpShuffleMask( 6486 const function_ref<bool(Instruction *)> IsAltOp, SmallVectorImpl<int> &Mask, 6487 SmallVectorImpl<Value *> *OpScalars, 6488 SmallVectorImpl<Value *> *AltScalars) const { 6489 unsigned Sz = Scalars.size(); 6490 Mask.assign(Sz, PoisonMaskElem); 6491 SmallVector<int> OrderMask; 6492 if (!ReorderIndices.empty()) 6493 inversePermutation(ReorderIndices, OrderMask); 6494 for (unsigned I = 0; I < Sz; ++I) { 6495 unsigned Idx = I; 6496 if (!ReorderIndices.empty()) 6497 Idx = OrderMask[I]; 6498 auto *OpInst = cast<Instruction>(Scalars[Idx]); 6499 if (IsAltOp(OpInst)) { 6500 Mask[I] = Sz + Idx; 6501 if (AltScalars) 6502 AltScalars->push_back(OpInst); 6503 } else { 6504 Mask[I] = Idx; 6505 if (OpScalars) 6506 OpScalars->push_back(OpInst); 6507 } 6508 } 6509 if (!ReuseShuffleIndices.empty()) { 6510 SmallVector<int> NewMask(ReuseShuffleIndices.size(), PoisonMaskElem); 6511 transform(ReuseShuffleIndices, NewMask.begin(), [&Mask](int Idx) { 6512 return Idx != PoisonMaskElem ? Mask[Idx] : PoisonMaskElem; 6513 }); 6514 Mask.swap(NewMask); 6515 } 6516 } 6517 6518 static bool isAlternateInstruction(const Instruction *I, 6519 const Instruction *MainOp, 6520 const Instruction *AltOp, 6521 const TargetLibraryInfo &TLI) { 6522 if (auto *MainCI = dyn_cast<CmpInst>(MainOp)) { 6523 auto *AltCI = cast<CmpInst>(AltOp); 6524 CmpInst::Predicate MainP = MainCI->getPredicate(); 6525 CmpInst::Predicate AltP = AltCI->getPredicate(); 6526 assert(MainP != AltP && "Expected different main/alternate predicates."); 6527 auto *CI = cast<CmpInst>(I); 6528 if (isCmpSameOrSwapped(MainCI, CI, TLI)) 6529 return false; 6530 if (isCmpSameOrSwapped(AltCI, CI, TLI)) 6531 return true; 6532 CmpInst::Predicate P = CI->getPredicate(); 6533 CmpInst::Predicate SwappedP = CmpInst::getSwappedPredicate(P); 6534 6535 assert((MainP == P || AltP == P || MainP == SwappedP || AltP == SwappedP) && 6536 "CmpInst expected to match either main or alternate predicate or " 6537 "their swap."); 6538 (void)AltP; 6539 return MainP != P && MainP != SwappedP; 6540 } 6541 return I->getOpcode() == AltOp->getOpcode(); 6542 } 6543 6544 TTI::OperandValueInfo BoUpSLP::getOperandInfo(ArrayRef<Value *> Ops) { 6545 assert(!Ops.empty()); 6546 const auto *Op0 = Ops.front(); 6547 6548 const bool IsConstant = all_of(Ops, [](Value *V) { 6549 // TODO: We should allow undef elements here 6550 return isConstant(V) && !isa<UndefValue>(V); 6551 }); 6552 const bool IsUniform = all_of(Ops, [=](Value *V) { 6553 // TODO: We should allow undef elements here 6554 return V == Op0; 6555 }); 6556 const bool IsPowerOfTwo = all_of(Ops, [](Value *V) { 6557 // TODO: We should allow undef elements here 6558 if (auto *CI = dyn_cast<ConstantInt>(V)) 6559 return CI->getValue().isPowerOf2(); 6560 return false; 6561 }); 6562 const bool IsNegatedPowerOfTwo = all_of(Ops, [](Value *V) { 6563 // TODO: We should allow undef elements here 6564 if (auto *CI = dyn_cast<ConstantInt>(V)) 6565 return CI->getValue().isNegatedPowerOf2(); 6566 return false; 6567 }); 6568 6569 TTI::OperandValueKind VK = TTI::OK_AnyValue; 6570 if (IsConstant && IsUniform) 6571 VK = TTI::OK_UniformConstantValue; 6572 else if (IsConstant) 6573 VK = TTI::OK_NonUniformConstantValue; 6574 else if (IsUniform) 6575 VK = TTI::OK_UniformValue; 6576 6577 TTI::OperandValueProperties VP = TTI::OP_None; 6578 VP = IsPowerOfTwo ? 
TTI::OP_PowerOf2 : VP; 6579 VP = IsNegatedPowerOfTwo ? TTI::OP_NegatedPowerOf2 : VP; 6580 6581 return {VK, VP}; 6582 } 6583 6584 namespace { 6585 /// The base class for shuffle instruction emission and shuffle cost estimation. 6586 class BaseShuffleAnalysis { 6587 protected: 6588 /// Checks if the mask is an identity mask. 6589 /// \param IsStrict if is true the function returns false if mask size does 6590 /// not match vector size. 6591 static bool isIdentityMask(ArrayRef<int> Mask, const FixedVectorType *VecTy, 6592 bool IsStrict) { 6593 int Limit = Mask.size(); 6594 int VF = VecTy->getNumElements(); 6595 int Index = -1; 6596 if (VF == Limit && ShuffleVectorInst::isIdentityMask(Mask, Limit)) 6597 return true; 6598 if (!IsStrict) { 6599 // Consider extract subvector starting from index 0. 6600 if (ShuffleVectorInst::isExtractSubvectorMask(Mask, VF, Index) && 6601 Index == 0) 6602 return true; 6603 // All VF-size submasks are identity (e.g. 6604 // <poison,poison,poison,poison,0,1,2,poison,poison,1,2,3> etc. for VF 4). 6605 if (Limit % VF == 0 && all_of(seq<int>(0, Limit / VF), [=](int Idx) { 6606 ArrayRef<int> Slice = Mask.slice(Idx * VF, VF); 6607 return all_of(Slice, [](int I) { return I == PoisonMaskElem; }) || 6608 ShuffleVectorInst::isIdentityMask(Slice, VF); 6609 })) 6610 return true; 6611 } 6612 return false; 6613 } 6614 6615 /// Tries to combine 2 different masks into single one. 6616 /// \param LocalVF Vector length of the permuted input vector. \p Mask may 6617 /// change the size of the vector, \p LocalVF is the original size of the 6618 /// shuffled vector. 6619 static void combineMasks(unsigned LocalVF, SmallVectorImpl<int> &Mask, 6620 ArrayRef<int> ExtMask) { 6621 unsigned VF = Mask.size(); 6622 SmallVector<int> NewMask(ExtMask.size(), PoisonMaskElem); 6623 for (int I = 0, Sz = ExtMask.size(); I < Sz; ++I) { 6624 if (ExtMask[I] == PoisonMaskElem) 6625 continue; 6626 int MaskedIdx = Mask[ExtMask[I] % VF]; 6627 NewMask[I] = 6628 MaskedIdx == PoisonMaskElem ? PoisonMaskElem : MaskedIdx % LocalVF; 6629 } 6630 Mask.swap(NewMask); 6631 } 6632 6633 /// Looks through shuffles trying to reduce final number of shuffles in the 6634 /// code. The function looks through the previously emitted shuffle 6635 /// instructions and properly mark indices in mask as undef. 6636 /// For example, given the code 6637 /// \code 6638 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0> 6639 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0> 6640 /// \endcode 6641 /// and if need to emit shuffle of %s1 and %s2 with mask <1, 0, 3, 2>, it will 6642 /// look through %s1 and %s2 and select vectors %0 and %1 with mask 6643 /// <0, 1, 2, 3> for the shuffle. 6644 /// If 2 operands are of different size, the smallest one will be resized and 6645 /// the mask recalculated properly. 6646 /// For example, given the code 6647 /// \code 6648 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0, 1, 0> 6649 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0, 1, 0> 6650 /// \endcode 6651 /// and if need to emit shuffle of %s1 and %s2 with mask <1, 0, 5, 4>, it will 6652 /// look through %s1 and %s2 and select vectors %0 and %1 with mask 6653 /// <0, 1, 2, 3> for the shuffle. 6654 /// So, it tries to transform permutations to simple vector merge, if 6655 /// possible. 6656 /// \param V The input vector which must be shuffled using the given \p Mask. 6657 /// If the better candidate is found, \p V is set to this best candidate 6658 /// vector. 6659 /// \param Mask The input mask for the shuffle. 
If the best candidate is found 6660 /// during looking-through-shuffles attempt, it is updated accordingly. 6661 /// \param SinglePermute true if the shuffle operation is originally a 6662 /// single-value-permutation. In this case the look-through-shuffles procedure 6663 /// may look for resizing shuffles as the best candidates. 6664 /// \return true if the shuffle results in the non-resizing identity shuffle 6665 /// (and thus can be ignored), false - otherwise. 6666 static bool peekThroughShuffles(Value *&V, SmallVectorImpl<int> &Mask, 6667 bool SinglePermute) { 6668 Value *Op = V; 6669 ShuffleVectorInst *IdentityOp = nullptr; 6670 SmallVector<int> IdentityMask; 6671 while (auto *SV = dyn_cast<ShuffleVectorInst>(Op)) { 6672 // Exit if not a fixed vector type or changing size shuffle. 6673 auto *SVTy = dyn_cast<FixedVectorType>(SV->getType()); 6674 if (!SVTy) 6675 break; 6676 // Remember the identity or broadcast mask, if it is not a resizing 6677 // shuffle. If no better candidates are found, this Op and Mask will be 6678 // used in the final shuffle. 6679 if (isIdentityMask(Mask, SVTy, /*IsStrict=*/false)) { 6680 if (!IdentityOp || !SinglePermute || 6681 (isIdentityMask(Mask, SVTy, /*IsStrict=*/true) && 6682 !ShuffleVectorInst::isZeroEltSplatMask(IdentityMask, 6683 IdentityMask.size()))) { 6684 IdentityOp = SV; 6685 // Store current mask in the IdentityMask so later we did not lost 6686 // this info if IdentityOp is selected as the best candidate for the 6687 // permutation. 6688 IdentityMask.assign(Mask); 6689 } 6690 } 6691 // Remember the broadcast mask. If no better candidates are found, this Op 6692 // and Mask will be used in the final shuffle. 6693 // Zero splat can be used as identity too, since it might be used with 6694 // mask <0, 1, 2, ...>, i.e. identity mask without extra reshuffling. 6695 // E.g. if need to shuffle the vector with the mask <3, 1, 2, 0>, which is 6696 // expensive, the analysis founds out, that the source vector is just a 6697 // broadcast, this original mask can be transformed to identity mask <0, 6698 // 1, 2, 3>. 6699 // \code 6700 // %0 = shuffle %v, poison, zeroinitalizer 6701 // %res = shuffle %0, poison, <3, 1, 2, 0> 6702 // \endcode 6703 // may be transformed to 6704 // \code 6705 // %0 = shuffle %v, poison, zeroinitalizer 6706 // %res = shuffle %0, poison, <0, 1, 2, 3> 6707 // \endcode 6708 if (SV->isZeroEltSplat()) { 6709 IdentityOp = SV; 6710 IdentityMask.assign(Mask); 6711 } 6712 int LocalVF = Mask.size(); 6713 if (auto *SVOpTy = 6714 dyn_cast<FixedVectorType>(SV->getOperand(0)->getType())) 6715 LocalVF = SVOpTy->getNumElements(); 6716 SmallVector<int> ExtMask(Mask.size(), PoisonMaskElem); 6717 for (auto [Idx, I] : enumerate(Mask)) { 6718 if (I == PoisonMaskElem || 6719 static_cast<unsigned>(I) >= SV->getShuffleMask().size()) 6720 continue; 6721 ExtMask[Idx] = SV->getMaskValue(I); 6722 } 6723 bool IsOp1Undef = 6724 isUndefVector(SV->getOperand(0), 6725 buildUseMask(LocalVF, ExtMask, UseMask::FirstArg)) 6726 .all(); 6727 bool IsOp2Undef = 6728 isUndefVector(SV->getOperand(1), 6729 buildUseMask(LocalVF, ExtMask, UseMask::SecondArg)) 6730 .all(); 6731 if (!IsOp1Undef && !IsOp2Undef) { 6732 // Update mask and mark undef elems. 
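        // E.g. if SV was created with mask <0, 1, poison, poison> and the
        // incoming Mask is <2, 0, 3, 1>, positions 0 and 2 of Mask read
        // poisoned lanes of SV and become poison, giving
        // <poison, 0, poison, 1>. (Worked example for illustration.)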
6733 for (int &I : Mask) { 6734 if (I == PoisonMaskElem) 6735 continue; 6736 if (SV->getMaskValue(I % SV->getShuffleMask().size()) == 6737 PoisonMaskElem) 6738 I = PoisonMaskElem; 6739 } 6740 break; 6741 } 6742 SmallVector<int> ShuffleMask(SV->getShuffleMask().begin(), 6743 SV->getShuffleMask().end()); 6744 combineMasks(LocalVF, ShuffleMask, Mask); 6745 Mask.swap(ShuffleMask); 6746 if (IsOp2Undef) 6747 Op = SV->getOperand(0); 6748 else 6749 Op = SV->getOperand(1); 6750 } 6751 if (auto *OpTy = dyn_cast<FixedVectorType>(Op->getType()); 6752 !OpTy || !isIdentityMask(Mask, OpTy, SinglePermute) || 6753 ShuffleVectorInst::isZeroEltSplatMask(Mask, Mask.size())) { 6754 if (IdentityOp) { 6755 V = IdentityOp; 6756 assert(Mask.size() == IdentityMask.size() && 6757 "Expected masks of same sizes."); 6758 // Clear known poison elements. 6759 for (auto [I, Idx] : enumerate(Mask)) 6760 if (Idx == PoisonMaskElem) 6761 IdentityMask[I] = PoisonMaskElem; 6762 Mask.swap(IdentityMask); 6763 auto *Shuffle = dyn_cast<ShuffleVectorInst>(V); 6764 return SinglePermute && 6765 (isIdentityMask(Mask, cast<FixedVectorType>(V->getType()), 6766 /*IsStrict=*/true) || 6767 (Shuffle && Mask.size() == Shuffle->getShuffleMask().size() && 6768 Shuffle->isZeroEltSplat() && 6769 ShuffleVectorInst::isZeroEltSplatMask(Mask, Mask.size()))); 6770 } 6771 V = Op; 6772 return false; 6773 } 6774 V = Op; 6775 return true; 6776 } 6777 6778 /// Smart shuffle instruction emission, walks through shuffles trees and 6779 /// tries to find the best matching vector for the actual shuffle 6780 /// instruction. 6781 template <typename T, typename ShuffleBuilderTy> 6782 static T createShuffle(Value *V1, Value *V2, ArrayRef<int> Mask, 6783 ShuffleBuilderTy &Builder) { 6784 assert(V1 && "Expected at least one vector value."); 6785 if (V2) 6786 Builder.resizeToMatch(V1, V2); 6787 int VF = Mask.size(); 6788 if (auto *FTy = dyn_cast<FixedVectorType>(V1->getType())) 6789 VF = FTy->getNumElements(); 6790 if (V2 && 6791 !isUndefVector(V2, buildUseMask(VF, Mask, UseMask::SecondArg)).all()) { 6792 // Peek through shuffles. 6793 Value *Op1 = V1; 6794 Value *Op2 = V2; 6795 int VF = 6796 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue(); 6797 SmallVector<int> CombinedMask1(Mask.size(), PoisonMaskElem); 6798 SmallVector<int> CombinedMask2(Mask.size(), PoisonMaskElem); 6799 for (int I = 0, E = Mask.size(); I < E; ++I) { 6800 if (Mask[I] < VF) 6801 CombinedMask1[I] = Mask[I]; 6802 else 6803 CombinedMask2[I] = Mask[I] - VF; 6804 } 6805 Value *PrevOp1; 6806 Value *PrevOp2; 6807 do { 6808 PrevOp1 = Op1; 6809 PrevOp2 = Op2; 6810 (void)peekThroughShuffles(Op1, CombinedMask1, /*SinglePermute=*/false); 6811 (void)peekThroughShuffles(Op2, CombinedMask2, /*SinglePermute=*/false); 6812 // Check if we have 2 resizing shuffles - need to peek through operands 6813 // again. 
6814 if (auto *SV1 = dyn_cast<ShuffleVectorInst>(Op1)) 6815 if (auto *SV2 = dyn_cast<ShuffleVectorInst>(Op2)) { 6816 SmallVector<int> ExtMask1(Mask.size(), PoisonMaskElem); 6817 for (auto [Idx, I] : enumerate(CombinedMask1)) { 6818 if (I == PoisonMaskElem) 6819 continue; 6820 ExtMask1[Idx] = SV1->getMaskValue(I); 6821 } 6822 SmallBitVector UseMask1 = buildUseMask( 6823 cast<FixedVectorType>(SV1->getOperand(1)->getType()) 6824 ->getNumElements(), 6825 ExtMask1, UseMask::SecondArg); 6826 SmallVector<int> ExtMask2(CombinedMask2.size(), PoisonMaskElem); 6827 for (auto [Idx, I] : enumerate(CombinedMask2)) { 6828 if (I == PoisonMaskElem) 6829 continue; 6830 ExtMask2[Idx] = SV2->getMaskValue(I); 6831 } 6832 SmallBitVector UseMask2 = buildUseMask( 6833 cast<FixedVectorType>(SV2->getOperand(1)->getType()) 6834 ->getNumElements(), 6835 ExtMask2, UseMask::SecondArg); 6836 if (SV1->getOperand(0)->getType() == 6837 SV2->getOperand(0)->getType() && 6838 SV1->getOperand(0)->getType() != SV1->getType() && 6839 isUndefVector(SV1->getOperand(1), UseMask1).all() && 6840 isUndefVector(SV2->getOperand(1), UseMask2).all()) { 6841 Op1 = SV1->getOperand(0); 6842 Op2 = SV2->getOperand(0); 6843 SmallVector<int> ShuffleMask1(SV1->getShuffleMask().begin(), 6844 SV1->getShuffleMask().end()); 6845 int LocalVF = ShuffleMask1.size(); 6846 if (auto *FTy = dyn_cast<FixedVectorType>(Op1->getType())) 6847 LocalVF = FTy->getNumElements(); 6848 combineMasks(LocalVF, ShuffleMask1, CombinedMask1); 6849 CombinedMask1.swap(ShuffleMask1); 6850 SmallVector<int> ShuffleMask2(SV2->getShuffleMask().begin(), 6851 SV2->getShuffleMask().end()); 6852 LocalVF = ShuffleMask2.size(); 6853 if (auto *FTy = dyn_cast<FixedVectorType>(Op2->getType())) 6854 LocalVF = FTy->getNumElements(); 6855 combineMasks(LocalVF, ShuffleMask2, CombinedMask2); 6856 CombinedMask2.swap(ShuffleMask2); 6857 } 6858 } 6859 } while (PrevOp1 != Op1 || PrevOp2 != Op2); 6860 Builder.resizeToMatch(Op1, Op2); 6861 VF = std::max(cast<VectorType>(Op1->getType()) 6862 ->getElementCount() 6863 .getKnownMinValue(), 6864 cast<VectorType>(Op2->getType()) 6865 ->getElementCount() 6866 .getKnownMinValue()); 6867 for (int I = 0, E = Mask.size(); I < E; ++I) { 6868 if (CombinedMask2[I] != PoisonMaskElem) { 6869 assert(CombinedMask1[I] == PoisonMaskElem && 6870 "Expected undefined mask element"); 6871 CombinedMask1[I] = CombinedMask2[I] + (Op1 == Op2 ? 0 : VF); 6872 } 6873 } 6874 if (Op1 == Op2 && 6875 (ShuffleVectorInst::isIdentityMask(CombinedMask1, VF) || 6876 (ShuffleVectorInst::isZeroEltSplatMask(CombinedMask1, VF) && 6877 isa<ShuffleVectorInst>(Op1) && 6878 cast<ShuffleVectorInst>(Op1)->getShuffleMask() == 6879 ArrayRef(CombinedMask1)))) 6880 return Builder.createIdentity(Op1); 6881 return Builder.createShuffleVector( 6882 Op1, Op1 == Op2 ? PoisonValue::get(Op1->getType()) : Op2, 6883 CombinedMask1); 6884 } 6885 if (isa<PoisonValue>(V1)) 6886 return Builder.createPoison( 6887 cast<VectorType>(V1->getType())->getElementType(), Mask.size()); 6888 SmallVector<int> NewMask(Mask.begin(), Mask.end()); 6889 bool IsIdentity = peekThroughShuffles(V1, NewMask, /*SinglePermute=*/true); 6890 assert(V1 && "Expected non-null value after looking through shuffles."); 6891 6892 if (!IsIdentity) 6893 return Builder.createShuffleVector(V1, NewMask); 6894 return Builder.createIdentity(V1); 6895 } 6896 }; 6897 } // namespace 6898 6899 /// Merges shuffle masks and emits final shuffle instruction, if required. It 6900 /// supports shuffling of 2 input vectors. 
It implements lazy shuffles emission, 6901 /// when the actual shuffle instruction is generated only if this is actually 6902 /// required. Otherwise, the shuffle instruction emission is delayed till the 6903 /// end of the process, to reduce the number of emitted instructions and further 6904 /// analysis/transformations. 6905 class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis { 6906 bool IsFinalized = false; 6907 SmallVector<int> CommonMask; 6908 SmallVector<PointerUnion<Value *, const TreeEntry *>, 2> InVectors; 6909 const TargetTransformInfo &TTI; 6910 InstructionCost Cost = 0; 6911 SmallDenseSet<Value *> VectorizedVals; 6912 BoUpSLP &R; 6913 SmallPtrSetImpl<Value *> &CheckedExtracts; 6914 constexpr static TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6915 /// While set, still trying to estimate the cost for the same nodes and we 6916 /// can delay actual cost estimation (virtual shuffle instruction emission). 6917 /// May help better estimate the cost if same nodes must be permuted + allows 6918 /// to move most of the long shuffles cost estimation to TTI. 6919 bool SameNodesEstimated = true; 6920 6921 static Constant *getAllOnesValue(const DataLayout &DL, Type *Ty) { 6922 if (Ty->getScalarType()->isPointerTy()) { 6923 Constant *Res = ConstantExpr::getIntToPtr( 6924 ConstantInt::getAllOnesValue( 6925 IntegerType::get(Ty->getContext(), 6926 DL.getTypeStoreSizeInBits(Ty->getScalarType()))), 6927 Ty->getScalarType()); 6928 if (auto *VTy = dyn_cast<VectorType>(Ty)) 6929 Res = ConstantVector::getSplat(VTy->getElementCount(), Res); 6930 return Res; 6931 } 6932 return Constant::getAllOnesValue(Ty); 6933 } 6934 6935 InstructionCost getBuildVectorCost(ArrayRef<Value *> VL, Value *Root) { 6936 if ((!Root && allConstant(VL)) || all_of(VL, UndefValue::classof)) 6937 return TTI::TCC_Free; 6938 auto *VecTy = FixedVectorType::get(VL.front()->getType(), VL.size()); 6939 InstructionCost GatherCost = 0; 6940 SmallVector<Value *> Gathers(VL.begin(), VL.end()); 6941 // Improve gather cost for gather of loads, if we can group some of the 6942 // loads into vector loads. 
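    // E.g. when gathering 8 scalars whose first 4 elements are consecutive
    // loads, the code below prices one 4-wide vector load (plus a subvector
    // insert if needed) and subtracts the now-dead scalar load costs,
    // instead of pricing 8 separate insertelements. (Rough sketch of the
    // intent; the exact accounting follows.)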
6943 InstructionsState S = getSameOpcode(VL, *R.TLI); 6944 const unsigned Sz = R.DL->getTypeSizeInBits(VL.front()->getType()); 6945 unsigned MinVF = R.getMinVF(2 * Sz); 6946 if (VL.size() > 2 && 6947 ((S.getOpcode() == Instruction::Load && !S.isAltShuffle()) || 6948 (InVectors.empty() && 6949 any_of(seq<unsigned>(0, VL.size() / MinVF), 6950 [&](unsigned Idx) { 6951 ArrayRef<Value *> SubVL = VL.slice(Idx * MinVF, MinVF); 6952 InstructionsState S = getSameOpcode(SubVL, *R.TLI); 6953 return S.getOpcode() == Instruction::Load && 6954 !S.isAltShuffle(); 6955 }))) && 6956 !all_of(Gathers, [&](Value *V) { return R.getTreeEntry(V); }) && 6957 !isSplat(Gathers)) { 6958 SetVector<Value *> VectorizedLoads; 6959 SmallVector<LoadInst *> VectorizedStarts; 6960 SmallVector<std::pair<unsigned, unsigned>> ScatterVectorized; 6961 unsigned StartIdx = 0; 6962 unsigned VF = VL.size() / 2; 6963 for (; VF >= MinVF; VF /= 2) { 6964 for (unsigned Cnt = StartIdx, End = VL.size(); Cnt + VF <= End; 6965 Cnt += VF) { 6966 ArrayRef<Value *> Slice = VL.slice(Cnt, VF); 6967 if (S.getOpcode() != Instruction::Load || S.isAltShuffle()) { 6968 InstructionsState SliceS = getSameOpcode(Slice, *R.TLI); 6969 if (SliceS.getOpcode() != Instruction::Load || 6970 SliceS.isAltShuffle()) 6971 continue; 6972 } 6973 if (!VectorizedLoads.count(Slice.front()) && 6974 !VectorizedLoads.count(Slice.back()) && allSameBlock(Slice)) { 6975 SmallVector<Value *> PointerOps; 6976 OrdersType CurrentOrder; 6977 LoadsState LS = 6978 canVectorizeLoads(Slice, Slice.front(), TTI, *R.DL, *R.SE, 6979 *R.LI, *R.TLI, CurrentOrder, PointerOps); 6980 switch (LS) { 6981 case LoadsState::Vectorize: 6982 case LoadsState::ScatterVectorize: 6983 case LoadsState::PossibleStridedVectorize: 6984 // Mark the vectorized loads so that we don't vectorize them 6985 // again. 6986 // TODO: better handling of loads with reorders. 6987 if (LS == LoadsState::Vectorize && CurrentOrder.empty()) 6988 VectorizedStarts.push_back(cast<LoadInst>(Slice.front())); 6989 else 6990 ScatterVectorized.emplace_back(Cnt, VF); 6991 VectorizedLoads.insert(Slice.begin(), Slice.end()); 6992 // If we vectorized initial block, no need to try to vectorize 6993 // it again. 6994 if (Cnt == StartIdx) 6995 StartIdx += VF; 6996 break; 6997 case LoadsState::Gather: 6998 break; 6999 } 7000 } 7001 } 7002 // Check if the whole array was vectorized already - exit. 7003 if (StartIdx >= VL.size()) 7004 break; 7005 // Found vectorizable parts - exit. 7006 if (!VectorizedLoads.empty()) 7007 break; 7008 } 7009 if (!VectorizedLoads.empty()) { 7010 unsigned NumParts = TTI.getNumberOfParts(VecTy); 7011 bool NeedInsertSubvectorAnalysis = 7012 !NumParts || (VL.size() / VF) > NumParts; 7013 // Get the cost for gathered loads. 7014 for (unsigned I = 0, End = VL.size(); I < End; I += VF) { 7015 if (VectorizedLoads.contains(VL[I])) 7016 continue; 7017 GatherCost += getBuildVectorCost(VL.slice(I, VF), Root); 7018 } 7019 // Exclude potentially vectorized loads from list of gathered 7020 // scalars. 7021 Gathers.assign(Gathers.size(), PoisonValue::get(VL.front()->getType())); 7022 // The cost for vectorized loads. 
7023 InstructionCost ScalarsCost = 0; 7024 for (Value *V : VectorizedLoads) { 7025 auto *LI = cast<LoadInst>(V); 7026 ScalarsCost += 7027 TTI.getMemoryOpCost(Instruction::Load, LI->getType(), 7028 LI->getAlign(), LI->getPointerAddressSpace(), 7029 CostKind, TTI::OperandValueInfo(), LI); 7030 } 7031 auto *LoadTy = FixedVectorType::get(VL.front()->getType(), VF); 7032 for (LoadInst *LI : VectorizedStarts) { 7033 Align Alignment = LI->getAlign(); 7034 GatherCost += 7035 TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, 7036 LI->getPointerAddressSpace(), CostKind, 7037 TTI::OperandValueInfo(), LI); 7038 } 7039 for (std::pair<unsigned, unsigned> P : ScatterVectorized) { 7040 auto *LI0 = cast<LoadInst>(VL[P.first]); 7041 Align CommonAlignment = LI0->getAlign(); 7042 for (Value *V : VL.slice(P.first + 1, VF - 1)) 7043 CommonAlignment = 7044 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign()); 7045 GatherCost += TTI.getGatherScatterOpCost( 7046 Instruction::Load, LoadTy, LI0->getPointerOperand(), 7047 /*VariableMask=*/false, CommonAlignment, CostKind, LI0); 7048 } 7049 if (NeedInsertSubvectorAnalysis) { 7050 // Add the cost for the subvectors insert. 7051 for (int I = VF, E = VL.size(); I < E; I += VF) 7052 GatherCost += TTI.getShuffleCost(TTI::SK_InsertSubvector, VecTy, 7053 std::nullopt, CostKind, I, LoadTy); 7054 } 7055 GatherCost -= ScalarsCost; 7056 } 7057 } else if (!Root && isSplat(VL)) { 7058 // Found the broadcasting of the single scalar, calculate the cost as 7059 // the broadcast. 7060 const auto *It = 7061 find_if(VL, [](Value *V) { return !isa<UndefValue>(V); }); 7062 assert(It != VL.end() && "Expected at least one non-undef value."); 7063 // Add broadcast for non-identity shuffle only. 7064 bool NeedShuffle = 7065 count(VL, *It) > 1 && 7066 (VL.front() != *It || !all_of(VL.drop_front(), UndefValue::classof)); 7067 InstructionCost InsertCost = TTI.getVectorInstrCost( 7068 Instruction::InsertElement, VecTy, CostKind, 7069 NeedShuffle ? 0 : std::distance(VL.begin(), It), 7070 PoisonValue::get(VecTy), *It); 7071 return InsertCost + 7072 (NeedShuffle ? TTI.getShuffleCost( 7073 TargetTransformInfo::SK_Broadcast, VecTy, 7074 /*Mask=*/std::nullopt, CostKind, /*Index=*/0, 7075 /*SubTp=*/nullptr, /*Args=*/*It) 7076 : TTI::TCC_Free); 7077 } 7078 return GatherCost + 7079 (all_of(Gathers, UndefValue::classof) 7080 ? TTI::TCC_Free 7081 : R.getGatherCost(Gathers, !Root && VL.equals(Gathers))); 7082 }; 7083 7084 /// Compute the cost of creating a vector containing the extracted values from 7085 /// \p VL. 7086 InstructionCost 7087 computeExtractCost(ArrayRef<Value *> VL, ArrayRef<int> Mask, 7088 ArrayRef<std::optional<TTI::ShuffleKind>> ShuffleKinds, 7089 unsigned NumParts) { 7090 assert(VL.size() > NumParts && "Unexpected scalarized shuffle."); 7091 unsigned NumElts = 7092 std::accumulate(VL.begin(), VL.end(), 0, [](unsigned Sz, Value *V) { 7093 auto *EE = dyn_cast<ExtractElementInst>(V); 7094 if (!EE) 7095 return Sz; 7096 auto *VecTy = cast<FixedVectorType>(EE->getVectorOperandType()); 7097 return std::max(Sz, VecTy->getNumElements()); 7098 }); 7099 unsigned NumSrcRegs = TTI.getNumberOfParts( 7100 FixedVectorType::get(VL.front()->getType(), NumElts)); 7101 if (NumSrcRegs == 0) 7102 NumSrcRegs = 1; 7103 // FIXME: this must be moved to TTI for better estimation. 
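    // E.g. with VL.size() == 8, NumParts == 2 and 4-element source
    // registers, EltsPerVector is 4 and each 4-wide sub-mask below is
    // classified as a single- or two-register permute; sub-masks touching
    // more than two registers fall back to the generic per-part shuffle
    // cost. (Illustrative numbers only.)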
7104 unsigned EltsPerVector = PowerOf2Ceil(std::max( 7105 divideCeil(VL.size(), NumParts), divideCeil(NumElts, NumSrcRegs))); 7106 auto CheckPerRegistersShuffle = 7107 [&](MutableArrayRef<int> Mask) -> std::optional<TTI::ShuffleKind> { 7108 DenseSet<int> RegIndices; 7109 // Check that if trying to permute same single/2 input vectors. 7110 TTI::ShuffleKind ShuffleKind = TTI::SK_PermuteSingleSrc; 7111 int FirstRegId = -1; 7112 for (int &I : Mask) { 7113 if (I == PoisonMaskElem) 7114 continue; 7115 int RegId = (I / NumElts) * NumParts + (I % NumElts) / EltsPerVector; 7116 if (FirstRegId < 0) 7117 FirstRegId = RegId; 7118 RegIndices.insert(RegId); 7119 if (RegIndices.size() > 2) 7120 return std::nullopt; 7121 if (RegIndices.size() == 2) 7122 ShuffleKind = TTI::SK_PermuteTwoSrc; 7123 I = (I % NumElts) % EltsPerVector + 7124 (RegId == FirstRegId ? 0 : EltsPerVector); 7125 } 7126 return ShuffleKind; 7127 }; 7128 InstructionCost Cost = 0; 7129 7130 // Process extracts in blocks of EltsPerVector to check if the source vector 7131 // operand can be re-used directly. If not, add the cost of creating a 7132 // shuffle to extract the values into a vector register. 7133 for (unsigned Part = 0; Part < NumParts; ++Part) { 7134 if (!ShuffleKinds[Part]) 7135 continue; 7136 ArrayRef<int> MaskSlice = 7137 Mask.slice(Part * EltsPerVector, 7138 (Part == NumParts - 1 && Mask.size() % EltsPerVector != 0) 7139 ? Mask.size() % EltsPerVector 7140 : EltsPerVector); 7141 SmallVector<int> SubMask(EltsPerVector, PoisonMaskElem); 7142 copy(MaskSlice, SubMask.begin()); 7143 std::optional<TTI::ShuffleKind> RegShuffleKind = 7144 CheckPerRegistersShuffle(SubMask); 7145 if (!RegShuffleKind) { 7146 Cost += TTI.getShuffleCost( 7147 *ShuffleKinds[Part], 7148 FixedVectorType::get(VL.front()->getType(), NumElts), MaskSlice); 7149 continue; 7150 } 7151 if (*RegShuffleKind != TTI::SK_PermuteSingleSrc || 7152 !ShuffleVectorInst::isIdentityMask(SubMask, EltsPerVector)) { 7153 Cost += TTI.getShuffleCost( 7154 *RegShuffleKind, 7155 FixedVectorType::get(VL.front()->getType(), EltsPerVector), 7156 SubMask); 7157 } 7158 } 7159 return Cost; 7160 } 7161 /// Transforms mask \p CommonMask per given \p Mask to make proper set after 7162 /// shuffle emission. 7163 static void transformMaskAfterShuffle(MutableArrayRef<int> CommonMask, 7164 ArrayRef<int> Mask) { 7165 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 7166 if (Mask[Idx] != PoisonMaskElem) 7167 CommonMask[Idx] = Idx; 7168 } 7169 /// Adds the cost of reshuffling \p E1 and \p E2 (if present), using given 7170 /// mask \p Mask, register number \p Part, that includes \p SliceSize 7171 /// elements. 7172 void estimateNodesPermuteCost(const TreeEntry &E1, const TreeEntry *E2, 7173 ArrayRef<int> Mask, unsigned Part, 7174 unsigned SliceSize) { 7175 if (SameNodesEstimated) { 7176 // Delay the cost estimation if the same nodes are reshuffling. 7177 // If we already requested the cost of reshuffling of E1 and E2 before, no 7178 // need to estimate another cost with the sub-Mask, instead include this 7179 // sub-Mask into the CommonMask to estimate it later and avoid double cost 7180 // estimation. 
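      // E.g. if both register-sized parts of the mask permute the same
      // (E1, E2) pair, the second part's sub-mask is merged into CommonMask
      // here and one combined shuffle is costed later, rather than costing
      // two partial shuffles. (Sketch of the intent.)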
7181 if ((InVectors.size() == 2 && 7182 InVectors.front().get<const TreeEntry *>() == &E1 && 7183 InVectors.back().get<const TreeEntry *>() == E2) || 7184 (!E2 && InVectors.front().get<const TreeEntry *>() == &E1)) { 7185 assert(all_of(ArrayRef(CommonMask).slice(Part * SliceSize, SliceSize), 7186 [](int Idx) { return Idx == PoisonMaskElem; }) && 7187 "Expected all poisoned elements."); 7188 ArrayRef<int> SubMask = 7189 ArrayRef(Mask).slice(Part * SliceSize, SliceSize); 7190 copy(SubMask, std::next(CommonMask.begin(), SliceSize * Part)); 7191 return; 7192 } 7193 // Found non-matching nodes - need to estimate the cost for the matched 7194 // and transform mask. 7195 Cost += createShuffle(InVectors.front(), 7196 InVectors.size() == 1 ? nullptr : InVectors.back(), 7197 CommonMask); 7198 transformMaskAfterShuffle(CommonMask, CommonMask); 7199 } 7200 SameNodesEstimated = false; 7201 Cost += createShuffle(&E1, E2, Mask); 7202 transformMaskAfterShuffle(CommonMask, Mask); 7203 } 7204 7205 class ShuffleCostBuilder { 7206 const TargetTransformInfo &TTI; 7207 7208 static bool isEmptyOrIdentity(ArrayRef<int> Mask, unsigned VF) { 7209 int Index = -1; 7210 return Mask.empty() || 7211 (VF == Mask.size() && 7212 ShuffleVectorInst::isIdentityMask(Mask, VF)) || 7213 (ShuffleVectorInst::isExtractSubvectorMask(Mask, VF, Index) && 7214 Index == 0); 7215 } 7216 7217 public: 7218 ShuffleCostBuilder(const TargetTransformInfo &TTI) : TTI(TTI) {} 7219 ~ShuffleCostBuilder() = default; 7220 InstructionCost createShuffleVector(Value *V1, Value *, 7221 ArrayRef<int> Mask) const { 7222 // Empty mask or identity mask are free. 7223 unsigned VF = 7224 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue(); 7225 if (isEmptyOrIdentity(Mask, VF)) 7226 return TTI::TCC_Free; 7227 return TTI.getShuffleCost(TTI::SK_PermuteTwoSrc, 7228 cast<VectorType>(V1->getType()), Mask); 7229 } 7230 InstructionCost createShuffleVector(Value *V1, ArrayRef<int> Mask) const { 7231 // Empty mask or identity mask are free. 7232 unsigned VF = 7233 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue(); 7234 if (isEmptyOrIdentity(Mask, VF)) 7235 return TTI::TCC_Free; 7236 return TTI.getShuffleCost(TTI::SK_PermuteSingleSrc, 7237 cast<VectorType>(V1->getType()), Mask); 7238 } 7239 InstructionCost createIdentity(Value *) const { return TTI::TCC_Free; } 7240 InstructionCost createPoison(Type *Ty, unsigned VF) const { 7241 return TTI::TCC_Free; 7242 } 7243 void resizeToMatch(Value *&, Value *&) const {} 7244 }; 7245 7246 /// Smart shuffle instruction emission, walks through shuffles trees and 7247 /// tries to find the best matching vector for the actual shuffle 7248 /// instruction. 7249 InstructionCost 7250 createShuffle(const PointerUnion<Value *, const TreeEntry *> &P1, 7251 const PointerUnion<Value *, const TreeEntry *> &P2, 7252 ArrayRef<int> Mask) { 7253 ShuffleCostBuilder Builder(TTI); 7254 SmallVector<int> CommonMask(Mask.begin(), Mask.end()); 7255 Value *V1 = P1.dyn_cast<Value *>(), *V2 = P2.dyn_cast<Value *>(); 7256 unsigned CommonVF = Mask.size(); 7257 if (!V1 && !V2 && !P2.isNull()) { 7258 // Shuffle 2 entry nodes. 
7259 const TreeEntry *E = P1.get<const TreeEntry *>(); 7260 unsigned VF = E->getVectorFactor(); 7261 const TreeEntry *E2 = P2.get<const TreeEntry *>(); 7262 CommonVF = std::max(VF, E2->getVectorFactor()); 7263 assert(all_of(Mask, 7264 [=](int Idx) { 7265 return Idx < 2 * static_cast<int>(CommonVF); 7266 }) && 7267 "All elements in mask must be less than 2 * CommonVF."); 7268 if (E->Scalars.size() == E2->Scalars.size()) { 7269 SmallVector<int> EMask = E->getCommonMask(); 7270 SmallVector<int> E2Mask = E2->getCommonMask(); 7271 if (!EMask.empty() || !E2Mask.empty()) { 7272 for (int &Idx : CommonMask) { 7273 if (Idx == PoisonMaskElem) 7274 continue; 7275 if (Idx < static_cast<int>(CommonVF) && !EMask.empty()) 7276 Idx = EMask[Idx]; 7277 else if (Idx >= static_cast<int>(CommonVF)) 7278 Idx = (E2Mask.empty() ? Idx - CommonVF : E2Mask[Idx - CommonVF]) + 7279 E->Scalars.size(); 7280 } 7281 } 7282 CommonVF = E->Scalars.size(); 7283 } 7284 V1 = Constant::getNullValue( 7285 FixedVectorType::get(E->Scalars.front()->getType(), CommonVF)); 7286 V2 = getAllOnesValue( 7287 *R.DL, FixedVectorType::get(E->Scalars.front()->getType(), CommonVF)); 7288 } else if (!V1 && P2.isNull()) { 7289 // Shuffle single entry node. 7290 const TreeEntry *E = P1.get<const TreeEntry *>(); 7291 unsigned VF = E->getVectorFactor(); 7292 CommonVF = VF; 7293 assert( 7294 all_of(Mask, 7295 [=](int Idx) { return Idx < static_cast<int>(CommonVF); }) && 7296 "All elements in mask must be less than CommonVF."); 7297 if (E->Scalars.size() == Mask.size() && VF != Mask.size()) { 7298 SmallVector<int> EMask = E->getCommonMask(); 7299 assert(!EMask.empty() && "Expected non-empty common mask."); 7300 for (int &Idx : CommonMask) { 7301 if (Idx != PoisonMaskElem) 7302 Idx = EMask[Idx]; 7303 } 7304 CommonVF = E->Scalars.size(); 7305 } 7306 V1 = Constant::getNullValue( 7307 FixedVectorType::get(E->Scalars.front()->getType(), CommonVF)); 7308 } else if (V1 && P2.isNull()) { 7309 // Shuffle single vector. 7310 CommonVF = cast<FixedVectorType>(V1->getType())->getNumElements(); 7311 assert( 7312 all_of(Mask, 7313 [=](int Idx) { return Idx < static_cast<int>(CommonVF); }) && 7314 "All elements in mask must be less than CommonVF."); 7315 } else if (V1 && !V2) { 7316 // Shuffle vector and tree node. 7317 unsigned VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 7318 const TreeEntry *E2 = P2.get<const TreeEntry *>(); 7319 CommonVF = std::max(VF, E2->getVectorFactor()); 7320 assert(all_of(Mask, 7321 [=](int Idx) { 7322 return Idx < 2 * static_cast<int>(CommonVF); 7323 }) && 7324 "All elements in mask must be less than 2 * CommonVF."); 7325 if (E2->Scalars.size() == VF && VF != CommonVF) { 7326 SmallVector<int> E2Mask = E2->getCommonMask(); 7327 assert(!E2Mask.empty() && "Expected non-empty common mask."); 7328 for (int &Idx : CommonMask) { 7329 if (Idx == PoisonMaskElem) 7330 continue; 7331 if (Idx >= static_cast<int>(CommonVF)) 7332 Idx = E2Mask[Idx - CommonVF] + VF; 7333 } 7334 CommonVF = VF; 7335 } 7336 V1 = Constant::getNullValue( 7337 FixedVectorType::get(E2->Scalars.front()->getType(), CommonVF)); 7338 V2 = getAllOnesValue( 7339 *R.DL, 7340 FixedVectorType::get(E2->Scalars.front()->getType(), CommonVF)); 7341 } else if (!V1 && V2) { 7342 // Shuffle vector and tree node. 
7343 unsigned VF = cast<FixedVectorType>(V2->getType())->getNumElements(); 7344 const TreeEntry *E1 = P1.get<const TreeEntry *>(); 7345 CommonVF = std::max(VF, E1->getVectorFactor()); 7346 assert(all_of(Mask, 7347 [=](int Idx) { 7348 return Idx < 2 * static_cast<int>(CommonVF); 7349 }) && 7350 "All elements in mask must be less than 2 * CommonVF."); 7351 if (E1->Scalars.size() == VF && VF != CommonVF) { 7352 SmallVector<int> E1Mask = E1->getCommonMask(); 7353 assert(!E1Mask.empty() && "Expected non-empty common mask."); 7354 for (int &Idx : CommonMask) { 7355 if (Idx == PoisonMaskElem) 7356 continue; 7357 if (Idx >= static_cast<int>(CommonVF)) 7358 Idx = E1Mask[Idx - CommonVF] + VF; 7359 } 7360 CommonVF = VF; 7361 } 7362 V1 = Constant::getNullValue( 7363 FixedVectorType::get(E1->Scalars.front()->getType(), CommonVF)); 7364 V2 = getAllOnesValue( 7365 *R.DL, 7366 FixedVectorType::get(E1->Scalars.front()->getType(), CommonVF)); 7367 } else { 7368 assert(V1 && V2 && "Expected both vectors."); 7369 unsigned VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 7370 CommonVF = 7371 std::max(VF, cast<FixedVectorType>(V2->getType())->getNumElements()); 7372 assert(all_of(Mask, 7373 [=](int Idx) { 7374 return Idx < 2 * static_cast<int>(CommonVF); 7375 }) && 7376 "All elements in mask must be less than 2 * CommonVF."); 7377 if (V1->getType() != V2->getType()) { 7378 V1 = Constant::getNullValue(FixedVectorType::get( 7379 cast<FixedVectorType>(V1->getType())->getElementType(), CommonVF)); 7380 V2 = getAllOnesValue( 7381 *R.DL, FixedVectorType::get( 7382 cast<FixedVectorType>(V1->getType())->getElementType(), 7383 CommonVF)); 7384 } 7385 } 7386 InVectors.front() = Constant::getNullValue(FixedVectorType::get( 7387 cast<FixedVectorType>(V1->getType())->getElementType(), 7388 CommonMask.size())); 7389 if (InVectors.size() == 2) 7390 InVectors.pop_back(); 7391 return BaseShuffleAnalysis::createShuffle<InstructionCost>( 7392 V1, V2, CommonMask, Builder); 7393 } 7394 7395 public: 7396 ShuffleCostEstimator(TargetTransformInfo &TTI, 7397 ArrayRef<Value *> VectorizedVals, BoUpSLP &R, 7398 SmallPtrSetImpl<Value *> &CheckedExtracts) 7399 : TTI(TTI), VectorizedVals(VectorizedVals.begin(), VectorizedVals.end()), 7400 R(R), CheckedExtracts(CheckedExtracts) {} 7401 Value *adjustExtracts(const TreeEntry *E, MutableArrayRef<int> Mask, 7402 ArrayRef<std::optional<TTI::ShuffleKind>> ShuffleKinds, 7403 unsigned NumParts, bool &UseVecBaseAsInput) { 7404 UseVecBaseAsInput = false; 7405 if (Mask.empty()) 7406 return nullptr; 7407 Value *VecBase = nullptr; 7408 ArrayRef<Value *> VL = E->Scalars; 7409 // If the resulting type is scalarized, do not adjust the cost. 7410 if (NumParts == VL.size()) 7411 return nullptr; 7412 // Check if it can be considered reused if same extractelements were 7413 // vectorized already. 
7414 bool PrevNodeFound = any_of( 7415 ArrayRef(R.VectorizableTree).take_front(E->Idx), 7416 [&](const std::unique_ptr<TreeEntry> &TE) { 7417 return ((!TE->isAltShuffle() && 7418 TE->getOpcode() == Instruction::ExtractElement) || 7419 TE->State == TreeEntry::NeedToGather) && 7420 all_of(enumerate(TE->Scalars), [&](auto &&Data) { 7421 return VL.size() > Data.index() && 7422 (Mask[Data.index()] == PoisonMaskElem || 7423 isa<UndefValue>(VL[Data.index()]) || 7424 Data.value() == VL[Data.index()]); 7425 }); 7426 }); 7427 SmallPtrSet<Value *, 4> UniqueBases; 7428 unsigned SliceSize = VL.size() / NumParts; 7429 for (unsigned Part = 0; Part < NumParts; ++Part) { 7430 ArrayRef<int> SubMask = Mask.slice(Part * SliceSize, SliceSize); 7431 for (auto [I, V] : enumerate(VL.slice(Part * SliceSize, SliceSize))) { 7432 // Ignore non-extractelement scalars. 7433 if (isa<UndefValue>(V) || 7434 (!SubMask.empty() && SubMask[I] == PoisonMaskElem)) 7435 continue; 7436 // If all users of instruction are going to be vectorized and this 7437 // instruction itself is not going to be vectorized, consider this 7438 // instruction as dead and remove its cost from the final cost of the 7439 // vectorized tree. 7440 // Also, avoid adjusting the cost for extractelements with multiple uses 7441 // in different graph entries. 7442 auto *EE = cast<ExtractElementInst>(V); 7443 VecBase = EE->getVectorOperand(); 7444 UniqueBases.insert(VecBase); 7445 const TreeEntry *VE = R.getTreeEntry(V); 7446 if (!CheckedExtracts.insert(V).second || 7447 !R.areAllUsersVectorized(cast<Instruction>(V), &VectorizedVals) || 7448 (VE && VE != E)) 7449 continue; 7450 std::optional<unsigned> EEIdx = getExtractIndex(EE); 7451 if (!EEIdx) 7452 continue; 7453 unsigned Idx = *EEIdx; 7454 // Take credit for instruction that will become dead. 7455 if (EE->hasOneUse() || !PrevNodeFound) { 7456 Instruction *Ext = EE->user_back(); 7457 if (isa<SExtInst, ZExtInst>(Ext) && all_of(Ext->users(), [](User *U) { 7458 return isa<GetElementPtrInst>(U); 7459 })) { 7460 // Use getExtractWithExtendCost() to calculate the cost of 7461 // extractelement/ext pair. 7462 Cost -= 7463 TTI.getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(), 7464 EE->getVectorOperandType(), Idx); 7465 // Add back the cost of s|zext which is subtracted separately. 7466 Cost += TTI.getCastInstrCost( 7467 Ext->getOpcode(), Ext->getType(), EE->getType(), 7468 TTI::getCastContextHint(Ext), CostKind, Ext); 7469 continue; 7470 } 7471 } 7472 Cost -= TTI.getVectorInstrCost(*EE, EE->getVectorOperandType(), 7473 CostKind, Idx); 7474 } 7475 } 7476 // Check that gather of extractelements can be represented as just a 7477 // shuffle of a single/two vectors the scalars are extracted from. 7478 // Found the bunch of extractelement instructions that must be gathered 7479 // into a vector and can be represented as a permutation elements in a 7480 // single input vector or of 2 input vectors. 7481 // Done for reused if same extractelements were vectorized already. 
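    // For illustration (hypothetical IR, not taken from a particular test): a
    // gather of
    //   %x0 = extractelement <4 x float> %a, i32 0
    //   %x1 = extractelement <4 x float> %a, i32 2
    //   %x2 = extractelement <4 x float> %b, i32 1
    //   %x3 = extractelement <4 x float> %b, i32 3
    // can be modeled as a single two-source shuffle of %a and %b with the
    // mask <0, 2, 5, 7>, so computeExtractCost below charges the cost of that
    // shuffle rather than per-element extracts.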
7482 if (!PrevNodeFound) 7483 Cost += computeExtractCost(VL, Mask, ShuffleKinds, NumParts); 7484 InVectors.assign(1, E); 7485 CommonMask.assign(Mask.begin(), Mask.end()); 7486 transformMaskAfterShuffle(CommonMask, CommonMask); 7487 SameNodesEstimated = false; 7488 if (NumParts != 1 && UniqueBases.size() != 1) { 7489 UseVecBaseAsInput = true; 7490 VecBase = Constant::getNullValue( 7491 FixedVectorType::get(VL.front()->getType(), CommonMask.size())); 7492 } 7493 return VecBase; 7494 } 7495 /// Checks if the specified entry \p E needs to be delayed because of its 7496 /// dependency nodes. 7497 std::optional<InstructionCost> 7498 needToDelay(const TreeEntry *, 7499 ArrayRef<SmallVector<const TreeEntry *>>) const { 7500 // No need to delay the cost estimation during analysis. 7501 return std::nullopt; 7502 } 7503 void add(const TreeEntry &E1, const TreeEntry &E2, ArrayRef<int> Mask) { 7504 if (&E1 == &E2) { 7505 assert(all_of(Mask, 7506 [&](int Idx) { 7507 return Idx < static_cast<int>(E1.getVectorFactor()); 7508 }) && 7509 "Expected single vector shuffle mask."); 7510 add(E1, Mask); 7511 return; 7512 } 7513 if (InVectors.empty()) { 7514 CommonMask.assign(Mask.begin(), Mask.end()); 7515 InVectors.assign({&E1, &E2}); 7516 return; 7517 } 7518 assert(!CommonMask.empty() && "Expected non-empty common mask."); 7519 auto *MaskVecTy = 7520 FixedVectorType::get(E1.Scalars.front()->getType(), Mask.size()); 7521 unsigned NumParts = TTI.getNumberOfParts(MaskVecTy); 7522 if (NumParts == 0 || NumParts >= Mask.size()) 7523 NumParts = 1; 7524 unsigned SliceSize = Mask.size() / NumParts; 7525 const auto *It = 7526 find_if(Mask, [](int Idx) { return Idx != PoisonMaskElem; }); 7527 unsigned Part = std::distance(Mask.begin(), It) / SliceSize; 7528 estimateNodesPermuteCost(E1, &E2, Mask, Part, SliceSize); 7529 } 7530 void add(const TreeEntry &E1, ArrayRef<int> Mask) { 7531 if (InVectors.empty()) { 7532 CommonMask.assign(Mask.begin(), Mask.end()); 7533 InVectors.assign(1, &E1); 7534 return; 7535 } 7536 assert(!CommonMask.empty() && "Expected non-empty common mask."); 7537 auto *MaskVecTy = 7538 FixedVectorType::get(E1.Scalars.front()->getType(), Mask.size()); 7539 unsigned NumParts = TTI.getNumberOfParts(MaskVecTy); 7540 if (NumParts == 0 || NumParts >= Mask.size()) 7541 NumParts = 1; 7542 unsigned SliceSize = Mask.size() / NumParts; 7543 const auto *It = 7544 find_if(Mask, [](int Idx) { return Idx != PoisonMaskElem; }); 7545 unsigned Part = std::distance(Mask.begin(), It) / SliceSize; 7546 estimateNodesPermuteCost(E1, nullptr, Mask, Part, SliceSize); 7547 if (!SameNodesEstimated && InVectors.size() == 1) 7548 InVectors.emplace_back(&E1); 7549 } 7550 /// Adds 2 input vectors and the mask for their shuffling. 7551 void add(Value *V1, Value *V2, ArrayRef<int> Mask) { 7552 // May come only for shuffling of 2 vectors with extractelements, already 7553 // handled in adjustExtracts. 7554 assert(InVectors.size() == 1 && 7555 all_of(enumerate(CommonMask), 7556 [&](auto P) { 7557 if (P.value() == PoisonMaskElem) 7558 return Mask[P.index()] == PoisonMaskElem; 7559 auto *EI = 7560 cast<ExtractElementInst>(InVectors.front() 7561 .get<const TreeEntry *>() 7562 ->Scalars[P.index()]); 7563 return EI->getVectorOperand() == V1 || 7564 EI->getVectorOperand() == V2; 7565 }) && 7566 "Expected extractelement vectors."); 7567 } 7568 /// Adds another one input vector and the mask for the shuffling. 
7569 void add(Value *V1, ArrayRef<int> Mask, bool ForExtracts = false) { 7570 if (InVectors.empty()) { 7571 assert(CommonMask.empty() && !ForExtracts && 7572 "Expected empty input mask/vectors."); 7573 CommonMask.assign(Mask.begin(), Mask.end()); 7574 InVectors.assign(1, V1); 7575 return; 7576 } 7577 if (ForExtracts) { 7578 // No need to add vectors here, already handled them in adjustExtracts. 7579 assert(InVectors.size() == 1 && 7580 InVectors.front().is<const TreeEntry *>() && !CommonMask.empty() && 7581 all_of(enumerate(CommonMask), 7582 [&](auto P) { 7583 Value *Scalar = InVectors.front() 7584 .get<const TreeEntry *>() 7585 ->Scalars[P.index()]; 7586 if (P.value() == PoisonMaskElem) 7587 return P.value() == Mask[P.index()] || 7588 isa<UndefValue>(Scalar); 7589 if (isa<Constant>(V1)) 7590 return true; 7591 auto *EI = cast<ExtractElementInst>(Scalar); 7592 return EI->getVectorOperand() == V1; 7593 }) && 7594 "Expected only tree entry for extractelement vectors."); 7595 return; 7596 } 7597 assert(!InVectors.empty() && !CommonMask.empty() && 7598 "Expected only tree entries from extracts/reused buildvectors."); 7599 unsigned VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 7600 if (InVectors.size() == 2) { 7601 Cost += createShuffle(InVectors.front(), InVectors.back(), CommonMask); 7602 transformMaskAfterShuffle(CommonMask, CommonMask); 7603 VF = std::max<unsigned>(VF, CommonMask.size()); 7604 } else if (const auto *InTE = 7605 InVectors.front().dyn_cast<const TreeEntry *>()) { 7606 VF = std::max(VF, InTE->getVectorFactor()); 7607 } else { 7608 VF = std::max( 7609 VF, cast<FixedVectorType>(InVectors.front().get<Value *>()->getType()) 7610 ->getNumElements()); 7611 } 7612 InVectors.push_back(V1); 7613 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 7614 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem) 7615 CommonMask[Idx] = Mask[Idx] + VF; 7616 } 7617 Value *gather(ArrayRef<Value *> VL, unsigned MaskVF = 0, 7618 Value *Root = nullptr) { 7619 Cost += getBuildVectorCost(VL, Root); 7620 if (!Root) { 7621 // FIXME: Need to find a way to avoid use of getNullValue here. 7622 SmallVector<Constant *> Vals; 7623 unsigned VF = VL.size(); 7624 if (MaskVF != 0) 7625 VF = std::min(VF, MaskVF); 7626 for (Value *V : VL.take_front(VF)) { 7627 if (isa<UndefValue>(V)) { 7628 Vals.push_back(cast<Constant>(V)); 7629 continue; 7630 } 7631 Vals.push_back(Constant::getNullValue(V->getType())); 7632 } 7633 return ConstantVector::get(Vals); 7634 } 7635 return ConstantVector::getSplat( 7636 ElementCount::getFixed( 7637 cast<FixedVectorType>(Root->getType())->getNumElements()), 7638 getAllOnesValue(*R.DL, VL.front()->getType())); 7639 } 7640 InstructionCost createFreeze(InstructionCost Cost) { return Cost; } 7641 /// Finalize emission of the shuffles. 
7642 InstructionCost 7643 finalize(ArrayRef<int> ExtMask, unsigned VF = 0, 7644 function_ref<void(Value *&, SmallVectorImpl<int> &)> Action = {}) { 7645 IsFinalized = true; 7646 if (Action) { 7647 const PointerUnion<Value *, const TreeEntry *> &Vec = InVectors.front(); 7648 if (InVectors.size() == 2) 7649 Cost += createShuffle(Vec, InVectors.back(), CommonMask); 7650 else 7651 Cost += createShuffle(Vec, nullptr, CommonMask); 7652 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 7653 if (CommonMask[Idx] != PoisonMaskElem) 7654 CommonMask[Idx] = Idx; 7655 assert(VF > 0 && 7656 "Expected vector length for the final value before action."); 7657 Value *V = Vec.get<Value *>(); 7658 Action(V, CommonMask); 7659 InVectors.front() = V; 7660 } 7661 ::addMask(CommonMask, ExtMask, /*ExtendingManyInputs=*/true); 7662 if (CommonMask.empty()) { 7663 assert(InVectors.size() == 1 && "Expected only one vector with no mask"); 7664 return Cost; 7665 } 7666 return Cost + 7667 createShuffle(InVectors.front(), 7668 InVectors.size() == 2 ? InVectors.back() : nullptr, 7669 CommonMask); 7670 } 7671 7672 ~ShuffleCostEstimator() { 7673 assert((IsFinalized || CommonMask.empty()) && 7674 "Shuffle construction must be finalized."); 7675 } 7676 }; 7677 7678 const BoUpSLP::TreeEntry *BoUpSLP::getOperandEntry(const TreeEntry *E, 7679 unsigned Idx) const { 7680 Value *Op = E->getOperand(Idx).front(); 7681 if (const TreeEntry *TE = getTreeEntry(Op)) { 7682 if (find_if(E->UserTreeIndices, [&](const EdgeInfo &EI) { 7683 return EI.EdgeIdx == Idx && EI.UserTE == E; 7684 }) != TE->UserTreeIndices.end()) 7685 return TE; 7686 auto MIt = MultiNodeScalars.find(Op); 7687 if (MIt != MultiNodeScalars.end()) { 7688 for (const TreeEntry *TE : MIt->second) { 7689 if (find_if(TE->UserTreeIndices, [&](const EdgeInfo &EI) { 7690 return EI.EdgeIdx == Idx && EI.UserTE == E; 7691 }) != TE->UserTreeIndices.end()) 7692 return TE; 7693 } 7694 } 7695 } 7696 const auto *It = 7697 find_if(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) { 7698 return TE->State == TreeEntry::NeedToGather && 7699 find_if(TE->UserTreeIndices, [&](const EdgeInfo &EI) { 7700 return EI.EdgeIdx == Idx && EI.UserTE == E; 7701 }) != TE->UserTreeIndices.end(); 7702 }); 7703 assert(It != VectorizableTree.end() && "Expected vectorizable entry."); 7704 return It->get(); 7705 } 7706 7707 InstructionCost 7708 BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals, 7709 SmallPtrSetImpl<Value *> &CheckedExtracts) { 7710 ArrayRef<Value *> VL = E->Scalars; 7711 7712 Type *ScalarTy = VL[0]->getType(); 7713 if (E->State != TreeEntry::NeedToGather) { 7714 if (auto *SI = dyn_cast<StoreInst>(VL[0])) 7715 ScalarTy = SI->getValueOperand()->getType(); 7716 else if (auto *CI = dyn_cast<CmpInst>(VL[0])) 7717 ScalarTy = CI->getOperand(0)->getType(); 7718 else if (auto *IE = dyn_cast<InsertElementInst>(VL[0])) 7719 ScalarTy = IE->getOperand(1)->getType(); 7720 } 7721 if (!FixedVectorType::isValidElementType(ScalarTy)) 7722 return InstructionCost::getInvalid(); 7723 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 7724 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7725 7726 // If we have computed a smaller type for the expression, update VecTy so 7727 // that the costs will be accurate. 
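  // For example, if the analysis recorded that this entry only needs 16 bits
  // while the scalars are nominally i32, the costs below are queried for
  // <VL.size() x i16> rather than <VL.size() x i32> (illustrative widths).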
7728 auto It = MinBWs.find(E); 7729 if (It != MinBWs.end()) { 7730 ScalarTy = IntegerType::get(F->getContext(), It->second.first); 7731 VecTy = FixedVectorType::get(ScalarTy, VL.size()); 7732 } 7733 unsigned EntryVF = E->getVectorFactor(); 7734 auto *FinalVecTy = FixedVectorType::get(ScalarTy, EntryVF); 7735 7736 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); 7737 if (E->State == TreeEntry::NeedToGather) { 7738 if (allConstant(VL)) 7739 return 0; 7740 if (isa<InsertElementInst>(VL[0])) 7741 return InstructionCost::getInvalid(); 7742 return processBuildVector<ShuffleCostEstimator, InstructionCost>( 7743 E, *TTI, VectorizedVals, *this, CheckedExtracts); 7744 } 7745 InstructionCost CommonCost = 0; 7746 SmallVector<int> Mask; 7747 if (!E->ReorderIndices.empty() && 7748 E->State != TreeEntry::PossibleStridedVectorize) { 7749 SmallVector<int> NewMask; 7750 if (E->getOpcode() == Instruction::Store) { 7751 // For stores the order is actually a mask. 7752 NewMask.resize(E->ReorderIndices.size()); 7753 copy(E->ReorderIndices, NewMask.begin()); 7754 } else { 7755 inversePermutation(E->ReorderIndices, NewMask); 7756 } 7757 ::addMask(Mask, NewMask); 7758 } 7759 if (NeedToShuffleReuses) 7760 ::addMask(Mask, E->ReuseShuffleIndices); 7761 if (!Mask.empty() && !ShuffleVectorInst::isIdentityMask(Mask, Mask.size())) 7762 CommonCost = 7763 TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FinalVecTy, Mask); 7764 assert((E->State == TreeEntry::Vectorize || 7765 E->State == TreeEntry::ScatterVectorize || 7766 E->State == TreeEntry::PossibleStridedVectorize) && 7767 "Unhandled state"); 7768 assert(E->getOpcode() && 7769 ((allSameType(VL) && allSameBlock(VL)) || 7770 (E->getOpcode() == Instruction::GetElementPtr && 7771 E->getMainOp()->getType()->isPointerTy())) && 7772 "Invalid VL"); 7773 Instruction *VL0 = E->getMainOp(); 7774 unsigned ShuffleOrOp = 7775 E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode(); 7776 SetVector<Value *> UniqueValues(VL.begin(), VL.end()); 7777 const unsigned Sz = UniqueValues.size(); 7778 SmallBitVector UsedScalars(Sz, false); 7779 for (unsigned I = 0; I < Sz; ++I) { 7780 if (getTreeEntry(UniqueValues[I]) == E) 7781 continue; 7782 UsedScalars.set(I); 7783 } 7784 auto GetCastContextHint = [&](Value *V) { 7785 if (const TreeEntry *OpTE = getTreeEntry(V)) { 7786 if (OpTE->State == TreeEntry::ScatterVectorize) 7787 return TTI::CastContextHint::GatherScatter; 7788 if (OpTE->State == TreeEntry::Vectorize && 7789 OpTE->getOpcode() == Instruction::Load && !OpTE->isAltShuffle()) { 7790 if (OpTE->ReorderIndices.empty()) 7791 return TTI::CastContextHint::Normal; 7792 SmallVector<int> Mask; 7793 inversePermutation(OpTE->ReorderIndices, Mask); 7794 if (ShuffleVectorInst::isReverseMask(Mask, Mask.size())) 7795 return TTI::CastContextHint::Reversed; 7796 } 7797 } else { 7798 InstructionsState SrcState = getSameOpcode(E->getOperand(0), *TLI); 7799 if (SrcState.getOpcode() == Instruction::Load && !SrcState.isAltShuffle()) 7800 return TTI::CastContextHint::GatherScatter; 7801 } 7802 return TTI::CastContextHint::None; 7803 }; 7804 auto GetCostDiff = 7805 [=](function_ref<InstructionCost(unsigned)> ScalarEltCost, 7806 function_ref<InstructionCost(InstructionCost)> VectorCost) { 7807 // Calculate the cost of this instruction. 
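        // Note that the lambda returns VecCost - ScalarCost, so a negative
        // result means the vectorized form is expected to be cheaper than the
        // scalar code it replaces.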
7808 InstructionCost ScalarCost = 0; 7809 if (isa<CastInst, CmpInst, SelectInst, CallInst>(VL0)) { 7810 // For some of the instructions no need to calculate cost for each 7811 // particular instruction, we can use the cost of the single 7812 // instruction x total number of scalar instructions. 7813 ScalarCost = (Sz - UsedScalars.count()) * ScalarEltCost(0); 7814 } else { 7815 for (unsigned I = 0; I < Sz; ++I) { 7816 if (UsedScalars.test(I)) 7817 continue; 7818 ScalarCost += ScalarEltCost(I); 7819 } 7820 } 7821 7822 InstructionCost VecCost = VectorCost(CommonCost); 7823 // Check if the current node must be resized, if the parent node is not 7824 // resized. 7825 if (!UnaryInstruction::isCast(E->getOpcode()) && E->Idx != 0) { 7826 const EdgeInfo &EI = E->UserTreeIndices.front(); 7827 if ((EI.UserTE->getOpcode() != Instruction::Select || 7828 EI.EdgeIdx != 0) && 7829 It != MinBWs.end()) { 7830 auto UserBWIt = MinBWs.find(EI.UserTE); 7831 Type *UserScalarTy = 7832 EI.UserTE->getOperand(EI.EdgeIdx).front()->getType(); 7833 if (UserBWIt != MinBWs.end()) 7834 UserScalarTy = IntegerType::get(ScalarTy->getContext(), 7835 UserBWIt->second.first); 7836 if (ScalarTy != UserScalarTy) { 7837 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy); 7838 unsigned SrcBWSz = DL->getTypeSizeInBits(UserScalarTy); 7839 unsigned VecOpcode; 7840 auto *SrcVecTy = 7841 FixedVectorType::get(UserScalarTy, E->getVectorFactor()); 7842 if (BWSz > SrcBWSz) 7843 VecOpcode = Instruction::Trunc; 7844 else 7845 VecOpcode = 7846 It->second.second ? Instruction::SExt : Instruction::ZExt; 7847 TTI::CastContextHint CCH = GetCastContextHint(VL0); 7848 VecCost += TTI->getCastInstrCost(VecOpcode, VecTy, SrcVecTy, CCH, 7849 CostKind); 7850 ScalarCost += 7851 Sz * TTI->getCastInstrCost(VecOpcode, ScalarTy, UserScalarTy, 7852 CCH, CostKind); 7853 } 7854 } 7855 } 7856 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost - CommonCost, 7857 ScalarCost, "Calculated costs for Tree")); 7858 return VecCost - ScalarCost; 7859 }; 7860 // Calculate cost difference from vectorizing set of GEPs. 7861 // Negative value means vectorizing is profitable. 7862 auto GetGEPCostDiff = [=](ArrayRef<Value *> Ptrs, Value *BasePtr) { 7863 InstructionCost ScalarCost = 0; 7864 InstructionCost VecCost = 0; 7865 // Here we differentiate two cases: (1) when Ptrs represent a regular 7866 // vectorization tree node (as they are pointer arguments of scattered 7867 // loads) or (2) when Ptrs are the arguments of loads or stores being 7868 // vectorized as plane wide unit-stride load/store since all the 7869 // loads/stores are known to be from/to adjacent locations. 7870 assert(E->State == TreeEntry::Vectorize && 7871 "Entry state expected to be Vectorize here."); 7872 if (isa<LoadInst, StoreInst>(VL0)) { 7873 // Case 2: estimate costs for pointer related costs when vectorizing to 7874 // a wide load/store. 7875 // Scalar cost is estimated as a set of pointers with known relationship 7876 // between them. 7877 // For vector code we will use BasePtr as argument for the wide load/store 7878 // but we also need to account all the instructions which are going to 7879 // stay in vectorized code due to uses outside of these scalar 7880 // loads/stores. 
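      // Sketch of the accounting: for, say, four consecutive scalar stores the
      // scalar side pays roughly one GEP per non-base lane, while the wide
      // store only needs BasePtr plus whatever pointers must stay live because
      // of other uses (collected into PtrsRetainedInVecCode below).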
7881 ScalarCost = TTI->getPointersChainCost(
7882 Ptrs, BasePtr, TTI::PointersChainInfo::getUnitStride(), ScalarTy,
7883 CostKind);
7884
7885 SmallVector<const Value *> PtrsRetainedInVecCode;
7886 for (Value *V : Ptrs) {
7887 if (V == BasePtr) {
7888 PtrsRetainedInVecCode.push_back(V);
7889 continue;
7890 }
7891 auto *Ptr = dyn_cast<GetElementPtrInst>(V);
7892 // For simplicity, assume Ptr stays in vectorized code if it is not a
7893 // GEP instruction. We don't care, since its cost is considered free.
7894 // TODO: We should check for any uses outside of the vectorizable tree
7895 // rather than just a single use.
7896 if (!Ptr || !Ptr->hasOneUse())
7897 PtrsRetainedInVecCode.push_back(V);
7898 }
7899
7900 if (PtrsRetainedInVecCode.size() == Ptrs.size()) {
7901 // If all pointers stay in vectorized code then we don't have
7902 // any savings on that.
7903 LLVM_DEBUG(dumpTreeCosts(E, 0, ScalarCost, ScalarCost,
7904 "Calculated GEPs cost for Tree"));
7905 return InstructionCost{TTI::TCC_Free};
7906 }
7907 VecCost = TTI->getPointersChainCost(
7908 PtrsRetainedInVecCode, BasePtr,
7909 TTI::PointersChainInfo::getKnownStride(), VecTy, CostKind);
7910 } else {
7911 // Case 1: Ptrs are the arguments of loads that we are going to transform
7912 // into a masked gather load intrinsic.
7913 // All the scalar GEPs will be removed as a result of vectorization.
7914 // For any external uses of some lanes, extractelement instructions will
7915 // be generated (their cost is estimated separately).
7916 TTI::PointersChainInfo PtrsInfo =
7917 all_of(Ptrs,
7918 [](const Value *V) {
7919 auto *Ptr = dyn_cast<GetElementPtrInst>(V);
7920 return Ptr && !Ptr->hasAllConstantIndices();
7921 })
7922 ? TTI::PointersChainInfo::getUnknownStride()
7923 : TTI::PointersChainInfo::getKnownStride();
7924
7925 ScalarCost = TTI->getPointersChainCost(Ptrs, BasePtr, PtrsInfo, ScalarTy,
7926 CostKind);
7927 if (auto *BaseGEP = dyn_cast<GEPOperator>(BasePtr)) {
7928 SmallVector<const Value *> Indices(BaseGEP->indices());
7929 VecCost = TTI->getGEPCost(BaseGEP->getSourceElementType(),
7930 BaseGEP->getPointerOperand(), Indices, VecTy,
7931 CostKind);
7932 }
7933 }
7934
7935 LLVM_DEBUG(dumpTreeCosts(E, 0, VecCost, ScalarCost,
7936 "Calculated GEPs cost for Tree"));
7937
7938 return VecCost - ScalarCost;
7939 };
7940
7941 switch (ShuffleOrOp) {
7942 case Instruction::PHI: {
7943 // Count reused scalars.
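      // No explicit vector cost is added for the PHI itself; the loop below
      // only credits TCC_Basic per duplicated incoming scalar (lanes reused
      // via ReuseShuffleIndices), which is then subtracted from CommonCost.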
7944 InstructionCost ScalarCost = 0; 7945 SmallPtrSet<const TreeEntry *, 4> CountedOps; 7946 for (Value *V : UniqueValues) { 7947 auto *PHI = dyn_cast<PHINode>(V); 7948 if (!PHI) 7949 continue; 7950 7951 ValueList Operands(PHI->getNumIncomingValues(), nullptr); 7952 for (unsigned I = 0, N = PHI->getNumIncomingValues(); I < N; ++I) { 7953 Value *Op = PHI->getIncomingValue(I); 7954 Operands[I] = Op; 7955 } 7956 if (const TreeEntry *OpTE = getTreeEntry(Operands.front())) 7957 if (OpTE->isSame(Operands) && CountedOps.insert(OpTE).second) 7958 if (!OpTE->ReuseShuffleIndices.empty()) 7959 ScalarCost += TTI::TCC_Basic * (OpTE->ReuseShuffleIndices.size() - 7960 OpTE->Scalars.size()); 7961 } 7962 7963 return CommonCost - ScalarCost; 7964 } 7965 case Instruction::ExtractValue: 7966 case Instruction::ExtractElement: { 7967 auto GetScalarCost = [&](unsigned Idx) { 7968 auto *I = cast<Instruction>(UniqueValues[Idx]); 7969 VectorType *SrcVecTy; 7970 if (ShuffleOrOp == Instruction::ExtractElement) { 7971 auto *EE = cast<ExtractElementInst>(I); 7972 SrcVecTy = EE->getVectorOperandType(); 7973 } else { 7974 auto *EV = cast<ExtractValueInst>(I); 7975 Type *AggregateTy = EV->getAggregateOperand()->getType(); 7976 unsigned NumElts; 7977 if (auto *ATy = dyn_cast<ArrayType>(AggregateTy)) 7978 NumElts = ATy->getNumElements(); 7979 else 7980 NumElts = AggregateTy->getStructNumElements(); 7981 SrcVecTy = FixedVectorType::get(ScalarTy, NumElts); 7982 } 7983 if (I->hasOneUse()) { 7984 Instruction *Ext = I->user_back(); 7985 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 7986 all_of(Ext->users(), 7987 [](User *U) { return isa<GetElementPtrInst>(U); })) { 7988 // Use getExtractWithExtendCost() to calculate the cost of 7989 // extractelement/ext pair. 7990 InstructionCost Cost = TTI->getExtractWithExtendCost( 7991 Ext->getOpcode(), Ext->getType(), SrcVecTy, *getExtractIndex(I)); 7992 // Subtract the cost of s|zext which is subtracted separately. 
7993 Cost -= TTI->getCastInstrCost( 7994 Ext->getOpcode(), Ext->getType(), I->getType(), 7995 TTI::getCastContextHint(Ext), CostKind, Ext); 7996 return Cost; 7997 } 7998 } 7999 return TTI->getVectorInstrCost(Instruction::ExtractElement, SrcVecTy, 8000 CostKind, *getExtractIndex(I)); 8001 }; 8002 auto GetVectorCost = [](InstructionCost CommonCost) { return CommonCost; }; 8003 return GetCostDiff(GetScalarCost, GetVectorCost); 8004 } 8005 case Instruction::InsertElement: { 8006 assert(E->ReuseShuffleIndices.empty() && 8007 "Unique insertelements only are expected."); 8008 auto *SrcVecTy = cast<FixedVectorType>(VL0->getType()); 8009 unsigned const NumElts = SrcVecTy->getNumElements(); 8010 unsigned const NumScalars = VL.size(); 8011 8012 unsigned NumOfParts = TTI->getNumberOfParts(SrcVecTy); 8013 8014 SmallVector<int> InsertMask(NumElts, PoisonMaskElem); 8015 unsigned OffsetBeg = *getInsertIndex(VL.front()); 8016 unsigned OffsetEnd = OffsetBeg; 8017 InsertMask[OffsetBeg] = 0; 8018 for (auto [I, V] : enumerate(VL.drop_front())) { 8019 unsigned Idx = *getInsertIndex(V); 8020 if (OffsetBeg > Idx) 8021 OffsetBeg = Idx; 8022 else if (OffsetEnd < Idx) 8023 OffsetEnd = Idx; 8024 InsertMask[Idx] = I + 1; 8025 } 8026 unsigned VecScalarsSz = PowerOf2Ceil(NumElts); 8027 if (NumOfParts > 0) 8028 VecScalarsSz = PowerOf2Ceil((NumElts + NumOfParts - 1) / NumOfParts); 8029 unsigned VecSz = (1 + OffsetEnd / VecScalarsSz - OffsetBeg / VecScalarsSz) * 8030 VecScalarsSz; 8031 unsigned Offset = VecScalarsSz * (OffsetBeg / VecScalarsSz); 8032 unsigned InsertVecSz = std::min<unsigned>( 8033 PowerOf2Ceil(OffsetEnd - OffsetBeg + 1), 8034 ((OffsetEnd - OffsetBeg + VecScalarsSz) / VecScalarsSz) * VecScalarsSz); 8035 bool IsWholeSubvector = 8036 OffsetBeg == Offset && ((OffsetEnd + 1) % VecScalarsSz == 0); 8037 // Check if we can safely insert a subvector. If it is not possible, just 8038 // generate a whole-sized vector and shuffle the source vector and the new 8039 // subvector. 8040 if (OffsetBeg + InsertVecSz > VecSz) { 8041 // Align OffsetBeg to generate correct mask. 8042 OffsetBeg = alignDown(OffsetBeg, VecSz, Offset); 8043 InsertVecSz = VecSz; 8044 } 8045 8046 APInt DemandedElts = APInt::getZero(NumElts); 8047 // TODO: Add support for Instruction::InsertValue. 8048 SmallVector<int> Mask; 8049 if (!E->ReorderIndices.empty()) { 8050 inversePermutation(E->ReorderIndices, Mask); 8051 Mask.append(InsertVecSz - Mask.size(), PoisonMaskElem); 8052 } else { 8053 Mask.assign(VecSz, PoisonMaskElem); 8054 std::iota(Mask.begin(), std::next(Mask.begin(), InsertVecSz), 0); 8055 } 8056 bool IsIdentity = true; 8057 SmallVector<int> PrevMask(InsertVecSz, PoisonMaskElem); 8058 Mask.swap(PrevMask); 8059 for (unsigned I = 0; I < NumScalars; ++I) { 8060 unsigned InsertIdx = *getInsertIndex(VL[PrevMask[I]]); 8061 DemandedElts.setBit(InsertIdx); 8062 IsIdentity &= InsertIdx - OffsetBeg == I; 8063 Mask[InsertIdx - OffsetBeg] = I; 8064 } 8065 assert(Offset < NumElts && "Failed to find vector index offset"); 8066 8067 InstructionCost Cost = 0; 8068 Cost -= TTI->getScalarizationOverhead(SrcVecTy, DemandedElts, 8069 /*Insert*/ true, /*Extract*/ false, 8070 CostKind); 8071 8072 // First cost - resize to actual vector size if not identity shuffle or 8073 // need to shift the vector. 8074 // Do not calculate the cost if the actual size is the register size and 8075 // we can merge this shuffle with the following SK_Select. 
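    // Rough worked example (hypothetical sizes): inserting 4 scalars into
    // lanes 4..7 of a <16 x float> destination on a target with 128-bit parts
    // gives NumOfParts = 4, VecScalarsSz = 4, Offset = 4, InsertVecSz = 4 and
    // IsWholeSubvector = true, so the blend cost further below is skipped
    // because the scalars cover one whole aligned part.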
8076 auto *InsertVecTy = FixedVectorType::get(ScalarTy, InsertVecSz); 8077 if (!IsIdentity) 8078 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, 8079 InsertVecTy, Mask); 8080 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) { 8081 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0)); 8082 })); 8083 // Second cost - permutation with subvector, if some elements are from the 8084 // initial vector or inserting a subvector. 8085 // TODO: Implement the analysis of the FirstInsert->getOperand(0) 8086 // subvector of ActualVecTy. 8087 SmallBitVector InMask = 8088 isUndefVector(FirstInsert->getOperand(0), 8089 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask)); 8090 if (!InMask.all() && NumScalars != NumElts && !IsWholeSubvector) { 8091 if (InsertVecSz != VecSz) { 8092 auto *ActualVecTy = FixedVectorType::get(ScalarTy, VecSz); 8093 Cost += TTI->getShuffleCost(TTI::SK_InsertSubvector, ActualVecTy, 8094 std::nullopt, CostKind, OffsetBeg - Offset, 8095 InsertVecTy); 8096 } else { 8097 for (unsigned I = 0, End = OffsetBeg - Offset; I < End; ++I) 8098 Mask[I] = InMask.test(I) ? PoisonMaskElem : I; 8099 for (unsigned I = OffsetBeg - Offset, End = OffsetEnd - Offset; 8100 I <= End; ++I) 8101 if (Mask[I] != PoisonMaskElem) 8102 Mask[I] = I + VecSz; 8103 for (unsigned I = OffsetEnd + 1 - Offset; I < VecSz; ++I) 8104 Mask[I] = 8105 ((I >= InMask.size()) || InMask.test(I)) ? PoisonMaskElem : I; 8106 Cost += TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, InsertVecTy, Mask); 8107 } 8108 } 8109 return Cost; 8110 } 8111 case Instruction::ZExt: 8112 case Instruction::SExt: 8113 case Instruction::FPToUI: 8114 case Instruction::FPToSI: 8115 case Instruction::FPExt: 8116 case Instruction::PtrToInt: 8117 case Instruction::IntToPtr: 8118 case Instruction::SIToFP: 8119 case Instruction::UIToFP: 8120 case Instruction::Trunc: 8121 case Instruction::FPTrunc: 8122 case Instruction::BitCast: { 8123 auto SrcIt = MinBWs.find(getOperandEntry(E, 0)); 8124 Type *SrcScalarTy = VL0->getOperand(0)->getType(); 8125 auto *SrcVecTy = FixedVectorType::get(SrcScalarTy, VL.size()); 8126 unsigned Opcode = ShuffleOrOp; 8127 unsigned VecOpcode = Opcode; 8128 if (!ScalarTy->isFloatingPointTy() && !SrcScalarTy->isFloatingPointTy() && 8129 (SrcIt != MinBWs.end() || It != MinBWs.end())) { 8130 // Check if the values are candidates to demote. 8131 unsigned SrcBWSz = DL->getTypeSizeInBits(SrcScalarTy); 8132 if (SrcIt != MinBWs.end()) { 8133 SrcBWSz = SrcIt->second.first; 8134 SrcScalarTy = IntegerType::get(F->getContext(), SrcBWSz); 8135 SrcVecTy = FixedVectorType::get(SrcScalarTy, VL.size()); 8136 } 8137 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy); 8138 if (BWSz == SrcBWSz) { 8139 VecOpcode = Instruction::BitCast; 8140 } else if (BWSz < SrcBWSz) { 8141 VecOpcode = Instruction::Trunc; 8142 } else if (It != MinBWs.end()) { 8143 assert(BWSz > SrcBWSz && "Invalid cast!"); 8144 VecOpcode = It->second.second ? Instruction::SExt : Instruction::ZExt; 8145 } 8146 } 8147 auto GetScalarCost = [&](unsigned Idx) -> InstructionCost { 8148 // Do not count cost here if minimum bitwidth is in effect and it is just 8149 // a bitcast (here it is just a noop). 8150 if (VecOpcode != Opcode && VecOpcode == Instruction::BitCast) 8151 return TTI::TCC_Free; 8152 auto *VI = VL0->getOpcode() == Opcode 8153 ? 
cast<Instruction>(UniqueValues[Idx]) 8154 : nullptr; 8155 return TTI->getCastInstrCost(Opcode, VL0->getType(), 8156 VL0->getOperand(0)->getType(), 8157 TTI::getCastContextHint(VI), CostKind, VI); 8158 }; 8159 auto GetVectorCost = [=](InstructionCost CommonCost) { 8160 // Do not count cost here if minimum bitwidth is in effect and it is just 8161 // a bitcast (here it is just a noop). 8162 if (VecOpcode != Opcode && VecOpcode == Instruction::BitCast) 8163 return CommonCost; 8164 auto *VI = VL0->getOpcode() == Opcode ? VL0 : nullptr; 8165 TTI::CastContextHint CCH = GetCastContextHint(VL0->getOperand(0)); 8166 return CommonCost + 8167 TTI->getCastInstrCost(VecOpcode, VecTy, SrcVecTy, CCH, CostKind, 8168 VecOpcode == Opcode ? VI : nullptr); 8169 }; 8170 return GetCostDiff(GetScalarCost, GetVectorCost); 8171 } 8172 case Instruction::FCmp: 8173 case Instruction::ICmp: 8174 case Instruction::Select: { 8175 CmpInst::Predicate VecPred, SwappedVecPred; 8176 auto MatchCmp = m_Cmp(VecPred, m_Value(), m_Value()); 8177 if (match(VL0, m_Select(MatchCmp, m_Value(), m_Value())) || 8178 match(VL0, MatchCmp)) 8179 SwappedVecPred = CmpInst::getSwappedPredicate(VecPred); 8180 else 8181 SwappedVecPred = VecPred = ScalarTy->isFloatingPointTy() 8182 ? CmpInst::BAD_FCMP_PREDICATE 8183 : CmpInst::BAD_ICMP_PREDICATE; 8184 auto GetScalarCost = [&](unsigned Idx) { 8185 auto *VI = cast<Instruction>(UniqueValues[Idx]); 8186 CmpInst::Predicate CurrentPred = ScalarTy->isFloatingPointTy() 8187 ? CmpInst::BAD_FCMP_PREDICATE 8188 : CmpInst::BAD_ICMP_PREDICATE; 8189 auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value()); 8190 if ((!match(VI, m_Select(MatchCmp, m_Value(), m_Value())) && 8191 !match(VI, MatchCmp)) || 8192 (CurrentPred != VecPred && CurrentPred != SwappedVecPred)) 8193 VecPred = SwappedVecPred = ScalarTy->isFloatingPointTy() 8194 ? CmpInst::BAD_FCMP_PREDICATE 8195 : CmpInst::BAD_ICMP_PREDICATE; 8196 8197 return TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy, 8198 Builder.getInt1Ty(), CurrentPred, CostKind, 8199 VI); 8200 }; 8201 auto GetVectorCost = [&](InstructionCost CommonCost) { 8202 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size()); 8203 8204 InstructionCost VecCost = TTI->getCmpSelInstrCost( 8205 E->getOpcode(), VecTy, MaskTy, VecPred, CostKind, VL0); 8206 // Check if it is possible and profitable to use min/max for selects 8207 // in VL. 8208 // 8209 auto IntrinsicAndUse = canConvertToMinOrMaxIntrinsic(VL); 8210 if (IntrinsicAndUse.first != Intrinsic::not_intrinsic) { 8211 IntrinsicCostAttributes CostAttrs(IntrinsicAndUse.first, VecTy, 8212 {VecTy, VecTy}); 8213 InstructionCost IntrinsicCost = 8214 TTI->getIntrinsicInstrCost(CostAttrs, CostKind); 8215 // If the selects are the only uses of the compares, they will be 8216 // dead and we can adjust the cost by removing their cost. 
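        // E.g. lanes of the form "%c = icmp slt %a, %b; select %c, %a, %b"
        // may be costed as a single vector smin; when the compares feed only
        // the selects, their vector cost is removed right below (illustrative
        // pattern; the actual matching is done by
        // canConvertToMinOrMaxIntrinsic).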
8217 if (IntrinsicAndUse.second) 8218 IntrinsicCost -= TTI->getCmpSelInstrCost(Instruction::ICmp, VecTy, 8219 MaskTy, VecPred, CostKind); 8220 VecCost = std::min(VecCost, IntrinsicCost); 8221 } 8222 return VecCost + CommonCost; 8223 }; 8224 return GetCostDiff(GetScalarCost, GetVectorCost); 8225 } 8226 case Instruction::FNeg: 8227 case Instruction::Add: 8228 case Instruction::FAdd: 8229 case Instruction::Sub: 8230 case Instruction::FSub: 8231 case Instruction::Mul: 8232 case Instruction::FMul: 8233 case Instruction::UDiv: 8234 case Instruction::SDiv: 8235 case Instruction::FDiv: 8236 case Instruction::URem: 8237 case Instruction::SRem: 8238 case Instruction::FRem: 8239 case Instruction::Shl: 8240 case Instruction::LShr: 8241 case Instruction::AShr: 8242 case Instruction::And: 8243 case Instruction::Or: 8244 case Instruction::Xor: { 8245 auto GetScalarCost = [&](unsigned Idx) { 8246 auto *VI = cast<Instruction>(UniqueValues[Idx]); 8247 unsigned OpIdx = isa<UnaryOperator>(VI) ? 0 : 1; 8248 TTI::OperandValueInfo Op1Info = TTI::getOperandInfo(VI->getOperand(0)); 8249 TTI::OperandValueInfo Op2Info = 8250 TTI::getOperandInfo(VI->getOperand(OpIdx)); 8251 SmallVector<const Value *> Operands(VI->operand_values()); 8252 return TTI->getArithmeticInstrCost(ShuffleOrOp, ScalarTy, CostKind, 8253 Op1Info, Op2Info, Operands, VI); 8254 }; 8255 auto GetVectorCost = [=](InstructionCost CommonCost) { 8256 unsigned OpIdx = isa<UnaryOperator>(VL0) ? 0 : 1; 8257 TTI::OperandValueInfo Op1Info = getOperandInfo(E->getOperand(0)); 8258 TTI::OperandValueInfo Op2Info = getOperandInfo(E->getOperand(OpIdx)); 8259 return TTI->getArithmeticInstrCost(ShuffleOrOp, VecTy, CostKind, Op1Info, 8260 Op2Info) + 8261 CommonCost; 8262 }; 8263 return GetCostDiff(GetScalarCost, GetVectorCost); 8264 } 8265 case Instruction::GetElementPtr: { 8266 return CommonCost + GetGEPCostDiff(VL, VL0); 8267 } 8268 case Instruction::Load: { 8269 auto GetScalarCost = [&](unsigned Idx) { 8270 auto *VI = cast<LoadInst>(UniqueValues[Idx]); 8271 return TTI->getMemoryOpCost(Instruction::Load, ScalarTy, VI->getAlign(), 8272 VI->getPointerAddressSpace(), CostKind, 8273 TTI::OperandValueInfo(), VI); 8274 }; 8275 auto *LI0 = cast<LoadInst>(VL0); 8276 auto GetVectorCost = [&](InstructionCost CommonCost) { 8277 InstructionCost VecLdCost; 8278 if (E->State == TreeEntry::Vectorize) { 8279 VecLdCost = TTI->getMemoryOpCost( 8280 Instruction::Load, VecTy, LI0->getAlign(), 8281 LI0->getPointerAddressSpace(), CostKind, TTI::OperandValueInfo()); 8282 } else { 8283 assert((E->State == TreeEntry::ScatterVectorize || 8284 E->State == TreeEntry::PossibleStridedVectorize) && 8285 "Unknown EntryState"); 8286 Align CommonAlignment = LI0->getAlign(); 8287 for (Value *V : UniqueValues) 8288 CommonAlignment = 8289 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign()); 8290 VecLdCost = TTI->getGatherScatterOpCost( 8291 Instruction::Load, VecTy, LI0->getPointerOperand(), 8292 /*VariableMask=*/false, CommonAlignment, CostKind); 8293 } 8294 return VecLdCost + CommonCost; 8295 }; 8296 8297 InstructionCost Cost = GetCostDiff(GetScalarCost, GetVectorCost); 8298 // If this node generates masked gather load then it is not a terminal node. 8299 // Hence address operand cost is estimated separately. 8300 if (E->State == TreeEntry::ScatterVectorize || 8301 E->State == TreeEntry::PossibleStridedVectorize) 8302 return Cost; 8303 8304 // Estimate cost of GEPs since this tree node is a terminator. 
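    // E.g. when the bundle loads from %p, %p+1, %p+2 and %p+3, the scalar code
    // materializes a GEP per non-base lane while the wide load only needs the
    // base pointer; GetGEPCostDiff below accounts for that difference
    // (illustrative sketch only).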
8305 SmallVector<Value *> PointerOps(VL.size()); 8306 for (auto [I, V] : enumerate(VL)) 8307 PointerOps[I] = cast<LoadInst>(V)->getPointerOperand(); 8308 return Cost + GetGEPCostDiff(PointerOps, LI0->getPointerOperand()); 8309 } 8310 case Instruction::Store: { 8311 bool IsReorder = !E->ReorderIndices.empty(); 8312 auto GetScalarCost = [=](unsigned Idx) { 8313 auto *VI = cast<StoreInst>(VL[Idx]); 8314 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(VI->getValueOperand()); 8315 return TTI->getMemoryOpCost(Instruction::Store, ScalarTy, VI->getAlign(), 8316 VI->getPointerAddressSpace(), CostKind, 8317 OpInfo, VI); 8318 }; 8319 auto *BaseSI = 8320 cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0); 8321 auto GetVectorCost = [=](InstructionCost CommonCost) { 8322 // We know that we can merge the stores. Calculate the cost. 8323 TTI::OperandValueInfo OpInfo = getOperandInfo(E->getOperand(0)); 8324 return TTI->getMemoryOpCost(Instruction::Store, VecTy, BaseSI->getAlign(), 8325 BaseSI->getPointerAddressSpace(), CostKind, 8326 OpInfo) + 8327 CommonCost; 8328 }; 8329 SmallVector<Value *> PointerOps(VL.size()); 8330 for (auto [I, V] : enumerate(VL)) { 8331 unsigned Idx = IsReorder ? E->ReorderIndices[I] : I; 8332 PointerOps[Idx] = cast<StoreInst>(V)->getPointerOperand(); 8333 } 8334 8335 return GetCostDiff(GetScalarCost, GetVectorCost) + 8336 GetGEPCostDiff(PointerOps, BaseSI->getPointerOperand()); 8337 } 8338 case Instruction::Call: { 8339 auto GetScalarCost = [&](unsigned Idx) { 8340 auto *CI = cast<CallInst>(UniqueValues[Idx]); 8341 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8342 if (ID != Intrinsic::not_intrinsic) { 8343 IntrinsicCostAttributes CostAttrs(ID, *CI, 1); 8344 return TTI->getIntrinsicInstrCost(CostAttrs, CostKind); 8345 } 8346 return TTI->getCallInstrCost(CI->getCalledFunction(), 8347 CI->getFunctionType()->getReturnType(), 8348 CI->getFunctionType()->params(), CostKind); 8349 }; 8350 auto GetVectorCost = [=](InstructionCost CommonCost) { 8351 auto *CI = cast<CallInst>(VL0); 8352 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); 8353 return std::min(VecCallCosts.first, VecCallCosts.second) + CommonCost; 8354 }; 8355 return GetCostDiff(GetScalarCost, GetVectorCost); 8356 } 8357 case Instruction::ShuffleVector: { 8358 assert(E->isAltShuffle() && 8359 ((Instruction::isBinaryOp(E->getOpcode()) && 8360 Instruction::isBinaryOp(E->getAltOpcode())) || 8361 (Instruction::isCast(E->getOpcode()) && 8362 Instruction::isCast(E->getAltOpcode())) || 8363 (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) && 8364 "Invalid Shuffle Vector Operand"); 8365 // Try to find the previous shuffle node with the same operands and same 8366 // main/alternate ops. 8367 auto TryFindNodeWithEqualOperands = [=]() { 8368 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) { 8369 if (TE.get() == E) 8370 break; 8371 if (TE->isAltShuffle() && 8372 ((TE->getOpcode() == E->getOpcode() && 8373 TE->getAltOpcode() == E->getAltOpcode()) || 8374 (TE->getOpcode() == E->getAltOpcode() && 8375 TE->getAltOpcode() == E->getOpcode())) && 8376 TE->hasEqualOperands(*E)) 8377 return true; 8378 } 8379 return false; 8380 }; 8381 auto GetScalarCost = [&](unsigned Idx) { 8382 auto *VI = cast<Instruction>(UniqueValues[Idx]); 8383 assert(E->isOpcodeOrAlt(VI) && "Unexpected main/alternate opcode"); 8384 (void)E; 8385 return TTI->getInstructionCost(VI, CostKind); 8386 }; 8387 // Need to clear CommonCost since the final shuffle cost is included into 8388 // vector cost. 
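    // For illustration, a 4-wide alternating bundle {a0+b0, a1-b1, a2+b2,
    // a3-b3} is costed below as one vector add, one vector sub and one
    // SK_PermuteTwoSrc blend with mask <0, 5, 2, 7> (hypothetical example of
    // how buildAltOpShuffleMask selects lanes from the two result vectors).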
8389 auto GetVectorCost = [&](InstructionCost) { 8390 // VecCost is equal to sum of the cost of creating 2 vectors 8391 // and the cost of creating shuffle. 8392 InstructionCost VecCost = 0; 8393 if (TryFindNodeWithEqualOperands()) { 8394 LLVM_DEBUG({ 8395 dbgs() << "SLP: diamond match for alternate node found.\n"; 8396 E->dump(); 8397 }); 8398 // No need to add new vector costs here since we're going to reuse 8399 // same main/alternate vector ops, just do different shuffling. 8400 } else if (Instruction::isBinaryOp(E->getOpcode())) { 8401 VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind); 8402 VecCost += 8403 TTI->getArithmeticInstrCost(E->getAltOpcode(), VecTy, CostKind); 8404 } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) { 8405 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size()); 8406 VecCost = TTI->getCmpSelInstrCost(E->getOpcode(), VecTy, MaskTy, 8407 CI0->getPredicate(), CostKind, VL0); 8408 VecCost += TTI->getCmpSelInstrCost( 8409 E->getOpcode(), VecTy, MaskTy, 8410 cast<CmpInst>(E->getAltOp())->getPredicate(), CostKind, 8411 E->getAltOp()); 8412 } else { 8413 Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType(); 8414 Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType(); 8415 auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size()); 8416 auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size()); 8417 VecCost = TTI->getCastInstrCost(E->getOpcode(), VecTy, Src0Ty, 8418 TTI::CastContextHint::None, CostKind); 8419 VecCost += TTI->getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty, 8420 TTI::CastContextHint::None, CostKind); 8421 } 8422 SmallVector<int> Mask; 8423 E->buildAltOpShuffleMask( 8424 [E](Instruction *I) { 8425 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 8426 return I->getOpcode() == E->getAltOpcode(); 8427 }, 8428 Mask); 8429 VecCost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, 8430 FinalVecTy, Mask); 8431 return VecCost; 8432 }; 8433 return GetCostDiff(GetScalarCost, GetVectorCost); 8434 } 8435 default: 8436 llvm_unreachable("Unknown instruction"); 8437 } 8438 } 8439 8440 bool BoUpSLP::isFullyVectorizableTinyTree(bool ForReduction) const { 8441 LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height " 8442 << VectorizableTree.size() << " is fully vectorizable .\n"); 8443 8444 auto &&AreVectorizableGathers = [this](const TreeEntry *TE, unsigned Limit) { 8445 SmallVector<int> Mask; 8446 return TE->State == TreeEntry::NeedToGather && 8447 !any_of(TE->Scalars, 8448 [this](Value *V) { return EphValues.contains(V); }) && 8449 (allConstant(TE->Scalars) || isSplat(TE->Scalars) || 8450 TE->Scalars.size() < Limit || 8451 ((TE->getOpcode() == Instruction::ExtractElement || 8452 all_of(TE->Scalars, 8453 [](Value *V) { 8454 return isa<ExtractElementInst, UndefValue>(V); 8455 })) && 8456 isFixedVectorShuffle(TE->Scalars, Mask)) || 8457 (TE->State == TreeEntry::NeedToGather && 8458 TE->getOpcode() == Instruction::Load && !TE->isAltShuffle())); 8459 }; 8460 8461 // We only handle trees of heights 1 and 2. 8462 if (VectorizableTree.size() == 1 && 8463 (VectorizableTree[0]->State == TreeEntry::Vectorize || 8464 (ForReduction && 8465 AreVectorizableGathers(VectorizableTree[0].get(), 8466 VectorizableTree[0]->Scalars.size()) && 8467 VectorizableTree[0]->getVectorFactor() > 2))) 8468 return true; 8469 8470 if (VectorizableTree.size() != 2) 8471 return false; 8472 8473 // Handle splat and all-constants stores. 
Also try to vectorize tiny trees 8474 // with the second gather nodes if they have less scalar operands rather than 8475 // the initial tree element (may be profitable to shuffle the second gather) 8476 // or they are extractelements, which form shuffle. 8477 SmallVector<int> Mask; 8478 if (VectorizableTree[0]->State == TreeEntry::Vectorize && 8479 AreVectorizableGathers(VectorizableTree[1].get(), 8480 VectorizableTree[0]->Scalars.size())) 8481 return true; 8482 8483 // Gathering cost would be too much for tiny trees. 8484 if (VectorizableTree[0]->State == TreeEntry::NeedToGather || 8485 (VectorizableTree[1]->State == TreeEntry::NeedToGather && 8486 VectorizableTree[0]->State != TreeEntry::ScatterVectorize && 8487 VectorizableTree[0]->State != TreeEntry::PossibleStridedVectorize)) 8488 return false; 8489 8490 return true; 8491 } 8492 8493 static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts, 8494 TargetTransformInfo *TTI, 8495 bool MustMatchOrInst) { 8496 // Look past the root to find a source value. Arbitrarily follow the 8497 // path through operand 0 of any 'or'. Also, peek through optional 8498 // shift-left-by-multiple-of-8-bits. 8499 Value *ZextLoad = Root; 8500 const APInt *ShAmtC; 8501 bool FoundOr = false; 8502 while (!isa<ConstantExpr>(ZextLoad) && 8503 (match(ZextLoad, m_Or(m_Value(), m_Value())) || 8504 (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) && 8505 ShAmtC->urem(8) == 0))) { 8506 auto *BinOp = cast<BinaryOperator>(ZextLoad); 8507 ZextLoad = BinOp->getOperand(0); 8508 if (BinOp->getOpcode() == Instruction::Or) 8509 FoundOr = true; 8510 } 8511 // Check if the input is an extended load of the required or/shift expression. 8512 Value *Load; 8513 if ((MustMatchOrInst && !FoundOr) || ZextLoad == Root || 8514 !match(ZextLoad, m_ZExt(m_Value(Load))) || !isa<LoadInst>(Load)) 8515 return false; 8516 8517 // Require that the total load bit width is a legal integer type. 8518 // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target. 8519 // But <16 x i8> --> i128 is not, so the backend probably can't reduce it. 8520 Type *SrcTy = Load->getType(); 8521 unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts; 8522 if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth))) 8523 return false; 8524 8525 // Everything matched - assume that we can fold the whole sequence using 8526 // load combining. 8527 LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at " 8528 << *(cast<Instruction>(Root)) << "\n"); 8529 8530 return true; 8531 } 8532 8533 bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const { 8534 if (RdxKind != RecurKind::Or) 8535 return false; 8536 8537 unsigned NumElts = VectorizableTree[0]->Scalars.size(); 8538 Value *FirstReduced = VectorizableTree[0]->Scalars[0]; 8539 return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI, 8540 /* MatchOr */ false); 8541 } 8542 8543 bool BoUpSLP::isLoadCombineCandidate() const { 8544 // Peek through a final sequence of stores and check if all operations are 8545 // likely to be load-combined. 8546 unsigned NumElts = VectorizableTree[0]->Scalars.size(); 8547 for (Value *Scalar : VectorizableTree[0]->Scalars) { 8548 Value *X; 8549 if (!match(Scalar, m_Store(m_Value(X), m_Value())) || 8550 !isLoadCombineCandidateImpl(X, NumElts, TTI, /* MatchOr */ true)) 8551 return false; 8552 } 8553 return true; 8554 } 8555 8556 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable(bool ForReduction) const { 8557 // No need to vectorize inserts of gathered values. 
8558 if (VectorizableTree.size() == 2 &&
8559 isa<InsertElementInst>(VectorizableTree[0]->Scalars[0]) &&
8560 VectorizableTree[1]->State == TreeEntry::NeedToGather &&
8561 (VectorizableTree[1]->getVectorFactor() <= 2 ||
8562 !(isSplat(VectorizableTree[1]->Scalars) ||
8563 allConstant(VectorizableTree[1]->Scalars))))
8564 return true;
8565
8566 // If the graph includes only PHI nodes and gathers, it is definitely not
8567 // profitable for vectorization, so we can skip it if the cost threshold is
8568 // the default. The cost of vectorized PHI nodes is almost always 0 plus the
8569 // cost of gathers/buildvectors.
8570 constexpr int Limit = 4;
8571 if (!ForReduction && !SLPCostThreshold.getNumOccurrences() &&
8572 !VectorizableTree.empty() &&
8573 all_of(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) {
8574 return (TE->State == TreeEntry::NeedToGather &&
8575 TE->getOpcode() != Instruction::ExtractElement &&
8576 count_if(TE->Scalars,
8577 [](Value *V) { return isa<ExtractElementInst>(V); }) <=
8578 Limit) ||
8579 TE->getOpcode() == Instruction::PHI;
8580 }))
8581 return true;
8582
8583 // We can vectorize the tree if its size is greater than or equal to the
8584 // minimum size specified by the MinTreeSize command line option.
8585 if (VectorizableTree.size() >= MinTreeSize)
8586 return false;
8587
8588 // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
8589 // can vectorize it if we can prove it fully vectorizable.
8590 if (isFullyVectorizableTinyTree(ForReduction))
8591 return false;
8592
8593 assert(VectorizableTree.empty()
8594 ? ExternalUses.empty()
8595 : true && "We shouldn't have any external users");
8596
8597 // Otherwise, we can't vectorize the tree. It is both tiny and not fully
8598 // vectorizable.
8599 return true;
8600 }
8601
8602 InstructionCost BoUpSLP::getSpillCost() const {
8603 // Walk from the bottom of the tree to the top, tracking which values are
8604 // live. When we see a call instruction that is not part of our tree,
8605 // query TTI to see if there is a cost to keeping values live over it
8606 // (for example, if spills and fills are required).
8607 unsigned BundleWidth = VectorizableTree.front()->Scalars.size();
8608 InstructionCost Cost = 0;
8609
8610 SmallPtrSet<Instruction *, 4> LiveValues;
8611 Instruction *PrevInst = nullptr;
8612
8613 // The entries in VectorizableTree are not necessarily ordered by their
8614 // position in basic blocks. Collect them and order them by dominance so later
8615 // instructions are guaranteed to be visited first. For instructions in
8616 // different basic blocks, we only scan to the beginning of the block, so
8617 // their order does not matter, as long as all instructions in a basic block
8618 // are grouped together. Using dominance ensures a deterministic order.
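  // The comparator used below orders instructions by descending DFS-in number
  // of their defining block and, within a block, so that an instruction that
  // comes after another is visited first, giving the bottom-up walk described
  // above.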
8619 SmallVector<Instruction *, 16> OrderedScalars; 8620 for (const auto &TEPtr : VectorizableTree) { 8621 if (TEPtr->State != TreeEntry::Vectorize) 8622 continue; 8623 Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]); 8624 if (!Inst) 8625 continue; 8626 OrderedScalars.push_back(Inst); 8627 } 8628 llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) { 8629 auto *NodeA = DT->getNode(A->getParent()); 8630 auto *NodeB = DT->getNode(B->getParent()); 8631 assert(NodeA && "Should only process reachable instructions"); 8632 assert(NodeB && "Should only process reachable instructions"); 8633 assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) && 8634 "Different nodes should have different DFS numbers"); 8635 if (NodeA != NodeB) 8636 return NodeA->getDFSNumIn() > NodeB->getDFSNumIn(); 8637 return B->comesBefore(A); 8638 }); 8639 8640 for (Instruction *Inst : OrderedScalars) { 8641 if (!PrevInst) { 8642 PrevInst = Inst; 8643 continue; 8644 } 8645 8646 // Update LiveValues. 8647 LiveValues.erase(PrevInst); 8648 for (auto &J : PrevInst->operands()) { 8649 if (isa<Instruction>(&*J) && getTreeEntry(&*J)) 8650 LiveValues.insert(cast<Instruction>(&*J)); 8651 } 8652 8653 LLVM_DEBUG({ 8654 dbgs() << "SLP: #LV: " << LiveValues.size(); 8655 for (auto *X : LiveValues) 8656 dbgs() << " " << X->getName(); 8657 dbgs() << ", Looking at "; 8658 Inst->dump(); 8659 }); 8660 8661 // Now find the sequence of instructions between PrevInst and Inst. 8662 unsigned NumCalls = 0; 8663 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(), 8664 PrevInstIt = 8665 PrevInst->getIterator().getReverse(); 8666 while (InstIt != PrevInstIt) { 8667 if (PrevInstIt == PrevInst->getParent()->rend()) { 8668 PrevInstIt = Inst->getParent()->rbegin(); 8669 continue; 8670 } 8671 8672 auto NoCallIntrinsic = [this](Instruction *I) { 8673 if (auto *II = dyn_cast<IntrinsicInst>(I)) { 8674 if (II->isAssumeLikeIntrinsic()) 8675 return true; 8676 FastMathFlags FMF; 8677 SmallVector<Type *, 4> Tys; 8678 for (auto &ArgOp : II->args()) 8679 Tys.push_back(ArgOp->getType()); 8680 if (auto *FPMO = dyn_cast<FPMathOperator>(II)) 8681 FMF = FPMO->getFastMathFlags(); 8682 IntrinsicCostAttributes ICA(II->getIntrinsicID(), II->getType(), Tys, 8683 FMF); 8684 InstructionCost IntrCost = 8685 TTI->getIntrinsicInstrCost(ICA, TTI::TCK_RecipThroughput); 8686 InstructionCost CallCost = TTI->getCallInstrCost( 8687 nullptr, II->getType(), Tys, TTI::TCK_RecipThroughput); 8688 if (IntrCost < CallCost) 8689 return true; 8690 } 8691 return false; 8692 }; 8693 8694 // Debug information does not impact spill cost. 8695 if (isa<CallBase>(&*PrevInstIt) && !NoCallIntrinsic(&*PrevInstIt) && 8696 &*PrevInstIt != PrevInst) 8697 NumCalls++; 8698 8699 ++PrevInstIt; 8700 } 8701 8702 if (NumCalls) { 8703 SmallVector<Type *, 4> V; 8704 for (auto *II : LiveValues) { 8705 auto *ScalarTy = II->getType(); 8706 if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy)) 8707 ScalarTy = VectorTy->getElementType(); 8708 V.push_back(FixedVectorType::get(ScalarTy, BundleWidth)); 8709 } 8710 Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V); 8711 } 8712 8713 PrevInst = Inst; 8714 } 8715 8716 return Cost; 8717 } 8718 8719 /// Checks if the \p IE1 instructions is followed by \p IE2 instruction in the 8720 /// buildvector sequence. 
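/// For illustration, in a hypothetical chain
///   %v0 = insertelement <4 x i32> poison, i32 %a, i32 0
///   %v1 = insertelement <4 x i32> %v0,    i32 %b, i32 1
/// the function is expected to return true for (IE1 = %v0, IE2 = %v1), since
/// %v0 comes first in the buildvector sequence.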
8721 static bool isFirstInsertElement(const InsertElementInst *IE1,
8722 const InsertElementInst *IE2) {
8723 if (IE1 == IE2)
8724 return false;
8725 const auto *I1 = IE1;
8726 const auto *I2 = IE2;
8727 const InsertElementInst *PrevI1;
8728 const InsertElementInst *PrevI2;
8729 unsigned Idx1 = *getInsertIndex(IE1);
8730 unsigned Idx2 = *getInsertIndex(IE2);
8731 do {
8732 if (I2 == IE1)
8733 return true;
8734 if (I1 == IE2)
8735 return false;
8736 PrevI1 = I1;
8737 PrevI2 = I2;
8738 if (I1 && (I1 == IE1 || I1->hasOneUse()) &&
8739 getInsertIndex(I1).value_or(Idx2) != Idx2)
8740 I1 = dyn_cast<InsertElementInst>(I1->getOperand(0));
8741 if (I2 && ((I2 == IE2 || I2->hasOneUse())) &&
8742 getInsertIndex(I2).value_or(Idx1) != Idx1)
8743 I2 = dyn_cast<InsertElementInst>(I2->getOperand(0));
8744 } while ((I1 && PrevI1 != I1) || (I2 && PrevI2 != I2));
8745 llvm_unreachable("Two different buildvectors not expected.");
8746 }
8747
8748 namespace {
8749 /// Returns the incoming Value * if the requested type is Value * too, or a
8750 /// default-constructed value otherwise.
8751 struct ValueSelect {
8752 template <typename U>
8753 static std::enable_if_t<std::is_same_v<Value *, U>, Value *> get(Value *V) {
8754 return V;
8755 }
8756 template <typename U>
8757 static std::enable_if_t<!std::is_same_v<Value *, U>, U> get(Value *) {
8758 return U();
8759 }
8760 };
8761 } // namespace
8762
8763 /// Performs the analysis of the provided shuffle masks and the requested
8764 /// actions on the vectors with the given shuffle masks. It tries to do it in
8765 /// several steps.
8766 /// 1. If the Base vector is not an undef vector, resize the very first mask to
8767 /// have a common VF and perform the action for 2 input vectors (including the
8768 /// non-undef Base). Other shuffle masks are combined with the result of the
8769 /// first stage and processed as a shuffle of 2 vectors.
8770 /// 2. If the Base is an undef vector and there is only 1 shuffle mask, perform
8771 /// the action only for 1 vector with the given mask, if it is not the identity
8772 /// mask.
8773 /// 3. If > 2 masks are used, perform the remaining shuffle actions for 2
8774 /// vectors, combining the masks properly between the steps.
8775 template <typename T>
8776 static T *performExtractsShuffleAction(
8777 MutableArrayRef<std::pair<T *, SmallVector<int>>> ShuffleMask, Value *Base,
8778 function_ref<unsigned(T *)> GetVF,
8779 function_ref<std::pair<T *, bool>(T *, ArrayRef<int>, bool)> ResizeAction,
8780 function_ref<T *(ArrayRef<int>, ArrayRef<T *>)> Action) {
8781 assert(!ShuffleMask.empty() && "Empty list of shuffles for inserts.");
8782 SmallVector<int> Mask(ShuffleMask.begin()->second);
8783 auto VMIt = std::next(ShuffleMask.begin());
8784 T *Prev = nullptr;
8785 SmallBitVector UseMask =
8786 buildUseMask(Mask.size(), Mask, UseMask::UndefsAsMask);
8787 SmallBitVector IsBaseUndef = isUndefVector(Base, UseMask);
8788 if (!IsBaseUndef.all()) {
8789 // Base is not undef, need to combine it with the next subvectors.
8790 std::pair<T *, bool> Res =
8791 ResizeAction(ShuffleMask.begin()->first, Mask, /*ForSingleMask=*/false);
8792 SmallBitVector IsBasePoison = isUndefVector<true>(Base, UseMask);
8793 for (unsigned Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
8794 if (Mask[Idx] == PoisonMaskElem)
8795 Mask[Idx] = IsBasePoison.test(Idx) ? PoisonMaskElem : Idx;
8796 else
8797 Mask[Idx] = (Res.second ?
Idx : Mask[Idx]) + VF; 8798 } 8799 auto *V = ValueSelect::get<T *>(Base); 8800 (void)V; 8801 assert((!V || GetVF(V) == Mask.size()) && 8802 "Expected base vector of VF number of elements."); 8803 Prev = Action(Mask, {nullptr, Res.first}); 8804 } else if (ShuffleMask.size() == 1) { 8805 // Base is undef and only 1 vector is shuffled - perform the action only for 8806 // single vector, if the mask is not the identity mask. 8807 std::pair<T *, bool> Res = ResizeAction(ShuffleMask.begin()->first, Mask, 8808 /*ForSingleMask=*/true); 8809 if (Res.second) 8810 // Identity mask is found. 8811 Prev = Res.first; 8812 else 8813 Prev = Action(Mask, {ShuffleMask.begin()->first}); 8814 } else { 8815 // Base is undef and at least 2 input vectors shuffled - perform 2 vectors 8816 // shuffles step by step, combining shuffle between the steps. 8817 unsigned Vec1VF = GetVF(ShuffleMask.begin()->first); 8818 unsigned Vec2VF = GetVF(VMIt->first); 8819 if (Vec1VF == Vec2VF) { 8820 // No need to resize the input vectors since they are of the same size, we 8821 // can shuffle them directly. 8822 ArrayRef<int> SecMask = VMIt->second; 8823 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) { 8824 if (SecMask[I] != PoisonMaskElem) { 8825 assert(Mask[I] == PoisonMaskElem && "Multiple uses of scalars."); 8826 Mask[I] = SecMask[I] + Vec1VF; 8827 } 8828 } 8829 Prev = Action(Mask, {ShuffleMask.begin()->first, VMIt->first}); 8830 } else { 8831 // Vectors of different sizes - resize and reshuffle. 8832 std::pair<T *, bool> Res1 = ResizeAction(ShuffleMask.begin()->first, Mask, 8833 /*ForSingleMask=*/false); 8834 std::pair<T *, bool> Res2 = 8835 ResizeAction(VMIt->first, VMIt->second, /*ForSingleMask=*/false); 8836 ArrayRef<int> SecMask = VMIt->second; 8837 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) { 8838 if (Mask[I] != PoisonMaskElem) { 8839 assert(SecMask[I] == PoisonMaskElem && "Multiple uses of scalars."); 8840 if (Res1.second) 8841 Mask[I] = I; 8842 } else if (SecMask[I] != PoisonMaskElem) { 8843 assert(Mask[I] == PoisonMaskElem && "Multiple uses of scalars."); 8844 Mask[I] = (Res2.second ? I : SecMask[I]) + VF; 8845 } 8846 } 8847 Prev = Action(Mask, {Res1.first, Res2.first}); 8848 } 8849 VMIt = std::next(VMIt); 8850 } 8851 bool IsBaseNotUndef = !IsBaseUndef.all(); 8852 (void)IsBaseNotUndef; 8853 // Perform requested actions for the remaining masks/vectors. 8854 for (auto E = ShuffleMask.end(); VMIt != E; ++VMIt) { 8855 // Shuffle other input vectors, if any. 8856 std::pair<T *, bool> Res = 8857 ResizeAction(VMIt->first, VMIt->second, /*ForSingleMask=*/false); 8858 ArrayRef<int> SecMask = VMIt->second; 8859 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) { 8860 if (SecMask[I] != PoisonMaskElem) { 8861 assert((Mask[I] == PoisonMaskElem || IsBaseNotUndef) && 8862 "Multiple uses of scalars."); 8863 Mask[I] = (Res.second ? 
I : SecMask[I]) + VF; 8864 } else if (Mask[I] != PoisonMaskElem) { 8865 Mask[I] = I; 8866 } 8867 } 8868 Prev = Action(Mask, {Prev, Res.first}); 8869 } 8870 return Prev; 8871 } 8872 8873 InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) { 8874 InstructionCost Cost = 0; 8875 LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size " 8876 << VectorizableTree.size() << ".\n"); 8877 8878 unsigned BundleWidth = VectorizableTree[0]->Scalars.size(); 8879 8880 SmallPtrSet<Value *, 4> CheckedExtracts; 8881 for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) { 8882 TreeEntry &TE = *VectorizableTree[I]; 8883 if (TE.State == TreeEntry::NeedToGather) { 8884 if (const TreeEntry *E = getTreeEntry(TE.getMainOp()); 8885 E && E->getVectorFactor() == TE.getVectorFactor() && 8886 E->isSame(TE.Scalars)) { 8887 // Some gather nodes might be absolutely the same as some vectorizable 8888 // nodes after reordering, need to handle it. 8889 LLVM_DEBUG(dbgs() << "SLP: Adding cost 0 for bundle " 8890 << shortBundleName(TE.Scalars) << ".\n" 8891 << "SLP: Current total cost = " << Cost << "\n"); 8892 continue; 8893 } 8894 } 8895 8896 InstructionCost C = getEntryCost(&TE, VectorizedVals, CheckedExtracts); 8897 Cost += C; 8898 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle " 8899 << shortBundleName(TE.Scalars) << ".\n" 8900 << "SLP: Current total cost = " << Cost << "\n"); 8901 } 8902 8903 SmallPtrSet<Value *, 16> ExtractCostCalculated; 8904 InstructionCost ExtractCost = 0; 8905 SmallVector<MapVector<const TreeEntry *, SmallVector<int>>> ShuffleMasks; 8906 SmallVector<std::pair<Value *, const TreeEntry *>> FirstUsers; 8907 SmallVector<APInt> DemandedElts; 8908 SmallDenseSet<Value *, 4> UsedInserts; 8909 DenseSet<Value *> VectorCasts; 8910 for (ExternalUser &EU : ExternalUses) { 8911 // We only add extract cost once for the same scalar. 8912 if (!isa_and_nonnull<InsertElementInst>(EU.User) && 8913 !ExtractCostCalculated.insert(EU.Scalar).second) 8914 continue; 8915 8916 // Uses by ephemeral values are free (because the ephemeral value will be 8917 // removed prior to code generation, and so the extraction will be 8918 // removed as well). 8919 if (EphValues.count(EU.User)) 8920 continue; 8921 8922 // No extract cost for vector "scalar" 8923 if (isa<FixedVectorType>(EU.Scalar->getType())) 8924 continue; 8925 8926 // If found user is an insertelement, do not calculate extract cost but try 8927 // to detect it as a final shuffled/identity match. 8928 if (auto *VU = dyn_cast_or_null<InsertElementInst>(EU.User)) { 8929 if (auto *FTy = dyn_cast<FixedVectorType>(VU->getType())) { 8930 if (!UsedInserts.insert(VU).second) 8931 continue; 8932 std::optional<unsigned> InsertIdx = getInsertIndex(VU); 8933 if (InsertIdx) { 8934 const TreeEntry *ScalarTE = getTreeEntry(EU.Scalar); 8935 auto *It = find_if( 8936 FirstUsers, 8937 [this, VU](const std::pair<Value *, const TreeEntry *> &Pair) { 8938 return areTwoInsertFromSameBuildVector( 8939 VU, cast<InsertElementInst>(Pair.first), 8940 [this](InsertElementInst *II) -> Value * { 8941 Value *Op0 = II->getOperand(0); 8942 if (getTreeEntry(II) && !getTreeEntry(Op0)) 8943 return nullptr; 8944 return Op0; 8945 }); 8946 }); 8947 int VecId = -1; 8948 if (It == FirstUsers.end()) { 8949 (void)ShuffleMasks.emplace_back(); 8950 SmallVectorImpl<int> &Mask = ShuffleMasks.back()[ScalarTE]; 8951 if (Mask.empty()) 8952 Mask.assign(FTy->getNumElements(), PoisonMaskElem); 8953 // Find the insertvector, vectorized in tree, if any. 
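// For illustration only (hypothetical IR): given the chain
//   %ie0 = insertelement <4 x i32> poison, i32 %a, i32 0  ; in a vectorized tree entry
//   %ie1 = insertelement <4 x i32> %ie0, i32 %b, i32 1    ; same vectorized tree entry
//   %ie2 = insertelement <4 x i32> %ie1, i32 %c, i32 2    ; current external user
// the walk below finds the vectorized prefix (%ie0, %ie1) and marks lanes 0 and
// 1 of the mask as identity lanes of that buildvector.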
8954 Value *Base = VU; 8955 while (auto *IEBase = dyn_cast<InsertElementInst>(Base)) { 8956 if (IEBase != EU.User && 8957 (!IEBase->hasOneUse() || 8958 getInsertIndex(IEBase).value_or(*InsertIdx) == *InsertIdx)) 8959 break; 8960 // Build the mask for the vectorized insertelement instructions. 8961 if (const TreeEntry *E = getTreeEntry(IEBase)) { 8962 VU = IEBase; 8963 do { 8964 IEBase = cast<InsertElementInst>(Base); 8965 int Idx = *getInsertIndex(IEBase); 8966 assert(Mask[Idx] == PoisonMaskElem && 8967 "InsertElementInstruction used already."); 8968 Mask[Idx] = Idx; 8969 Base = IEBase->getOperand(0); 8970 } while (E == getTreeEntry(Base)); 8971 break; 8972 } 8973 Base = cast<InsertElementInst>(Base)->getOperand(0); 8974 } 8975 FirstUsers.emplace_back(VU, ScalarTE); 8976 DemandedElts.push_back(APInt::getZero(FTy->getNumElements())); 8977 VecId = FirstUsers.size() - 1; 8978 auto It = MinBWs.find(ScalarTE); 8979 if (It != MinBWs.end() && VectorCasts.insert(EU.Scalar).second) { 8980 unsigned BWSz = It->second.second; 8981 unsigned SrcBWSz = DL->getTypeSizeInBits(FTy->getElementType()); 8982 unsigned VecOpcode; 8983 if (BWSz < SrcBWSz) 8984 VecOpcode = Instruction::Trunc; 8985 else 8986 VecOpcode = 8987 It->second.second ? Instruction::SExt : Instruction::ZExt; 8988 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 8989 InstructionCost C = TTI->getCastInstrCost( 8990 VecOpcode, FTy, 8991 FixedVectorType::get( 8992 IntegerType::get(FTy->getContext(), It->second.first), 8993 FTy->getNumElements()), 8994 TTI::CastContextHint::None, CostKind); 8995 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 8996 << " for extending externally used vector with " 8997 "non-equal minimum bitwidth.\n"); 8998 Cost += C; 8999 } 9000 } else { 9001 if (isFirstInsertElement(VU, cast<InsertElementInst>(It->first))) 9002 It->first = VU; 9003 VecId = std::distance(FirstUsers.begin(), It); 9004 } 9005 int InIdx = *InsertIdx; 9006 SmallVectorImpl<int> &Mask = ShuffleMasks[VecId][ScalarTE]; 9007 if (Mask.empty()) 9008 Mask.assign(FTy->getNumElements(), PoisonMaskElem); 9009 Mask[InIdx] = EU.Lane; 9010 DemandedElts[VecId].setBit(InIdx); 9011 continue; 9012 } 9013 } 9014 } 9015 9016 // If we plan to rewrite the tree in a smaller type, we will need to sign 9017 // extend the extracted value back to the original type. Here, we account 9018 // for the extract and the added cost of the sign extend if needed. 9019 auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth); 9020 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 9021 auto It = MinBWs.find(getTreeEntry(EU.Scalar)); 9022 if (It != MinBWs.end()) { 9023 auto *MinTy = IntegerType::get(F->getContext(), It->second.first); 9024 unsigned Extend = 9025 It->second.second ? Instruction::SExt : Instruction::ZExt; 9026 VecTy = FixedVectorType::get(MinTy, BundleWidth); 9027 ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(), 9028 VecTy, EU.Lane); 9029 } else { 9030 ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, 9031 CostKind, EU.Lane); 9032 } 9033 } 9034 // Add reduced value cost, if resized. 9035 if (!VectorizedVals.empty()) { 9036 auto BWIt = MinBWs.find(VectorizableTree.front().get()); 9037 if (BWIt != MinBWs.end()) { 9038 Type *DstTy = VectorizableTree.front()->Scalars.front()->getType(); 9039 unsigned OriginalSz = DL->getTypeSizeInBits(DstTy); 9040 unsigned Opcode = Instruction::Trunc; 9041 if (OriginalSz < BWIt->second.first) 9042 Opcode = BWIt->second.second ? 
Instruction::SExt : Instruction::ZExt; 9043 Type *SrcTy = IntegerType::get(DstTy->getContext(), BWIt->second.first); 9044 Cost += TTI->getCastInstrCost(Opcode, DstTy, SrcTy, 9045 TTI::CastContextHint::None, 9046 TTI::TCK_RecipThroughput); 9047 } 9048 } 9049 9050 InstructionCost SpillCost = getSpillCost(); 9051 Cost += SpillCost + ExtractCost; 9052 auto &&ResizeToVF = [this, &Cost](const TreeEntry *TE, ArrayRef<int> Mask, 9053 bool) { 9054 InstructionCost C = 0; 9055 unsigned VF = Mask.size(); 9056 unsigned VecVF = TE->getVectorFactor(); 9057 if (VF != VecVF && 9058 (any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); }) || 9059 !ShuffleVectorInst::isIdentityMask(Mask, VF))) { 9060 SmallVector<int> OrigMask(VecVF, PoisonMaskElem); 9061 std::copy(Mask.begin(), std::next(Mask.begin(), std::min(VF, VecVF)), 9062 OrigMask.begin()); 9063 C = TTI->getShuffleCost( 9064 TTI::SK_PermuteSingleSrc, 9065 FixedVectorType::get(TE->getMainOp()->getType(), VecVF), OrigMask); 9066 LLVM_DEBUG( 9067 dbgs() << "SLP: Adding cost " << C 9068 << " for final shuffle of insertelement external users.\n"; 9069 TE->dump(); dbgs() << "SLP: Current total cost = " << Cost << "\n"); 9070 Cost += C; 9071 return std::make_pair(TE, true); 9072 } 9073 return std::make_pair(TE, false); 9074 }; 9075 // Calculate the cost of the reshuffled vectors, if any. 9076 for (int I = 0, E = FirstUsers.size(); I < E; ++I) { 9077 Value *Base = cast<Instruction>(FirstUsers[I].first)->getOperand(0); 9078 auto Vector = ShuffleMasks[I].takeVector(); 9079 unsigned VF = 0; 9080 auto EstimateShufflesCost = [&](ArrayRef<int> Mask, 9081 ArrayRef<const TreeEntry *> TEs) { 9082 assert((TEs.size() == 1 || TEs.size() == 2) && 9083 "Expected exactly 1 or 2 tree entries."); 9084 if (TEs.size() == 1) { 9085 if (VF == 0) 9086 VF = TEs.front()->getVectorFactor(); 9087 auto *FTy = 9088 FixedVectorType::get(TEs.back()->Scalars.front()->getType(), VF); 9089 if (!ShuffleVectorInst::isIdentityMask(Mask, VF) && 9090 !all_of(enumerate(Mask), [=](const auto &Data) { 9091 return Data.value() == PoisonMaskElem || 9092 (Data.index() < VF && 9093 static_cast<int>(Data.index()) == Data.value()); 9094 })) { 9095 InstructionCost C = 9096 TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FTy, Mask); 9097 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 9098 << " for final shuffle of insertelement " 9099 "external users.\n"; 9100 TEs.front()->dump(); 9101 dbgs() << "SLP: Current total cost = " << Cost << "\n"); 9102 Cost += C; 9103 } 9104 } else { 9105 if (VF == 0) { 9106 if (TEs.front() && 9107 TEs.front()->getVectorFactor() == TEs.back()->getVectorFactor()) 9108 VF = TEs.front()->getVectorFactor(); 9109 else 9110 VF = Mask.size(); 9111 } 9112 auto *FTy = 9113 FixedVectorType::get(TEs.back()->Scalars.front()->getType(), VF); 9114 InstructionCost C = 9115 TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, FTy, Mask); 9116 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 9117 << " for final shuffle of vector node and external " 9118 "insertelement users.\n"; 9119 if (TEs.front()) { TEs.front()->dump(); } TEs.back()->dump(); 9120 dbgs() << "SLP: Current total cost = " << Cost << "\n"); 9121 Cost += C; 9122 } 9123 VF = Mask.size(); 9124 return TEs.back(); 9125 }; 9126 (void)performExtractsShuffleAction<const TreeEntry>( 9127 MutableArrayRef(Vector.data(), Vector.size()), Base, 9128 [](const TreeEntry *E) { return E->getVectorFactor(); }, ResizeToVF, 9129 EstimateShufflesCost); 9130 InstructionCost InsertCost = TTI->getScalarizationOverhead( 9131 
cast<FixedVectorType>(FirstUsers[I].first->getType()), DemandedElts[I], 9132 /*Insert*/ true, /*Extract*/ false, TTI::TCK_RecipThroughput); 9133 Cost -= InsertCost; 9134 } 9135 9136 #ifndef NDEBUG 9137 SmallString<256> Str; 9138 { 9139 raw_svector_ostream OS(Str); 9140 OS << "SLP: Spill Cost = " << SpillCost << ".\n" 9141 << "SLP: Extract Cost = " << ExtractCost << ".\n" 9142 << "SLP: Total Cost = " << Cost << ".\n"; 9143 } 9144 LLVM_DEBUG(dbgs() << Str); 9145 if (ViewSLPTree) 9146 ViewGraph(this, "SLP" + F->getName(), false, Str); 9147 #endif 9148 9149 return Cost; 9150 } 9151 9152 /// Tries to find extractelement instructions with constant indices from fixed 9153 /// vector type and gather such instructions into a bunch, which highly likely 9154 /// might be detected as a shuffle of 1 or 2 input vectors. If this attempt was 9155 /// successful, the matched scalars are replaced by poison values in \p VL for 9156 /// future analysis. 9157 std::optional<TTI::ShuffleKind> 9158 BoUpSLP::tryToGatherSingleRegisterExtractElements( 9159 MutableArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) const { 9160 // Scan list of gathered scalars for extractelements that can be represented 9161 // as shuffles. 9162 MapVector<Value *, SmallVector<int>> VectorOpToIdx; 9163 SmallVector<int> UndefVectorExtracts; 9164 for (int I = 0, E = VL.size(); I < E; ++I) { 9165 auto *EI = dyn_cast<ExtractElementInst>(VL[I]); 9166 if (!EI) { 9167 if (isa<UndefValue>(VL[I])) 9168 UndefVectorExtracts.push_back(I); 9169 continue; 9170 } 9171 auto *VecTy = dyn_cast<FixedVectorType>(EI->getVectorOperandType()); 9172 if (!VecTy || !isa<ConstantInt, UndefValue>(EI->getIndexOperand())) 9173 continue; 9174 std::optional<unsigned> Idx = getExtractIndex(EI); 9175 // Undefined index. 9176 if (!Idx) { 9177 UndefVectorExtracts.push_back(I); 9178 continue; 9179 } 9180 SmallBitVector ExtractMask(VecTy->getNumElements(), true); 9181 ExtractMask.reset(*Idx); 9182 if (isUndefVector(EI->getVectorOperand(), ExtractMask).all()) { 9183 UndefVectorExtracts.push_back(I); 9184 continue; 9185 } 9186 VectorOpToIdx[EI->getVectorOperand()].push_back(I); 9187 } 9188 // Sort the vector operands by the maximum number of uses in extractelements. 9189 MapVector<unsigned, SmallVector<Value *>> VFToVector; 9190 for (const auto &Data : VectorOpToIdx) 9191 VFToVector[cast<FixedVectorType>(Data.first->getType())->getNumElements()] 9192 .push_back(Data.first); 9193 for (auto &Data : VFToVector) { 9194 stable_sort(Data.second, [&VectorOpToIdx](Value *V1, Value *V2) { 9195 return VectorOpToIdx.find(V1)->second.size() > 9196 VectorOpToIdx.find(V2)->second.size(); 9197 }); 9198 } 9199 // Find the best pair of the vectors with the same number of elements or a 9200 // single vector. 
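// For illustration only (hypothetical counts): if 3 scalars are extracted from
// %v1, 2 scalars from %v2 with the same number of elements, and 1 scalar is an
// extract from an effectively undef vector, then SingleMax = 3 + 1 = 4 and
// PairMax = 3 + 2 + 1 = 6, so the (%v1, %v2) pair is preferred over a
// single-vector shuffle.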
9201 const int UndefSz = UndefVectorExtracts.size(); 9202 unsigned SingleMax = 0; 9203 Value *SingleVec = nullptr; 9204 unsigned PairMax = 0; 9205 std::pair<Value *, Value *> PairVec(nullptr, nullptr); 9206 for (auto &Data : VFToVector) { 9207 Value *V1 = Data.second.front(); 9208 if (SingleMax < VectorOpToIdx[V1].size() + UndefSz) { 9209 SingleMax = VectorOpToIdx[V1].size() + UndefSz; 9210 SingleVec = V1; 9211 } 9212 Value *V2 = nullptr; 9213 if (Data.second.size() > 1) 9214 V2 = *std::next(Data.second.begin()); 9215 if (V2 && PairMax < VectorOpToIdx[V1].size() + VectorOpToIdx[V2].size() + 9216 UndefSz) { 9217 PairMax = VectorOpToIdx[V1].size() + VectorOpToIdx[V2].size() + UndefSz; 9218 PairVec = std::make_pair(V1, V2); 9219 } 9220 } 9221 if (SingleMax == 0 && PairMax == 0 && UndefSz == 0) 9222 return std::nullopt; 9223 // Check if better to perform a shuffle of 2 vectors or just of a single 9224 // vector. 9225 SmallVector<Value *> SavedVL(VL.begin(), VL.end()); 9226 SmallVector<Value *> GatheredExtracts( 9227 VL.size(), PoisonValue::get(VL.front()->getType())); 9228 if (SingleMax >= PairMax && SingleMax) { 9229 for (int Idx : VectorOpToIdx[SingleVec]) 9230 std::swap(GatheredExtracts[Idx], VL[Idx]); 9231 } else { 9232 for (Value *V : {PairVec.first, PairVec.second}) 9233 for (int Idx : VectorOpToIdx[V]) 9234 std::swap(GatheredExtracts[Idx], VL[Idx]); 9235 } 9236 // Add extracts from undefs too. 9237 for (int Idx : UndefVectorExtracts) 9238 std::swap(GatheredExtracts[Idx], VL[Idx]); 9239 // Check that gather of extractelements can be represented as just a 9240 // shuffle of a single/two vectors the scalars are extracted from. 9241 std::optional<TTI::ShuffleKind> Res = 9242 isFixedVectorShuffle(GatheredExtracts, Mask); 9243 if (!Res) { 9244 // TODO: try to check other subsets if possible. 9245 // Restore the original VL if attempt was not successful. 9246 copy(SavedVL, VL.begin()); 9247 return std::nullopt; 9248 } 9249 // Restore unused scalars from mask, if some of the extractelements were not 9250 // selected for shuffle. 9251 for (int I = 0, E = GatheredExtracts.size(); I < E; ++I) { 9252 if (Mask[I] == PoisonMaskElem && !isa<PoisonValue>(GatheredExtracts[I]) && 9253 isa<UndefValue>(GatheredExtracts[I])) { 9254 std::swap(VL[I], GatheredExtracts[I]); 9255 continue; 9256 } 9257 auto *EI = dyn_cast<ExtractElementInst>(VL[I]); 9258 if (!EI || !isa<FixedVectorType>(EI->getVectorOperandType()) || 9259 !isa<ConstantInt, UndefValue>(EI->getIndexOperand()) || 9260 is_contained(UndefVectorExtracts, I)) 9261 continue; 9262 } 9263 return Res; 9264 } 9265 9266 /// Tries to find extractelement instructions with constant indices from fixed 9267 /// vector type and gather such instructions into a bunch, which highly likely 9268 /// might be detected as a shuffle of 1 or 2 input vectors. If this attempt was 9269 /// successful, the matched scalars are replaced by poison values in \p VL for 9270 /// future analysis. 9271 SmallVector<std::optional<TTI::ShuffleKind>> 9272 BoUpSLP::tryToGatherExtractElements(SmallVectorImpl<Value *> &VL, 9273 SmallVectorImpl<int> &Mask, 9274 unsigned NumParts) const { 9275 assert(NumParts > 0 && "NumParts expected be greater than or equal to 1."); 9276 SmallVector<std::optional<TTI::ShuffleKind>> ShufflesRes(NumParts); 9277 Mask.assign(VL.size(), PoisonMaskElem); 9278 unsigned SliceSize = VL.size() / NumParts; 9279 for (unsigned Part = 0; Part < NumParts; ++Part) { 9280 // Scan list of gathered scalars for extractelements that can be represented 9281 // as shuffles. 
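// For illustration only: with 8 gathered scalars and NumParts = 2 vector
// registers, SliceSize = 4, so the scan runs separately on VL[0..3] and
// VL[4..7] and each per-register sub-mask is copied into the matching slice of
// Mask.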
9282 MutableArrayRef<Value *> SubVL = 9283 MutableArrayRef(VL).slice(Part * SliceSize, SliceSize); 9284 SmallVector<int> SubMask; 9285 std::optional<TTI::ShuffleKind> Res = 9286 tryToGatherSingleRegisterExtractElements(SubVL, SubMask); 9287 ShufflesRes[Part] = Res; 9288 copy(SubMask, std::next(Mask.begin(), Part * SliceSize)); 9289 } 9290 if (none_of(ShufflesRes, [](const std::optional<TTI::ShuffleKind> &Res) { 9291 return Res.has_value(); 9292 })) 9293 ShufflesRes.clear(); 9294 return ShufflesRes; 9295 } 9296 9297 std::optional<TargetTransformInfo::ShuffleKind> 9298 BoUpSLP::isGatherShuffledSingleRegisterEntry( 9299 const TreeEntry *TE, ArrayRef<Value *> VL, MutableArrayRef<int> Mask, 9300 SmallVectorImpl<const TreeEntry *> &Entries, unsigned Part) { 9301 Entries.clear(); 9302 // TODO: currently checking only for Scalars in the tree entry, need to count 9303 // reused elements too for better cost estimation. 9304 const EdgeInfo &TEUseEI = TE->UserTreeIndices.front(); 9305 const Instruction *TEInsertPt = &getLastInstructionInBundle(TEUseEI.UserTE); 9306 const BasicBlock *TEInsertBlock = nullptr; 9307 // Main node of PHI entries keeps the correct order of operands/incoming 9308 // blocks. 9309 if (auto *PHI = dyn_cast<PHINode>(TEUseEI.UserTE->getMainOp())) { 9310 TEInsertBlock = PHI->getIncomingBlock(TEUseEI.EdgeIdx); 9311 TEInsertPt = TEInsertBlock->getTerminator(); 9312 } else { 9313 TEInsertBlock = TEInsertPt->getParent(); 9314 } 9315 auto *NodeUI = DT->getNode(TEInsertBlock); 9316 assert(NodeUI && "Should only process reachable instructions"); 9317 SmallPtrSet<Value *, 4> GatheredScalars(VL.begin(), VL.end()); 9318 auto CheckOrdering = [&](const Instruction *InsertPt) { 9319 // Argument InsertPt is an instruction where vector code for some other 9320 // tree entry (one that shares one or more scalars with TE) is going to be 9321 // generated. This lambda returns true if insertion point of vector code 9322 // for the TE dominates that point (otherwise dependency is the other way 9323 // around). The other node is not limited to be of a gather kind. Gather 9324 // nodes are not scheduled and their vector code is inserted before their 9325 // first user. If user is PHI, that is supposed to be at the end of a 9326 // predecessor block. Otherwise it is the last instruction among scalars of 9327 // the user node. So, instead of checking dependency between instructions 9328 // themselves, we check dependency between their insertion points for vector 9329 // code (since each scalar instruction ends up as a lane of a vector 9330 // instruction). 9331 const BasicBlock *InsertBlock = InsertPt->getParent(); 9332 auto *NodeEUI = DT->getNode(InsertBlock); 9333 if (!NodeEUI) 9334 return false; 9335 assert((NodeUI == NodeEUI) == 9336 (NodeUI->getDFSNumIn() == NodeEUI->getDFSNumIn()) && 9337 "Different nodes should have different DFS numbers"); 9338 // Check the order of the gather nodes users. 9339 if (TEInsertPt->getParent() != InsertBlock && 9340 (DT->dominates(NodeUI, NodeEUI) || !DT->dominates(NodeEUI, NodeUI))) 9341 return false; 9342 if (TEInsertPt->getParent() == InsertBlock && 9343 TEInsertPt->comesBefore(InsertPt)) 9344 return false; 9345 return true; 9346 }; 9347 // Find all tree entries used by the gathered values. If no common entries 9348 // found - not a shuffle. 9349 // Here we build a set of tree nodes for each gathered value and trying to 9350 // find the intersection between these sets. 
If we have at least one common 9351 // tree node for each gathered value - we have just a permutation of the 9352 // single vector. If we have 2 different sets, we're in situation where we 9353 // have a permutation of 2 input vectors. 9354 SmallVector<SmallPtrSet<const TreeEntry *, 4>> UsedTEs; 9355 DenseMap<Value *, int> UsedValuesEntry; 9356 for (Value *V : VL) { 9357 if (isConstant(V)) 9358 continue; 9359 // Build a list of tree entries where V is used. 9360 SmallPtrSet<const TreeEntry *, 4> VToTEs; 9361 for (const TreeEntry *TEPtr : ValueToGatherNodes.find(V)->second) { 9362 if (TEPtr == TE) 9363 continue; 9364 assert(any_of(TEPtr->Scalars, 9365 [&](Value *V) { return GatheredScalars.contains(V); }) && 9366 "Must contain at least single gathered value."); 9367 assert(TEPtr->UserTreeIndices.size() == 1 && 9368 "Expected only single user of a gather node."); 9369 const EdgeInfo &UseEI = TEPtr->UserTreeIndices.front(); 9370 9371 PHINode *UserPHI = dyn_cast<PHINode>(UseEI.UserTE->getMainOp()); 9372 const Instruction *InsertPt = 9373 UserPHI ? UserPHI->getIncomingBlock(UseEI.EdgeIdx)->getTerminator() 9374 : &getLastInstructionInBundle(UseEI.UserTE); 9375 if (TEInsertPt == InsertPt) { 9376 // If 2 gathers are operands of the same entry (regardless of whether 9377 // user is PHI or else), compare operands indices, use the earlier one 9378 // as the base. 9379 if (TEUseEI.UserTE == UseEI.UserTE && TEUseEI.EdgeIdx < UseEI.EdgeIdx) 9380 continue; 9381 // If the user instruction is used for some reason in different 9382 // vectorized nodes - make it depend on index. 9383 if (TEUseEI.UserTE != UseEI.UserTE && 9384 TEUseEI.UserTE->Idx < UseEI.UserTE->Idx) 9385 continue; 9386 } 9387 9388 // Check if the user node of the TE comes after user node of TEPtr, 9389 // otherwise TEPtr depends on TE. 9390 if ((TEInsertBlock != InsertPt->getParent() || 9391 TEUseEI.EdgeIdx < UseEI.EdgeIdx || TEUseEI.UserTE != UseEI.UserTE) && 9392 !CheckOrdering(InsertPt)) 9393 continue; 9394 VToTEs.insert(TEPtr); 9395 } 9396 if (const TreeEntry *VTE = getTreeEntry(V)) { 9397 Instruction &LastBundleInst = getLastInstructionInBundle(VTE); 9398 if (&LastBundleInst == TEInsertPt || !CheckOrdering(&LastBundleInst)) 9399 continue; 9400 auto It = MinBWs.find(VTE); 9401 // If vectorize node is demoted - do not match. 9402 if (It != MinBWs.end() && 9403 It->second.first != DL->getTypeSizeInBits(V->getType())) 9404 continue; 9405 VToTEs.insert(VTE); 9406 } 9407 if (VToTEs.empty()) 9408 continue; 9409 if (UsedTEs.empty()) { 9410 // The first iteration, just insert the list of nodes to vector. 9411 UsedTEs.push_back(VToTEs); 9412 UsedValuesEntry.try_emplace(V, 0); 9413 } else { 9414 // Need to check if there are any previously used tree nodes which use V. 9415 // If there are no such nodes, consider that we have another one input 9416 // vector. 9417 SmallPtrSet<const TreeEntry *, 4> SavedVToTEs(VToTEs); 9418 unsigned Idx = 0; 9419 for (SmallPtrSet<const TreeEntry *, 4> &Set : UsedTEs) { 9420 // Do we have a non-empty intersection of previously listed tree entries 9421 // and tree entries using current V? 9422 set_intersect(VToTEs, Set); 9423 if (!VToTEs.empty()) { 9424 // Yes, write the new subset and continue analysis for the next 9425 // scalar. 9426 Set.swap(VToTEs); 9427 break; 9428 } 9429 VToTEs = SavedVToTEs; 9430 ++Idx; 9431 } 9432 // No non-empty intersection found - need to add a second set of possible 9433 // source vectors. 
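// For illustration only (hypothetical entries): if scalars %a and %b are both
// contained in tree entry TE1 but scalar %c only in TE2, the intersection for
// %c is empty, so UsedTEs becomes {{TE1}, {TE2}} and the gather is treated as a
// permutation of 2 source vectors.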
9434 if (Idx == UsedTEs.size()) { 9435 // If the number of input vectors is greater than 2 - not a permutation, 9436 // fallback to the regular gather. 9437 // TODO: support multiple reshuffled nodes. 9438 if (UsedTEs.size() == 2) 9439 continue; 9440 UsedTEs.push_back(SavedVToTEs); 9441 Idx = UsedTEs.size() - 1; 9442 } 9443 UsedValuesEntry.try_emplace(V, Idx); 9444 } 9445 } 9446 9447 if (UsedTEs.empty()) { 9448 Entries.clear(); 9449 return std::nullopt; 9450 } 9451 9452 unsigned VF = 0; 9453 if (UsedTEs.size() == 1) { 9454 // Keep the order to avoid non-determinism. 9455 SmallVector<const TreeEntry *> FirstEntries(UsedTEs.front().begin(), 9456 UsedTEs.front().end()); 9457 sort(FirstEntries, [](const TreeEntry *TE1, const TreeEntry *TE2) { 9458 return TE1->Idx < TE2->Idx; 9459 }); 9460 // Try to find the perfect match in another gather node at first. 9461 auto *It = find_if(FirstEntries, [=](const TreeEntry *EntryPtr) { 9462 return EntryPtr->isSame(VL) || EntryPtr->isSame(TE->Scalars); 9463 }); 9464 if (It != FirstEntries.end() && 9465 ((*It)->getVectorFactor() == VL.size() || 9466 ((*It)->getVectorFactor() == TE->Scalars.size() && 9467 TE->ReuseShuffleIndices.size() == VL.size() && 9468 (*It)->isSame(TE->Scalars)))) { 9469 Entries.push_back(*It); 9470 if ((*It)->getVectorFactor() == VL.size()) { 9471 std::iota(std::next(Mask.begin(), Part * VL.size()), 9472 std::next(Mask.begin(), (Part + 1) * VL.size()), 0); 9473 } else { 9474 SmallVector<int> CommonMask = TE->getCommonMask(); 9475 copy(CommonMask, Mask.begin()); 9476 } 9477 // Clear undef scalars. 9478 for (int I = 0, Sz = VL.size(); I < Sz; ++I) 9479 if (isa<PoisonValue>(VL[I])) 9480 Mask[I] = PoisonMaskElem; 9481 return TargetTransformInfo::SK_PermuteSingleSrc; 9482 } 9483 // No perfect match, just shuffle, so choose the first tree node from the 9484 // tree. 9485 Entries.push_back(FirstEntries.front()); 9486 } else { 9487 // Try to find nodes with the same vector factor. 9488 assert(UsedTEs.size() == 2 && "Expected at max 2 permuted entries."); 9489 // Keep the order of tree nodes to avoid non-determinism. 9490 DenseMap<int, const TreeEntry *> VFToTE; 9491 for (const TreeEntry *TE : UsedTEs.front()) { 9492 unsigned VF = TE->getVectorFactor(); 9493 auto It = VFToTE.find(VF); 9494 if (It != VFToTE.end()) { 9495 if (It->second->Idx > TE->Idx) 9496 It->getSecond() = TE; 9497 continue; 9498 } 9499 VFToTE.try_emplace(VF, TE); 9500 } 9501 // Same, keep the order to avoid non-determinism. 9502 SmallVector<const TreeEntry *> SecondEntries(UsedTEs.back().begin(), 9503 UsedTEs.back().end()); 9504 sort(SecondEntries, [](const TreeEntry *TE1, const TreeEntry *TE2) { 9505 return TE1->Idx < TE2->Idx; 9506 }); 9507 for (const TreeEntry *TE : SecondEntries) { 9508 auto It = VFToTE.find(TE->getVectorFactor()); 9509 if (It != VFToTE.end()) { 9510 VF = It->first; 9511 Entries.push_back(It->second); 9512 Entries.push_back(TE); 9513 break; 9514 } 9515 } 9516 // No 2 source vectors with the same vector factor - just choose 2 with max 9517 // index. 
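// For illustration only: if the first set only offers an entry with vector
// factor 4 and the second set only an entry with vector factor 8, no equal-VF
// pair exists, so one entry is taken from each set and VF = max(4, 8) = 8 is
// used when building the final two-source mask below.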
9518 if (Entries.empty()) { 9519 Entries.push_back( 9520 *std::max_element(UsedTEs.front().begin(), UsedTEs.front().end(), 9521 [](const TreeEntry *TE1, const TreeEntry *TE2) { 9522 return TE1->Idx < TE2->Idx; 9523 })); 9524 Entries.push_back(SecondEntries.front()); 9525 VF = std::max(Entries.front()->getVectorFactor(), 9526 Entries.back()->getVectorFactor()); 9527 } 9528 } 9529 9530 bool IsSplatOrUndefs = isSplat(VL) || all_of(VL, UndefValue::classof); 9531 // Checks if the 2 PHIs are compatible in terms of high possibility to be 9532 // vectorized. 9533 auto AreCompatiblePHIs = [&](Value *V, Value *V1) { 9534 auto *PHI = cast<PHINode>(V); 9535 auto *PHI1 = cast<PHINode>(V1); 9536 // Check that all incoming values are compatible/from same parent (if they 9537 // are instructions). 9538 // The incoming values are compatible if they all are constants, or 9539 // instruction with the same/alternate opcodes from the same basic block. 9540 for (int I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) { 9541 Value *In = PHI->getIncomingValue(I); 9542 Value *In1 = PHI1->getIncomingValue(I); 9543 if (isConstant(In) && isConstant(In1)) 9544 continue; 9545 if (!getSameOpcode({In, In1}, *TLI).getOpcode()) 9546 return false; 9547 if (cast<Instruction>(In)->getParent() != 9548 cast<Instruction>(In1)->getParent()) 9549 return false; 9550 } 9551 return true; 9552 }; 9553 // Check if the value can be ignored during analysis for shuffled gathers. 9554 // We suppose it is better to ignore instruction, which do not form splats, 9555 // are not vectorized/not extractelements (these instructions will be handled 9556 // by extractelements processing) or may form vector node in future. 9557 auto MightBeIgnored = [=](Value *V) { 9558 auto *I = dyn_cast<Instruction>(V); 9559 return I && !IsSplatOrUndefs && !ScalarToTreeEntry.count(I) && 9560 !isVectorLikeInstWithConstOps(I) && 9561 !areAllUsersVectorized(I, UserIgnoreList) && isSimple(I); 9562 }; 9563 // Check that the neighbor instruction may form a full vector node with the 9564 // current instruction V. It is possible, if they have same/alternate opcode 9565 // and same parent basic block. 9566 auto NeighborMightBeIgnored = [&](Value *V, int Idx) { 9567 Value *V1 = VL[Idx]; 9568 bool UsedInSameVTE = false; 9569 auto It = UsedValuesEntry.find(V1); 9570 if (It != UsedValuesEntry.end()) 9571 UsedInSameVTE = It->second == UsedValuesEntry.find(V)->second; 9572 return V != V1 && MightBeIgnored(V1) && !UsedInSameVTE && 9573 getSameOpcode({V, V1}, *TLI).getOpcode() && 9574 cast<Instruction>(V)->getParent() == 9575 cast<Instruction>(V1)->getParent() && 9576 (!isa<PHINode>(V1) || AreCompatiblePHIs(V, V1)); 9577 }; 9578 // Build a shuffle mask for better cost estimation and vector emission. 9579 SmallBitVector UsedIdxs(Entries.size()); 9580 SmallVector<std::pair<unsigned, int>> EntryLanes; 9581 for (int I = 0, E = VL.size(); I < E; ++I) { 9582 Value *V = VL[I]; 9583 auto It = UsedValuesEntry.find(V); 9584 if (It == UsedValuesEntry.end()) 9585 continue; 9586 // Do not try to shuffle scalars, if they are constants, or instructions 9587 // that can be vectorized as a result of the following vector build 9588 // vectorization. 
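// For illustration only: in VL = { %a, %b, 5, %c } the constant 5 is not
// assigned to an entry lane and remains a gathered constant; likewise, two
// neighboring not-yet-vectorized instructions with the same opcode are skipped
// here, since they may later form their own vector node.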
9589 if (isConstant(V) || (MightBeIgnored(V) && 9590 ((I > 0 && NeighborMightBeIgnored(V, I - 1)) || 9591 (I != E - 1 && NeighborMightBeIgnored(V, I + 1))))) 9592 continue; 9593 unsigned Idx = It->second; 9594 EntryLanes.emplace_back(Idx, I); 9595 UsedIdxs.set(Idx); 9596 } 9597 // Iterate through all shuffled scalars and select entries, which can be used 9598 // for final shuffle. 9599 SmallVector<const TreeEntry *> TempEntries; 9600 for (unsigned I = 0, Sz = Entries.size(); I < Sz; ++I) { 9601 if (!UsedIdxs.test(I)) 9602 continue; 9603 // Fix the entry number for the given scalar. If it is the first entry, set 9604 // Pair.first to 0, otherwise to 1 (currently select at max 2 nodes). 9605 // These indices are used when calculating final shuffle mask as the vector 9606 // offset. 9607 for (std::pair<unsigned, int> &Pair : EntryLanes) 9608 if (Pair.first == I) 9609 Pair.first = TempEntries.size(); 9610 TempEntries.push_back(Entries[I]); 9611 } 9612 Entries.swap(TempEntries); 9613 if (EntryLanes.size() == Entries.size() && 9614 !VL.equals(ArrayRef(TE->Scalars) 9615 .slice(Part * VL.size(), 9616 std::min<int>(VL.size(), TE->Scalars.size())))) { 9617 // We may have here 1 or 2 entries only. If the number of scalars is equal 9618 // to the number of entries, no need to do the analysis, it is not very 9619 // profitable. Since VL is not the same as TE->Scalars, it means we already 9620 // have some shuffles before. Cut off not profitable case. 9621 Entries.clear(); 9622 return std::nullopt; 9623 } 9624 // Build the final mask, check for the identity shuffle, if possible. 9625 bool IsIdentity = Entries.size() == 1; 9626 // Pair.first is the offset to the vector, while Pair.second is the index of 9627 // scalar in the list. 9628 for (const std::pair<unsigned, int> &Pair : EntryLanes) { 9629 unsigned Idx = Part * VL.size() + Pair.second; 9630 Mask[Idx] = Pair.first * VF + 9631 Entries[Pair.first]->findLaneForValue(VL[Pair.second]); 9632 IsIdentity &= Mask[Idx] == Pair.second; 9633 } 9634 switch (Entries.size()) { 9635 case 1: 9636 if (IsIdentity || EntryLanes.size() > 1 || VL.size() <= 2) 9637 return TargetTransformInfo::SK_PermuteSingleSrc; 9638 break; 9639 case 2: 9640 if (EntryLanes.size() > 2 || VL.size() <= 2) 9641 return TargetTransformInfo::SK_PermuteTwoSrc; 9642 break; 9643 default: 9644 break; 9645 } 9646 Entries.clear(); 9647 // Clear the corresponding mask elements. 9648 std::fill(std::next(Mask.begin(), Part * VL.size()), 9649 std::next(Mask.begin(), (Part + 1) * VL.size()), PoisonMaskElem); 9650 return std::nullopt; 9651 } 9652 9653 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> 9654 BoUpSLP::isGatherShuffledEntry( 9655 const TreeEntry *TE, ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask, 9656 SmallVectorImpl<SmallVector<const TreeEntry *>> &Entries, 9657 unsigned NumParts) { 9658 assert(NumParts > 0 && NumParts < VL.size() && 9659 "Expected positive number of registers."); 9660 Entries.clear(); 9661 // No need to check for the topmost gather node. 
9662 if (TE == VectorizableTree.front().get()) 9663 return {}; 9664 Mask.assign(VL.size(), PoisonMaskElem); 9665 assert(TE->UserTreeIndices.size() == 1 && 9666 "Expected only single user of the gather node."); 9667 assert(VL.size() % NumParts == 0 && 9668 "Number of scalars must be divisible by NumParts."); 9669 unsigned SliceSize = VL.size() / NumParts; 9670 SmallVector<std::optional<TTI::ShuffleKind>> Res; 9671 for (unsigned Part = 0; Part < NumParts; ++Part) { 9672 ArrayRef<Value *> SubVL = VL.slice(Part * SliceSize, SliceSize); 9673 SmallVectorImpl<const TreeEntry *> &SubEntries = Entries.emplace_back(); 9674 std::optional<TTI::ShuffleKind> SubRes = 9675 isGatherShuffledSingleRegisterEntry(TE, SubVL, Mask, SubEntries, Part); 9676 if (!SubRes) 9677 SubEntries.clear(); 9678 Res.push_back(SubRes); 9679 if (SubEntries.size() == 1 && *SubRes == TTI::SK_PermuteSingleSrc && 9680 SubEntries.front()->getVectorFactor() == VL.size() && 9681 (SubEntries.front()->isSame(TE->Scalars) || 9682 SubEntries.front()->isSame(VL))) { 9683 SmallVector<const TreeEntry *> LocalSubEntries; 9684 LocalSubEntries.swap(SubEntries); 9685 Entries.clear(); 9686 Res.clear(); 9687 std::iota(Mask.begin(), Mask.end(), 0); 9688 // Clear undef scalars. 9689 for (int I = 0, Sz = VL.size(); I < Sz; ++I) 9690 if (isa<PoisonValue>(VL[I])) 9691 Mask[I] = PoisonMaskElem; 9692 Entries.emplace_back(1, LocalSubEntries.front()); 9693 Res.push_back(TargetTransformInfo::SK_PermuteSingleSrc); 9694 return Res; 9695 } 9696 } 9697 if (all_of(Res, 9698 [](const std::optional<TTI::ShuffleKind> &SK) { return !SK; })) { 9699 Entries.clear(); 9700 return {}; 9701 } 9702 return Res; 9703 } 9704 9705 InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL, 9706 bool ForPoisonSrc) const { 9707 // Find the type of the operands in VL. 9708 Type *ScalarTy = VL[0]->getType(); 9709 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 9710 ScalarTy = SI->getValueOperand()->getType(); 9711 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 9712 bool DuplicateNonConst = false; 9713 // Find the cost of inserting/extracting values from the vector. 9714 // Check if the same elements are inserted several times and count them as 9715 // shuffle candidates. 9716 APInt ShuffledElements = APInt::getZero(VL.size()); 9717 DenseSet<Value *> UniqueElements; 9718 constexpr TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 9719 InstructionCost Cost; 9720 auto EstimateInsertCost = [&](unsigned I, Value *V) { 9721 if (!ForPoisonSrc) 9722 Cost += 9723 TTI->getVectorInstrCost(Instruction::InsertElement, VecTy, CostKind, 9724 I, Constant::getNullValue(VecTy), V); 9725 }; 9726 for (unsigned I = 0, E = VL.size(); I < E; ++I) { 9727 Value *V = VL[I]; 9728 // No need to shuffle duplicates for constants. 9729 if ((ForPoisonSrc && isConstant(V)) || isa<UndefValue>(V)) { 9730 ShuffledElements.setBit(I); 9731 continue; 9732 } 9733 if (!UniqueElements.insert(V).second) { 9734 DuplicateNonConst = true; 9735 ShuffledElements.setBit(I); 9736 continue; 9737 } 9738 EstimateInsertCost(I, V); 9739 } 9740 if (ForPoisonSrc) 9741 Cost = 9742 TTI->getScalarizationOverhead(VecTy, ~ShuffledElements, /*Insert*/ true, 9743 /*Extract*/ false, CostKind); 9744 if (DuplicateNonConst) 9745 Cost += 9746 TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy); 9747 return Cost; 9748 } 9749 9750 // Perform operand reordering on the instructions in VL and return the reordered 9751 // operands in Left and Right. 
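// For example (illustrative): for VL = { add %a0, %b0 ; add %b1, %a1 } the
// commutative operands of the second add may be swapped so that
// Left = { %a0, %a1 } and Right = { %b0, %b1 }, making each side more likely to
// form a vectorizable bundle.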
9752 void BoUpSLP::reorderInputsAccordingToOpcode( 9753 ArrayRef<Value *> VL, SmallVectorImpl<Value *> &Left, 9754 SmallVectorImpl<Value *> &Right, const TargetLibraryInfo &TLI, 9755 const DataLayout &DL, ScalarEvolution &SE, const BoUpSLP &R) { 9756 if (VL.empty()) 9757 return; 9758 VLOperands Ops(VL, TLI, DL, SE, R); 9759 // Reorder the operands in place. 9760 Ops.reorder(); 9761 Left = Ops.getVL(0); 9762 Right = Ops.getVL(1); 9763 } 9764 9765 Instruction &BoUpSLP::getLastInstructionInBundle(const TreeEntry *E) { 9766 auto &Res = EntryToLastInstruction.FindAndConstruct(E); 9767 if (Res.second) 9768 return *Res.second; 9769 // Get the basic block this bundle is in. All instructions in the bundle 9770 // should be in this block (except for extractelement-like instructions with 9771 // constant indeces). 9772 auto *Front = E->getMainOp(); 9773 auto *BB = Front->getParent(); 9774 assert(llvm::all_of(E->Scalars, [=](Value *V) -> bool { 9775 if (E->getOpcode() == Instruction::GetElementPtr && 9776 !isa<GetElementPtrInst>(V)) 9777 return true; 9778 auto *I = cast<Instruction>(V); 9779 return !E->isOpcodeOrAlt(I) || I->getParent() == BB || 9780 isVectorLikeInstWithConstOps(I); 9781 })); 9782 9783 auto FindLastInst = [&]() { 9784 Instruction *LastInst = Front; 9785 for (Value *V : E->Scalars) { 9786 auto *I = dyn_cast<Instruction>(V); 9787 if (!I) 9788 continue; 9789 if (LastInst->getParent() == I->getParent()) { 9790 if (LastInst->comesBefore(I)) 9791 LastInst = I; 9792 continue; 9793 } 9794 assert(((E->getOpcode() == Instruction::GetElementPtr && 9795 !isa<GetElementPtrInst>(I)) || 9796 (isVectorLikeInstWithConstOps(LastInst) && 9797 isVectorLikeInstWithConstOps(I))) && 9798 "Expected vector-like or non-GEP in GEP node insts only."); 9799 if (!DT->isReachableFromEntry(LastInst->getParent())) { 9800 LastInst = I; 9801 continue; 9802 } 9803 if (!DT->isReachableFromEntry(I->getParent())) 9804 continue; 9805 auto *NodeA = DT->getNode(LastInst->getParent()); 9806 auto *NodeB = DT->getNode(I->getParent()); 9807 assert(NodeA && "Should only process reachable instructions"); 9808 assert(NodeB && "Should only process reachable instructions"); 9809 assert((NodeA == NodeB) == 9810 (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) && 9811 "Different nodes should have different DFS numbers"); 9812 if (NodeA->getDFSNumIn() < NodeB->getDFSNumIn()) 9813 LastInst = I; 9814 } 9815 BB = LastInst->getParent(); 9816 return LastInst; 9817 }; 9818 9819 auto FindFirstInst = [&]() { 9820 Instruction *FirstInst = Front; 9821 for (Value *V : E->Scalars) { 9822 auto *I = dyn_cast<Instruction>(V); 9823 if (!I) 9824 continue; 9825 if (FirstInst->getParent() == I->getParent()) { 9826 if (I->comesBefore(FirstInst)) 9827 FirstInst = I; 9828 continue; 9829 } 9830 assert(((E->getOpcode() == Instruction::GetElementPtr && 9831 !isa<GetElementPtrInst>(I)) || 9832 (isVectorLikeInstWithConstOps(FirstInst) && 9833 isVectorLikeInstWithConstOps(I))) && 9834 "Expected vector-like or non-GEP in GEP node insts only."); 9835 if (!DT->isReachableFromEntry(FirstInst->getParent())) { 9836 FirstInst = I; 9837 continue; 9838 } 9839 if (!DT->isReachableFromEntry(I->getParent())) 9840 continue; 9841 auto *NodeA = DT->getNode(FirstInst->getParent()); 9842 auto *NodeB = DT->getNode(I->getParent()); 9843 assert(NodeA && "Should only process reachable instructions"); 9844 assert(NodeB && "Should only process reachable instructions"); 9845 assert((NodeA == NodeB) == 9846 (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) && 9847 "Different nodes should have 
different DFS numbers"); 9848 if (NodeA->getDFSNumIn() > NodeB->getDFSNumIn()) 9849 FirstInst = I; 9850 } 9851 return FirstInst; 9852 }; 9853 9854 // Set the insert point to the beginning of the basic block if the entry 9855 // should not be scheduled. 9856 if (doesNotNeedToSchedule(E->Scalars) || 9857 (E->State != TreeEntry::NeedToGather && 9858 all_of(E->Scalars, isVectorLikeInstWithConstOps))) { 9859 if ((E->getOpcode() == Instruction::GetElementPtr && 9860 any_of(E->Scalars, 9861 [](Value *V) { 9862 return !isa<GetElementPtrInst>(V) && isa<Instruction>(V); 9863 })) || 9864 all_of(E->Scalars, [](Value *V) { 9865 return !isVectorLikeInstWithConstOps(V) && isUsedOutsideBlock(V); 9866 })) 9867 Res.second = FindLastInst(); 9868 else 9869 Res.second = FindFirstInst(); 9870 return *Res.second; 9871 } 9872 9873 // Find the last instruction. The common case should be that BB has been 9874 // scheduled, and the last instruction is VL.back(). So we start with 9875 // VL.back() and iterate over schedule data until we reach the end of the 9876 // bundle. The end of the bundle is marked by null ScheduleData. 9877 if (BlocksSchedules.count(BB)) { 9878 Value *V = E->isOneOf(E->Scalars.back()); 9879 if (doesNotNeedToBeScheduled(V)) 9880 V = *find_if_not(E->Scalars, doesNotNeedToBeScheduled); 9881 auto *Bundle = BlocksSchedules[BB]->getScheduleData(V); 9882 if (Bundle && Bundle->isPartOfBundle()) 9883 for (; Bundle; Bundle = Bundle->NextInBundle) 9884 if (Bundle->OpValue == Bundle->Inst) 9885 Res.second = Bundle->Inst; 9886 } 9887 9888 // LastInst can still be null at this point if there's either not an entry 9889 // for BB in BlocksSchedules or there's no ScheduleData available for 9890 // VL.back(). This can be the case if buildTree_rec aborts for various 9891 // reasons (e.g., the maximum recursion depth is reached, the maximum region 9892 // size is reached, etc.). ScheduleData is initialized in the scheduling 9893 // "dry-run". 9894 // 9895 // If this happens, we can still find the last instruction by brute force. We 9896 // iterate forwards from Front (inclusive) until we either see all 9897 // instructions in the bundle or reach the end of the block. If Front is the 9898 // last instruction in program order, LastInst will be set to Front, and we 9899 // will visit all the remaining instructions in the block. 9900 // 9901 // One of the reasons we exit early from buildTree_rec is to place an upper 9902 // bound on compile-time. Thus, taking an additional compile-time hit here is 9903 // not ideal. However, this should be exceedingly rare since it requires that 9904 // we both exit early from buildTree_rec and that the bundle be out-of-order 9905 // (causing us to iterate all the way to the end of the block). 9906 if (!Res.second) 9907 Res.second = FindLastInst(); 9908 assert(Res.second && "Failed to find last instruction in bundle"); 9909 return *Res.second; 9910 } 9911 9912 void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) { 9913 auto *Front = E->getMainOp(); 9914 Instruction *LastInst = &getLastInstructionInBundle(E); 9915 assert(LastInst && "Failed to find last instruction in bundle"); 9916 BasicBlock::iterator LastInstIt = LastInst->getIterator(); 9917 // If the instruction is PHI, set the insert point after all the PHIs. 
9918 bool IsPHI = isa<PHINode>(LastInst); 9919 if (IsPHI) 9920 LastInstIt = LastInst->getParent()->getFirstNonPHIIt(); 9921 if (IsPHI || (E->State != TreeEntry::NeedToGather && 9922 doesNotNeedToSchedule(E->Scalars))) { 9923 Builder.SetInsertPoint(LastInst->getParent(), LastInstIt); 9924 } else { 9925 // Set the insertion point after the last instruction in the bundle. Set the 9926 // debug location to Front. 9927 Builder.SetInsertPoint( 9928 LastInst->getParent(), 9929 LastInst->getNextNonDebugInstruction()->getIterator()); 9930 } 9931 Builder.SetCurrentDebugLocation(Front->getDebugLoc()); 9932 } 9933 9934 Value *BoUpSLP::gather(ArrayRef<Value *> VL, Value *Root) { 9935 // List of instructions/lanes from current block and/or the blocks which are 9936 // part of the current loop. These instructions will be inserted at the end to 9937 // make it possible to optimize loops and hoist invariant instructions out of 9938 // the loops body with better chances for success. 9939 SmallVector<std::pair<Value *, unsigned>, 4> PostponedInsts; 9940 SmallSet<int, 4> PostponedIndices; 9941 Loop *L = LI->getLoopFor(Builder.GetInsertBlock()); 9942 auto &&CheckPredecessor = [](BasicBlock *InstBB, BasicBlock *InsertBB) { 9943 SmallPtrSet<BasicBlock *, 4> Visited; 9944 while (InsertBB && InsertBB != InstBB && Visited.insert(InsertBB).second) 9945 InsertBB = InsertBB->getSinglePredecessor(); 9946 return InsertBB && InsertBB == InstBB; 9947 }; 9948 for (int I = 0, E = VL.size(); I < E; ++I) { 9949 if (auto *Inst = dyn_cast<Instruction>(VL[I])) 9950 if ((CheckPredecessor(Inst->getParent(), Builder.GetInsertBlock()) || 9951 getTreeEntry(Inst) || 9952 (L && (!Root || L->isLoopInvariant(Root)) && L->contains(Inst))) && 9953 PostponedIndices.insert(I).second) 9954 PostponedInsts.emplace_back(Inst, I); 9955 } 9956 9957 auto &&CreateInsertElement = [this](Value *Vec, Value *V, unsigned Pos) { 9958 Vec = Builder.CreateInsertElement(Vec, V, Builder.getInt32(Pos)); 9959 auto *InsElt = dyn_cast<InsertElementInst>(Vec); 9960 if (!InsElt) 9961 return Vec; 9962 GatherShuffleExtractSeq.insert(InsElt); 9963 CSEBlocks.insert(InsElt->getParent()); 9964 // Add to our 'need-to-extract' list. 9965 if (isa<Instruction>(V)) { 9966 if (TreeEntry *Entry = getTreeEntry(V)) { 9967 // Find which lane we need to extract. 9968 unsigned FoundLane = Entry->findLaneForValue(V); 9969 ExternalUses.emplace_back(V, InsElt, FoundLane); 9970 } 9971 } 9972 return Vec; 9973 }; 9974 Value *Val0 = 9975 isa<StoreInst>(VL[0]) ? cast<StoreInst>(VL[0])->getValueOperand() : VL[0]; 9976 FixedVectorType *VecTy = FixedVectorType::get(Val0->getType(), VL.size()); 9977 Value *Vec = Root ? Root : PoisonValue::get(VecTy); 9978 SmallVector<int> NonConsts; 9979 // Insert constant values at first. 9980 for (int I = 0, E = VL.size(); I < E; ++I) { 9981 if (PostponedIndices.contains(I)) 9982 continue; 9983 if (!isConstant(VL[I])) { 9984 NonConsts.push_back(I); 9985 continue; 9986 } 9987 if (Root) { 9988 if (!isa<UndefValue>(VL[I])) { 9989 NonConsts.push_back(I); 9990 continue; 9991 } 9992 if (isa<PoisonValue>(VL[I])) 9993 continue; 9994 if (auto *SV = dyn_cast<ShuffleVectorInst>(Root)) { 9995 if (SV->getMaskValue(I) == PoisonMaskElem) 9996 continue; 9997 } 9998 } 9999 Vec = CreateInsertElement(Vec, VL[I], I); 10000 } 10001 // Insert non-constant values. 10002 for (int I : NonConsts) 10003 Vec = CreateInsertElement(Vec, VL[I], I); 10004 // Append instructions, which are/may be part of the loop, in the end to make 10005 // it possible to hoist non-loop-based instructions. 
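// For illustration only (hypothetical IR): for VL = { 1.0, %inloop }, where
// %inloop is defined inside the current loop, the emitted chain is
//   %v0 = insertelement <2 x float> poison, float 1.0, i32 0   ; invariant prefix
//   %v1 = insertelement <2 x float> %v0, float %inloop, i32 1  ; postponed
// so the loop-invariant prefix can still be hoisted out of the loop.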
10006 for (const std::pair<Value *, unsigned> &Pair : PostponedInsts)
10007 Vec = CreateInsertElement(Vec, Pair.first, Pair.second);
10008
10009 return Vec;
10010 }
10011
10012 /// Merges shuffle masks and emits the final shuffle instruction, if required.
10013 /// It supports shuffling of 2 input vectors. Shuffle emission is lazy: the
10014 /// actual shuffle instruction is generated only if it is really required;
10015 /// otherwise its emission is delayed until the end of the process, to reduce
10016 /// the number of emitted instructions and to simplify further
10017 /// analysis/transformations.
10018 /// The class also looks through the previously emitted shuffle instructions
10019 /// and properly marks indices in the mask as undef.
10020 /// For example, given the code
10021 /// \code
10022 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0>
10023 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0>
10024 /// \endcode
10025 /// and if a shuffle of %s1 and %s2 with mask <1, 0, 3, 2> needs to be emitted,
10026 /// it will look through %s1 and %s2 and emit
10027 /// \code
10028 /// %res = shufflevector <2 x ty> %0, %1, <0, 1, 2, 3>
10029 /// \endcode
10030 /// instead.
10031 /// If the 2 operands are of different sizes, the smaller one is resized and
10032 /// the mask is recalculated properly.
10033 /// For example, given the code
10034 /// \code
10035 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0, 1, 0>
10036 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0, 1, 0>
10037 /// \endcode
10038 /// and if a shuffle of %s1 and %s2 with mask <1, 0, 5, 4> needs to be emitted,
10039 /// it will look through %s1 and %s2 and emit
10040 /// \code
10041 /// %res = shufflevector <2 x ty> %0, %1, <0, 1, 2, 3>
10042 /// \endcode
10043 /// instead.
10044 class BoUpSLP::ShuffleInstructionBuilder final : public BaseShuffleAnalysis {
10045 bool IsFinalized = false;
10046 /// Combined mask for all applied operands and masks. It is built during
10047 /// analysis and actual emission of shuffle vector instructions.
10048 SmallVector<int> CommonMask;
10049 /// List of operands for the shuffle vector instruction. It holds at most 2
10050 /// operands; if a 3rd one is going to be added, the first 2 are combined into
10051 /// a shuffle with the \p CommonMask mask, the first operand is set to the
10052 /// resulting shuffle and the second operand is set to the newly added
10053 /// operand. The \p CommonMask is transformed accordingly after that.
10054 SmallVector<Value *, 2> InVectors;
10055 IRBuilderBase &Builder;
10056 BoUpSLP &R;
10057
10058 class ShuffleIRBuilder {
10059 IRBuilderBase &Builder;
10060 /// Holds all of the instructions that we gathered.
10061 SetVector<Instruction *> &GatherShuffleExtractSeq;
10062 /// A list of blocks that we are going to CSE.
10063 DenseSet<BasicBlock *> &CSEBlocks;
10064
10065 public:
10066 ShuffleIRBuilder(IRBuilderBase &Builder,
10067 SetVector<Instruction *> &GatherShuffleExtractSeq,
10068 DenseSet<BasicBlock *> &CSEBlocks)
10069 : Builder(Builder), GatherShuffleExtractSeq(GatherShuffleExtractSeq),
10070 CSEBlocks(CSEBlocks) {}
10071 ~ShuffleIRBuilder() = default;
10072 /// Creates a shufflevector for the 2 operands with the given mask.
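/// For illustration only (hypothetical IR): for two <2 x i32> operands and
/// mask <0, 2, 1, 3> this emits
/// \code
/// %vec = shufflevector <2 x i32> %v1, <2 x i32> %v2,
///                      <4 x i32> <i32 0, i32 2, i32 1, i32 3>
/// \endcode
/// and records the new instruction for later CSE of gather/shuffle sequences.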
10073 Value *createShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask) { 10074 Value *Vec = Builder.CreateShuffleVector(V1, V2, Mask); 10075 if (auto *I = dyn_cast<Instruction>(Vec)) { 10076 GatherShuffleExtractSeq.insert(I); 10077 CSEBlocks.insert(I->getParent()); 10078 } 10079 return Vec; 10080 } 10081 /// Creates permutation of the single vector operand with the given mask, if 10082 /// it is not identity mask. 10083 Value *createShuffleVector(Value *V1, ArrayRef<int> Mask) { 10084 if (Mask.empty()) 10085 return V1; 10086 unsigned VF = Mask.size(); 10087 unsigned LocalVF = cast<FixedVectorType>(V1->getType())->getNumElements(); 10088 if (VF == LocalVF && ShuffleVectorInst::isIdentityMask(Mask, VF)) 10089 return V1; 10090 Value *Vec = Builder.CreateShuffleVector(V1, Mask); 10091 if (auto *I = dyn_cast<Instruction>(Vec)) { 10092 GatherShuffleExtractSeq.insert(I); 10093 CSEBlocks.insert(I->getParent()); 10094 } 10095 return Vec; 10096 } 10097 Value *createIdentity(Value *V) { return V; } 10098 Value *createPoison(Type *Ty, unsigned VF) { 10099 return PoisonValue::get(FixedVectorType::get(Ty, VF)); 10100 } 10101 /// Resizes 2 input vector to match the sizes, if the they are not equal 10102 /// yet. The smallest vector is resized to the size of the larger vector. 10103 void resizeToMatch(Value *&V1, Value *&V2) { 10104 if (V1->getType() == V2->getType()) 10105 return; 10106 int V1VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 10107 int V2VF = cast<FixedVectorType>(V2->getType())->getNumElements(); 10108 int VF = std::max(V1VF, V2VF); 10109 int MinVF = std::min(V1VF, V2VF); 10110 SmallVector<int> IdentityMask(VF, PoisonMaskElem); 10111 std::iota(IdentityMask.begin(), std::next(IdentityMask.begin(), MinVF), 10112 0); 10113 Value *&Op = MinVF == V1VF ? V1 : V2; 10114 Op = Builder.CreateShuffleVector(Op, IdentityMask); 10115 if (auto *I = dyn_cast<Instruction>(Op)) { 10116 GatherShuffleExtractSeq.insert(I); 10117 CSEBlocks.insert(I->getParent()); 10118 } 10119 if (MinVF == V1VF) 10120 V1 = Op; 10121 else 10122 V2 = Op; 10123 } 10124 }; 10125 10126 /// Smart shuffle instruction emission, walks through shuffles trees and 10127 /// tries to find the best matching vector for the actual shuffle 10128 /// instruction. 10129 Value *createShuffle(Value *V1, Value *V2, ArrayRef<int> Mask) { 10130 assert(V1 && "Expected at least one vector value."); 10131 ShuffleIRBuilder ShuffleBuilder(Builder, R.GatherShuffleExtractSeq, 10132 R.CSEBlocks); 10133 return BaseShuffleAnalysis::createShuffle<Value *>(V1, V2, Mask, 10134 ShuffleBuilder); 10135 } 10136 10137 /// Transforms mask \p CommonMask per given \p Mask to make proper set after 10138 /// shuffle emission. 10139 static void transformMaskAfterShuffle(MutableArrayRef<int> CommonMask, 10140 ArrayRef<int> Mask) { 10141 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10142 if (Mask[Idx] != PoisonMaskElem) 10143 CommonMask[Idx] = Idx; 10144 } 10145 10146 public: 10147 ShuffleInstructionBuilder(IRBuilderBase &Builder, BoUpSLP &R) 10148 : Builder(Builder), R(R) {} 10149 10150 /// Adjusts extractelements after reusing them. 
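/// For illustration only: if every scalar of \p E is an extractelement from the
/// same, already vectorized, source, that vectorized value is returned directly
/// as the base and extracts used only by the vectorized code are erased; if
/// several distinct bases are used across registers, the parts are shuffled and
/// joined into one wide vector and \p UseVecBaseAsInput is set.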
10151 Value *adjustExtracts(const TreeEntry *E, MutableArrayRef<int> Mask, 10152 ArrayRef<std::optional<TTI::ShuffleKind>> ShuffleKinds, 10153 unsigned NumParts, bool &UseVecBaseAsInput) { 10154 UseVecBaseAsInput = false; 10155 SmallPtrSet<Value *, 4> UniqueBases; 10156 Value *VecBase = nullptr; 10157 for (int I = 0, Sz = Mask.size(); I < Sz; ++I) { 10158 int Idx = Mask[I]; 10159 if (Idx == PoisonMaskElem) 10160 continue; 10161 auto *EI = cast<ExtractElementInst>(E->Scalars[I]); 10162 VecBase = EI->getVectorOperand(); 10163 if (const TreeEntry *TE = R.getTreeEntry(VecBase)) 10164 VecBase = TE->VectorizedValue; 10165 assert(VecBase && "Expected vectorized value."); 10166 UniqueBases.insert(VecBase); 10167 // If the only one use is vectorized - can delete the extractelement 10168 // itself. 10169 if (!EI->hasOneUse() || any_of(EI->users(), [&](User *U) { 10170 return !R.ScalarToTreeEntry.count(U); 10171 })) 10172 continue; 10173 R.eraseInstruction(EI); 10174 } 10175 if (NumParts == 1 || UniqueBases.size() == 1) 10176 return VecBase; 10177 UseVecBaseAsInput = true; 10178 auto TransformToIdentity = [](MutableArrayRef<int> Mask) { 10179 for (auto [I, Idx] : enumerate(Mask)) 10180 if (Idx != PoisonMaskElem) 10181 Idx = I; 10182 }; 10183 // Perform multi-register vector shuffle, joining them into a single virtual 10184 // long vector. 10185 // Need to shuffle each part independently and then insert all this parts 10186 // into a long virtual vector register, forming the original vector. 10187 Value *Vec = nullptr; 10188 SmallVector<int> VecMask(Mask.size(), PoisonMaskElem); 10189 unsigned SliceSize = E->Scalars.size() / NumParts; 10190 for (unsigned Part = 0; Part < NumParts; ++Part) { 10191 ArrayRef<Value *> VL = 10192 ArrayRef(E->Scalars).slice(Part * SliceSize, SliceSize); 10193 MutableArrayRef<int> SubMask = Mask.slice(Part * SliceSize, SliceSize); 10194 constexpr int MaxBases = 2; 10195 SmallVector<Value *, MaxBases> Bases(MaxBases); 10196 #ifndef NDEBUG 10197 int PrevSize = 0; 10198 #endif // NDEBUG 10199 for (const auto [I, V]: enumerate(VL)) { 10200 if (SubMask[I] == PoisonMaskElem) 10201 continue; 10202 Value *VecOp = cast<ExtractElementInst>(V)->getVectorOperand(); 10203 if (const TreeEntry *TE = R.getTreeEntry(VecOp)) 10204 VecOp = TE->VectorizedValue; 10205 assert(VecOp && "Expected vectorized value."); 10206 const int Size = 10207 cast<FixedVectorType>(VecOp->getType())->getNumElements(); 10208 #ifndef NDEBUG 10209 assert((PrevSize == Size || PrevSize == 0) && 10210 "Expected vectors of the same size."); 10211 PrevSize = Size; 10212 #endif // NDEBUG 10213 Bases[SubMask[I] < Size ? 
0 : 1] = VecOp; 10214 } 10215 if (!Bases.front()) 10216 continue; 10217 Value *SubVec; 10218 if (Bases.back()) { 10219 SubVec = createShuffle(Bases.front(), Bases.back(), SubMask); 10220 TransformToIdentity(SubMask); 10221 } else { 10222 SubVec = Bases.front(); 10223 } 10224 if (!Vec) { 10225 Vec = SubVec; 10226 assert((Part == 0 || all_of(seq<unsigned>(0, Part), 10227 [&](unsigned P) { 10228 ArrayRef<int> SubMask = 10229 Mask.slice(P * SliceSize, SliceSize); 10230 return all_of(SubMask, [](int Idx) { 10231 return Idx == PoisonMaskElem; 10232 }); 10233 })) && 10234 "Expected first part or all previous parts masked."); 10235 copy(SubMask, std::next(VecMask.begin(), Part * SliceSize)); 10236 } else { 10237 unsigned VF = cast<FixedVectorType>(Vec->getType())->getNumElements(); 10238 if (Vec->getType() != SubVec->getType()) { 10239 unsigned SubVecVF = 10240 cast<FixedVectorType>(SubVec->getType())->getNumElements(); 10241 VF = std::max(VF, SubVecVF); 10242 } 10243 // Adjust SubMask. 10244 for (auto [I, Idx] : enumerate(SubMask)) 10245 if (Idx != PoisonMaskElem) 10246 Idx += VF; 10247 copy(SubMask, std::next(VecMask.begin(), Part * SliceSize)); 10248 Vec = createShuffle(Vec, SubVec, VecMask); 10249 TransformToIdentity(VecMask); 10250 } 10251 } 10252 copy(VecMask, Mask.begin()); 10253 return Vec; 10254 } 10255 /// Checks if the specified entry \p E needs to be delayed because of its 10256 /// dependency nodes. 10257 std::optional<Value *> 10258 needToDelay(const TreeEntry *E, 10259 ArrayRef<SmallVector<const TreeEntry *>> Deps) const { 10260 // No need to delay emission if all deps are ready. 10261 if (all_of(Deps, [](ArrayRef<const TreeEntry *> TEs) { 10262 return all_of( 10263 TEs, [](const TreeEntry *TE) { return TE->VectorizedValue; }); 10264 })) 10265 return std::nullopt; 10266 // Postpone gather emission, will be emitted after the end of the 10267 // process to keep correct order. 10268 auto *VecTy = FixedVectorType::get(E->Scalars.front()->getType(), 10269 E->getVectorFactor()); 10270 return Builder.CreateAlignedLoad( 10271 VecTy, PoisonValue::get(PointerType::getUnqual(VecTy->getContext())), 10272 MaybeAlign()); 10273 } 10274 /// Adds 2 input vectors (in form of tree entries) and the mask for their 10275 /// shuffling. 10276 void add(const TreeEntry &E1, const TreeEntry &E2, ArrayRef<int> Mask) { 10277 add(E1.VectorizedValue, E2.VectorizedValue, Mask); 10278 } 10279 /// Adds single input vector (in form of tree entry) and the mask for its 10280 /// shuffling. 10281 void add(const TreeEntry &E1, ArrayRef<int> Mask) { 10282 add(E1.VectorizedValue, Mask); 10283 } 10284 /// Adds 2 input vectors and the mask for their shuffling. 
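/// For illustration only: on the first call the two vectors and \p Mask are
/// simply recorded in \p InVectors and \p CommonMask; on later calls the
/// already accumulated operands are pre-shuffled into a single vector and the
/// new pair is folded in, with its lanes offset past the existing common mask.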
10285 void add(Value *V1, Value *V2, ArrayRef<int> Mask) { 10286 assert(V1 && V2 && !Mask.empty() && "Expected non-empty input vectors."); 10287 if (InVectors.empty()) { 10288 InVectors.push_back(V1); 10289 InVectors.push_back(V2); 10290 CommonMask.assign(Mask.begin(), Mask.end()); 10291 return; 10292 } 10293 Value *Vec = InVectors.front(); 10294 if (InVectors.size() == 2) { 10295 Vec = createShuffle(Vec, InVectors.back(), CommonMask); 10296 transformMaskAfterShuffle(CommonMask, CommonMask); 10297 } else if (cast<FixedVectorType>(Vec->getType())->getNumElements() != 10298 Mask.size()) { 10299 Vec = createShuffle(Vec, nullptr, CommonMask); 10300 transformMaskAfterShuffle(CommonMask, CommonMask); 10301 } 10302 V1 = createShuffle(V1, V2, Mask); 10303 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10304 if (Mask[Idx] != PoisonMaskElem) 10305 CommonMask[Idx] = Idx + Sz; 10306 InVectors.front() = Vec; 10307 if (InVectors.size() == 2) 10308 InVectors.back() = V1; 10309 else 10310 InVectors.push_back(V1); 10311 } 10312 /// Adds another one input vector and the mask for the shuffling. 10313 void add(Value *V1, ArrayRef<int> Mask, bool = false) { 10314 if (InVectors.empty()) { 10315 if (!isa<FixedVectorType>(V1->getType())) { 10316 V1 = createShuffle(V1, nullptr, CommonMask); 10317 CommonMask.assign(Mask.size(), PoisonMaskElem); 10318 transformMaskAfterShuffle(CommonMask, Mask); 10319 } 10320 InVectors.push_back(V1); 10321 CommonMask.assign(Mask.begin(), Mask.end()); 10322 return; 10323 } 10324 const auto *It = find(InVectors, V1); 10325 if (It == InVectors.end()) { 10326 if (InVectors.size() == 2 || 10327 InVectors.front()->getType() != V1->getType() || 10328 !isa<FixedVectorType>(V1->getType())) { 10329 Value *V = InVectors.front(); 10330 if (InVectors.size() == 2) { 10331 V = createShuffle(InVectors.front(), InVectors.back(), CommonMask); 10332 transformMaskAfterShuffle(CommonMask, CommonMask); 10333 } else if (cast<FixedVectorType>(V->getType())->getNumElements() != 10334 CommonMask.size()) { 10335 V = createShuffle(InVectors.front(), nullptr, CommonMask); 10336 transformMaskAfterShuffle(CommonMask, CommonMask); 10337 } 10338 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10339 if (CommonMask[Idx] == PoisonMaskElem && Mask[Idx] != PoisonMaskElem) 10340 CommonMask[Idx] = 10341 V->getType() != V1->getType() 10342 ? Idx + Sz 10343 : Mask[Idx] + cast<FixedVectorType>(V1->getType()) 10344 ->getNumElements(); 10345 if (V->getType() != V1->getType()) 10346 V1 = createShuffle(V1, nullptr, Mask); 10347 InVectors.front() = V; 10348 if (InVectors.size() == 2) 10349 InVectors.back() = V1; 10350 else 10351 InVectors.push_back(V1); 10352 return; 10353 } 10354 // Check if second vector is required if the used elements are already 10355 // used from the first one. 10356 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10357 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem) { 10358 InVectors.push_back(V1); 10359 break; 10360 } 10361 } 10362 int VF = CommonMask.size(); 10363 if (auto *FTy = dyn_cast<FixedVectorType>(V1->getType())) 10364 VF = FTy->getNumElements(); 10365 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10366 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem) 10367 CommonMask[Idx] = Mask[Idx] + (It == InVectors.begin() ? 0 : VF); 10368 } 10369 /// Adds another one input vector and the mask for the shuffling. 
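  /// The order is a permutation of the input lanes; it is converted into a
  /// shuffle mask via inversePermutation() and forwarded to add().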
10370 void addOrdered(Value *V1, ArrayRef<unsigned> Order) { 10371 SmallVector<int> NewMask; 10372 inversePermutation(Order, NewMask); 10373 add(V1, NewMask); 10374 } 10375 Value *gather(ArrayRef<Value *> VL, unsigned MaskVF = 0, 10376 Value *Root = nullptr) { 10377 return R.gather(VL, Root); 10378 } 10379 Value *createFreeze(Value *V) { return Builder.CreateFreeze(V); } 10380 /// Finalize emission of the shuffles. 10381 /// \param Action the action (if any) to be performed before final applying of 10382 /// the \p ExtMask mask. 10383 Value * 10384 finalize(ArrayRef<int> ExtMask, unsigned VF = 0, 10385 function_ref<void(Value *&, SmallVectorImpl<int> &)> Action = {}) { 10386 IsFinalized = true; 10387 if (Action) { 10388 Value *Vec = InVectors.front(); 10389 if (InVectors.size() == 2) { 10390 Vec = createShuffle(Vec, InVectors.back(), CommonMask); 10391 InVectors.pop_back(); 10392 } else { 10393 Vec = createShuffle(Vec, nullptr, CommonMask); 10394 } 10395 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10396 if (CommonMask[Idx] != PoisonMaskElem) 10397 CommonMask[Idx] = Idx; 10398 assert(VF > 0 && 10399 "Expected vector length for the final value before action."); 10400 unsigned VecVF = cast<FixedVectorType>(Vec->getType())->getNumElements(); 10401 if (VecVF < VF) { 10402 SmallVector<int> ResizeMask(VF, PoisonMaskElem); 10403 std::iota(ResizeMask.begin(), std::next(ResizeMask.begin(), VecVF), 0); 10404 Vec = createShuffle(Vec, nullptr, ResizeMask); 10405 } 10406 Action(Vec, CommonMask); 10407 InVectors.front() = Vec; 10408 } 10409 if (!ExtMask.empty()) { 10410 if (CommonMask.empty()) { 10411 CommonMask.assign(ExtMask.begin(), ExtMask.end()); 10412 } else { 10413 SmallVector<int> NewMask(ExtMask.size(), PoisonMaskElem); 10414 for (int I = 0, Sz = ExtMask.size(); I < Sz; ++I) { 10415 if (ExtMask[I] == PoisonMaskElem) 10416 continue; 10417 NewMask[I] = CommonMask[ExtMask[I]]; 10418 } 10419 CommonMask.swap(NewMask); 10420 } 10421 } 10422 if (CommonMask.empty()) { 10423 assert(InVectors.size() == 1 && "Expected only one vector with no mask"); 10424 return InVectors.front(); 10425 } 10426 if (InVectors.size() == 2) 10427 return createShuffle(InVectors.front(), InVectors.back(), CommonMask); 10428 return createShuffle(InVectors.front(), nullptr, CommonMask); 10429 } 10430 10431 ~ShuffleInstructionBuilder() { 10432 assert((IsFinalized || CommonMask.empty()) && 10433 "Shuffle construction must be finalized."); 10434 } 10435 }; 10436 10437 Value *BoUpSLP::vectorizeOperand(TreeEntry *E, unsigned NodeIdx, 10438 bool PostponedPHIs) { 10439 ValueList &VL = E->getOperand(NodeIdx); 10440 if (E->State == TreeEntry::PossibleStridedVectorize && 10441 !E->ReorderIndices.empty()) { 10442 SmallVector<int> Mask(E->ReorderIndices.begin(), E->ReorderIndices.end()); 10443 reorderScalars(VL, Mask); 10444 } 10445 const unsigned VF = VL.size(); 10446 InstructionsState S = getSameOpcode(VL, *TLI); 10447 // Special processing for GEPs bundle, which may include non-gep values. 
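  // Illustrative (hypothetical) example: a bundle {%gep0, %gep1, %base} where
  // %gep0 and %gep1 are getelementptr instructions but %base is a plain
  // pointer value. getSameOpcode() on the whole list finds no common opcode,
  // so retry it on the first GEP found to drive the same-entry checks below.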
10448 if (!S.getOpcode() && VL.front()->getType()->isPointerTy()) { 10449 const auto *It = 10450 find_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); }); 10451 if (It != VL.end()) 10452 S = getSameOpcode(*It, *TLI); 10453 } 10454 if (S.getOpcode()) { 10455 auto CheckSameVE = [&](const TreeEntry *VE) { 10456 return VE->isSame(VL) && 10457 (any_of(VE->UserTreeIndices, 10458 [E, NodeIdx](const EdgeInfo &EI) { 10459 return EI.UserTE == E && EI.EdgeIdx == NodeIdx; 10460 }) || 10461 any_of(VectorizableTree, 10462 [E, NodeIdx, VE](const std::unique_ptr<TreeEntry> &TE) { 10463 return TE->isOperandGatherNode({E, NodeIdx}) && 10464 VE->isSame(TE->Scalars); 10465 })); 10466 }; 10467 TreeEntry *VE = getTreeEntry(S.OpValue); 10468 bool IsSameVE = VE && CheckSameVE(VE); 10469 if (!IsSameVE) { 10470 auto It = MultiNodeScalars.find(S.OpValue); 10471 if (It != MultiNodeScalars.end()) { 10472 auto *I = find_if(It->getSecond(), [&](const TreeEntry *TE) { 10473 return TE != VE && CheckSameVE(TE); 10474 }); 10475 if (I != It->getSecond().end()) { 10476 VE = *I; 10477 IsSameVE = true; 10478 } 10479 } 10480 } 10481 if (IsSameVE) { 10482 auto FinalShuffle = [&](Value *V, ArrayRef<int> Mask) { 10483 ShuffleInstructionBuilder ShuffleBuilder(Builder, *this); 10484 ShuffleBuilder.add(V, Mask); 10485 return ShuffleBuilder.finalize(std::nullopt); 10486 }; 10487 Value *V = vectorizeTree(VE, PostponedPHIs); 10488 if (VF != cast<FixedVectorType>(V->getType())->getNumElements()) { 10489 if (!VE->ReuseShuffleIndices.empty()) { 10490 // Reshuffle to get only unique values. 10491 // If some of the scalars are duplicated in the vectorization 10492 // tree entry, we do not vectorize them but instead generate a 10493 // mask for the reuses. But if there are several users of the 10494 // same entry, they may have different vectorization factors. 10495 // This is especially important for PHI nodes. In this case, we 10496 // need to adapt the resulting instruction for the user 10497 // vectorization factor and have to reshuffle it again to take 10498 // only unique elements of the vector. Without this code the 10499 // function incorrectly returns reduced vector instruction with 10500 // the same elements, not with the unique ones. 10501 10502 // block: 10503 // %phi = phi <2 x > { .., %entry} {%shuffle, %block} 10504 // %2 = shuffle <2 x > %phi, poison, <4 x > <1, 1, 0, 0> 10505 // ... (use %2) 10506 // %shuffle = shuffle <2 x> %2, poison, <2 x> {2, 0} 10507 // br %block 10508 SmallVector<int> UniqueIdxs(VF, PoisonMaskElem); 10509 SmallSet<int, 4> UsedIdxs; 10510 int Pos = 0; 10511 for (int Idx : VE->ReuseShuffleIndices) { 10512 if (Idx != static_cast<int>(VF) && Idx != PoisonMaskElem && 10513 UsedIdxs.insert(Idx).second) 10514 UniqueIdxs[Idx] = Pos; 10515 ++Pos; 10516 } 10517 assert(VF >= UsedIdxs.size() && "Expected vectorization factor " 10518 "less than original vector size."); 10519 UniqueIdxs.append(VF - UsedIdxs.size(), PoisonMaskElem); 10520 V = FinalShuffle(V, UniqueIdxs); 10521 } else { 10522 assert(VF < cast<FixedVectorType>(V->getType())->getNumElements() && 10523 "Expected vectorization factor less " 10524 "than original vector size."); 10525 SmallVector<int> UniformMask(VF, 0); 10526 std::iota(UniformMask.begin(), UniformMask.end(), 0); 10527 V = FinalShuffle(V, UniformMask); 10528 } 10529 } 10530 // Need to update the operand gather node, if actually the operand is not a 10531 // vectorized node, but the buildvector/gather node, which matches one of 10532 // the vectorized nodes. 
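      // In that case the matching buildvector/gather node has no vectorized
      // value of its own yet, so record the vector just created above as its
      // vectorized value as well.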
      if (find_if(VE->UserTreeIndices, [&](const EdgeInfo &EI) {
            return EI.UserTE == E && EI.EdgeIdx == NodeIdx;
          }) == VE->UserTreeIndices.end()) {
        auto *It = find_if(
            VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) {
              return TE->State == TreeEntry::NeedToGather &&
                     TE->UserTreeIndices.front().UserTE == E &&
                     TE->UserTreeIndices.front().EdgeIdx == NodeIdx;
            });
        assert(It != VectorizableTree.end() && "Expected gather node operand.");
        (*It)->VectorizedValue = V;
      }
      return V;
    }
  }

  // Find the corresponding gather entry and vectorize it.
  // This keeps the tree/graph transformations more accurate and checks the
  // correctness of those transformations in many cases.
  auto *I = find_if(VectorizableTree,
                    [E, NodeIdx](const std::unique_ptr<TreeEntry> &TE) {
                      return TE->isOperandGatherNode({E, NodeIdx});
                    });
  assert(I != VectorizableTree.end() && "Gather node is not in the graph.");
  assert(I->get()->UserTreeIndices.size() == 1 &&
         "Expected only single user for the gather node.");
  assert(I->get()->isSame(VL) && "Expected same list of scalars.");
  return vectorizeTree(I->get(), PostponedPHIs);
}

template <typename BVTy, typename ResTy, typename... Args>
ResTy BoUpSLP::processBuildVector(const TreeEntry *E, Args &...Params) {
  assert(E->State == TreeEntry::NeedToGather && "Expected gather node.");
  unsigned VF = E->getVectorFactor();

  bool NeedFreeze = false;
  SmallVector<int> ReuseShuffleIndicies(E->ReuseShuffleIndices.begin(),
                                        E->ReuseShuffleIndices.end());
  SmallVector<Value *> GatheredScalars(E->Scalars.begin(), E->Scalars.end());
  // Build a mask out of the reorder indices and reorder scalars per this
  // mask.
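  // Illustrative (hypothetical) example: ReorderIndices = {2, 0, 1} yields
  // ReorderMask = {1, 2, 0}, since inversePermutation() sets
  // Mask[ReorderIndices[I]] = I for every I.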
10574 SmallVector<int> ReorderMask; 10575 inversePermutation(E->ReorderIndices, ReorderMask); 10576 if (!ReorderMask.empty()) 10577 reorderScalars(GatheredScalars, ReorderMask); 10578 auto FindReusedSplat = [&](MutableArrayRef<int> Mask, unsigned InputVF) { 10579 if (!isSplat(E->Scalars) || none_of(E->Scalars, [](Value *V) { 10580 return isa<UndefValue>(V) && !isa<PoisonValue>(V); 10581 })) 10582 return false; 10583 TreeEntry *UserTE = E->UserTreeIndices.back().UserTE; 10584 unsigned EdgeIdx = E->UserTreeIndices.back().EdgeIdx; 10585 if (UserTE->getNumOperands() != 2) 10586 return false; 10587 auto *It = 10588 find_if(VectorizableTree, [=](const std::unique_ptr<TreeEntry> &TE) { 10589 return find_if(TE->UserTreeIndices, [=](const EdgeInfo &EI) { 10590 return EI.UserTE == UserTE && EI.EdgeIdx != EdgeIdx; 10591 }) != TE->UserTreeIndices.end(); 10592 }); 10593 if (It == VectorizableTree.end()) 10594 return false; 10595 int Idx; 10596 if ((Mask.size() < InputVF && 10597 ShuffleVectorInst::isExtractSubvectorMask(Mask, InputVF, Idx) && 10598 Idx == 0) || 10599 (Mask.size() == InputVF && 10600 ShuffleVectorInst::isIdentityMask(Mask, Mask.size()))) { 10601 std::iota(Mask.begin(), Mask.end(), 0); 10602 } else { 10603 unsigned I = 10604 *find_if_not(Mask, [](int Idx) { return Idx == PoisonMaskElem; }); 10605 std::fill(Mask.begin(), Mask.end(), I); 10606 } 10607 return true; 10608 }; 10609 BVTy ShuffleBuilder(Params...); 10610 ResTy Res = ResTy(); 10611 SmallVector<int> Mask; 10612 SmallVector<int> ExtractMask(GatheredScalars.size(), PoisonMaskElem); 10613 SmallVector<std::optional<TTI::ShuffleKind>> ExtractShuffles; 10614 Value *ExtractVecBase = nullptr; 10615 bool UseVecBaseAsInput = false; 10616 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> GatherShuffles; 10617 SmallVector<SmallVector<const TreeEntry *>> Entries; 10618 Type *ScalarTy = GatheredScalars.front()->getType(); 10619 auto *VecTy = FixedVectorType::get(ScalarTy, GatheredScalars.size()); 10620 unsigned NumParts = TTI->getNumberOfParts(VecTy); 10621 if (NumParts == 0 || NumParts >= GatheredScalars.size()) 10622 NumParts = 1; 10623 if (!all_of(GatheredScalars, UndefValue::classof)) { 10624 // Check for gathered extracts. 10625 bool Resized = false; 10626 ExtractShuffles = 10627 tryToGatherExtractElements(GatheredScalars, ExtractMask, NumParts); 10628 if (!ExtractShuffles.empty()) { 10629 SmallVector<const TreeEntry *> ExtractEntries; 10630 for (auto [Idx, I] : enumerate(ExtractMask)) { 10631 if (I == PoisonMaskElem) 10632 continue; 10633 if (const auto *TE = getTreeEntry( 10634 cast<ExtractElementInst>(E->Scalars[Idx])->getVectorOperand())) 10635 ExtractEntries.push_back(TE); 10636 } 10637 if (std::optional<ResTy> Delayed = 10638 ShuffleBuilder.needToDelay(E, ExtractEntries)) { 10639 // Delay emission of gathers which are not ready yet. 10640 PostponedGathers.insert(E); 10641 // Postpone gather emission, will be emitted after the end of the 10642 // process to keep correct order. 
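        // The value returned by needToDelay() is only a stub (a load of a
        // poison pointer, see ShuffleInstructionBuilder::needToDelay above);
        // it is replaced with the real gather sequence and erased once the
        // postponed gathers are emitted in BoUpSLP::vectorizeTree().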
10643 return *Delayed; 10644 } 10645 if (Value *VecBase = ShuffleBuilder.adjustExtracts( 10646 E, ExtractMask, ExtractShuffles, NumParts, UseVecBaseAsInput)) { 10647 ExtractVecBase = VecBase; 10648 if (auto *VecBaseTy = dyn_cast<FixedVectorType>(VecBase->getType())) 10649 if (VF == VecBaseTy->getNumElements() && 10650 GatheredScalars.size() != VF) { 10651 Resized = true; 10652 GatheredScalars.append(VF - GatheredScalars.size(), 10653 PoisonValue::get(ScalarTy)); 10654 } 10655 } 10656 } 10657 // Gather extracts after we check for full matched gathers only. 10658 if (!ExtractShuffles.empty() || E->getOpcode() != Instruction::Load || 10659 E->isAltShuffle() || 10660 all_of(E->Scalars, [this](Value *V) { return getTreeEntry(V); }) || 10661 isSplat(E->Scalars) || 10662 (E->Scalars != GatheredScalars && GatheredScalars.size() <= 2)) { 10663 GatherShuffles = 10664 isGatherShuffledEntry(E, GatheredScalars, Mask, Entries, NumParts); 10665 } 10666 if (!GatherShuffles.empty()) { 10667 if (std::optional<ResTy> Delayed = 10668 ShuffleBuilder.needToDelay(E, Entries)) { 10669 // Delay emission of gathers which are not ready yet. 10670 PostponedGathers.insert(E); 10671 // Postpone gather emission, will be emitted after the end of the 10672 // process to keep correct order. 10673 return *Delayed; 10674 } 10675 if (GatherShuffles.size() == 1 && 10676 *GatherShuffles.front() == TTI::SK_PermuteSingleSrc && 10677 Entries.front().front()->isSame(E->Scalars)) { 10678 // Perfect match in the graph, will reuse the previously vectorized 10679 // node. Cost is 0. 10680 LLVM_DEBUG( 10681 dbgs() 10682 << "SLP: perfect diamond match for gather bundle " 10683 << shortBundleName(E->Scalars) << ".\n"); 10684 // Restore the mask for previous partially matched values. 10685 Mask.resize(E->Scalars.size()); 10686 const TreeEntry *FrontTE = Entries.front().front(); 10687 if (FrontTE->ReorderIndices.empty() && 10688 ((FrontTE->ReuseShuffleIndices.empty() && 10689 E->Scalars.size() == FrontTE->Scalars.size()) || 10690 (E->Scalars.size() == FrontTE->ReuseShuffleIndices.size()))) { 10691 std::iota(Mask.begin(), Mask.end(), 0); 10692 } else { 10693 for (auto [I, V] : enumerate(E->Scalars)) { 10694 if (isa<PoisonValue>(V)) { 10695 Mask[I] = PoisonMaskElem; 10696 continue; 10697 } 10698 Mask[I] = FrontTE->findLaneForValue(V); 10699 } 10700 } 10701 ShuffleBuilder.add(*FrontTE, Mask); 10702 Res = ShuffleBuilder.finalize(E->getCommonMask()); 10703 return Res; 10704 } 10705 if (!Resized) { 10706 if (GatheredScalars.size() != VF && 10707 any_of(Entries, [&](ArrayRef<const TreeEntry *> TEs) { 10708 return any_of(TEs, [&](const TreeEntry *TE) { 10709 return TE->getVectorFactor() == VF; 10710 }); 10711 })) 10712 GatheredScalars.append(VF - GatheredScalars.size(), 10713 PoisonValue::get(ScalarTy)); 10714 } 10715 // Remove shuffled elements from list of gathers. 10716 for (int I = 0, Sz = Mask.size(); I < Sz; ++I) { 10717 if (Mask[I] != PoisonMaskElem) 10718 GatheredScalars[I] = PoisonValue::get(ScalarTy); 10719 } 10720 } 10721 } 10722 auto TryPackScalars = [&](SmallVectorImpl<Value *> &Scalars, 10723 SmallVectorImpl<int> &ReuseMask, 10724 bool IsRootPoison) { 10725 // For splats with can emit broadcasts instead of gathers, so try to find 10726 // such sequences. 
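    // Illustrative (hypothetical) example: for Scalars = {%a, %a, %a, %a} the
    // code below keeps a single copy of %a in lane 0 and builds ReuseMask =
    // {0, 0, 0, 0}, so the gather degenerates into a single broadcast shuffle.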
10727 bool IsSplat = IsRootPoison && isSplat(Scalars) && 10728 (Scalars.size() > 2 || Scalars.front() == Scalars.back()); 10729 Scalars.append(VF - Scalars.size(), PoisonValue::get(ScalarTy)); 10730 SmallVector<int> UndefPos; 10731 DenseMap<Value *, unsigned> UniquePositions; 10732 // Gather unique non-const values and all constant values. 10733 // For repeated values, just shuffle them. 10734 int NumNonConsts = 0; 10735 int SinglePos = 0; 10736 for (auto [I, V] : enumerate(Scalars)) { 10737 if (isa<UndefValue>(V)) { 10738 if (!isa<PoisonValue>(V)) { 10739 ReuseMask[I] = I; 10740 UndefPos.push_back(I); 10741 } 10742 continue; 10743 } 10744 if (isConstant(V)) { 10745 ReuseMask[I] = I; 10746 continue; 10747 } 10748 ++NumNonConsts; 10749 SinglePos = I; 10750 Value *OrigV = V; 10751 Scalars[I] = PoisonValue::get(ScalarTy); 10752 if (IsSplat) { 10753 Scalars.front() = OrigV; 10754 ReuseMask[I] = 0; 10755 } else { 10756 const auto Res = UniquePositions.try_emplace(OrigV, I); 10757 Scalars[Res.first->second] = OrigV; 10758 ReuseMask[I] = Res.first->second; 10759 } 10760 } 10761 if (NumNonConsts == 1) { 10762 // Restore single insert element. 10763 if (IsSplat) { 10764 ReuseMask.assign(VF, PoisonMaskElem); 10765 std::swap(Scalars.front(), Scalars[SinglePos]); 10766 if (!UndefPos.empty() && UndefPos.front() == 0) 10767 Scalars.front() = UndefValue::get(ScalarTy); 10768 } 10769 ReuseMask[SinglePos] = SinglePos; 10770 } else if (!UndefPos.empty() && IsSplat) { 10771 // For undef values, try to replace them with the simple broadcast. 10772 // We can do it if the broadcasted value is guaranteed to be 10773 // non-poisonous, or by freezing the incoming scalar value first. 10774 auto *It = find_if(Scalars, [this, E](Value *V) { 10775 return !isa<UndefValue>(V) && 10776 (getTreeEntry(V) || isGuaranteedNotToBePoison(V) || 10777 (E->UserTreeIndices.size() == 1 && 10778 any_of(V->uses(), [E](const Use &U) { 10779 // Check if the value already used in the same operation in 10780 // one of the nodes already. 10781 return E->UserTreeIndices.front().EdgeIdx != 10782 U.getOperandNo() && 10783 is_contained( 10784 E->UserTreeIndices.front().UserTE->Scalars, 10785 U.getUser()); 10786 }))); 10787 }); 10788 if (It != Scalars.end()) { 10789 // Replace undefs by the non-poisoned scalars and emit broadcast. 10790 int Pos = std::distance(Scalars.begin(), It); 10791 for (int I : UndefPos) { 10792 // Set the undef position to the non-poisoned scalar. 10793 ReuseMask[I] = Pos; 10794 // Replace the undef by the poison, in the mask it is replaced by 10795 // non-poisoned scalar already. 10796 if (I != Pos) 10797 Scalars[I] = PoisonValue::get(ScalarTy); 10798 } 10799 } else { 10800 // Replace undefs by the poisons, emit broadcast and then emit 10801 // freeze. 10802 for (int I : UndefPos) { 10803 ReuseMask[I] = PoisonMaskElem; 10804 if (isa<UndefValue>(Scalars[I])) 10805 Scalars[I] = PoisonValue::get(ScalarTy); 10806 } 10807 NeedFreeze = true; 10808 } 10809 } 10810 }; 10811 if (!ExtractShuffles.empty() || !GatherShuffles.empty()) { 10812 bool IsNonPoisoned = true; 10813 bool IsUsedInExpr = true; 10814 Value *Vec1 = nullptr; 10815 if (!ExtractShuffles.empty()) { 10816 // Gather of extractelements can be represented as just a shuffle of 10817 // a single/two vectors the scalars are extracted from. 10818 // Find input vectors. 
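      // Illustrative (hypothetical) example: a gather of
      //   {extractelement <4 x i32> %v, i32 1; extractelement <4 x i32> %v, i32 3}
      // needs no insertelement chain at all; it is just
      //   shufflevector <4 x i32> %v, <4 x i32> poison, <2 x i32> <i32 1, i32 3>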
10819 Value *Vec2 = nullptr; 10820 for (unsigned I = 0, Sz = ExtractMask.size(); I < Sz; ++I) { 10821 if (!Mask.empty() && Mask[I] != PoisonMaskElem) 10822 ExtractMask[I] = PoisonMaskElem; 10823 } 10824 if (UseVecBaseAsInput) { 10825 Vec1 = ExtractVecBase; 10826 } else { 10827 for (unsigned I = 0, Sz = ExtractMask.size(); I < Sz; ++I) { 10828 if (ExtractMask[I] == PoisonMaskElem) 10829 continue; 10830 if (isa<UndefValue>(E->Scalars[I])) 10831 continue; 10832 auto *EI = cast<ExtractElementInst>(E->Scalars[I]); 10833 Value *VecOp = EI->getVectorOperand(); 10834 if (const auto *TE = getTreeEntry(VecOp)) 10835 if (TE->VectorizedValue) 10836 VecOp = TE->VectorizedValue; 10837 if (!Vec1) { 10838 Vec1 = VecOp; 10839 } else if (Vec1 != EI->getVectorOperand()) { 10840 assert((!Vec2 || Vec2 == EI->getVectorOperand()) && 10841 "Expected only 1 or 2 vectors shuffle."); 10842 Vec2 = VecOp; 10843 } 10844 } 10845 } 10846 if (Vec2) { 10847 IsUsedInExpr = false; 10848 IsNonPoisoned &= 10849 isGuaranteedNotToBePoison(Vec1) && isGuaranteedNotToBePoison(Vec2); 10850 ShuffleBuilder.add(Vec1, Vec2, ExtractMask); 10851 } else if (Vec1) { 10852 IsUsedInExpr &= FindReusedSplat( 10853 ExtractMask, 10854 cast<FixedVectorType>(Vec1->getType())->getNumElements()); 10855 ShuffleBuilder.add(Vec1, ExtractMask, /*ForExtracts=*/true); 10856 IsNonPoisoned &= isGuaranteedNotToBePoison(Vec1); 10857 } else { 10858 IsUsedInExpr = false; 10859 ShuffleBuilder.add(PoisonValue::get(FixedVectorType::get( 10860 ScalarTy, GatheredScalars.size())), 10861 ExtractMask, /*ForExtracts=*/true); 10862 } 10863 } 10864 if (!GatherShuffles.empty()) { 10865 unsigned SliceSize = E->Scalars.size() / NumParts; 10866 SmallVector<int> VecMask(Mask.size(), PoisonMaskElem); 10867 for (const auto [I, TEs] : enumerate(Entries)) { 10868 if (TEs.empty()) { 10869 assert(!GatherShuffles[I] && 10870 "No shuffles with empty entries list expected."); 10871 continue; 10872 } 10873 assert((TEs.size() == 1 || TEs.size() == 2) && 10874 "Expected shuffle of 1 or 2 entries."); 10875 auto SubMask = ArrayRef(Mask).slice(I * SliceSize, SliceSize); 10876 VecMask.assign(VecMask.size(), PoisonMaskElem); 10877 copy(SubMask, std::next(VecMask.begin(), I * SliceSize)); 10878 if (TEs.size() == 1) { 10879 IsUsedInExpr &= 10880 FindReusedSplat(VecMask, TEs.front()->getVectorFactor()); 10881 ShuffleBuilder.add(*TEs.front(), VecMask); 10882 if (TEs.front()->VectorizedValue) 10883 IsNonPoisoned &= 10884 isGuaranteedNotToBePoison(TEs.front()->VectorizedValue); 10885 } else { 10886 IsUsedInExpr = false; 10887 ShuffleBuilder.add(*TEs.front(), *TEs.back(), VecMask); 10888 if (TEs.front()->VectorizedValue && TEs.back()->VectorizedValue) 10889 IsNonPoisoned &= 10890 isGuaranteedNotToBePoison(TEs.front()->VectorizedValue) && 10891 isGuaranteedNotToBePoison(TEs.back()->VectorizedValue); 10892 } 10893 } 10894 } 10895 // Try to figure out best way to combine values: build a shuffle and insert 10896 // elements or just build several shuffles. 10897 // Insert non-constant scalars. 10898 SmallVector<Value *> NonConstants(GatheredScalars); 10899 int EMSz = ExtractMask.size(); 10900 int MSz = Mask.size(); 10901 // Try to build constant vector and shuffle with it only if currently we 10902 // have a single permutation and more than 1 scalar constants. 
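    // Illustrative (hypothetical) example: for GatheredScalars = {7, %x, 9, %y}
    // the constants become a single constant vector {7, poison, 9, poison} and
    // only %x and %y are inserted afterwards (when EnoughConstsForShuffle
    // holds), instead of building all four lanes with insertelement.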
10903 bool IsSingleShuffle = ExtractShuffles.empty() || GatherShuffles.empty(); 10904 bool IsIdentityShuffle = 10905 ((UseVecBaseAsInput || 10906 all_of(ExtractShuffles, 10907 [](const std::optional<TTI::ShuffleKind> &SK) { 10908 return SK.value_or(TTI::SK_PermuteTwoSrc) == 10909 TTI::SK_PermuteSingleSrc; 10910 })) && 10911 none_of(ExtractMask, [&](int I) { return I >= EMSz; }) && 10912 ShuffleVectorInst::isIdentityMask(ExtractMask, EMSz)) || 10913 (!GatherShuffles.empty() && 10914 all_of(GatherShuffles, 10915 [](const std::optional<TTI::ShuffleKind> &SK) { 10916 return SK.value_or(TTI::SK_PermuteTwoSrc) == 10917 TTI::SK_PermuteSingleSrc; 10918 }) && 10919 none_of(Mask, [&](int I) { return I >= MSz; }) && 10920 ShuffleVectorInst::isIdentityMask(Mask, MSz)); 10921 bool EnoughConstsForShuffle = 10922 IsSingleShuffle && 10923 (none_of(GatheredScalars, 10924 [](Value *V) { 10925 return isa<UndefValue>(V) && !isa<PoisonValue>(V); 10926 }) || 10927 any_of(GatheredScalars, 10928 [](Value *V) { 10929 return isa<Constant>(V) && !isa<UndefValue>(V); 10930 })) && 10931 (!IsIdentityShuffle || 10932 (GatheredScalars.size() == 2 && 10933 any_of(GatheredScalars, 10934 [](Value *V) { return !isa<UndefValue>(V); })) || 10935 count_if(GatheredScalars, [](Value *V) { 10936 return isa<Constant>(V) && !isa<PoisonValue>(V); 10937 }) > 1); 10938 // NonConstants array contains just non-constant values, GatheredScalars 10939 // contains only constant to build final vector and then shuffle. 10940 for (int I = 0, Sz = GatheredScalars.size(); I < Sz; ++I) { 10941 if (EnoughConstsForShuffle && isa<Constant>(GatheredScalars[I])) 10942 NonConstants[I] = PoisonValue::get(ScalarTy); 10943 else 10944 GatheredScalars[I] = PoisonValue::get(ScalarTy); 10945 } 10946 // Generate constants for final shuffle and build a mask for them. 10947 if (!all_of(GatheredScalars, PoisonValue::classof)) { 10948 SmallVector<int> BVMask(GatheredScalars.size(), PoisonMaskElem); 10949 TryPackScalars(GatheredScalars, BVMask, /*IsRootPoison=*/true); 10950 Value *BV = ShuffleBuilder.gather(GatheredScalars, BVMask.size()); 10951 ShuffleBuilder.add(BV, BVMask); 10952 } 10953 if (all_of(NonConstants, [=](Value *V) { 10954 return isa<PoisonValue>(V) || 10955 (IsSingleShuffle && ((IsIdentityShuffle && 10956 IsNonPoisoned) || IsUsedInExpr) && isa<UndefValue>(V)); 10957 })) 10958 Res = ShuffleBuilder.finalize(E->ReuseShuffleIndices); 10959 else 10960 Res = ShuffleBuilder.finalize( 10961 E->ReuseShuffleIndices, E->Scalars.size(), 10962 [&](Value *&Vec, SmallVectorImpl<int> &Mask) { 10963 TryPackScalars(NonConstants, Mask, /*IsRootPoison=*/false); 10964 Vec = ShuffleBuilder.gather(NonConstants, Mask.size(), Vec); 10965 }); 10966 } else if (!allConstant(GatheredScalars)) { 10967 // Gather unique scalars and all constants. 10968 SmallVector<int> ReuseMask(GatheredScalars.size(), PoisonMaskElem); 10969 TryPackScalars(GatheredScalars, ReuseMask, /*IsRootPoison=*/true); 10970 Value *BV = ShuffleBuilder.gather(GatheredScalars, ReuseMask.size()); 10971 ShuffleBuilder.add(BV, ReuseMask); 10972 Res = ShuffleBuilder.finalize(E->ReuseShuffleIndices); 10973 } else { 10974 // Gather all constants. 
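    // All gathered scalars are constants here, so no shuffle with other tree
    // entries is needed: build the constant vector directly and only apply the
    // reuse indices in finalize().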
10975 SmallVector<int> Mask(E->Scalars.size(), PoisonMaskElem); 10976 for (auto [I, V] : enumerate(E->Scalars)) { 10977 if (!isa<PoisonValue>(V)) 10978 Mask[I] = I; 10979 } 10980 Value *BV = ShuffleBuilder.gather(E->Scalars); 10981 ShuffleBuilder.add(BV, Mask); 10982 Res = ShuffleBuilder.finalize(E->ReuseShuffleIndices); 10983 } 10984 10985 if (NeedFreeze) 10986 Res = ShuffleBuilder.createFreeze(Res); 10987 return Res; 10988 } 10989 10990 Value *BoUpSLP::createBuildVector(const TreeEntry *E) { 10991 return processBuildVector<ShuffleInstructionBuilder, Value *>(E, Builder, 10992 *this); 10993 } 10994 10995 Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) { 10996 IRBuilder<>::InsertPointGuard Guard(Builder); 10997 10998 if (E->VectorizedValue && 10999 (E->State != TreeEntry::Vectorize || E->getOpcode() != Instruction::PHI || 11000 E->isAltShuffle())) { 11001 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n"); 11002 return E->VectorizedValue; 11003 } 11004 11005 if (E->State == TreeEntry::NeedToGather) { 11006 // Set insert point for non-reduction initial nodes. 11007 if (E->getMainOp() && E->Idx == 0 && !UserIgnoreList) 11008 setInsertPointAfterBundle(E); 11009 Value *Vec = createBuildVector(E); 11010 E->VectorizedValue = Vec; 11011 return Vec; 11012 } 11013 11014 auto FinalShuffle = [&](Value *V, const TreeEntry *E, VectorType *VecTy, 11015 bool IsSigned) { 11016 if (V->getType() != VecTy) 11017 V = Builder.CreateIntCast(V, VecTy, IsSigned); 11018 ShuffleInstructionBuilder ShuffleBuilder(Builder, *this); 11019 if (E->getOpcode() == Instruction::Store) { 11020 ArrayRef<int> Mask = 11021 ArrayRef(reinterpret_cast<const int *>(E->ReorderIndices.begin()), 11022 E->ReorderIndices.size()); 11023 ShuffleBuilder.add(V, Mask); 11024 } else if (E->State == TreeEntry::PossibleStridedVectorize) { 11025 ShuffleBuilder.addOrdered(V, std::nullopt); 11026 } else { 11027 ShuffleBuilder.addOrdered(V, E->ReorderIndices); 11028 } 11029 return ShuffleBuilder.finalize(E->ReuseShuffleIndices); 11030 }; 11031 11032 assert((E->State == TreeEntry::Vectorize || 11033 E->State == TreeEntry::ScatterVectorize || 11034 E->State == TreeEntry::PossibleStridedVectorize) && 11035 "Unhandled state"); 11036 unsigned ShuffleOrOp = 11037 E->isAltShuffle() ? 
(unsigned)Instruction::ShuffleVector : E->getOpcode(); 11038 Instruction *VL0 = E->getMainOp(); 11039 Type *ScalarTy = VL0->getType(); 11040 if (auto *Store = dyn_cast<StoreInst>(VL0)) 11041 ScalarTy = Store->getValueOperand()->getType(); 11042 else if (auto *IE = dyn_cast<InsertElementInst>(VL0)) 11043 ScalarTy = IE->getOperand(1)->getType(); 11044 bool IsSigned = false; 11045 auto It = MinBWs.find(E); 11046 if (It != MinBWs.end()) { 11047 ScalarTy = IntegerType::get(F->getContext(), It->second.first); 11048 IsSigned = It->second.second; 11049 } 11050 auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size()); 11051 switch (ShuffleOrOp) { 11052 case Instruction::PHI: { 11053 assert((E->ReorderIndices.empty() || 11054 E != VectorizableTree.front().get() || 11055 !E->UserTreeIndices.empty()) && 11056 "PHI reordering is free."); 11057 if (PostponedPHIs && E->VectorizedValue) 11058 return E->VectorizedValue; 11059 auto *PH = cast<PHINode>(VL0); 11060 Builder.SetInsertPoint(PH->getParent(), 11061 PH->getParent()->getFirstNonPHIIt()); 11062 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 11063 if (PostponedPHIs || !E->VectorizedValue) { 11064 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues()); 11065 E->PHI = NewPhi; 11066 Value *V = NewPhi; 11067 11068 // Adjust insertion point once all PHI's have been generated. 11069 Builder.SetInsertPoint(PH->getParent(), 11070 PH->getParent()->getFirstInsertionPt()); 11071 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 11072 11073 V = FinalShuffle(V, E, VecTy, IsSigned); 11074 11075 E->VectorizedValue = V; 11076 if (PostponedPHIs) 11077 return V; 11078 } 11079 PHINode *NewPhi = cast<PHINode>(E->PHI); 11080 // If phi node is fully emitted - exit. 11081 if (NewPhi->getNumIncomingValues() != 0) 11082 return NewPhi; 11083 11084 // PHINodes may have multiple entries from the same block. We want to 11085 // visit every block once. 11086 SmallPtrSet<BasicBlock *, 4> VisitedBBs; 11087 11088 for (unsigned I : seq<unsigned>(0, PH->getNumIncomingValues())) { 11089 ValueList Operands; 11090 BasicBlock *IBB = PH->getIncomingBlock(I); 11091 11092 // Stop emission if all incoming values are generated. 
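      // The recursive operand vectorization below may re-enter this PHI entry
      // (a diamond in the graph) and finish filling in its incoming values, so
      // re-check on every iteration and stop once the PHI is complete.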
11093 if (NewPhi->getNumIncomingValues() == PH->getNumIncomingValues()) { 11094 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11095 return NewPhi; 11096 } 11097 11098 if (!VisitedBBs.insert(IBB).second) { 11099 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB); 11100 continue; 11101 } 11102 11103 Builder.SetInsertPoint(IBB->getTerminator()); 11104 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 11105 Value *Vec = vectorizeOperand(E, I, /*PostponedPHIs=*/true); 11106 if (VecTy != Vec->getType()) { 11107 assert(MinBWs.contains(getOperandEntry(E, I)) && 11108 "Expected item in MinBWs."); 11109 Vec = Builder.CreateIntCast(Vec, VecTy, It->second.second); 11110 } 11111 NewPhi->addIncoming(Vec, IBB); 11112 } 11113 11114 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() && 11115 "Invalid number of incoming values"); 11116 return NewPhi; 11117 } 11118 11119 case Instruction::ExtractElement: { 11120 Value *V = E->getSingleOperand(0); 11121 setInsertPointAfterBundle(E); 11122 V = FinalShuffle(V, E, VecTy, IsSigned); 11123 E->VectorizedValue = V; 11124 return V; 11125 } 11126 case Instruction::ExtractValue: { 11127 auto *LI = cast<LoadInst>(E->getSingleOperand(0)); 11128 Builder.SetInsertPoint(LI); 11129 Value *Ptr = LI->getPointerOperand(); 11130 LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign()); 11131 Value *NewV = propagateMetadata(V, E->Scalars); 11132 NewV = FinalShuffle(NewV, E, VecTy, IsSigned); 11133 E->VectorizedValue = NewV; 11134 return NewV; 11135 } 11136 case Instruction::InsertElement: { 11137 assert(E->ReuseShuffleIndices.empty() && "All inserts should be unique"); 11138 Builder.SetInsertPoint(cast<Instruction>(E->Scalars.back())); 11139 Value *V = vectorizeOperand(E, 1, PostponedPHIs); 11140 ArrayRef<Value *> Op = E->getOperand(1); 11141 Type *ScalarTy = Op.front()->getType(); 11142 if (cast<VectorType>(V->getType())->getElementType() != ScalarTy) { 11143 assert(ScalarTy->isIntegerTy() && "Expected item in MinBWs."); 11144 std::pair<unsigned, bool> Res = MinBWs.lookup(getOperandEntry(E, 1)); 11145 assert(Res.first > 0 && "Expected item in MinBWs."); 11146 V = Builder.CreateIntCast( 11147 V, 11148 FixedVectorType::get( 11149 ScalarTy, 11150 cast<FixedVectorType>(V->getType())->getNumElements()), 11151 Res.second); 11152 } 11153 11154 // Create InsertVector shuffle if necessary 11155 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) { 11156 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0)); 11157 })); 11158 const unsigned NumElts = 11159 cast<FixedVectorType>(FirstInsert->getType())->getNumElements(); 11160 const unsigned NumScalars = E->Scalars.size(); 11161 11162 unsigned Offset = *getInsertIndex(VL0); 11163 assert(Offset < NumElts && "Failed to find vector index offset"); 11164 11165 // Create shuffle to resize vector 11166 SmallVector<int> Mask; 11167 if (!E->ReorderIndices.empty()) { 11168 inversePermutation(E->ReorderIndices, Mask); 11169 Mask.append(NumElts - NumScalars, PoisonMaskElem); 11170 } else { 11171 Mask.assign(NumElts, PoisonMaskElem); 11172 std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0); 11173 } 11174 // Create InsertVector shuffle if necessary 11175 bool IsIdentity = true; 11176 SmallVector<int> PrevMask(NumElts, PoisonMaskElem); 11177 Mask.swap(PrevMask); 11178 for (unsigned I = 0; I < NumScalars; ++I) { 11179 Value *Scalar = E->Scalars[PrevMask[I]]; 11180 unsigned InsertIdx = *getInsertIndex(Scalar); 11181 IsIdentity &= InsertIdx - 
Offset == I; 11182 Mask[InsertIdx - Offset] = I; 11183 } 11184 if (!IsIdentity || NumElts != NumScalars) { 11185 Value *V2 = nullptr; 11186 bool IsVNonPoisonous = isGuaranteedNotToBePoison(V) && !isConstant(V); 11187 SmallVector<int> InsertMask(Mask); 11188 if (NumElts != NumScalars && Offset == 0) { 11189 // Follow all insert element instructions from the current buildvector 11190 // sequence. 11191 InsertElementInst *Ins = cast<InsertElementInst>(VL0); 11192 do { 11193 std::optional<unsigned> InsertIdx = getInsertIndex(Ins); 11194 if (!InsertIdx) 11195 break; 11196 if (InsertMask[*InsertIdx] == PoisonMaskElem) 11197 InsertMask[*InsertIdx] = *InsertIdx; 11198 if (!Ins->hasOneUse()) 11199 break; 11200 Ins = dyn_cast_or_null<InsertElementInst>( 11201 Ins->getUniqueUndroppableUser()); 11202 } while (Ins); 11203 SmallBitVector UseMask = 11204 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask); 11205 SmallBitVector IsFirstPoison = 11206 isUndefVector<true>(FirstInsert->getOperand(0), UseMask); 11207 SmallBitVector IsFirstUndef = 11208 isUndefVector(FirstInsert->getOperand(0), UseMask); 11209 if (!IsFirstPoison.all()) { 11210 unsigned Idx = 0; 11211 for (unsigned I = 0; I < NumElts; I++) { 11212 if (InsertMask[I] == PoisonMaskElem && !IsFirstPoison.test(I) && 11213 IsFirstUndef.test(I)) { 11214 if (IsVNonPoisonous) { 11215 InsertMask[I] = I < NumScalars ? I : 0; 11216 continue; 11217 } 11218 if (!V2) 11219 V2 = UndefValue::get(V->getType()); 11220 if (Idx >= NumScalars) 11221 Idx = NumScalars - 1; 11222 InsertMask[I] = NumScalars + Idx; 11223 ++Idx; 11224 } else if (InsertMask[I] != PoisonMaskElem && 11225 Mask[I] == PoisonMaskElem) { 11226 InsertMask[I] = PoisonMaskElem; 11227 } 11228 } 11229 } else { 11230 InsertMask = Mask; 11231 } 11232 } 11233 if (!V2) 11234 V2 = PoisonValue::get(V->getType()); 11235 V = Builder.CreateShuffleVector(V, V2, InsertMask); 11236 if (auto *I = dyn_cast<Instruction>(V)) { 11237 GatherShuffleExtractSeq.insert(I); 11238 CSEBlocks.insert(I->getParent()); 11239 } 11240 } 11241 11242 SmallVector<int> InsertMask(NumElts, PoisonMaskElem); 11243 for (unsigned I = 0; I < NumElts; I++) { 11244 if (Mask[I] != PoisonMaskElem) 11245 InsertMask[Offset + I] = I; 11246 } 11247 SmallBitVector UseMask = 11248 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask); 11249 SmallBitVector IsFirstUndef = 11250 isUndefVector(FirstInsert->getOperand(0), UseMask); 11251 if ((!IsIdentity || Offset != 0 || !IsFirstUndef.all()) && 11252 NumElts != NumScalars) { 11253 if (IsFirstUndef.all()) { 11254 if (!ShuffleVectorInst::isIdentityMask(InsertMask, NumElts)) { 11255 SmallBitVector IsFirstPoison = 11256 isUndefVector<true>(FirstInsert->getOperand(0), UseMask); 11257 if (!IsFirstPoison.all()) { 11258 for (unsigned I = 0; I < NumElts; I++) { 11259 if (InsertMask[I] == PoisonMaskElem && !IsFirstPoison.test(I)) 11260 InsertMask[I] = I + NumElts; 11261 } 11262 } 11263 V = Builder.CreateShuffleVector( 11264 V, 11265 IsFirstPoison.all() ? PoisonValue::get(V->getType()) 11266 : FirstInsert->getOperand(0), 11267 InsertMask, cast<Instruction>(E->Scalars.back())->getName()); 11268 if (auto *I = dyn_cast<Instruction>(V)) { 11269 GatherShuffleExtractSeq.insert(I); 11270 CSEBlocks.insert(I->getParent()); 11271 } 11272 } 11273 } else { 11274 SmallBitVector IsFirstPoison = 11275 isUndefVector<true>(FirstInsert->getOperand(0), UseMask); 11276 for (unsigned I = 0; I < NumElts; I++) { 11277 if (InsertMask[I] == PoisonMaskElem) 11278 InsertMask[I] = IsFirstPoison.test(I) ? 
PoisonMaskElem : I; 11279 else 11280 InsertMask[I] += NumElts; 11281 } 11282 V = Builder.CreateShuffleVector( 11283 FirstInsert->getOperand(0), V, InsertMask, 11284 cast<Instruction>(E->Scalars.back())->getName()); 11285 if (auto *I = dyn_cast<Instruction>(V)) { 11286 GatherShuffleExtractSeq.insert(I); 11287 CSEBlocks.insert(I->getParent()); 11288 } 11289 } 11290 } 11291 11292 ++NumVectorInstructions; 11293 E->VectorizedValue = V; 11294 return V; 11295 } 11296 case Instruction::ZExt: 11297 case Instruction::SExt: 11298 case Instruction::FPToUI: 11299 case Instruction::FPToSI: 11300 case Instruction::FPExt: 11301 case Instruction::PtrToInt: 11302 case Instruction::IntToPtr: 11303 case Instruction::SIToFP: 11304 case Instruction::UIToFP: 11305 case Instruction::Trunc: 11306 case Instruction::FPTrunc: 11307 case Instruction::BitCast: { 11308 setInsertPointAfterBundle(E); 11309 11310 Value *InVec = vectorizeOperand(E, 0, PostponedPHIs); 11311 if (E->VectorizedValue) { 11312 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11313 return E->VectorizedValue; 11314 } 11315 11316 auto *CI = cast<CastInst>(VL0); 11317 Instruction::CastOps VecOpcode = CI->getOpcode(); 11318 Type *SrcScalarTy = VL0->getOperand(0)->getType(); 11319 auto SrcIt = MinBWs.find(getOperandEntry(E, 0)); 11320 if (!ScalarTy->isFloatingPointTy() && !SrcScalarTy->isFloatingPointTy() && 11321 (SrcIt != MinBWs.end() || It != MinBWs.end())) { 11322 // Check if the values are candidates to demote. 11323 unsigned SrcBWSz = DL->getTypeSizeInBits(SrcScalarTy); 11324 if (SrcIt != MinBWs.end()) 11325 SrcBWSz = SrcIt->second.first; 11326 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy); 11327 if (BWSz == SrcBWSz) { 11328 VecOpcode = Instruction::BitCast; 11329 } else if (BWSz < SrcBWSz) { 11330 VecOpcode = Instruction::Trunc; 11331 } else if (It != MinBWs.end()) { 11332 assert(BWSz > SrcBWSz && "Invalid cast!"); 11333 VecOpcode = It->second.second ? Instruction::SExt : Instruction::ZExt; 11334 } 11335 } 11336 Value *V = (VecOpcode != ShuffleOrOp && VecOpcode == Instruction::BitCast) 11337 ? InVec 11338 : Builder.CreateCast(VecOpcode, InVec, VecTy); 11339 V = FinalShuffle(V, E, VecTy, IsSigned); 11340 11341 E->VectorizedValue = V; 11342 ++NumVectorInstructions; 11343 return V; 11344 } 11345 case Instruction::FCmp: 11346 case Instruction::ICmp: { 11347 setInsertPointAfterBundle(E); 11348 11349 Value *L = vectorizeOperand(E, 0, PostponedPHIs); 11350 if (E->VectorizedValue) { 11351 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11352 return E->VectorizedValue; 11353 } 11354 Value *R = vectorizeOperand(E, 1, PostponedPHIs); 11355 if (E->VectorizedValue) { 11356 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11357 return E->VectorizedValue; 11358 } 11359 if (L->getType() != R->getType()) { 11360 assert((MinBWs.contains(getOperandEntry(E, 0)) || 11361 MinBWs.contains(getOperandEntry(E, 1))) && 11362 "Expected item in MinBWs."); 11363 L = Builder.CreateIntCast(L, VecTy, IsSigned); 11364 R = Builder.CreateIntCast(R, VecTy, IsSigned); 11365 } 11366 11367 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 11368 Value *V = Builder.CreateCmp(P0, L, R); 11369 propagateIRFlags(V, E->Scalars, VL0); 11370 // Do not cast for cmps. 
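    // A vector compare always produces an <N x i1> result regardless of the
    // (possibly bit-width-demoted) operand type, so re-derive VecTy from the
    // compare itself before the final shuffle.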
11371 VecTy = cast<FixedVectorType>(V->getType()); 11372 V = FinalShuffle(V, E, VecTy, IsSigned); 11373 11374 E->VectorizedValue = V; 11375 ++NumVectorInstructions; 11376 return V; 11377 } 11378 case Instruction::Select: { 11379 setInsertPointAfterBundle(E); 11380 11381 Value *Cond = vectorizeOperand(E, 0, PostponedPHIs); 11382 if (E->VectorizedValue) { 11383 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11384 return E->VectorizedValue; 11385 } 11386 Value *True = vectorizeOperand(E, 1, PostponedPHIs); 11387 if (E->VectorizedValue) { 11388 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11389 return E->VectorizedValue; 11390 } 11391 Value *False = vectorizeOperand(E, 2, PostponedPHIs); 11392 if (E->VectorizedValue) { 11393 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11394 return E->VectorizedValue; 11395 } 11396 if (True->getType() != False->getType()) { 11397 assert((MinBWs.contains(getOperandEntry(E, 1)) || 11398 MinBWs.contains(getOperandEntry(E, 2))) && 11399 "Expected item in MinBWs."); 11400 True = Builder.CreateIntCast(True, VecTy, IsSigned); 11401 False = Builder.CreateIntCast(False, VecTy, IsSigned); 11402 } 11403 11404 Value *V = Builder.CreateSelect(Cond, True, False); 11405 V = FinalShuffle(V, E, VecTy, IsSigned); 11406 11407 E->VectorizedValue = V; 11408 ++NumVectorInstructions; 11409 return V; 11410 } 11411 case Instruction::FNeg: { 11412 setInsertPointAfterBundle(E); 11413 11414 Value *Op = vectorizeOperand(E, 0, PostponedPHIs); 11415 11416 if (E->VectorizedValue) { 11417 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11418 return E->VectorizedValue; 11419 } 11420 11421 Value *V = Builder.CreateUnOp( 11422 static_cast<Instruction::UnaryOps>(E->getOpcode()), Op); 11423 propagateIRFlags(V, E->Scalars, VL0); 11424 if (auto *I = dyn_cast<Instruction>(V)) 11425 V = propagateMetadata(I, E->Scalars); 11426 11427 V = FinalShuffle(V, E, VecTy, IsSigned); 11428 11429 E->VectorizedValue = V; 11430 ++NumVectorInstructions; 11431 11432 return V; 11433 } 11434 case Instruction::Add: 11435 case Instruction::FAdd: 11436 case Instruction::Sub: 11437 case Instruction::FSub: 11438 case Instruction::Mul: 11439 case Instruction::FMul: 11440 case Instruction::UDiv: 11441 case Instruction::SDiv: 11442 case Instruction::FDiv: 11443 case Instruction::URem: 11444 case Instruction::SRem: 11445 case Instruction::FRem: 11446 case Instruction::Shl: 11447 case Instruction::LShr: 11448 case Instruction::AShr: 11449 case Instruction::And: 11450 case Instruction::Or: 11451 case Instruction::Xor: { 11452 setInsertPointAfterBundle(E); 11453 11454 Value *LHS = vectorizeOperand(E, 0, PostponedPHIs); 11455 if (E->VectorizedValue) { 11456 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11457 return E->VectorizedValue; 11458 } 11459 Value *RHS = vectorizeOperand(E, 1, PostponedPHIs); 11460 if (E->VectorizedValue) { 11461 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11462 return E->VectorizedValue; 11463 } 11464 if (LHS->getType() != RHS->getType()) { 11465 assert((MinBWs.contains(getOperandEntry(E, 0)) || 11466 MinBWs.contains(getOperandEntry(E, 1))) && 11467 "Expected item in MinBWs."); 11468 LHS = Builder.CreateIntCast(LHS, VecTy, IsSigned); 11469 RHS = Builder.CreateIntCast(RHS, VecTy, IsSigned); 11470 } 11471 11472 Value *V = Builder.CreateBinOp( 11473 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, 11474 RHS); 11475 propagateIRFlags(V, E->Scalars, VL0, !MinBWs.contains(E)); 
11476 if (auto *I = dyn_cast<Instruction>(V)) 11477 V = propagateMetadata(I, E->Scalars); 11478 11479 V = FinalShuffle(V, E, VecTy, IsSigned); 11480 11481 E->VectorizedValue = V; 11482 ++NumVectorInstructions; 11483 11484 return V; 11485 } 11486 case Instruction::Load: { 11487 // Loads are inserted at the head of the tree because we don't want to 11488 // sink them all the way down past store instructions. 11489 setInsertPointAfterBundle(E); 11490 11491 LoadInst *LI = cast<LoadInst>(VL0); 11492 Instruction *NewLI; 11493 Value *PO = LI->getPointerOperand(); 11494 if (E->State == TreeEntry::Vectorize) { 11495 NewLI = Builder.CreateAlignedLoad(VecTy, PO, LI->getAlign()); 11496 11497 // The pointer operand uses an in-tree scalar so we add the new 11498 // LoadInst to ExternalUses list to make sure that an extract will 11499 // be generated in the future. 11500 if (isa<Instruction>(PO)) { 11501 if (TreeEntry *Entry = getTreeEntry(PO)) { 11502 // Find which lane we need to extract. 11503 unsigned FoundLane = Entry->findLaneForValue(PO); 11504 ExternalUses.emplace_back(PO, NewLI, FoundLane); 11505 } 11506 } 11507 } else { 11508 assert((E->State == TreeEntry::ScatterVectorize || 11509 E->State == TreeEntry::PossibleStridedVectorize) && 11510 "Unhandled state"); 11511 Value *VecPtr = vectorizeOperand(E, 0, PostponedPHIs); 11512 if (E->VectorizedValue) { 11513 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11514 return E->VectorizedValue; 11515 } 11516 // Use the minimum alignment of the gathered loads. 11517 Align CommonAlignment = LI->getAlign(); 11518 for (Value *V : E->Scalars) 11519 CommonAlignment = 11520 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign()); 11521 NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment); 11522 } 11523 Value *V = propagateMetadata(NewLI, E->Scalars); 11524 11525 V = FinalShuffle(V, E, VecTy, IsSigned); 11526 E->VectorizedValue = V; 11527 ++NumVectorInstructions; 11528 return V; 11529 } 11530 case Instruction::Store: { 11531 auto *SI = cast<StoreInst>(VL0); 11532 11533 setInsertPointAfterBundle(E); 11534 11535 Value *VecValue = vectorizeOperand(E, 0, PostponedPHIs); 11536 VecValue = FinalShuffle(VecValue, E, VecTy, IsSigned); 11537 11538 Value *Ptr = SI->getPointerOperand(); 11539 StoreInst *ST = 11540 Builder.CreateAlignedStore(VecValue, Ptr, SI->getAlign()); 11541 11542 // The pointer operand uses an in-tree scalar, so add the new StoreInst to 11543 // ExternalUses to make sure that an extract will be generated in the 11544 // future. 11545 if (isa<Instruction>(Ptr)) { 11546 if (TreeEntry *Entry = getTreeEntry(Ptr)) { 11547 // Find which lane we need to extract. 
11548 unsigned FoundLane = Entry->findLaneForValue(Ptr); 11549 ExternalUses.push_back(ExternalUser(Ptr, ST, FoundLane)); 11550 } 11551 } 11552 11553 Value *V = propagateMetadata(ST, E->Scalars); 11554 11555 E->VectorizedValue = V; 11556 ++NumVectorInstructions; 11557 return V; 11558 } 11559 case Instruction::GetElementPtr: { 11560 auto *GEP0 = cast<GetElementPtrInst>(VL0); 11561 setInsertPointAfterBundle(E); 11562 11563 Value *Op0 = vectorizeOperand(E, 0, PostponedPHIs); 11564 if (E->VectorizedValue) { 11565 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11566 return E->VectorizedValue; 11567 } 11568 11569 SmallVector<Value *> OpVecs; 11570 for (int J = 1, N = GEP0->getNumOperands(); J < N; ++J) { 11571 Value *OpVec = vectorizeOperand(E, J, PostponedPHIs); 11572 if (E->VectorizedValue) { 11573 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11574 return E->VectorizedValue; 11575 } 11576 OpVecs.push_back(OpVec); 11577 } 11578 11579 Value *V = Builder.CreateGEP(GEP0->getSourceElementType(), Op0, OpVecs); 11580 if (Instruction *I = dyn_cast<GetElementPtrInst>(V)) { 11581 SmallVector<Value *> GEPs; 11582 for (Value *V : E->Scalars) { 11583 if (isa<GetElementPtrInst>(V)) 11584 GEPs.push_back(V); 11585 } 11586 V = propagateMetadata(I, GEPs); 11587 } 11588 11589 V = FinalShuffle(V, E, VecTy, IsSigned); 11590 11591 E->VectorizedValue = V; 11592 ++NumVectorInstructions; 11593 11594 return V; 11595 } 11596 case Instruction::Call: { 11597 CallInst *CI = cast<CallInst>(VL0); 11598 setInsertPointAfterBundle(E); 11599 11600 Intrinsic::ID IID = Intrinsic::not_intrinsic; 11601 if (Function *FI = CI->getCalledFunction()) 11602 IID = FI->getIntrinsicID(); 11603 11604 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 11605 11606 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); 11607 bool UseIntrinsic = ID != Intrinsic::not_intrinsic && 11608 VecCallCosts.first <= VecCallCosts.second; 11609 11610 Value *ScalarArg = nullptr; 11611 SmallVector<Value *> OpVecs; 11612 SmallVector<Type *, 2> TysForDecl; 11613 // Add return type if intrinsic is overloaded on it. 11614 if (isVectorIntrinsicWithOverloadTypeAtArg(IID, -1)) 11615 TysForDecl.push_back( 11616 FixedVectorType::get(CI->getType(), E->Scalars.size())); 11617 for (unsigned I : seq<unsigned>(0, CI->arg_size())) { 11618 ValueList OpVL; 11619 // Some intrinsics have scalar arguments. This argument should not be 11620 // vectorized. 
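      // For example, for llvm.powi.* the exponent is a scalar i32 shared by
      // all lanes, so it is passed through unchanged and only the base operand
      // is vectorized.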
11621 if (UseIntrinsic && isVectorIntrinsicWithScalarOpAtArg(IID, I)) { 11622 CallInst *CEI = cast<CallInst>(VL0); 11623 ScalarArg = CEI->getArgOperand(I); 11624 OpVecs.push_back(CEI->getArgOperand(I)); 11625 if (isVectorIntrinsicWithOverloadTypeAtArg(IID, I)) 11626 TysForDecl.push_back(ScalarArg->getType()); 11627 continue; 11628 } 11629 11630 Value *OpVec = vectorizeOperand(E, I, PostponedPHIs); 11631 if (E->VectorizedValue) { 11632 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11633 return E->VectorizedValue; 11634 } 11635 LLVM_DEBUG(dbgs() << "SLP: OpVec[" << I << "]: " << *OpVec << "\n"); 11636 OpVecs.push_back(OpVec); 11637 if (isVectorIntrinsicWithOverloadTypeAtArg(IID, I)) 11638 TysForDecl.push_back(OpVec->getType()); 11639 } 11640 11641 Function *CF; 11642 if (!UseIntrinsic) { 11643 VFShape Shape = 11644 VFShape::get(CI->getFunctionType(), 11645 ElementCount::getFixed( 11646 static_cast<unsigned>(VecTy->getNumElements())), 11647 false /*HasGlobalPred*/); 11648 CF = VFDatabase(*CI).getVectorizedFunction(Shape); 11649 } else { 11650 CF = Intrinsic::getDeclaration(F->getParent(), ID, TysForDecl); 11651 } 11652 11653 SmallVector<OperandBundleDef, 1> OpBundles; 11654 CI->getOperandBundlesAsDefs(OpBundles); 11655 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles); 11656 11657 // The scalar argument uses an in-tree scalar so we add the new vectorized 11658 // call to ExternalUses list to make sure that an extract will be 11659 // generated in the future. 11660 if (isa_and_present<Instruction>(ScalarArg)) { 11661 if (TreeEntry *Entry = getTreeEntry(ScalarArg)) { 11662 // Find which lane we need to extract. 11663 unsigned FoundLane = Entry->findLaneForValue(ScalarArg); 11664 ExternalUses.push_back( 11665 ExternalUser(ScalarArg, cast<User>(V), FoundLane)); 11666 } 11667 } 11668 11669 propagateIRFlags(V, E->Scalars, VL0); 11670 V = FinalShuffle(V, E, VecTy, IsSigned); 11671 11672 E->VectorizedValue = V; 11673 ++NumVectorInstructions; 11674 return V; 11675 } 11676 case Instruction::ShuffleVector: { 11677 assert(E->isAltShuffle() && 11678 ((Instruction::isBinaryOp(E->getOpcode()) && 11679 Instruction::isBinaryOp(E->getAltOpcode())) || 11680 (Instruction::isCast(E->getOpcode()) && 11681 Instruction::isCast(E->getAltOpcode())) || 11682 (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) && 11683 "Invalid Shuffle Vector Operand"); 11684 11685 Value *LHS = nullptr, *RHS = nullptr; 11686 if (Instruction::isBinaryOp(E->getOpcode()) || isa<CmpInst>(VL0)) { 11687 setInsertPointAfterBundle(E); 11688 LHS = vectorizeOperand(E, 0, PostponedPHIs); 11689 if (E->VectorizedValue) { 11690 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11691 return E->VectorizedValue; 11692 } 11693 RHS = vectorizeOperand(E, 1, PostponedPHIs); 11694 } else { 11695 setInsertPointAfterBundle(E); 11696 LHS = vectorizeOperand(E, 0, PostponedPHIs); 11697 } 11698 if (E->VectorizedValue) { 11699 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11700 return E->VectorizedValue; 11701 } 11702 if (LHS && RHS && LHS->getType() != RHS->getType()) { 11703 assert((MinBWs.contains(getOperandEntry(E, 0)) || 11704 MinBWs.contains(getOperandEntry(E, 1))) && 11705 "Expected item in MinBWs."); 11706 LHS = Builder.CreateIntCast(LHS, VecTy, IsSigned); 11707 RHS = Builder.CreateIntCast(RHS, VecTy, IsSigned); 11708 } 11709 11710 Value *V0, *V1; 11711 if (Instruction::isBinaryOp(E->getOpcode())) { 11712 V0 = Builder.CreateBinOp( 11713 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, 
RHS); 11714 V1 = Builder.CreateBinOp( 11715 static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS); 11716 } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) { 11717 V0 = Builder.CreateCmp(CI0->getPredicate(), LHS, RHS); 11718 auto *AltCI = cast<CmpInst>(E->getAltOp()); 11719 CmpInst::Predicate AltPred = AltCI->getPredicate(); 11720 V1 = Builder.CreateCmp(AltPred, LHS, RHS); 11721 } else { 11722 V0 = Builder.CreateCast( 11723 static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy); 11724 V1 = Builder.CreateCast( 11725 static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy); 11726 } 11727 // Add V0 and V1 to later analysis to try to find and remove matching 11728 // instruction, if any. 11729 for (Value *V : {V0, V1}) { 11730 if (auto *I = dyn_cast<Instruction>(V)) { 11731 GatherShuffleExtractSeq.insert(I); 11732 CSEBlocks.insert(I->getParent()); 11733 } 11734 } 11735 11736 // Create shuffle to take alternate operations from the vector. 11737 // Also, gather up main and alt scalar ops to propagate IR flags to 11738 // each vector operation. 11739 ValueList OpScalars, AltScalars; 11740 SmallVector<int> Mask; 11741 E->buildAltOpShuffleMask( 11742 [E, this](Instruction *I) { 11743 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 11744 return isAlternateInstruction(I, E->getMainOp(), E->getAltOp(), 11745 *TLI); 11746 }, 11747 Mask, &OpScalars, &AltScalars); 11748 11749 propagateIRFlags(V0, OpScalars); 11750 propagateIRFlags(V1, AltScalars); 11751 11752 Value *V = Builder.CreateShuffleVector(V0, V1, Mask); 11753 if (auto *I = dyn_cast<Instruction>(V)) { 11754 V = propagateMetadata(I, E->Scalars); 11755 GatherShuffleExtractSeq.insert(I); 11756 CSEBlocks.insert(I->getParent()); 11757 } 11758 11759 if (V->getType() != VecTy && !isa<CmpInst>(VL0)) 11760 V = Builder.CreateIntCast( 11761 V, FixedVectorType::get(ScalarTy, E->getVectorFactor()), IsSigned); 11762 E->VectorizedValue = V; 11763 ++NumVectorInstructions; 11764 11765 return V; 11766 } 11767 default: 11768 llvm_unreachable("unknown inst"); 11769 } 11770 return nullptr; 11771 } 11772 11773 Value *BoUpSLP::vectorizeTree() { 11774 ExtraValueToDebugLocsMap ExternallyUsedValues; 11775 SmallVector<std::pair<Value *, Value *>> ReplacedExternals; 11776 return vectorizeTree(ExternallyUsedValues, ReplacedExternals); 11777 } 11778 11779 namespace { 11780 /// Data type for handling buildvector sequences with the reused scalars from 11781 /// other tree entries. 11782 struct ShuffledInsertData { 11783 /// List of insertelements to be replaced by shuffles. 11784 SmallVector<InsertElementInst *> InsertElements; 11785 /// The parent vectors and shuffle mask for the given list of inserts. 11786 MapVector<Value *, SmallVector<int>> ValueMasks; 11787 }; 11788 } // namespace 11789 11790 Value *BoUpSLP::vectorizeTree( 11791 const ExtraValueToDebugLocsMap &ExternallyUsedValues, 11792 SmallVectorImpl<std::pair<Value *, Value *>> &ReplacedExternals, 11793 Instruction *ReductionRoot) { 11794 // All blocks must be scheduled before any instructions are inserted. 11795 for (auto &BSIter : BlocksSchedules) { 11796 scheduleBlock(BSIter.second.get()); 11797 } 11798 // Clean Entry-to-LastInstruction table. It can be affected after scheduling, 11799 // need to rebuild it. 
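  // (Scheduling may have moved the bundle members, so the cached
  // last-instruction positions used for insertion points can be stale here.)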
11800 EntryToLastInstruction.clear();
11801
11802 if (ReductionRoot)
11803 Builder.SetInsertPoint(ReductionRoot->getParent(),
11804 ReductionRoot->getIterator());
11805 else
11806 Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin());
11807
11808 // Postpone emission of PHI operands to avoid cyclic dependency issues.
11809 (void)vectorizeTree(VectorizableTree[0].get(), /*PostponedPHIs=*/true);
11810 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree)
11811 if (TE->State == TreeEntry::Vectorize &&
11812 TE->getOpcode() == Instruction::PHI && !TE->isAltShuffle() &&
11813 TE->VectorizedValue)
11814 (void)vectorizeTree(TE.get(), /*PostponedPHIs=*/false);
11815 // Run through the list of postponed gathers and emit them, replacing the temp
11816 // emitted allocas with actual vector instructions.
11817 ArrayRef<const TreeEntry *> PostponedNodes = PostponedGathers.getArrayRef();
11818 DenseMap<Value *, SmallVector<TreeEntry *>> PostponedValues;
11819 for (const TreeEntry *E : PostponedNodes) {
11820 auto *TE = const_cast<TreeEntry *>(E);
11821 if (auto *VecTE = getTreeEntry(TE->Scalars.front()))
11822 if (VecTE->isSame(TE->UserTreeIndices.front().UserTE->getOperand(
11823 TE->UserTreeIndices.front().EdgeIdx)))
11824 // Found a gather node which is exactly the same as one of the
11825 // vectorized nodes. This may happen after reordering.
11826 continue;
11827 auto *PrevVec = cast<Instruction>(TE->VectorizedValue);
11828 TE->VectorizedValue = nullptr;
11829 auto *UserI =
11830 cast<Instruction>(TE->UserTreeIndices.front().UserTE->VectorizedValue);
11831 // If the user is a PHI node, its vector code has to be inserted right before
11832 // the block terminator. Since the node was delayed, there were some
11833 // unresolved dependencies at the moment when the stub instruction was
11834 // emitted. If any of these dependencies turns out to be an operand of another
11835 // PHI coming from this same block, the position of the stub instruction
11836 // becomes invalid. This is because the source vector that is supposed to feed
11837 // this gather node was inserted at the end of the block [after the stub
11838 // instruction]. So we need to adjust the insertion point to the end of the block again.
11839 if (isa<PHINode>(UserI)) {
11840 // Insert before all users.
11841 Instruction *InsertPt = PrevVec->getParent()->getTerminator();
11842 for (User *U : PrevVec->users()) {
11843 if (U == UserI)
11844 continue;
11845 auto *UI = dyn_cast<Instruction>(U);
11846 if (!UI || isa<PHINode>(UI) || UI->getParent() != InsertPt->getParent())
11847 continue;
11848 if (UI->comesBefore(InsertPt))
11849 InsertPt = UI;
11850 }
11851 Builder.SetInsertPoint(InsertPt);
11852 } else {
11853 Builder.SetInsertPoint(PrevVec);
11854 }
11855 Builder.SetCurrentDebugLocation(UserI->getDebugLoc());
11856 Value *Vec = vectorizeTree(TE, /*PostponedPHIs=*/false);
11857 PrevVec->replaceAllUsesWith(Vec);
11858 PostponedValues.try_emplace(Vec).first->second.push_back(TE);
11859 // Replace the stub vector node, if it was used before for one of the
11860 // buildvector nodes already.
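// (Explanatory note: if an earlier postponed buildvector already recorded
// PrevVec as its vectorized value, the lookup below redirects that record to
// the real vector Vec emitted here before PrevVec is erased, so no tree entry
// is left pointing at a deleted instruction.)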
11861 auto It = PostponedValues.find(PrevVec); 11862 if (It != PostponedValues.end()) { 11863 for (TreeEntry *VTE : It->getSecond()) 11864 VTE->VectorizedValue = Vec; 11865 } 11866 eraseInstruction(PrevVec); 11867 } 11868 11869 LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() 11870 << " values .\n"); 11871 11872 SmallVector<ShuffledInsertData> ShuffledInserts; 11873 // Maps vector instruction to original insertelement instruction 11874 DenseMap<Value *, InsertElementInst *> VectorToInsertElement; 11875 // Maps extract Scalar to the corresponding extractelement instruction in the 11876 // basic block. Only one extractelement per block should be emitted. 11877 DenseMap<Value *, DenseMap<BasicBlock *, Instruction *>> ScalarToEEs; 11878 SmallDenseSet<Value *, 4> UsedInserts; 11879 DenseMap<Value *, Value *> VectorCasts; 11880 // Extract all of the elements with the external uses. 11881 for (const auto &ExternalUse : ExternalUses) { 11882 Value *Scalar = ExternalUse.Scalar; 11883 llvm::User *User = ExternalUse.User; 11884 11885 // Skip users that we already RAUW. This happens when one instruction 11886 // has multiple uses of the same value. 11887 if (User && !is_contained(Scalar->users(), User)) 11888 continue; 11889 TreeEntry *E = getTreeEntry(Scalar); 11890 assert(E && "Invalid scalar"); 11891 assert(E->State != TreeEntry::NeedToGather && 11892 "Extracting from a gather list"); 11893 // Non-instruction pointers are not deleted, just skip them. 11894 if (E->getOpcode() == Instruction::GetElementPtr && 11895 !isa<GetElementPtrInst>(Scalar)) 11896 continue; 11897 11898 Value *Vec = E->VectorizedValue; 11899 assert(Vec && "Can't find vectorizable value"); 11900 11901 Value *Lane = Builder.getInt32(ExternalUse.Lane); 11902 auto ExtractAndExtendIfNeeded = [&](Value *Vec) { 11903 if (Scalar->getType() != Vec->getType()) { 11904 Value *Ex = nullptr; 11905 auto It = ScalarToEEs.find(Scalar); 11906 if (It != ScalarToEEs.end()) { 11907 // No need to emit many extracts, just move the only one in the 11908 // current block. 11909 auto EEIt = It->second.find(Builder.GetInsertBlock()); 11910 if (EEIt != It->second.end()) { 11911 Instruction *I = EEIt->second; 11912 if (Builder.GetInsertPoint() != Builder.GetInsertBlock()->end() && 11913 Builder.GetInsertPoint()->comesBefore(I)) 11914 I->moveBefore(*Builder.GetInsertPoint()->getParent(), 11915 Builder.GetInsertPoint()); 11916 Ex = I; 11917 } 11918 } 11919 if (!Ex) { 11920 // "Reuse" the existing extract to improve final codegen. 11921 if (auto *ES = dyn_cast<ExtractElementInst>(Scalar)) { 11922 Ex = Builder.CreateExtractElement(ES->getOperand(0), 11923 ES->getOperand(1)); 11924 } else { 11925 Ex = Builder.CreateExtractElement(Vec, Lane); 11926 } 11927 if (auto *I = dyn_cast<Instruction>(Ex)) 11928 ScalarToEEs[Scalar].try_emplace(Builder.GetInsertBlock(), I); 11929 } 11930 // The then branch of the previous if may produce constants, since 0 11931 // operand might be a constant. 11932 if (auto *ExI = dyn_cast<Instruction>(Ex)) { 11933 GatherShuffleExtractSeq.insert(ExI); 11934 CSEBlocks.insert(ExI->getParent()); 11935 } 11936 // If necessary, sign-extend or zero-extend ScalarRoot 11937 // to the larger type. 
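// A minimal sketch of the widening below (hypothetical IR; assumes the tree
// was narrowed to i8 via MinBWs while the external user still expects i32):
//   %ex  = extractelement <4 x i8> %vec, i32 %lane
//   %use = sext i8 %ex to i32   ; zext instead when MinBWs marks it unsigned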
11938 if (Scalar->getType() != Ex->getType()) 11939 return Builder.CreateIntCast(Ex, Scalar->getType(), 11940 MinBWs.find(E)->second.second); 11941 return Ex; 11942 } 11943 assert(isa<FixedVectorType>(Scalar->getType()) && 11944 isa<InsertElementInst>(Scalar) && 11945 "In-tree scalar of vector type is not insertelement?"); 11946 auto *IE = cast<InsertElementInst>(Scalar); 11947 VectorToInsertElement.try_emplace(Vec, IE); 11948 return Vec; 11949 }; 11950 // If User == nullptr, the Scalar is used as extra arg. Generate 11951 // ExtractElement instruction and update the record for this scalar in 11952 // ExternallyUsedValues. 11953 if (!User) { 11954 assert(ExternallyUsedValues.count(Scalar) && 11955 "Scalar with nullptr as an external user must be registered in " 11956 "ExternallyUsedValues map"); 11957 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 11958 if (auto *PHI = dyn_cast<PHINode>(VecI)) 11959 Builder.SetInsertPoint(PHI->getParent(), 11960 PHI->getParent()->getFirstNonPHIIt()); 11961 else 11962 Builder.SetInsertPoint(VecI->getParent(), 11963 std::next(VecI->getIterator())); 11964 } else { 11965 Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin()); 11966 } 11967 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 11968 // Required to update internally referenced instructions. 11969 Scalar->replaceAllUsesWith(NewInst); 11970 ReplacedExternals.emplace_back(Scalar, NewInst); 11971 continue; 11972 } 11973 11974 if (auto *VU = dyn_cast<InsertElementInst>(User)) { 11975 // Skip if the scalar is another vector op or Vec is not an instruction. 11976 if (!Scalar->getType()->isVectorTy() && isa<Instruction>(Vec)) { 11977 if (auto *FTy = dyn_cast<FixedVectorType>(User->getType())) { 11978 if (!UsedInserts.insert(VU).second) 11979 continue; 11980 // Need to use original vector, if the root is truncated. 11981 auto BWIt = MinBWs.find(E); 11982 if (BWIt != MinBWs.end() && Vec->getType() != VU->getType()) { 11983 auto VecIt = VectorCasts.find(Scalar); 11984 if (VecIt == VectorCasts.end()) { 11985 IRBuilder<>::InsertPointGuard Guard(Builder); 11986 if (auto *IVec = dyn_cast<Instruction>(Vec)) 11987 Builder.SetInsertPoint(IVec->getNextNonDebugInstruction()); 11988 Vec = Builder.CreateIntCast(Vec, VU->getType(), 11989 BWIt->second.second); 11990 VectorCasts.try_emplace(Scalar, Vec); 11991 } else { 11992 Vec = VecIt->second; 11993 } 11994 } 11995 11996 std::optional<unsigned> InsertIdx = getInsertIndex(VU); 11997 if (InsertIdx) { 11998 auto *It = 11999 find_if(ShuffledInserts, [VU](const ShuffledInsertData &Data) { 12000 // Checks if 2 insertelements are from the same buildvector. 12001 InsertElementInst *VecInsert = Data.InsertElements.front(); 12002 return areTwoInsertFromSameBuildVector( 12003 VU, VecInsert, 12004 [](InsertElementInst *II) { return II->getOperand(0); }); 12005 }); 12006 unsigned Idx = *InsertIdx; 12007 if (It == ShuffledInserts.end()) { 12008 (void)ShuffledInserts.emplace_back(); 12009 It = std::next(ShuffledInserts.begin(), 12010 ShuffledInserts.size() - 1); 12011 SmallVectorImpl<int> &Mask = It->ValueMasks[Vec]; 12012 if (Mask.empty()) 12013 Mask.assign(FTy->getNumElements(), PoisonMaskElem); 12014 // Find the insertvector, vectorized in tree, if any. 12015 Value *Base = VU; 12016 while (auto *IEBase = dyn_cast<InsertElementInst>(Base)) { 12017 if (IEBase != User && 12018 (!IEBase->hasOneUse() || 12019 getInsertIndex(IEBase).value_or(Idx) == Idx)) 12020 break; 12021 // Build the mask for the vectorized insertelement instructions. 
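// For illustration (hypothetical chain): if the buildvector
//   %ie0 = insertelement <4 x i32> poison, i32 %a, i32 0
//   %ie1 = insertelement <4 x i32> %ie0, i32 %b, i32 1
// was itself vectorized as a tree entry, the loop below records identity
// entries (Mask[0] = 0, Mask[1] = 1) so those lanes are taken unchanged from
// the vectorized buildvector when the final shuffle is built.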
12022 if (const TreeEntry *E = getTreeEntry(IEBase)) { 12023 do { 12024 IEBase = cast<InsertElementInst>(Base); 12025 int IEIdx = *getInsertIndex(IEBase); 12026 assert(Mask[Idx] == PoisonMaskElem && 12027 "InsertElementInstruction used already."); 12028 Mask[IEIdx] = IEIdx; 12029 Base = IEBase->getOperand(0); 12030 } while (E == getTreeEntry(Base)); 12031 break; 12032 } 12033 Base = cast<InsertElementInst>(Base)->getOperand(0); 12034 // After the vectorization the def-use chain has changed, need 12035 // to look through original insertelement instructions, if they 12036 // get replaced by vector instructions. 12037 auto It = VectorToInsertElement.find(Base); 12038 if (It != VectorToInsertElement.end()) 12039 Base = It->second; 12040 } 12041 } 12042 SmallVectorImpl<int> &Mask = It->ValueMasks[Vec]; 12043 if (Mask.empty()) 12044 Mask.assign(FTy->getNumElements(), PoisonMaskElem); 12045 Mask[Idx] = ExternalUse.Lane; 12046 It->InsertElements.push_back(cast<InsertElementInst>(User)); 12047 continue; 12048 } 12049 } 12050 } 12051 } 12052 12053 // Generate extracts for out-of-tree users. 12054 // Find the insertion point for the extractelement lane. 12055 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 12056 if (PHINode *PH = dyn_cast<PHINode>(User)) { 12057 for (unsigned I : seq<unsigned>(0, PH->getNumIncomingValues())) { 12058 if (PH->getIncomingValue(I) == Scalar) { 12059 Instruction *IncomingTerminator = 12060 PH->getIncomingBlock(I)->getTerminator(); 12061 if (isa<CatchSwitchInst>(IncomingTerminator)) { 12062 Builder.SetInsertPoint(VecI->getParent(), 12063 std::next(VecI->getIterator())); 12064 } else { 12065 Builder.SetInsertPoint(PH->getIncomingBlock(I)->getTerminator()); 12066 } 12067 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 12068 PH->setOperand(I, NewInst); 12069 } 12070 } 12071 } else { 12072 Builder.SetInsertPoint(cast<Instruction>(User)); 12073 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 12074 User->replaceUsesOfWith(Scalar, NewInst); 12075 } 12076 } else { 12077 Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin()); 12078 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 12079 User->replaceUsesOfWith(Scalar, NewInst); 12080 } 12081 12082 LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); 12083 } 12084 12085 auto CreateShuffle = [&](Value *V1, Value *V2, ArrayRef<int> Mask) { 12086 SmallVector<int> CombinedMask1(Mask.size(), PoisonMaskElem); 12087 SmallVector<int> CombinedMask2(Mask.size(), PoisonMaskElem); 12088 int VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 12089 for (int I = 0, E = Mask.size(); I < E; ++I) { 12090 if (Mask[I] < VF) 12091 CombinedMask1[I] = Mask[I]; 12092 else 12093 CombinedMask2[I] = Mask[I] - VF; 12094 } 12095 ShuffleInstructionBuilder ShuffleBuilder(Builder, *this); 12096 ShuffleBuilder.add(V1, CombinedMask1); 12097 if (V2) 12098 ShuffleBuilder.add(V2, CombinedMask2); 12099 return ShuffleBuilder.finalize(std::nullopt); 12100 }; 12101 12102 auto &&ResizeToVF = [&CreateShuffle](Value *Vec, ArrayRef<int> Mask, 12103 bool ForSingleMask) { 12104 unsigned VF = Mask.size(); 12105 unsigned VecVF = cast<FixedVectorType>(Vec->getType())->getNumElements(); 12106 if (VF != VecVF) { 12107 if (any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); })) { 12108 Vec = CreateShuffle(Vec, nullptr, Mask); 12109 return std::make_pair(Vec, true); 12110 } 12111 if (!ForSingleMask) { 12112 SmallVector<int> ResizeMask(VF, PoisonMaskElem); 12113 for (unsigned I = 0; I < VF; ++I) { 12114 if (Mask[I] != PoisonMaskElem) 12115 
ResizeMask[Mask[I]] = Mask[I]; 12116 } 12117 Vec = CreateShuffle(Vec, nullptr, ResizeMask); 12118 } 12119 } 12120 12121 return std::make_pair(Vec, false); 12122 }; 12123 // Perform shuffling of the vectorize tree entries for better handling of 12124 // external extracts. 12125 for (int I = 0, E = ShuffledInserts.size(); I < E; ++I) { 12126 // Find the first and the last instruction in the list of insertelements. 12127 sort(ShuffledInserts[I].InsertElements, isFirstInsertElement); 12128 InsertElementInst *FirstInsert = ShuffledInserts[I].InsertElements.front(); 12129 InsertElementInst *LastInsert = ShuffledInserts[I].InsertElements.back(); 12130 Builder.SetInsertPoint(LastInsert); 12131 auto Vector = ShuffledInserts[I].ValueMasks.takeVector(); 12132 Value *NewInst = performExtractsShuffleAction<Value>( 12133 MutableArrayRef(Vector.data(), Vector.size()), 12134 FirstInsert->getOperand(0), 12135 [](Value *Vec) { 12136 return cast<VectorType>(Vec->getType()) 12137 ->getElementCount() 12138 .getKnownMinValue(); 12139 }, 12140 ResizeToVF, 12141 [FirstInsert, &CreateShuffle](ArrayRef<int> Mask, 12142 ArrayRef<Value *> Vals) { 12143 assert((Vals.size() == 1 || Vals.size() == 2) && 12144 "Expected exactly 1 or 2 input values."); 12145 if (Vals.size() == 1) { 12146 // Do not create shuffle if the mask is a simple identity 12147 // non-resizing mask. 12148 if (Mask.size() != cast<FixedVectorType>(Vals.front()->getType()) 12149 ->getNumElements() || 12150 !ShuffleVectorInst::isIdentityMask(Mask, Mask.size())) 12151 return CreateShuffle(Vals.front(), nullptr, Mask); 12152 return Vals.front(); 12153 } 12154 return CreateShuffle(Vals.front() ? Vals.front() 12155 : FirstInsert->getOperand(0), 12156 Vals.back(), Mask); 12157 }); 12158 auto It = ShuffledInserts[I].InsertElements.rbegin(); 12159 // Rebuild buildvector chain. 12160 InsertElementInst *II = nullptr; 12161 if (It != ShuffledInserts[I].InsertElements.rend()) 12162 II = *It; 12163 SmallVector<Instruction *> Inserts; 12164 while (It != ShuffledInserts[I].InsertElements.rend()) { 12165 assert(II && "Must be an insertelement instruction."); 12166 if (*It == II) 12167 ++It; 12168 else 12169 Inserts.push_back(cast<Instruction>(II)); 12170 II = dyn_cast<InsertElementInst>(II->getOperand(0)); 12171 } 12172 for (Instruction *II : reverse(Inserts)) { 12173 II->replaceUsesOfWith(II->getOperand(0), NewInst); 12174 if (auto *NewI = dyn_cast<Instruction>(NewInst)) 12175 if (II->getParent() == NewI->getParent() && II->comesBefore(NewI)) 12176 II->moveAfter(NewI); 12177 NewInst = II; 12178 } 12179 LastInsert->replaceAllUsesWith(NewInst); 12180 for (InsertElementInst *IE : reverse(ShuffledInserts[I].InsertElements)) { 12181 IE->replaceUsesOfWith(IE->getOperand(0), 12182 PoisonValue::get(IE->getOperand(0)->getType())); 12183 IE->replaceUsesOfWith(IE->getOperand(1), 12184 PoisonValue::get(IE->getOperand(1)->getType())); 12185 eraseInstruction(IE); 12186 } 12187 CSEBlocks.insert(LastInsert->getParent()); 12188 } 12189 12190 SmallVector<Instruction *> RemovedInsts; 12191 // For each vectorized value: 12192 for (auto &TEPtr : VectorizableTree) { 12193 TreeEntry *Entry = TEPtr.get(); 12194 12195 // No need to handle users of gathered values. 
12196 if (Entry->State == TreeEntry::NeedToGather) 12197 continue; 12198 12199 assert(Entry->VectorizedValue && "Can't find vectorizable value"); 12200 12201 // For each lane: 12202 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 12203 Value *Scalar = Entry->Scalars[Lane]; 12204 12205 if (Entry->getOpcode() == Instruction::GetElementPtr && 12206 !isa<GetElementPtrInst>(Scalar)) 12207 continue; 12208 #ifndef NDEBUG 12209 Type *Ty = Scalar->getType(); 12210 if (!Ty->isVoidTy()) { 12211 for (User *U : Scalar->users()) { 12212 LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); 12213 12214 // It is legal to delete users in the ignorelist. 12215 assert((getTreeEntry(U) || 12216 (UserIgnoreList && UserIgnoreList->contains(U)) || 12217 (isa_and_nonnull<Instruction>(U) && 12218 isDeleted(cast<Instruction>(U)))) && 12219 "Deleting out-of-tree value"); 12220 } 12221 } 12222 #endif 12223 LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); 12224 eraseInstruction(cast<Instruction>(Scalar)); 12225 // Retain to-be-deleted instructions for some debug-info 12226 // bookkeeping. NOTE: eraseInstruction only marks the instruction for 12227 // deletion - instructions are not deleted until later. 12228 RemovedInsts.push_back(cast<Instruction>(Scalar)); 12229 } 12230 } 12231 12232 // Merge the DIAssignIDs from the about-to-be-deleted instructions into the 12233 // new vector instruction. 12234 if (auto *V = dyn_cast<Instruction>(VectorizableTree[0]->VectorizedValue)) 12235 V->mergeDIAssignID(RemovedInsts); 12236 12237 Builder.ClearInsertionPoint(); 12238 InstrElementSize.clear(); 12239 12240 return VectorizableTree[0]->VectorizedValue; 12241 } 12242 12243 void BoUpSLP::optimizeGatherSequence() { 12244 LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherShuffleExtractSeq.size() 12245 << " gather sequences instructions.\n"); 12246 // LICM InsertElementInst sequences. 12247 for (Instruction *I : GatherShuffleExtractSeq) { 12248 if (isDeleted(I)) 12249 continue; 12250 12251 // Check if this block is inside a loop. 12252 Loop *L = LI->getLoopFor(I->getParent()); 12253 if (!L) 12254 continue; 12255 12256 // Check if it has a preheader. 12257 BasicBlock *PreHeader = L->getLoopPreheader(); 12258 if (!PreHeader) 12259 continue; 12260 12261 // If the vector or the element that we insert into it are 12262 // instructions that are defined in this basic block then we can't 12263 // hoist this instruction. 12264 if (any_of(I->operands(), [L](Value *V) { 12265 auto *OpI = dyn_cast<Instruction>(V); 12266 return OpI && L->contains(OpI); 12267 })) 12268 continue; 12269 12270 // We can hoist this instruction. Move it to the pre-header. 12271 I->moveBefore(PreHeader->getTerminator()); 12272 CSEBlocks.insert(PreHeader); 12273 } 12274 12275 // Make a list of all reachable blocks in our CSE queue. 12276 SmallVector<const DomTreeNode *, 8> CSEWorkList; 12277 CSEWorkList.reserve(CSEBlocks.size()); 12278 for (BasicBlock *BB : CSEBlocks) 12279 if (DomTreeNode *N = DT->getNode(BB)) { 12280 assert(DT->isReachableFromEntry(N)); 12281 CSEWorkList.push_back(N); 12282 } 12283 12284 // Sort blocks by domination. This ensures we visit a block after all blocks 12285 // dominating it are visited. 
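// (A dominator's DFS-in number is smaller than that of every block it
// dominates, so this order guarantees that, e.g., an extract hoisted into a
// loop preheader is already in the visited list when blocks inside the loop
// are scanned for duplicates.)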
12286 llvm::sort(CSEWorkList, [](const DomTreeNode *A, const DomTreeNode *B) { 12287 assert((A == B) == (A->getDFSNumIn() == B->getDFSNumIn()) && 12288 "Different nodes should have different DFS numbers"); 12289 return A->getDFSNumIn() < B->getDFSNumIn(); 12290 }); 12291 12292 // Less defined shuffles can be replaced by the more defined copies. 12293 // Between two shuffles one is less defined if it has the same vector operands 12294 // and its mask indeces are the same as in the first one or undefs. E.g. 12295 // shuffle %0, poison, <0, 0, 0, undef> is less defined than shuffle %0, 12296 // poison, <0, 0, 0, 0>. 12297 auto &&IsIdenticalOrLessDefined = [this](Instruction *I1, Instruction *I2, 12298 SmallVectorImpl<int> &NewMask) { 12299 if (I1->getType() != I2->getType()) 12300 return false; 12301 auto *SI1 = dyn_cast<ShuffleVectorInst>(I1); 12302 auto *SI2 = dyn_cast<ShuffleVectorInst>(I2); 12303 if (!SI1 || !SI2) 12304 return I1->isIdenticalTo(I2); 12305 if (SI1->isIdenticalTo(SI2)) 12306 return true; 12307 for (int I = 0, E = SI1->getNumOperands(); I < E; ++I) 12308 if (SI1->getOperand(I) != SI2->getOperand(I)) 12309 return false; 12310 // Check if the second instruction is more defined than the first one. 12311 NewMask.assign(SI2->getShuffleMask().begin(), SI2->getShuffleMask().end()); 12312 ArrayRef<int> SM1 = SI1->getShuffleMask(); 12313 // Count trailing undefs in the mask to check the final number of used 12314 // registers. 12315 unsigned LastUndefsCnt = 0; 12316 for (int I = 0, E = NewMask.size(); I < E; ++I) { 12317 if (SM1[I] == PoisonMaskElem) 12318 ++LastUndefsCnt; 12319 else 12320 LastUndefsCnt = 0; 12321 if (NewMask[I] != PoisonMaskElem && SM1[I] != PoisonMaskElem && 12322 NewMask[I] != SM1[I]) 12323 return false; 12324 if (NewMask[I] == PoisonMaskElem) 12325 NewMask[I] = SM1[I]; 12326 } 12327 // Check if the last undefs actually change the final number of used vector 12328 // registers. 12329 return SM1.size() - LastUndefsCnt > 1 && 12330 TTI->getNumberOfParts(SI1->getType()) == 12331 TTI->getNumberOfParts( 12332 FixedVectorType::get(SI1->getType()->getElementType(), 12333 SM1.size() - LastUndefsCnt)); 12334 }; 12335 // Perform O(N^2) search over the gather/shuffle sequences and merge identical 12336 // instructions. TODO: We can further optimize this scan if we split the 12337 // instructions into different buckets based on the insert lane. 12338 SmallVector<Instruction *, 16> Visited; 12339 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { 12340 assert(*I && 12341 (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && 12342 "Worklist not sorted properly!"); 12343 BasicBlock *BB = (*I)->getBlock(); 12344 // For all instructions in blocks containing gather sequences: 12345 for (Instruction &In : llvm::make_early_inc_range(*BB)) { 12346 if (isDeleted(&In)) 12347 continue; 12348 if (!isa<InsertElementInst, ExtractElementInst, ShuffleVectorInst>(&In) && 12349 !GatherShuffleExtractSeq.contains(&In)) 12350 continue; 12351 12352 // Check if we can replace this instruction with any of the 12353 // visited instructions. 
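// Hypothetical example: a gather emitted as
//   shuffle %v, poison, <0, 1, poison, poison>
// can be dropped in favor of an already visited, dominating and more defined
//   shuffle %v, poison, <0, 1, 2, 3>
// over the same operands: its uses are rewritten to the dominating copy, and
// the surviving mask may be refined with lanes defined only in the erased one.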
12354 bool Replaced = false; 12355 for (Instruction *&V : Visited) { 12356 SmallVector<int> NewMask; 12357 if (IsIdenticalOrLessDefined(&In, V, NewMask) && 12358 DT->dominates(V->getParent(), In.getParent())) { 12359 In.replaceAllUsesWith(V); 12360 eraseInstruction(&In); 12361 if (auto *SI = dyn_cast<ShuffleVectorInst>(V)) 12362 if (!NewMask.empty()) 12363 SI->setShuffleMask(NewMask); 12364 Replaced = true; 12365 break; 12366 } 12367 if (isa<ShuffleVectorInst>(In) && isa<ShuffleVectorInst>(V) && 12368 GatherShuffleExtractSeq.contains(V) && 12369 IsIdenticalOrLessDefined(V, &In, NewMask) && 12370 DT->dominates(In.getParent(), V->getParent())) { 12371 In.moveAfter(V); 12372 V->replaceAllUsesWith(&In); 12373 eraseInstruction(V); 12374 if (auto *SI = dyn_cast<ShuffleVectorInst>(&In)) 12375 if (!NewMask.empty()) 12376 SI->setShuffleMask(NewMask); 12377 V = &In; 12378 Replaced = true; 12379 break; 12380 } 12381 } 12382 if (!Replaced) { 12383 assert(!is_contained(Visited, &In)); 12384 Visited.push_back(&In); 12385 } 12386 } 12387 } 12388 CSEBlocks.clear(); 12389 GatherShuffleExtractSeq.clear(); 12390 } 12391 12392 BoUpSLP::ScheduleData * 12393 BoUpSLP::BlockScheduling::buildBundle(ArrayRef<Value *> VL) { 12394 ScheduleData *Bundle = nullptr; 12395 ScheduleData *PrevInBundle = nullptr; 12396 for (Value *V : VL) { 12397 if (doesNotNeedToBeScheduled(V)) 12398 continue; 12399 ScheduleData *BundleMember = getScheduleData(V); 12400 assert(BundleMember && 12401 "no ScheduleData for bundle member " 12402 "(maybe not in same basic block)"); 12403 assert(BundleMember->isSchedulingEntity() && 12404 "bundle member already part of other bundle"); 12405 if (PrevInBundle) { 12406 PrevInBundle->NextInBundle = BundleMember; 12407 } else { 12408 Bundle = BundleMember; 12409 } 12410 12411 // Group the instructions to a bundle. 12412 BundleMember->FirstInBundle = Bundle; 12413 PrevInBundle = BundleMember; 12414 } 12415 assert(Bundle && "Failed to find schedule bundle"); 12416 return Bundle; 12417 } 12418 12419 // Groups the instructions to a bundle (which is then a single scheduling entity) 12420 // and schedules instructions until the bundle gets ready. 12421 std::optional<BoUpSLP::ScheduleData *> 12422 BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 12423 const InstructionsState &S) { 12424 // No need to schedule PHIs, insertelement, extractelement and extractvalue 12425 // instructions. 12426 if (isa<PHINode>(S.OpValue) || isVectorLikeInstWithConstOps(S.OpValue) || 12427 doesNotNeedToSchedule(VL)) 12428 return nullptr; 12429 12430 // Initialize the instruction bundle. 12431 Instruction *OldScheduleEnd = ScheduleEnd; 12432 LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n"); 12433 12434 auto TryScheduleBundleImpl = [this, OldScheduleEnd, SLP](bool ReSchedule, 12435 ScheduleData *Bundle) { 12436 // The scheduling region got new instructions at the lower end (or it is a 12437 // new region for the first bundle). This makes it necessary to 12438 // recalculate all dependencies. 12439 // It is seldom that this needs to be done a second time after adding the 12440 // initial bundle to the region. 
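// (Dependencies are only computed for instructions inside the current region,
// so once the region has grown past the old ScheduleEnd the previously
// calculated dependency counts may be missing edges to the newly covered
// instructions; clearing and recomputing them is the conservative fix.)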
12441 if (ScheduleEnd != OldScheduleEnd) { 12442 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) 12443 doForAllOpcodes(I, [](ScheduleData *SD) { SD->clearDependencies(); }); 12444 ReSchedule = true; 12445 } 12446 if (Bundle) { 12447 LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle 12448 << " in block " << BB->getName() << "\n"); 12449 calculateDependencies(Bundle, /*InsertInReadyList=*/true, SLP); 12450 } 12451 12452 if (ReSchedule) { 12453 resetSchedule(); 12454 initialFillReadyList(ReadyInsts); 12455 } 12456 12457 // Now try to schedule the new bundle or (if no bundle) just calculate 12458 // dependencies. As soon as the bundle is "ready" it means that there are no 12459 // cyclic dependencies and we can schedule it. Note that's important that we 12460 // don't "schedule" the bundle yet (see cancelScheduling). 12461 while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) && 12462 !ReadyInsts.empty()) { 12463 ScheduleData *Picked = ReadyInsts.pop_back_val(); 12464 assert(Picked->isSchedulingEntity() && Picked->isReady() && 12465 "must be ready to schedule"); 12466 schedule(Picked, ReadyInsts); 12467 } 12468 }; 12469 12470 // Make sure that the scheduling region contains all 12471 // instructions of the bundle. 12472 for (Value *V : VL) { 12473 if (doesNotNeedToBeScheduled(V)) 12474 continue; 12475 if (!extendSchedulingRegion(V, S)) { 12476 // If the scheduling region got new instructions at the lower end (or it 12477 // is a new region for the first bundle). This makes it necessary to 12478 // recalculate all dependencies. 12479 // Otherwise the compiler may crash trying to incorrectly calculate 12480 // dependencies and emit instruction in the wrong order at the actual 12481 // scheduling. 12482 TryScheduleBundleImpl(/*ReSchedule=*/false, nullptr); 12483 return std::nullopt; 12484 } 12485 } 12486 12487 bool ReSchedule = false; 12488 for (Value *V : VL) { 12489 if (doesNotNeedToBeScheduled(V)) 12490 continue; 12491 ScheduleData *BundleMember = getScheduleData(V); 12492 assert(BundleMember && 12493 "no ScheduleData for bundle member (maybe not in same basic block)"); 12494 12495 // Make sure we don't leave the pieces of the bundle in the ready list when 12496 // whole bundle might not be ready. 12497 ReadyInsts.remove(BundleMember); 12498 12499 if (!BundleMember->IsScheduled) 12500 continue; 12501 // A bundle member was scheduled as single instruction before and now 12502 // needs to be scheduled as part of the bundle. We just get rid of the 12503 // existing schedule. 
12504 LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember 12505 << " was already scheduled\n"); 12506 ReSchedule = true; 12507 } 12508 12509 auto *Bundle = buildBundle(VL); 12510 TryScheduleBundleImpl(ReSchedule, Bundle); 12511 if (!Bundle->isReady()) { 12512 cancelScheduling(VL, S.OpValue); 12513 return std::nullopt; 12514 } 12515 return Bundle; 12516 } 12517 12518 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL, 12519 Value *OpValue) { 12520 if (isa<PHINode>(OpValue) || isVectorLikeInstWithConstOps(OpValue) || 12521 doesNotNeedToSchedule(VL)) 12522 return; 12523 12524 if (doesNotNeedToBeScheduled(OpValue)) 12525 OpValue = *find_if_not(VL, doesNotNeedToBeScheduled); 12526 ScheduleData *Bundle = getScheduleData(OpValue); 12527 LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n"); 12528 assert(!Bundle->IsScheduled && 12529 "Can't cancel bundle which is already scheduled"); 12530 assert(Bundle->isSchedulingEntity() && 12531 (Bundle->isPartOfBundle() || needToScheduleSingleInstruction(VL)) && 12532 "tried to unbundle something which is not a bundle"); 12533 12534 // Remove the bundle from the ready list. 12535 if (Bundle->isReady()) 12536 ReadyInsts.remove(Bundle); 12537 12538 // Un-bundle: make single instructions out of the bundle. 12539 ScheduleData *BundleMember = Bundle; 12540 while (BundleMember) { 12541 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links"); 12542 BundleMember->FirstInBundle = BundleMember; 12543 ScheduleData *Next = BundleMember->NextInBundle; 12544 BundleMember->NextInBundle = nullptr; 12545 BundleMember->TE = nullptr; 12546 if (BundleMember->unscheduledDepsInBundle() == 0) { 12547 ReadyInsts.insert(BundleMember); 12548 } 12549 BundleMember = Next; 12550 } 12551 } 12552 12553 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() { 12554 // Allocate a new ScheduleData for the instruction. 12555 if (ChunkPos >= ChunkSize) { 12556 ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize)); 12557 ChunkPos = 0; 12558 } 12559 return &(ScheduleDataChunks.back()[ChunkPos++]); 12560 } 12561 12562 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V, 12563 const InstructionsState &S) { 12564 if (getScheduleData(V, isOneOf(S, V))) 12565 return true; 12566 Instruction *I = dyn_cast<Instruction>(V); 12567 assert(I && "bundle member must be an instruction"); 12568 assert(!isa<PHINode>(I) && !isVectorLikeInstWithConstOps(I) && 12569 !doesNotNeedToBeScheduled(I) && 12570 "phi nodes/insertelements/extractelements/extractvalues don't need to " 12571 "be scheduled"); 12572 auto &&CheckScheduleForI = [this, &S](Instruction *I) -> bool { 12573 ScheduleData *ISD = getScheduleData(I); 12574 if (!ISD) 12575 return false; 12576 assert(isInSchedulingRegion(ISD) && 12577 "ScheduleData not in scheduling region"); 12578 ScheduleData *SD = allocateScheduleDataChunks(); 12579 SD->Inst = I; 12580 SD->init(SchedulingRegionID, S.OpValue); 12581 ExtraScheduleDataMap[I][S.OpValue] = SD; 12582 return true; 12583 }; 12584 if (CheckScheduleForI(I)) 12585 return true; 12586 if (!ScheduleStart) { 12587 // It's the first instruction in the new region. 
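// Sketch of the invariant maintained here and below: the scheduling region is
// the half-open range [ScheduleStart, ScheduleEnd) within BB. It is seeded
// with the single instruction I (ScheduleEnd = I->getNextNode()) and later
// grown upwards or downwards one instruction at a time until the new bundle
// member is reached or ScheduleRegionSizeLimit is exceeded.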
12588 initScheduleData(I, I->getNextNode(), nullptr, nullptr); 12589 ScheduleStart = I; 12590 ScheduleEnd = I->getNextNode(); 12591 if (isOneOf(S, I) != I) 12592 CheckScheduleForI(I); 12593 assert(ScheduleEnd && "tried to vectorize a terminator?"); 12594 LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); 12595 return true; 12596 } 12597 // Search up and down at the same time, because we don't know if the new 12598 // instruction is above or below the existing scheduling region. 12599 // Ignore debug info (and other "AssumeLike" intrinsics) so that's not counted 12600 // against the budget. Otherwise debug info could affect codegen. 12601 BasicBlock::reverse_iterator UpIter = 12602 ++ScheduleStart->getIterator().getReverse(); 12603 BasicBlock::reverse_iterator UpperEnd = BB->rend(); 12604 BasicBlock::iterator DownIter = ScheduleEnd->getIterator(); 12605 BasicBlock::iterator LowerEnd = BB->end(); 12606 auto IsAssumeLikeIntr = [](const Instruction &I) { 12607 if (auto *II = dyn_cast<IntrinsicInst>(&I)) 12608 return II->isAssumeLikeIntrinsic(); 12609 return false; 12610 }; 12611 UpIter = std::find_if_not(UpIter, UpperEnd, IsAssumeLikeIntr); 12612 DownIter = std::find_if_not(DownIter, LowerEnd, IsAssumeLikeIntr); 12613 while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I && 12614 &*DownIter != I) { 12615 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) { 12616 LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); 12617 return false; 12618 } 12619 12620 ++UpIter; 12621 ++DownIter; 12622 12623 UpIter = std::find_if_not(UpIter, UpperEnd, IsAssumeLikeIntr); 12624 DownIter = std::find_if_not(DownIter, LowerEnd, IsAssumeLikeIntr); 12625 } 12626 if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) { 12627 assert(I->getParent() == ScheduleStart->getParent() && 12628 "Instruction is in wrong basic block."); 12629 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); 12630 ScheduleStart = I; 12631 if (isOneOf(S, I) != I) 12632 CheckScheduleForI(I); 12633 LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I 12634 << "\n"); 12635 return true; 12636 } 12637 assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) && 12638 "Expected to reach top of the basic block or instruction down the " 12639 "lower end."); 12640 assert(I->getParent() == ScheduleEnd->getParent() && 12641 "Instruction is in wrong basic block."); 12642 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, 12643 nullptr); 12644 ScheduleEnd = I->getNextNode(); 12645 if (isOneOf(S, I) != I) 12646 CheckScheduleForI(I); 12647 assert(ScheduleEnd && "tried to vectorize a terminator?"); 12648 LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n"); 12649 return true; 12650 } 12651 12652 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, 12653 Instruction *ToI, 12654 ScheduleData *PrevLoadStore, 12655 ScheduleData *NextLoadStore) { 12656 ScheduleData *CurrentLoadStore = PrevLoadStore; 12657 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { 12658 // No need to allocate data for non-schedulable instructions. 
12659 if (doesNotNeedToBeScheduled(I)) 12660 continue; 12661 ScheduleData *SD = ScheduleDataMap.lookup(I); 12662 if (!SD) { 12663 SD = allocateScheduleDataChunks(); 12664 ScheduleDataMap[I] = SD; 12665 SD->Inst = I; 12666 } 12667 assert(!isInSchedulingRegion(SD) && 12668 "new ScheduleData already in scheduling region"); 12669 SD->init(SchedulingRegionID, I); 12670 12671 if (I->mayReadOrWriteMemory() && 12672 (!isa<IntrinsicInst>(I) || 12673 (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect && 12674 cast<IntrinsicInst>(I)->getIntrinsicID() != 12675 Intrinsic::pseudoprobe))) { 12676 // Update the linked list of memory accessing instructions. 12677 if (CurrentLoadStore) { 12678 CurrentLoadStore->NextLoadStore = SD; 12679 } else { 12680 FirstLoadStoreInRegion = SD; 12681 } 12682 CurrentLoadStore = SD; 12683 } 12684 12685 if (match(I, m_Intrinsic<Intrinsic::stacksave>()) || 12686 match(I, m_Intrinsic<Intrinsic::stackrestore>())) 12687 RegionHasStackSave = true; 12688 } 12689 if (NextLoadStore) { 12690 if (CurrentLoadStore) 12691 CurrentLoadStore->NextLoadStore = NextLoadStore; 12692 } else { 12693 LastLoadStoreInRegion = CurrentLoadStore; 12694 } 12695 } 12696 12697 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD, 12698 bool InsertInReadyList, 12699 BoUpSLP *SLP) { 12700 assert(SD->isSchedulingEntity()); 12701 12702 SmallVector<ScheduleData *, 10> WorkList; 12703 WorkList.push_back(SD); 12704 12705 while (!WorkList.empty()) { 12706 ScheduleData *SD = WorkList.pop_back_val(); 12707 for (ScheduleData *BundleMember = SD; BundleMember; 12708 BundleMember = BundleMember->NextInBundle) { 12709 assert(isInSchedulingRegion(BundleMember)); 12710 if (BundleMember->hasValidDependencies()) 12711 continue; 12712 12713 LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember 12714 << "\n"); 12715 BundleMember->Dependencies = 0; 12716 BundleMember->resetUnscheduledDeps(); 12717 12718 // Handle def-use chain dependencies. 12719 if (BundleMember->OpValue != BundleMember->Inst) { 12720 if (ScheduleData *UseSD = getScheduleData(BundleMember->Inst)) { 12721 BundleMember->Dependencies++; 12722 ScheduleData *DestBundle = UseSD->FirstInBundle; 12723 if (!DestBundle->IsScheduled) 12724 BundleMember->incrementUnscheduledDeps(1); 12725 if (!DestBundle->hasValidDependencies()) 12726 WorkList.push_back(DestBundle); 12727 } 12728 } else { 12729 for (User *U : BundleMember->Inst->users()) { 12730 if (ScheduleData *UseSD = getScheduleData(cast<Instruction>(U))) { 12731 BundleMember->Dependencies++; 12732 ScheduleData *DestBundle = UseSD->FirstInBundle; 12733 if (!DestBundle->IsScheduled) 12734 BundleMember->incrementUnscheduledDeps(1); 12735 if (!DestBundle->hasValidDependencies()) 12736 WorkList.push_back(DestBundle); 12737 } 12738 } 12739 } 12740 12741 auto MakeControlDependent = [&](Instruction *I) { 12742 auto *DepDest = getScheduleData(I); 12743 assert(DepDest && "must be in schedule window"); 12744 DepDest->ControlDependencies.push_back(BundleMember); 12745 BundleMember->Dependencies++; 12746 ScheduleData *DestBundle = DepDest->FirstInBundle; 12747 if (!DestBundle->IsScheduled) 12748 BundleMember->incrementUnscheduledDeps(1); 12749 if (!DestBundle->hasValidDependencies()) 12750 WorkList.push_back(DestBundle); 12751 }; 12752 12753 // Any instruction which isn't safe to speculate at the beginning of the 12754 // block is control dependend on any early exit or non-willreturn call 12755 // which proceeds it. 
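// Hypothetical example: given
//   call void @may_throw()
//   store i32 %x, ptr %p
// the store is not safe to speculate and therefore must stay below the call;
// the loop below records that constraint as a control dependence so the
// scheduler cannot reorder the two.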
12756 if (!isGuaranteedToTransferExecutionToSuccessor(BundleMember->Inst)) {
12757 for (Instruction *I = BundleMember->Inst->getNextNode();
12758 I != ScheduleEnd; I = I->getNextNode()) {
12759 if (isSafeToSpeculativelyExecute(I, &*BB->begin(), SLP->AC))
12760 continue;
12761
12762 // Add the dependency
12763 MakeControlDependent(I);
12764
12765 if (!isGuaranteedToTransferExecutionToSuccessor(I))
12766 // Everything past here must be control dependent on I.
12767 break;
12768 }
12769 }
12770
12771 if (RegionHasStackSave) {
12772 // If we have an inalloca alloca instruction, it needs to be scheduled
12773 // after any preceding stacksave. We also need to prevent any alloca
12774 // from reordering above a preceding stackrestore.
12775 if (match(BundleMember->Inst, m_Intrinsic<Intrinsic::stacksave>()) ||
12776 match(BundleMember->Inst, m_Intrinsic<Intrinsic::stackrestore>())) {
12777 for (Instruction *I = BundleMember->Inst->getNextNode();
12778 I != ScheduleEnd; I = I->getNextNode()) {
12779 if (match(I, m_Intrinsic<Intrinsic::stacksave>()) ||
12780 match(I, m_Intrinsic<Intrinsic::stackrestore>()))
12781 // Any allocas past here must be control dependent on I, and I
12782 // must be memory dependent on BundleMember->Inst.
12783 break;
12784
12785 if (!isa<AllocaInst>(I))
12786 continue;
12787
12788 // Add the dependency
12789 MakeControlDependent(I);
12790 }
12791 }
12792
12793 // In addition to the cases handled just above, we need to prevent
12794 // allocas and loads/stores from moving below a stacksave or a
12795 // stackrestore. Keeping allocas from moving below a stackrestore is
12796 // currently believed to be merely conservative. Moving loads/stores
12797 // below a stackrestore can lead to incorrect code.
12798 if (isa<AllocaInst>(BundleMember->Inst) ||
12799 BundleMember->Inst->mayReadOrWriteMemory()) {
12800 for (Instruction *I = BundleMember->Inst->getNextNode();
12801 I != ScheduleEnd; I = I->getNextNode()) {
12802 if (!match(I, m_Intrinsic<Intrinsic::stacksave>()) &&
12803 !match(I, m_Intrinsic<Intrinsic::stackrestore>()))
12804 continue;
12805
12806 // Add the dependency
12807 MakeControlDependent(I);
12808 break;
12809 }
12810 }
12811 }
12812
12813 // Handle the memory dependencies (if any).
12814 ScheduleData *DepDest = BundleMember->NextLoadStore;
12815 if (!DepDest)
12816 continue;
12817 Instruction *SrcInst = BundleMember->Inst;
12818 assert(SrcInst->mayReadOrWriteMemory() &&
12819 "NextLoadStore list for non-memory-affecting bundle?");
12820 MemoryLocation SrcLoc = getLocation(SrcInst);
12821 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
12822 unsigned NumAliased = 0;
12823 unsigned DistToSrc = 1;
12824
12825 for (; DepDest; DepDest = DepDest->NextLoadStore) {
12826 assert(isInSchedulingRegion(DepDest));
12827
12828 // We have two limits to reduce the complexity:
12829 // 1) AliasedCheckLimit: It's a small limit to reduce calls to
12830 // SLP->isAliased (which is the expensive part in this loop).
12831 // 2) MaxMemDepDistance: It's for very large blocks and it aborts
12832 // the whole loop (even if the loop is fast, it's quadratic).
12833 // It's important for the loop break condition (see below) to
12834 // check this limit even between two read-only instructions.
12835 if (DistToSrc >= MaxMemDepDistance || 12836 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) && 12837 (NumAliased >= AliasedCheckLimit || 12838 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) { 12839 12840 // We increment the counter only if the locations are aliased 12841 // (instead of counting all alias checks). This gives a better 12842 // balance between reduced runtime and accurate dependencies. 12843 NumAliased++; 12844 12845 DepDest->MemoryDependencies.push_back(BundleMember); 12846 BundleMember->Dependencies++; 12847 ScheduleData *DestBundle = DepDest->FirstInBundle; 12848 if (!DestBundle->IsScheduled) { 12849 BundleMember->incrementUnscheduledDeps(1); 12850 } 12851 if (!DestBundle->hasValidDependencies()) { 12852 WorkList.push_back(DestBundle); 12853 } 12854 } 12855 12856 // Example, explaining the loop break condition: Let's assume our 12857 // starting instruction is i0 and MaxMemDepDistance = 3. 12858 // 12859 // +--------v--v--v 12860 // i0,i1,i2,i3,i4,i5,i6,i7,i8 12861 // +--------^--^--^ 12862 // 12863 // MaxMemDepDistance let us stop alias-checking at i3 and we add 12864 // dependencies from i0 to i3,i4,.. (even if they are not aliased). 12865 // Previously we already added dependencies from i3 to i6,i7,i8 12866 // (because of MaxMemDepDistance). As we added a dependency from 12867 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8 12868 // and we can abort this loop at i6. 12869 if (DistToSrc >= 2 * MaxMemDepDistance) 12870 break; 12871 DistToSrc++; 12872 } 12873 } 12874 if (InsertInReadyList && SD->isReady()) { 12875 ReadyInsts.insert(SD); 12876 LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst 12877 << "\n"); 12878 } 12879 } 12880 } 12881 12882 void BoUpSLP::BlockScheduling::resetSchedule() { 12883 assert(ScheduleStart && 12884 "tried to reset schedule on block which has not been scheduled"); 12885 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 12886 doForAllOpcodes(I, [&](ScheduleData *SD) { 12887 assert(isInSchedulingRegion(SD) && 12888 "ScheduleData not in scheduling region"); 12889 SD->IsScheduled = false; 12890 SD->resetUnscheduledDeps(); 12891 }); 12892 } 12893 ReadyInsts.clear(); 12894 } 12895 12896 void BoUpSLP::scheduleBlock(BlockScheduling *BS) { 12897 if (!BS->ScheduleStart) 12898 return; 12899 12900 LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n"); 12901 12902 // A key point - if we got here, pre-scheduling was able to find a valid 12903 // scheduling of the sub-graph of the scheduling window which consists 12904 // of all vector bundles and their transitive users. As such, we do not 12905 // need to reschedule anything *outside of* that subgraph. 12906 12907 BS->resetSchedule(); 12908 12909 // For the real scheduling we use a more sophisticated ready-list: it is 12910 // sorted by the original instruction location. This lets the final schedule 12911 // be as close as possible to the original instruction order. 12912 // WARNING: If changing this order causes a correctness issue, that means 12913 // there is some missing dependence edge in the schedule data graph. 12914 struct ScheduleDataCompare { 12915 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const { 12916 return SD2->SchedulingPriority < SD1->SchedulingPriority; 12917 } 12918 }; 12919 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts; 12920 12921 // Ensure that all dependency data is updated (for nodes in the sub-graph) 12922 // and fill the ready-list with initial instructions. 
12923 int Idx = 0; 12924 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; 12925 I = I->getNextNode()) { 12926 BS->doForAllOpcodes(I, [this, &Idx, BS](ScheduleData *SD) { 12927 TreeEntry *SDTE = getTreeEntry(SD->Inst); 12928 (void)SDTE; 12929 assert((isVectorLikeInstWithConstOps(SD->Inst) || 12930 SD->isPartOfBundle() == 12931 (SDTE && !doesNotNeedToSchedule(SDTE->Scalars))) && 12932 "scheduler and vectorizer bundle mismatch"); 12933 SD->FirstInBundle->SchedulingPriority = Idx++; 12934 12935 if (SD->isSchedulingEntity() && SD->isPartOfBundle()) 12936 BS->calculateDependencies(SD, false, this); 12937 }); 12938 } 12939 BS->initialFillReadyList(ReadyInsts); 12940 12941 Instruction *LastScheduledInst = BS->ScheduleEnd; 12942 12943 // Do the "real" scheduling. 12944 while (!ReadyInsts.empty()) { 12945 ScheduleData *Picked = *ReadyInsts.begin(); 12946 ReadyInsts.erase(ReadyInsts.begin()); 12947 12948 // Move the scheduled instruction(s) to their dedicated places, if not 12949 // there yet. 12950 for (ScheduleData *BundleMember = Picked; BundleMember; 12951 BundleMember = BundleMember->NextInBundle) { 12952 Instruction *PickedInst = BundleMember->Inst; 12953 if (PickedInst->getNextNode() != LastScheduledInst) 12954 PickedInst->moveBefore(LastScheduledInst); 12955 LastScheduledInst = PickedInst; 12956 } 12957 12958 BS->schedule(Picked, ReadyInsts); 12959 } 12960 12961 // Check that we didn't break any of our invariants. 12962 #ifdef EXPENSIVE_CHECKS 12963 BS->verify(); 12964 #endif 12965 12966 #if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS) 12967 // Check that all schedulable entities got scheduled 12968 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; I = I->getNextNode()) { 12969 BS->doForAllOpcodes(I, [&](ScheduleData *SD) { 12970 if (SD->isSchedulingEntity() && SD->hasValidDependencies()) { 12971 assert(SD->IsScheduled && "must be scheduled at this point"); 12972 } 12973 }); 12974 } 12975 #endif 12976 12977 // Avoid duplicate scheduling of the block. 12978 BS->ScheduleStart = nullptr; 12979 } 12980 12981 unsigned BoUpSLP::getVectorElementSize(Value *V) { 12982 // If V is a store, just return the width of the stored value (or value 12983 // truncated just before storing) without traversing the expression tree. 12984 // This is the common case. 12985 if (auto *Store = dyn_cast<StoreInst>(V)) 12986 return DL->getTypeSizeInBits(Store->getValueOperand()->getType()); 12987 12988 if (auto *IEI = dyn_cast<InsertElementInst>(V)) 12989 return getVectorElementSize(IEI->getOperand(1)); 12990 12991 auto E = InstrElementSize.find(V); 12992 if (E != InstrElementSize.end()) 12993 return E->second; 12994 12995 // If V is not a store, we can traverse the expression tree to find loads 12996 // that feed it. The type of the loaded value may indicate a more suitable 12997 // width than V's type. We want to base the vector element size on the width 12998 // of memory operations where possible. 12999 SmallVector<std::pair<Instruction *, BasicBlock *>, 16> Worklist; 13000 SmallPtrSet<Instruction *, 16> Visited; 13001 if (auto *I = dyn_cast<Instruction>(V)) { 13002 Worklist.emplace_back(I, I->getParent()); 13003 Visited.insert(I); 13004 } 13005 13006 // Traverse the expression tree in bottom-up order looking for loads. If we 13007 // encounter an instruction we don't yet handle, we give up. 
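// For illustration (hypothetical IR, all in one block): for
//   %a = load i8, ptr %p
//   %w = zext i8 %a to i32
//   %r = add i32 %w, %c
// querying %r walks through the add and the zext down to the i8 load, so the
// element size is reported as 8 bits rather than the 32 bits of %r's type.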
13008 auto Width = 0u; 13009 while (!Worklist.empty()) { 13010 Instruction *I; 13011 BasicBlock *Parent; 13012 std::tie(I, Parent) = Worklist.pop_back_val(); 13013 13014 // We should only be looking at scalar instructions here. If the current 13015 // instruction has a vector type, skip. 13016 auto *Ty = I->getType(); 13017 if (isa<VectorType>(Ty)) 13018 continue; 13019 13020 // If the current instruction is a load, update MaxWidth to reflect the 13021 // width of the loaded value. 13022 if (isa<LoadInst, ExtractElementInst, ExtractValueInst>(I)) 13023 Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty)); 13024 13025 // Otherwise, we need to visit the operands of the instruction. We only 13026 // handle the interesting cases from buildTree here. If an operand is an 13027 // instruction we haven't yet visited and from the same basic block as the 13028 // user or the use is a PHI node, we add it to the worklist. 13029 else if (isa<PHINode, CastInst, GetElementPtrInst, CmpInst, SelectInst, 13030 BinaryOperator, UnaryOperator>(I)) { 13031 for (Use &U : I->operands()) 13032 if (auto *J = dyn_cast<Instruction>(U.get())) 13033 if (Visited.insert(J).second && 13034 (isa<PHINode>(I) || J->getParent() == Parent)) 13035 Worklist.emplace_back(J, J->getParent()); 13036 } else { 13037 break; 13038 } 13039 } 13040 13041 // If we didn't encounter a memory access in the expression tree, or if we 13042 // gave up for some reason, just return the width of V. Otherwise, return the 13043 // maximum width we found. 13044 if (!Width) { 13045 if (auto *CI = dyn_cast<CmpInst>(V)) 13046 V = CI->getOperand(0); 13047 Width = DL->getTypeSizeInBits(V->getType()); 13048 } 13049 13050 for (Instruction *I : Visited) 13051 InstrElementSize[I] = Width; 13052 13053 return Width; 13054 } 13055 13056 // Determine if a value V in a vectorizable expression Expr can be demoted to a 13057 // smaller type with a truncation. We collect the values that will be demoted 13058 // in ToDemote and additional roots that require investigating in Roots. 13059 bool BoUpSLP::collectValuesToDemote( 13060 Value *V, SmallVectorImpl<Value *> &ToDemote, 13061 DenseMap<Instruction *, SmallVector<unsigned>> &DemotedConsts, 13062 SmallVectorImpl<Value *> &Roots, DenseSet<Value *> &Visited) const { 13063 // We can always demote constants. 13064 if (isa<Constant>(V)) 13065 return true; 13066 13067 // If the value is not a vectorized instruction in the expression with only 13068 // one use, it cannot be demoted. 13069 auto *I = dyn_cast<Instruction>(V); 13070 if (!I || !I->hasOneUse() || !getTreeEntry(I) || !Visited.insert(I).second) 13071 return false; 13072 13073 unsigned Start = 0; 13074 unsigned End = I->getNumOperands(); 13075 switch (I->getOpcode()) { 13076 13077 // We can always demote truncations and extensions. Since truncations can 13078 // seed additional demotion, we save the truncated value. 13079 case Instruction::Trunc: 13080 Roots.push_back(I->getOperand(0)); 13081 break; 13082 case Instruction::ZExt: 13083 case Instruction::SExt: 13084 if (isa<ExtractElementInst, InsertElementInst>(I->getOperand(0))) 13085 return false; 13086 break; 13087 13088 // We can demote certain binary operations if we can demote both of their 13089 // operands. 
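// Hypothetical example:
//   %a32 = zext i8 %a to i32
//   %b32 = zext i8 %b to i32
//   %s   = add i32 %a32, %b32
// The add can be demoted because both operands are extensions from i8, which
// are themselves demotable, so the whole computation can be carried out in a
// narrower type and only extended where it is used outside the tree.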
13090 case Instruction::Add: 13091 case Instruction::Sub: 13092 case Instruction::Mul: 13093 case Instruction::And: 13094 case Instruction::Or: 13095 case Instruction::Xor: 13096 if (!collectValuesToDemote(I->getOperand(0), ToDemote, DemotedConsts, Roots, 13097 Visited) || 13098 !collectValuesToDemote(I->getOperand(1), ToDemote, DemotedConsts, Roots, 13099 Visited)) 13100 return false; 13101 break; 13102 13103 // We can demote selects if we can demote their true and false values. 13104 case Instruction::Select: { 13105 Start = 1; 13106 SelectInst *SI = cast<SelectInst>(I); 13107 if (!collectValuesToDemote(SI->getTrueValue(), ToDemote, DemotedConsts, 13108 Roots, Visited) || 13109 !collectValuesToDemote(SI->getFalseValue(), ToDemote, DemotedConsts, 13110 Roots, Visited)) 13111 return false; 13112 break; 13113 } 13114 13115 // We can demote phis if we can demote all their incoming operands. Note that 13116 // we don't need to worry about cycles since we ensure single use above. 13117 case Instruction::PHI: { 13118 PHINode *PN = cast<PHINode>(I); 13119 for (Value *IncValue : PN->incoming_values()) 13120 if (!collectValuesToDemote(IncValue, ToDemote, DemotedConsts, Roots, 13121 Visited)) 13122 return false; 13123 break; 13124 } 13125 13126 // Otherwise, conservatively give up. 13127 default: 13128 return false; 13129 } 13130 13131 // Gather demoted constant operands. 13132 for (unsigned Idx : seq<unsigned>(Start, End)) 13133 if (isa<Constant>(I->getOperand(Idx))) 13134 DemotedConsts.try_emplace(I).first->getSecond().push_back(Idx); 13135 // Record the value that we can demote. 13136 ToDemote.push_back(V); 13137 return true; 13138 } 13139 13140 void BoUpSLP::computeMinimumValueSizes() { 13141 // If there are no external uses, the expression tree must be rooted by a 13142 // store. We can't demote in-memory values, so there is nothing to do here. 13143 if (ExternalUses.empty()) 13144 return; 13145 13146 // We only attempt to truncate integer expressions. 13147 auto &TreeRoot = VectorizableTree[0]->Scalars; 13148 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType()); 13149 if (!TreeRootIT) 13150 return; 13151 13152 // Ensure the roots of the vectorizable tree don't form a cycle. 13153 if (!VectorizableTree.front()->UserTreeIndices.empty()) 13154 return; 13155 13156 // Conservatively determine if we can actually truncate the roots of the 13157 // expression. Collect the values that can be demoted in ToDemote and 13158 // additional roots that require investigating in Roots. 13159 SmallVector<Value *, 32> ToDemote; 13160 DenseMap<Instruction *, SmallVector<unsigned>> DemotedConsts; 13161 SmallVector<Value *, 4> Roots; 13162 for (auto *Root : TreeRoot) { 13163 DenseSet<Value *> Visited; 13164 if (!collectValuesToDemote(Root, ToDemote, DemotedConsts, Roots, Visited)) 13165 return; 13166 } 13167 13168 // The maximum bit width required to represent all the values that can be 13169 // demoted without loss of precision. It would be safe to truncate the roots 13170 // of the expression to this width. 13171 auto MaxBitWidth = 1u; 13172 13173 // We first check if all the bits of the roots are demanded. If they're not, 13174 // we can truncate the roots to this narrower type. 13175 for (auto *Root : TreeRoot) { 13176 auto Mask = DB->getDemandedBits(cast<Instruction>(Root)); 13177 MaxBitWidth = std::max<unsigned>(Mask.getBitWidth() - Mask.countl_zero(), 13178 MaxBitWidth); 13179 } 13180 13181 // True if the roots can be zero-extended back to their original type, rather 13182 // than sign-extended. 
We know that if the leading bits are not demanded, we 13183 // can safely zero-extend. So we initialize IsKnownPositive to True. 13184 bool IsKnownPositive = true; 13185 13186 // If all the bits of the roots are demanded, we can try a little harder to 13187 // compute a narrower type. This can happen, for example, if the roots are 13188 // getelementptr indices. InstCombine promotes these indices to the pointer 13189 // width. Thus, all their bits are technically demanded even though the 13190 // address computation might be vectorized in a smaller type. 13191 // 13192 // We start by looking at each entry that can be demoted. We compute the 13193 // maximum bit width required to store the scalar by using ValueTracking to 13194 // compute the number of high-order bits we can truncate. 13195 if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) && 13196 all_of(TreeRoot, [](Value *V) { 13197 return all_of(V->users(), 13198 [](User *U) { return isa<GetElementPtrInst>(U); }); 13199 })) { 13200 MaxBitWidth = 8u; 13201 13202 // Determine if the sign bit of all the roots is known to be zero. If not, 13203 // IsKnownPositive is set to False. 13204 IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) { 13205 KnownBits Known = computeKnownBits(R, *DL); 13206 return Known.isNonNegative(); 13207 }); 13208 13209 // Determine the maximum number of bits required to store the scalar 13210 // values. 13211 for (auto *Scalar : ToDemote) { 13212 auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT); 13213 auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType()); 13214 MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth); 13215 } 13216 13217 // If we can't prove that the sign bit is zero, we must add one to the 13218 // maximum bit width to account for the unknown sign bit. This preserves 13219 // the existing sign bit so we can safely sign-extend the root back to the 13220 // original type. Otherwise, if we know the sign bit is zero, we will 13221 // zero-extend the root instead. 13222 // 13223 // FIXME: This is somewhat suboptimal, as there will be cases where adding 13224 // one to the maximum bit width will yield a larger-than-necessary 13225 // type. In general, we need to add an extra bit only if we can't 13226 // prove that the upper bit of the original type is equal to the 13227 // upper bit of the proposed smaller type. If these two bits are the 13228 // same (either zero or one) we know that sign-extending from the 13229 // smaller type will result in the same value. Here, since we can't 13230 // yet prove this, we are just making the proposed smaller type 13231 // larger to ensure correctness. 13232 if (!IsKnownPositive) 13233 ++MaxBitWidth; 13234 } 13235 13236 // Round MaxBitWidth up to the next power-of-two. 13237 MaxBitWidth = llvm::bit_ceil(MaxBitWidth); 13238 13239 // If the maximum bit width we compute is less than the with of the roots' 13240 // type, we can proceed with the narrowing. Otherwise, do nothing. 13241 if (MaxBitWidth >= TreeRootIT->getBitWidth()) 13242 return; 13243 13244 // If we can truncate the root, we must collect additional values that might 13245 // be demoted as a result. That is, those seeded by truncations we will 13246 // modify. 13247 while (!Roots.empty()) { 13248 DenseSet<Value *> Visited; 13249 collectValuesToDemote(Roots.pop_back_val(), ToDemote, DemotedConsts, Roots, 13250 Visited); 13251 } 13252 13253 // Finally, map the values we can demote to the maximum bit with we computed. 
13254 for (auto *Scalar : ToDemote) { 13255 auto *TE = getTreeEntry(Scalar); 13256 assert(TE && "Expected vectorized scalar."); 13257 if (MinBWs.contains(TE)) 13258 continue; 13259 bool IsSigned = any_of(TE->Scalars, [&](Value *R) { 13260 KnownBits Known = computeKnownBits(R, *DL); 13261 return !Known.isNonNegative(); 13262 }); 13263 MinBWs.try_emplace(TE, MaxBitWidth, IsSigned); 13264 const auto *I = cast<Instruction>(Scalar); 13265 auto DCIt = DemotedConsts.find(I); 13266 if (DCIt != DemotedConsts.end()) { 13267 for (unsigned Idx : DCIt->getSecond()) { 13268 // Check that all instructions operands are demoted. 13269 if (all_of(TE->Scalars, [&](Value *V) { 13270 auto SIt = DemotedConsts.find(cast<Instruction>(V)); 13271 return SIt != DemotedConsts.end() && 13272 is_contained(SIt->getSecond(), Idx); 13273 })) { 13274 const TreeEntry *CTE = getOperandEntry(TE, Idx); 13275 MinBWs.try_emplace(CTE, MaxBitWidth, IsSigned); 13276 } 13277 } 13278 } 13279 } 13280 } 13281 13282 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) { 13283 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F); 13284 auto *TTI = &AM.getResult<TargetIRAnalysis>(F); 13285 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F); 13286 auto *AA = &AM.getResult<AAManager>(F); 13287 auto *LI = &AM.getResult<LoopAnalysis>(F); 13288 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); 13289 auto *AC = &AM.getResult<AssumptionAnalysis>(F); 13290 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F); 13291 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 13292 13293 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 13294 if (!Changed) 13295 return PreservedAnalyses::all(); 13296 13297 PreservedAnalyses PA; 13298 PA.preserveSet<CFGAnalyses>(); 13299 return PA; 13300 } 13301 13302 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_, 13303 TargetTransformInfo *TTI_, 13304 TargetLibraryInfo *TLI_, AAResults *AA_, 13305 LoopInfo *LI_, DominatorTree *DT_, 13306 AssumptionCache *AC_, DemandedBits *DB_, 13307 OptimizationRemarkEmitter *ORE_) { 13308 if (!RunSLPVectorization) 13309 return false; 13310 SE = SE_; 13311 TTI = TTI_; 13312 TLI = TLI_; 13313 AA = AA_; 13314 LI = LI_; 13315 DT = DT_; 13316 AC = AC_; 13317 DB = DB_; 13318 DL = &F.getParent()->getDataLayout(); 13319 13320 Stores.clear(); 13321 GEPs.clear(); 13322 bool Changed = false; 13323 13324 // If the target claims to have no vector registers don't attempt 13325 // vectorization. 13326 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true))) { 13327 LLVM_DEBUG( 13328 dbgs() << "SLP: Didn't find any vector registers for target, abort.\n"); 13329 return false; 13330 } 13331 13332 // Don't vectorize when the attribute NoImplicitFloat is used. 13333 if (F.hasFnAttribute(Attribute::NoImplicitFloat)) 13334 return false; 13335 13336 LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); 13337 13338 // Use the bottom up slp vectorizer to construct chains that start with 13339 // store instructions. 13340 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_); 13341 13342 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to 13343 // delete instructions. 13344 13345 // Update DFS numbers now so that we can use them for ordering. 13346 DT->updateDFSNumbers(); 13347 13348 // Scan the blocks in the function in post order. 13349 for (auto *BB : post_order(&F.getEntryBlock())) { 13350 // Start new block - clear the list of reduction roots. 
13351 R.clearReductionData(); 13352 collectSeedInstructions(BB); 13353 13354 // Vectorize trees that end at stores. 13355 if (!Stores.empty()) { 13356 LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size() 13357 << " underlying objects.\n"); 13358 Changed |= vectorizeStoreChains(R); 13359 } 13360 13361 // Vectorize trees that end at reductions. 13362 Changed |= vectorizeChainsInBlock(BB, R); 13363 13364 // Vectorize the index computations of getelementptr instructions. This 13365 // is primarily intended to catch gather-like idioms ending at 13366 // non-consecutive loads. 13367 if (!GEPs.empty()) { 13368 LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size() 13369 << " underlying objects.\n"); 13370 Changed |= vectorizeGEPIndices(BB, R); 13371 } 13372 } 13373 13374 if (Changed) { 13375 R.optimizeGatherSequence(); 13376 LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n"); 13377 } 13378 return Changed; 13379 } 13380 13381 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R, 13382 unsigned Idx, unsigned MinVF) { 13383 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size() 13384 << "\n"); 13385 const unsigned Sz = R.getVectorElementSize(Chain[0]); 13386 unsigned VF = Chain.size(); 13387 13388 if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF) 13389 return false; 13390 13391 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx 13392 << "\n"); 13393 13394 R.buildTree(Chain); 13395 if (R.isTreeTinyAndNotFullyVectorizable()) 13396 return false; 13397 if (R.isLoadCombineCandidate()) 13398 return false; 13399 R.reorderTopToBottom(); 13400 R.reorderBottomToTop(); 13401 R.buildExternalUses(); 13402 13403 R.computeMinimumValueSizes(); 13404 13405 InstructionCost Cost = R.getTreeCost(); 13406 13407 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF=" << VF << "\n"); 13408 if (Cost < -SLPCostThreshold) { 13409 LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n"); 13410 13411 using namespace ore; 13412 13413 R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized", 13414 cast<StoreInst>(Chain[0])) 13415 << "Stores SLP vectorized with cost " << NV("Cost", Cost) 13416 << " and with tree size " 13417 << NV("TreeSize", R.getTreeSize())); 13418 13419 R.vectorizeTree(); 13420 return true; 13421 } 13422 13423 return false; 13424 } 13425 13426 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores, 13427 BoUpSLP &R) { 13428 // We may run into multiple chains that merge into a single chain. We mark the 13429 // stores that we vectorized so that we don't visit the same store twice. 13430 BoUpSLP::ValueSet VectorizedStores; 13431 bool Changed = false; 13432 13433 // Stores the pair of stores (first_store, last_store) in a range, that were 13434 // already tried to be vectorized. Allows to skip the store ranges that were 13435 // already tried to be vectorized but the attempts were unsuccessful. 13436 DenseSet<std::pair<Value *, Value *>> TriedSequences; 13437 struct StoreDistCompare { 13438 bool operator()(const std::pair<unsigned, int> &Op1, 13439 const std::pair<unsigned, int> &Op2) const { 13440 return Op1.second < Op2.second; 13441 } 13442 }; 13443 // A set of pairs (index of store in Stores array ref, Distance of the store 13444 // address relative to base store address in units). 
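  // For example (hypothetical stores, distances in element-size units):
  //   1. store x, %p
  //   2. store y, %p+2
  //   3. store z, %p+1
  // collected in that order with store #1 as the base, the set would contain
  // {0, 0}, {2, 1}, {1, 2} (pairs of index into Stores and distance), ordered
  // by the distance component via StoreDistCompare.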
13445 using StoreIndexToDistSet = 13446 std::set<std::pair<unsigned, int>, StoreDistCompare>; 13447 auto TryToVectorize = [&](const StoreIndexToDistSet &Set) { 13448 int PrevDist = -1; 13449 BoUpSLP::ValueList Operands; 13450 // Collect the chain into a list. 13451 for (auto [Idx, Data] : enumerate(Set)) { 13452 if (Operands.empty() || Data.second - PrevDist == 1) { 13453 Operands.push_back(Stores[Data.first]); 13454 PrevDist = Data.second; 13455 if (Idx != Set.size() - 1) 13456 continue; 13457 } 13458 if (Operands.size() <= 1) { 13459 Operands.clear(); 13460 Operands.push_back(Stores[Data.first]); 13461 PrevDist = Data.second; 13462 continue; 13463 } 13464 13465 unsigned MaxVecRegSize = R.getMaxVecRegSize(); 13466 unsigned EltSize = R.getVectorElementSize(Operands[0]); 13467 unsigned MaxElts = llvm::bit_floor(MaxVecRegSize / EltSize); 13468 13469 unsigned MaxVF = 13470 std::min(R.getMaximumVF(EltSize, Instruction::Store), MaxElts); 13471 auto *Store = cast<StoreInst>(Operands[0]); 13472 Type *StoreTy = Store->getValueOperand()->getType(); 13473 Type *ValueTy = StoreTy; 13474 if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand())) 13475 ValueTy = Trunc->getSrcTy(); 13476 unsigned MinVF = TTI->getStoreMinimumVF( 13477 R.getMinVF(DL->getTypeSizeInBits(ValueTy)), StoreTy, ValueTy); 13478 13479 if (MaxVF <= MinVF) { 13480 LLVM_DEBUG(dbgs() << "SLP: Vectorization infeasible as MaxVF (" << MaxVF 13481 << ") <= " 13482 << "MinVF (" << MinVF << ")\n"); 13483 } 13484 13485 // FIXME: Is division-by-2 the correct step? Should we assert that the 13486 // register size is a power-of-2? 13487 unsigned StartIdx = 0; 13488 for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) { 13489 for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) { 13490 ArrayRef<Value *> Slice = ArrayRef(Operands).slice(Cnt, Size); 13491 assert( 13492 all_of( 13493 Slice, 13494 [&](Value *V) { 13495 return cast<StoreInst>(V)->getValueOperand()->getType() == 13496 cast<StoreInst>(Slice.front()) 13497 ->getValueOperand() 13498 ->getType(); 13499 }) && 13500 "Expected all operands of same type."); 13501 if (!VectorizedStores.count(Slice.front()) && 13502 !VectorizedStores.count(Slice.back()) && 13503 TriedSequences.insert(std::make_pair(Slice.front(), Slice.back())) 13504 .second && 13505 vectorizeStoreChain(Slice, R, Cnt, MinVF)) { 13506 // Mark the vectorized stores so that we don't vectorize them again. 13507 VectorizedStores.insert(Slice.begin(), Slice.end()); 13508 Changed = true; 13509 // If we vectorized initial block, no need to try to vectorize it 13510 // again. 13511 if (Cnt == StartIdx) 13512 StartIdx += Size; 13513 Cnt += Size; 13514 continue; 13515 } 13516 ++Cnt; 13517 } 13518 // Check if the whole array was vectorized already - exit. 13519 if (StartIdx >= Operands.size()) 13520 break; 13521 } 13522 Operands.clear(); 13523 Operands.push_back(Stores[Data.first]); 13524 PrevDist = Data.second; 13525 } 13526 }; 13527 13528 // Stores pair (first: index of the store into Stores array ref, address of 13529 // which taken as base, second: sorted set of pairs {index, dist}, which are 13530 // indices of stores in the set and their store location distances relative to 13531 // the base address). 13532 13533 // Need to store the index of the very first store separately, since the set 13534 // may be reordered after the insertion and the first store may be moved. This 13535 // container allows to reduce number of calls of getPointersDiff() function. 
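  // For example, when a block stores through many pointers based on the same
  // underlying object, each newly collected store is compared via
  // getPointersDiff() only against the remembered base store of every open
  // sequence in SortedStores below, not against every store seen so far.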
  SmallVector<std::pair<unsigned, StoreIndexToDistSet>> SortedStores;
  // Inserts the specified store SI with the given index Idx into the set of
  // stores. If a store with the same distance is already in the set, stop the
  // insertion and try to vectorize the stores found so far. If some stores
  // from this sequence were not vectorized, try to vectorize them together
  // with the new store later. This logic is applied only to the stores that
  // come before the previous store with the same distance.
  // Example:
  // 1. store x, %p
  // 2. store y, %p+1
  // 3. store z, %p+2
  // 4. store a, %p
  // 5. store b, %p+3
  // - Scan this from the last to first store. The very first bunch of stores is
  //  {5, {{4, -3}, {2, -2}, {3, -1}, {5, 0}}} (the element in SortedStores
  //  vector).
  // - The next store in the list - #1 - has the same distance from store #5 as
  //  the store #4.
  // - Try to vectorize the sequence of stores 4,2,3,5.
  // - If all these stores are vectorized - just drop them.
  // - If some of them are not vectorized (say, #3 and #5), do extra analysis.
  // - Start a new stores sequence.
  // The new bunch of stores is {1, {1, 0}}.
  // - Add the stores from the previous sequence that were not vectorized.
  // Here we consider the stores in the reversed order, rather than the order in
  // which they appear in the IR (Stores are reversed already, see
  // vectorizeStoreChains() function).
  // Store #3 can be added -> it comes after store #4 with the same distance as
  // store #1.
  // Store #5 cannot be added - it comes before store #4.
  // This logic improves compile time: we assume that the stores after the
  // previous store with the same distance most likely have memory dependencies
  // and there is no need to waste compile time trying to vectorize them.
  // - Try to vectorize the sequence {1, {1, 0}, {3, 2}}.
  auto FillStoresSet = [&](unsigned Idx, StoreInst *SI) {
    for (std::pair<unsigned, StoreIndexToDistSet> &Set : SortedStores) {
      std::optional<int> Diff = getPointersDiff(
          Stores[Set.first]->getValueOperand()->getType(),
          Stores[Set.first]->getPointerOperand(),
          SI->getValueOperand()->getType(), SI->getPointerOperand(), *DL, *SE,
          /*StrictCheck=*/true);
      if (!Diff)
        continue;
      auto It = Set.second.find(std::make_pair(Idx, *Diff));
      if (It == Set.second.end()) {
        Set.second.emplace(Idx, *Diff);
        return;
      }
      // Try to vectorize the first found set to avoid duplicate analysis.
      TryToVectorize(Set.second);
      StoreIndexToDistSet PrevSet;
      PrevSet.swap(Set.second);
      Set.first = Idx;
      Set.second.emplace(Idx, 0);
      // Insert stores that followed the previous match to try to vectorize
      // them with this store.
      unsigned StartIdx = It->first + 1;
      SmallBitVector UsedStores(Idx - StartIdx);
      // Distances to the previously found duplicate store (or this store,
      // since they store to the same addresses).
      SmallVector<int> Dists(Idx - StartIdx, 0);
      for (const std::pair<unsigned, int> &Pair : reverse(PrevSet)) {
        // Do not try to vectorize sequences we already tried.
13598 if (Pair.first <= It->first || 13599 VectorizedStores.contains(Stores[Pair.first])) 13600 break; 13601 unsigned BI = Pair.first - StartIdx; 13602 UsedStores.set(BI); 13603 Dists[BI] = Pair.second - It->second; 13604 } 13605 for (unsigned I = StartIdx; I < Idx; ++I) { 13606 unsigned BI = I - StartIdx; 13607 if (UsedStores.test(BI)) 13608 Set.second.emplace(I, Dists[BI]); 13609 } 13610 return; 13611 } 13612 auto &Res = SortedStores.emplace_back(); 13613 Res.first = Idx; 13614 Res.second.emplace(Idx, 0); 13615 }; 13616 StoreInst *PrevStore = Stores.front(); 13617 for (auto [I, SI] : enumerate(Stores)) { 13618 // Check that we do not try to vectorize stores of different types. 13619 if (PrevStore->getValueOperand()->getType() != 13620 SI->getValueOperand()->getType()) { 13621 for (auto &Set : SortedStores) 13622 TryToVectorize(Set.second); 13623 SortedStores.clear(); 13624 PrevStore = SI; 13625 } 13626 FillStoresSet(I, SI); 13627 } 13628 13629 // Final vectorization attempt. 13630 for (auto &Set : SortedStores) 13631 TryToVectorize(Set.second); 13632 13633 return Changed; 13634 } 13635 13636 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) { 13637 // Initialize the collections. We will make a single pass over the block. 13638 Stores.clear(); 13639 GEPs.clear(); 13640 13641 // Visit the store and getelementptr instructions in BB and organize them in 13642 // Stores and GEPs according to the underlying objects of their pointer 13643 // operands. 13644 for (Instruction &I : *BB) { 13645 // Ignore store instructions that are volatile or have a pointer operand 13646 // that doesn't point to a scalar type. 13647 if (auto *SI = dyn_cast<StoreInst>(&I)) { 13648 if (!SI->isSimple()) 13649 continue; 13650 if (!isValidElementType(SI->getValueOperand()->getType())) 13651 continue; 13652 Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI); 13653 } 13654 13655 // Ignore getelementptr instructions that have more than one index, a 13656 // constant index, or a pointer operand that doesn't point to a scalar 13657 // type. 13658 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { 13659 if (GEP->getNumIndices() != 1) 13660 continue; 13661 Value *Idx = GEP->idx_begin()->get(); 13662 if (isa<Constant>(Idx)) 13663 continue; 13664 if (!isValidElementType(Idx->getType())) 13665 continue; 13666 if (GEP->getType()->isVectorTy()) 13667 continue; 13668 GEPs[GEP->getPointerOperand()].push_back(GEP); 13669 } 13670 } 13671 } 13672 13673 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, 13674 bool MaxVFOnly) { 13675 if (VL.size() < 2) 13676 return false; 13677 13678 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " 13679 << VL.size() << ".\n"); 13680 13681 // Check that all of the parts are instructions of the same type, 13682 // we permit an alternate opcode via InstructionsState. 13683 InstructionsState S = getSameOpcode(VL, *TLI); 13684 if (!S.getOpcode()) 13685 return false; 13686 13687 Instruction *I0 = cast<Instruction>(S.OpValue); 13688 // Make sure invalid types (including vector type) are rejected before 13689 // determining vectorization factor for scalar instructions. 13690 for (Value *V : VL) { 13691 Type *Ty = V->getType(); 13692 if (!isa<InsertElementInst>(V) && !isValidElementType(Ty)) { 13693 // NOTE: the following will give user internal llvm type name, which may 13694 // not be useful. 
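      // For example, a value with an x86_fp80 or aggregate type fails
      // isValidElementType() above and is reported here with its raw LLVM
      // type name.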
13695 R.getORE()->emit([&]() { 13696 std::string TypeStr; 13697 llvm::raw_string_ostream rso(TypeStr); 13698 Ty->print(rso); 13699 return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0) 13700 << "Cannot SLP vectorize list: type " 13701 << rso.str() + " is unsupported by vectorizer"; 13702 }); 13703 return false; 13704 } 13705 } 13706 13707 unsigned Sz = R.getVectorElementSize(I0); 13708 unsigned MinVF = R.getMinVF(Sz); 13709 unsigned MaxVF = std::max<unsigned>(llvm::bit_floor(VL.size()), MinVF); 13710 MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF); 13711 if (MaxVF < 2) { 13712 R.getORE()->emit([&]() { 13713 return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0) 13714 << "Cannot SLP vectorize list: vectorization factor " 13715 << "less than 2 is not supported"; 13716 }); 13717 return false; 13718 } 13719 13720 bool Changed = false; 13721 bool CandidateFound = false; 13722 InstructionCost MinCost = SLPCostThreshold.getValue(); 13723 Type *ScalarTy = VL[0]->getType(); 13724 if (auto *IE = dyn_cast<InsertElementInst>(VL[0])) 13725 ScalarTy = IE->getOperand(1)->getType(); 13726 13727 unsigned NextInst = 0, MaxInst = VL.size(); 13728 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) { 13729 // No actual vectorization should happen, if number of parts is the same as 13730 // provided vectorization factor (i.e. the scalar type is used for vector 13731 // code during codegen). 13732 auto *VecTy = FixedVectorType::get(ScalarTy, VF); 13733 if (TTI->getNumberOfParts(VecTy) == VF) 13734 continue; 13735 for (unsigned I = NextInst; I < MaxInst; ++I) { 13736 unsigned ActualVF = std::min(MaxInst - I, VF); 13737 13738 if (!isPowerOf2_32(ActualVF)) 13739 continue; 13740 13741 if (MaxVFOnly && ActualVF < MaxVF) 13742 break; 13743 if ((VF > MinVF && ActualVF <= VF / 2) || (VF == MinVF && ActualVF < 2)) 13744 break; 13745 13746 ArrayRef<Value *> Ops = VL.slice(I, ActualVF); 13747 // Check that a previous iteration of this loop did not delete the Value. 13748 if (llvm::any_of(Ops, [&R](Value *V) { 13749 auto *I = dyn_cast<Instruction>(V); 13750 return I && R.isDeleted(I); 13751 })) 13752 continue; 13753 13754 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << ActualVF << " operations " 13755 << "\n"); 13756 13757 R.buildTree(Ops); 13758 if (R.isTreeTinyAndNotFullyVectorizable()) 13759 continue; 13760 R.reorderTopToBottom(); 13761 R.reorderBottomToTop( 13762 /*IgnoreReorder=*/!isa<InsertElementInst>(Ops.front()) && 13763 !R.doesRootHaveInTreeUses()); 13764 R.buildExternalUses(); 13765 13766 R.computeMinimumValueSizes(); 13767 InstructionCost Cost = R.getTreeCost(); 13768 CandidateFound = true; 13769 MinCost = std::min(MinCost, Cost); 13770 13771 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost 13772 << " for VF=" << ActualVF << "\n"); 13773 if (Cost < -SLPCostThreshold) { 13774 LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n"); 13775 R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList", 13776 cast<Instruction>(Ops[0])) 13777 << "SLP vectorized with cost " << ore::NV("Cost", Cost) 13778 << " and with tree size " 13779 << ore::NV("TreeSize", R.getTreeSize())); 13780 13781 R.vectorizeTree(); 13782 // Move to the next bundle. 
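        // For example, with VF = 4 and a profitable tree built from
        // VL[I..I+3], I advances past this bundle and the next candidate
        // bundle starts at I + 4, so vectorized scalars are not revisited.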
13783 I += VF - 1; 13784 NextInst = I + 1; 13785 Changed = true; 13786 } 13787 } 13788 } 13789 13790 if (!Changed && CandidateFound) { 13791 R.getORE()->emit([&]() { 13792 return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0) 13793 << "List vectorization was possible but not beneficial with cost " 13794 << ore::NV("Cost", MinCost) << " >= " 13795 << ore::NV("Treshold", -SLPCostThreshold); 13796 }); 13797 } else if (!Changed) { 13798 R.getORE()->emit([&]() { 13799 return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0) 13800 << "Cannot SLP vectorize list: vectorization was impossible" 13801 << " with available vectorization factors"; 13802 }); 13803 } 13804 return Changed; 13805 } 13806 13807 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) { 13808 if (!I) 13809 return false; 13810 13811 if (!isa<BinaryOperator, CmpInst>(I) || isa<VectorType>(I->getType())) 13812 return false; 13813 13814 Value *P = I->getParent(); 13815 13816 // Vectorize in current basic block only. 13817 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0)); 13818 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1)); 13819 if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P) 13820 return false; 13821 13822 // First collect all possible candidates 13823 SmallVector<std::pair<Value *, Value *>, 4> Candidates; 13824 Candidates.emplace_back(Op0, Op1); 13825 13826 auto *A = dyn_cast<BinaryOperator>(Op0); 13827 auto *B = dyn_cast<BinaryOperator>(Op1); 13828 // Try to skip B. 13829 if (A && B && B->hasOneUse()) { 13830 auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0)); 13831 auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1)); 13832 if (B0 && B0->getParent() == P) 13833 Candidates.emplace_back(A, B0); 13834 if (B1 && B1->getParent() == P) 13835 Candidates.emplace_back(A, B1); 13836 } 13837 // Try to skip A. 13838 if (B && A && A->hasOneUse()) { 13839 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); 13840 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); 13841 if (A0 && A0->getParent() == P) 13842 Candidates.emplace_back(A0, B); 13843 if (A1 && A1->getParent() == P) 13844 Candidates.emplace_back(A1, B); 13845 } 13846 13847 if (Candidates.size() == 1) 13848 return tryToVectorizeList({Op0, Op1}, R); 13849 13850 // We have multiple options. Try to pick the single best. 13851 std::optional<int> BestCandidate = R.findBestRootPair(Candidates); 13852 if (!BestCandidate) 13853 return false; 13854 return tryToVectorizeList( 13855 {Candidates[*BestCandidate].first, Candidates[*BestCandidate].second}, R); 13856 } 13857 13858 namespace { 13859 13860 /// Model horizontal reductions. 13861 /// 13862 /// A horizontal reduction is a tree of reduction instructions that has values 13863 /// that can be put into a vector as its leaves. For example: 13864 /// 13865 /// mul mul mul mul 13866 /// \ / \ / 13867 /// + + 13868 /// \ / 13869 /// + 13870 /// This tree has "mul" as its leaf values and "+" as its reduction 13871 /// instructions. A reduction can feed into a store or a binary operation 13872 /// feeding a phi. 13873 /// ... 13874 /// \ / 13875 /// + 13876 /// | 13877 /// phi += 13878 /// 13879 /// Or: 13880 /// ... 13881 /// \ / 13882 /// + 13883 /// | 13884 /// *p = 13885 /// 13886 class HorizontalReduction { 13887 using ReductionOpsType = SmallVector<Value *, 16>; 13888 using ReductionOpsListType = SmallVector<ReductionOpsType, 2>; 13889 ReductionOpsListType ReductionOps; 13890 /// List of possibly reduced values. 
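  /// The values are kept in groups of likely-compatible scalars (e.g. loads
  /// based on the same pointer, or instructions with the same opcode), so that
  /// each group can be tried as a single vectorization bundle.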
13891 SmallVector<SmallVector<Value *>> ReducedVals; 13892 /// Maps reduced value to the corresponding reduction operation. 13893 DenseMap<Value *, SmallVector<Instruction *>> ReducedValsToOps; 13894 // Use map vector to make stable output. 13895 MapVector<Instruction *, Value *> ExtraArgs; 13896 WeakTrackingVH ReductionRoot; 13897 /// The type of reduction operation. 13898 RecurKind RdxKind; 13899 /// Checks if the optimization of original scalar identity operations on 13900 /// matched horizontal reductions is enabled and allowed. 13901 bool IsSupportedHorRdxIdentityOp = false; 13902 13903 static bool isCmpSelMinMax(Instruction *I) { 13904 return match(I, m_Select(m_Cmp(), m_Value(), m_Value())) && 13905 RecurrenceDescriptor::isMinMaxRecurrenceKind(getRdxKind(I)); 13906 } 13907 13908 // And/or are potentially poison-safe logical patterns like: 13909 // select x, y, false 13910 // select x, true, y 13911 static bool isBoolLogicOp(Instruction *I) { 13912 return isa<SelectInst>(I) && 13913 (match(I, m_LogicalAnd()) || match(I, m_LogicalOr())); 13914 } 13915 13916 /// Checks if instruction is associative and can be vectorized. 13917 static bool isVectorizable(RecurKind Kind, Instruction *I) { 13918 if (Kind == RecurKind::None) 13919 return false; 13920 13921 // Integer ops that map to select instructions or intrinsics are fine. 13922 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind) || 13923 isBoolLogicOp(I)) 13924 return true; 13925 13926 if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) { 13927 // FP min/max are associative except for NaN and -0.0. We do not 13928 // have to rule out -0.0 here because the intrinsic semantics do not 13929 // specify a fixed result for it. 13930 return I->getFastMathFlags().noNaNs(); 13931 } 13932 13933 if (Kind == RecurKind::FMaximum || Kind == RecurKind::FMinimum) 13934 return true; 13935 13936 return I->isAssociative(); 13937 } 13938 13939 static Value *getRdxOperand(Instruction *I, unsigned Index) { 13940 // Poison-safe 'or' takes the form: select X, true, Y 13941 // To make that work with the normal operand processing, we skip the 13942 // true value operand. 13943 // TODO: Change the code and data structures to handle this without a hack. 13944 if (getRdxKind(I) == RecurKind::Or && isa<SelectInst>(I) && Index == 1) 13945 return I->getOperand(2); 13946 return I->getOperand(Index); 13947 } 13948 13949 /// Creates reduction operation with the current opcode. 
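  /// For example, for RecurKind::SMax with \p UseSelect set this emits an
  /// icmp sgt followed by a select, while without \p UseSelect it emits a call
  /// to the llvm.smax intrinsic.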
13950 static Value *createOp(IRBuilder<> &Builder, RecurKind Kind, Value *LHS, 13951 Value *RHS, const Twine &Name, bool UseSelect) { 13952 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind); 13953 bool IsConstant = isConstant(LHS) && isConstant(RHS); 13954 switch (Kind) { 13955 case RecurKind::Or: 13956 if (UseSelect && 13957 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType())) 13958 return Builder.CreateSelect(LHS, Builder.getTrue(), RHS, Name); 13959 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 13960 Name); 13961 case RecurKind::And: 13962 if (UseSelect && 13963 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType())) 13964 return Builder.CreateSelect(LHS, RHS, Builder.getFalse(), Name); 13965 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 13966 Name); 13967 case RecurKind::Add: 13968 case RecurKind::Mul: 13969 case RecurKind::Xor: 13970 case RecurKind::FAdd: 13971 case RecurKind::FMul: 13972 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 13973 Name); 13974 case RecurKind::FMax: 13975 if (IsConstant) 13976 return ConstantFP::get(LHS->getType(), 13977 maxnum(cast<ConstantFP>(LHS)->getValueAPF(), 13978 cast<ConstantFP>(RHS)->getValueAPF())); 13979 return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS); 13980 case RecurKind::FMin: 13981 if (IsConstant) 13982 return ConstantFP::get(LHS->getType(), 13983 minnum(cast<ConstantFP>(LHS)->getValueAPF(), 13984 cast<ConstantFP>(RHS)->getValueAPF())); 13985 return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS); 13986 case RecurKind::FMaximum: 13987 if (IsConstant) 13988 return ConstantFP::get(LHS->getType(), 13989 maximum(cast<ConstantFP>(LHS)->getValueAPF(), 13990 cast<ConstantFP>(RHS)->getValueAPF())); 13991 return Builder.CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS); 13992 case RecurKind::FMinimum: 13993 if (IsConstant) 13994 return ConstantFP::get(LHS->getType(), 13995 minimum(cast<ConstantFP>(LHS)->getValueAPF(), 13996 cast<ConstantFP>(RHS)->getValueAPF())); 13997 return Builder.CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS); 13998 case RecurKind::SMax: 13999 if (IsConstant || UseSelect) { 14000 Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name); 14001 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 14002 } 14003 return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS); 14004 case RecurKind::SMin: 14005 if (IsConstant || UseSelect) { 14006 Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name); 14007 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 14008 } 14009 return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS); 14010 case RecurKind::UMax: 14011 if (IsConstant || UseSelect) { 14012 Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name); 14013 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 14014 } 14015 return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS); 14016 case RecurKind::UMin: 14017 if (IsConstant || UseSelect) { 14018 Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name); 14019 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 14020 } 14021 return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS); 14022 default: 14023 llvm_unreachable("Unknown reduction operation."); 14024 } 14025 } 14026 14027 /// Creates reduction operation with the current opcode with the IR flags 14028 /// from \p ReductionOps, dropping nuw/nsw flags. 
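  /// For a cmp+select min/max reduction, \p ReductionOps is expected to hold
  /// the compare instructions in ReductionOps[0] and the selects in
  /// ReductionOps[1]; the flags of each list are propagated to the newly
  /// created compare and select, respectively.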
14029 static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS, 14030 Value *RHS, const Twine &Name, 14031 const ReductionOpsListType &ReductionOps) { 14032 bool UseSelect = 14033 ReductionOps.size() == 2 || 14034 // Logical or/and. 14035 (ReductionOps.size() == 1 && any_of(ReductionOps.front(), [](Value *V) { 14036 return isa<SelectInst>(V); 14037 })); 14038 assert((!UseSelect || ReductionOps.size() != 2 || 14039 isa<SelectInst>(ReductionOps[1][0])) && 14040 "Expected cmp + select pairs for reduction"); 14041 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect); 14042 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) { 14043 if (auto *Sel = dyn_cast<SelectInst>(Op)) { 14044 propagateIRFlags(Sel->getCondition(), ReductionOps[0], nullptr, 14045 /*IncludeWrapFlags=*/false); 14046 propagateIRFlags(Op, ReductionOps[1], nullptr, 14047 /*IncludeWrapFlags=*/false); 14048 return Op; 14049 } 14050 } 14051 propagateIRFlags(Op, ReductionOps[0], nullptr, /*IncludeWrapFlags=*/false); 14052 return Op; 14053 } 14054 14055 public: 14056 static RecurKind getRdxKind(Value *V) { 14057 auto *I = dyn_cast<Instruction>(V); 14058 if (!I) 14059 return RecurKind::None; 14060 if (match(I, m_Add(m_Value(), m_Value()))) 14061 return RecurKind::Add; 14062 if (match(I, m_Mul(m_Value(), m_Value()))) 14063 return RecurKind::Mul; 14064 if (match(I, m_And(m_Value(), m_Value())) || 14065 match(I, m_LogicalAnd(m_Value(), m_Value()))) 14066 return RecurKind::And; 14067 if (match(I, m_Or(m_Value(), m_Value())) || 14068 match(I, m_LogicalOr(m_Value(), m_Value()))) 14069 return RecurKind::Or; 14070 if (match(I, m_Xor(m_Value(), m_Value()))) 14071 return RecurKind::Xor; 14072 if (match(I, m_FAdd(m_Value(), m_Value()))) 14073 return RecurKind::FAdd; 14074 if (match(I, m_FMul(m_Value(), m_Value()))) 14075 return RecurKind::FMul; 14076 14077 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value()))) 14078 return RecurKind::FMax; 14079 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value()))) 14080 return RecurKind::FMin; 14081 14082 if (match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(), m_Value()))) 14083 return RecurKind::FMaximum; 14084 if (match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(), m_Value()))) 14085 return RecurKind::FMinimum; 14086 // This matches either cmp+select or intrinsics. SLP is expected to handle 14087 // either form. 14088 // TODO: If we are canonicalizing to intrinsics, we can remove several 14089 // special-case paths that deal with selects. 14090 if (match(I, m_SMax(m_Value(), m_Value()))) 14091 return RecurKind::SMax; 14092 if (match(I, m_SMin(m_Value(), m_Value()))) 14093 return RecurKind::SMin; 14094 if (match(I, m_UMax(m_Value(), m_Value()))) 14095 return RecurKind::UMax; 14096 if (match(I, m_UMin(m_Value(), m_Value()))) 14097 return RecurKind::UMin; 14098 14099 if (auto *Select = dyn_cast<SelectInst>(I)) { 14100 // Try harder: look for min/max pattern based on instructions producing 14101 // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2). 
14102 // During the intermediate stages of SLP, it's very common to have 14103 // pattern like this (since optimizeGatherSequence is run only once 14104 // at the end): 14105 // %1 = extractelement <2 x i32> %a, i32 0 14106 // %2 = extractelement <2 x i32> %a, i32 1 14107 // %cond = icmp sgt i32 %1, %2 14108 // %3 = extractelement <2 x i32> %a, i32 0 14109 // %4 = extractelement <2 x i32> %a, i32 1 14110 // %select = select i1 %cond, i32 %3, i32 %4 14111 CmpInst::Predicate Pred; 14112 Instruction *L1; 14113 Instruction *L2; 14114 14115 Value *LHS = Select->getTrueValue(); 14116 Value *RHS = Select->getFalseValue(); 14117 Value *Cond = Select->getCondition(); 14118 14119 // TODO: Support inverse predicates. 14120 if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) { 14121 if (!isa<ExtractElementInst>(RHS) || 14122 !L2->isIdenticalTo(cast<Instruction>(RHS))) 14123 return RecurKind::None; 14124 } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) { 14125 if (!isa<ExtractElementInst>(LHS) || 14126 !L1->isIdenticalTo(cast<Instruction>(LHS))) 14127 return RecurKind::None; 14128 } else { 14129 if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS)) 14130 return RecurKind::None; 14131 if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) || 14132 !L1->isIdenticalTo(cast<Instruction>(LHS)) || 14133 !L2->isIdenticalTo(cast<Instruction>(RHS))) 14134 return RecurKind::None; 14135 } 14136 14137 switch (Pred) { 14138 default: 14139 return RecurKind::None; 14140 case CmpInst::ICMP_SGT: 14141 case CmpInst::ICMP_SGE: 14142 return RecurKind::SMax; 14143 case CmpInst::ICMP_SLT: 14144 case CmpInst::ICMP_SLE: 14145 return RecurKind::SMin; 14146 case CmpInst::ICMP_UGT: 14147 case CmpInst::ICMP_UGE: 14148 return RecurKind::UMax; 14149 case CmpInst::ICMP_ULT: 14150 case CmpInst::ICMP_ULE: 14151 return RecurKind::UMin; 14152 } 14153 } 14154 return RecurKind::None; 14155 } 14156 14157 /// Get the index of the first operand. 14158 static unsigned getFirstOperandIndex(Instruction *I) { 14159 return isCmpSelMinMax(I) ? 1 : 0; 14160 } 14161 14162 private: 14163 /// Total number of operands in the reduction operation. 14164 static unsigned getNumberOfOperands(Instruction *I) { 14165 return isCmpSelMinMax(I) ? 3 : 2; 14166 } 14167 14168 /// Checks if the instruction is in basic block \p BB. 14169 /// For a cmp+sel min/max reduction check that both ops are in \p BB. 14170 static bool hasSameParent(Instruction *I, BasicBlock *BB) { 14171 if (isCmpSelMinMax(I) || isBoolLogicOp(I)) { 14172 auto *Sel = cast<SelectInst>(I); 14173 auto *Cmp = dyn_cast<Instruction>(Sel->getCondition()); 14174 return Sel->getParent() == BB && Cmp && Cmp->getParent() == BB; 14175 } 14176 return I->getParent() == BB; 14177 } 14178 14179 /// Expected number of uses for reduction operations/reduced values. 14180 static bool hasRequiredNumberOfUses(bool IsCmpSelMinMax, Instruction *I) { 14181 if (IsCmpSelMinMax) { 14182 // SelectInst must be used twice while the condition op must have single 14183 // use only. 14184 if (auto *Sel = dyn_cast<SelectInst>(I)) 14185 return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse(); 14186 return I->hasNUses(2); 14187 } 14188 14189 // Arithmetic reduction operation must be used once only. 14190 return I->hasOneUse(); 14191 } 14192 14193 /// Initializes the list of reduction operations. 
14194 void initReductionOps(Instruction *I) { 14195 if (isCmpSelMinMax(I)) 14196 ReductionOps.assign(2, ReductionOpsType()); 14197 else 14198 ReductionOps.assign(1, ReductionOpsType()); 14199 } 14200 14201 /// Add all reduction operations for the reduction instruction \p I. 14202 void addReductionOps(Instruction *I) { 14203 if (isCmpSelMinMax(I)) { 14204 ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition()); 14205 ReductionOps[1].emplace_back(I); 14206 } else { 14207 ReductionOps[0].emplace_back(I); 14208 } 14209 } 14210 14211 static bool isGoodForReduction(ArrayRef<Value *> Data) { 14212 int Sz = Data.size(); 14213 auto *I = dyn_cast<Instruction>(Data.front()); 14214 return Sz > 1 || isConstant(Data.front()) || 14215 (I && !isa<LoadInst>(I) && isValidForAlternation(I->getOpcode())); 14216 } 14217 14218 public: 14219 HorizontalReduction() = default; 14220 14221 /// Try to find a reduction tree. 14222 bool matchAssociativeReduction(BoUpSLP &R, Instruction *Root, 14223 ScalarEvolution &SE, const DataLayout &DL, 14224 const TargetLibraryInfo &TLI) { 14225 RdxKind = HorizontalReduction::getRdxKind(Root); 14226 if (!isVectorizable(RdxKind, Root)) 14227 return false; 14228 14229 // Analyze "regular" integer/FP types for reductions - no target-specific 14230 // types or pointers. 14231 Type *Ty = Root->getType(); 14232 if (!isValidElementType(Ty) || Ty->isPointerTy()) 14233 return false; 14234 14235 // Though the ultimate reduction may have multiple uses, its condition must 14236 // have only single use. 14237 if (auto *Sel = dyn_cast<SelectInst>(Root)) 14238 if (!Sel->getCondition()->hasOneUse()) 14239 return false; 14240 14241 ReductionRoot = Root; 14242 14243 // Iterate through all the operands of the possible reduction tree and 14244 // gather all the reduced values, sorting them by their value id. 14245 BasicBlock *BB = Root->getParent(); 14246 bool IsCmpSelMinMax = isCmpSelMinMax(Root); 14247 SmallVector<Instruction *> Worklist(1, Root); 14248 // Checks if the operands of the \p TreeN instruction are also reduction 14249 // operations or should be treated as reduced values or an extra argument, 14250 // which is not part of the reduction. 14251 auto CheckOperands = [&](Instruction *TreeN, 14252 SmallVectorImpl<Value *> &ExtraArgs, 14253 SmallVectorImpl<Value *> &PossibleReducedVals, 14254 SmallVectorImpl<Instruction *> &ReductionOps) { 14255 for (int I = getFirstOperandIndex(TreeN), 14256 End = getNumberOfOperands(TreeN); 14257 I < End; ++I) { 14258 Value *EdgeVal = getRdxOperand(TreeN, I); 14259 ReducedValsToOps[EdgeVal].push_back(TreeN); 14260 auto *EdgeInst = dyn_cast<Instruction>(EdgeVal); 14261 // Edge has wrong parent - mark as an extra argument. 14262 if (EdgeInst && !isVectorLikeInstWithConstOps(EdgeInst) && 14263 !hasSameParent(EdgeInst, BB)) { 14264 ExtraArgs.push_back(EdgeVal); 14265 continue; 14266 } 14267 // If the edge is not an instruction, or it is different from the main 14268 // reduction opcode or has too many uses - possible reduced value. 14269 // Also, do not try to reduce const values, if the operation is not 14270 // foldable. 
14271 if (!EdgeInst || getRdxKind(EdgeInst) != RdxKind || 14272 IsCmpSelMinMax != isCmpSelMinMax(EdgeInst) || 14273 !hasRequiredNumberOfUses(IsCmpSelMinMax, EdgeInst) || 14274 !isVectorizable(RdxKind, EdgeInst) || 14275 (R.isAnalyzedReductionRoot(EdgeInst) && 14276 all_of(EdgeInst->operands(), Constant::classof))) { 14277 PossibleReducedVals.push_back(EdgeVal); 14278 continue; 14279 } 14280 ReductionOps.push_back(EdgeInst); 14281 } 14282 }; 14283 // Try to regroup reduced values so that it gets more profitable to try to 14284 // reduce them. Values are grouped by their value ids, instructions - by 14285 // instruction op id and/or alternate op id, plus do extra analysis for 14286 // loads (grouping them by the distabce between pointers) and cmp 14287 // instructions (grouping them by the predicate). 14288 MapVector<size_t, MapVector<size_t, MapVector<Value *, unsigned>>> 14289 PossibleReducedVals; 14290 initReductionOps(Root); 14291 DenseMap<Value *, SmallVector<LoadInst *>> LoadsMap; 14292 SmallSet<size_t, 2> LoadKeyUsed; 14293 SmallPtrSet<Value *, 4> DoNotReverseVals; 14294 14295 auto GenerateLoadsSubkey = [&](size_t Key, LoadInst *LI) { 14296 Value *Ptr = getUnderlyingObject(LI->getPointerOperand()); 14297 if (LoadKeyUsed.contains(Key)) { 14298 auto LIt = LoadsMap.find(Ptr); 14299 if (LIt != LoadsMap.end()) { 14300 for (LoadInst *RLI : LIt->second) { 14301 if (getPointersDiff(RLI->getType(), RLI->getPointerOperand(), 14302 LI->getType(), LI->getPointerOperand(), DL, SE, 14303 /*StrictCheck=*/true)) 14304 return hash_value(RLI->getPointerOperand()); 14305 } 14306 for (LoadInst *RLI : LIt->second) { 14307 if (arePointersCompatible(RLI->getPointerOperand(), 14308 LI->getPointerOperand(), TLI)) { 14309 hash_code SubKey = hash_value(RLI->getPointerOperand()); 14310 DoNotReverseVals.insert(RLI); 14311 return SubKey; 14312 } 14313 } 14314 if (LIt->second.size() > 2) { 14315 hash_code SubKey = 14316 hash_value(LIt->second.back()->getPointerOperand()); 14317 DoNotReverseVals.insert(LIt->second.back()); 14318 return SubKey; 14319 } 14320 } 14321 } 14322 LoadKeyUsed.insert(Key); 14323 LoadsMap.try_emplace(Ptr).first->second.push_back(LI); 14324 return hash_value(LI->getPointerOperand()); 14325 }; 14326 14327 while (!Worklist.empty()) { 14328 Instruction *TreeN = Worklist.pop_back_val(); 14329 SmallVector<Value *> Args; 14330 SmallVector<Value *> PossibleRedVals; 14331 SmallVector<Instruction *> PossibleReductionOps; 14332 CheckOperands(TreeN, Args, PossibleRedVals, PossibleReductionOps); 14333 // If too many extra args - mark the instruction itself as a reduction 14334 // value, not a reduction operation. 14335 if (Args.size() < 2) { 14336 addReductionOps(TreeN); 14337 // Add extra args. 14338 if (!Args.empty()) { 14339 assert(Args.size() == 1 && "Expected only single argument."); 14340 ExtraArgs[TreeN] = Args.front(); 14341 } 14342 // Add reduction values. The values are sorted for better vectorization 14343 // results. 
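        // For example (hypothetical input), in a reduction of
        // a + b[0] + c + b[1] + b[2] the three loads of b[i] hash to the same
        // key/subkey (see GenerateLoadsSubkey above) and land in one group of
        // PossibleReducedVals, while a and c form separate groups.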
14344 for (Value *V : PossibleRedVals) { 14345 size_t Key, Idx; 14346 std::tie(Key, Idx) = generateKeySubkey(V, &TLI, GenerateLoadsSubkey, 14347 /*AllowAlternate=*/false); 14348 ++PossibleReducedVals[Key][Idx] 14349 .insert(std::make_pair(V, 0)) 14350 .first->second; 14351 } 14352 Worklist.append(PossibleReductionOps.rbegin(), 14353 PossibleReductionOps.rend()); 14354 } else { 14355 size_t Key, Idx; 14356 std::tie(Key, Idx) = generateKeySubkey(TreeN, &TLI, GenerateLoadsSubkey, 14357 /*AllowAlternate=*/false); 14358 ++PossibleReducedVals[Key][Idx] 14359 .insert(std::make_pair(TreeN, 0)) 14360 .first->second; 14361 } 14362 } 14363 auto PossibleReducedValsVect = PossibleReducedVals.takeVector(); 14364 // Sort values by the total number of values kinds to start the reduction 14365 // from the longest possible reduced values sequences. 14366 for (auto &PossibleReducedVals : PossibleReducedValsVect) { 14367 auto PossibleRedVals = PossibleReducedVals.second.takeVector(); 14368 SmallVector<SmallVector<Value *>> PossibleRedValsVect; 14369 for (auto It = PossibleRedVals.begin(), E = PossibleRedVals.end(); 14370 It != E; ++It) { 14371 PossibleRedValsVect.emplace_back(); 14372 auto RedValsVect = It->second.takeVector(); 14373 stable_sort(RedValsVect, llvm::less_second()); 14374 for (const std::pair<Value *, unsigned> &Data : RedValsVect) 14375 PossibleRedValsVect.back().append(Data.second, Data.first); 14376 } 14377 stable_sort(PossibleRedValsVect, [](const auto &P1, const auto &P2) { 14378 return P1.size() > P2.size(); 14379 }); 14380 int NewIdx = -1; 14381 for (ArrayRef<Value *> Data : PossibleRedValsVect) { 14382 if (isGoodForReduction(Data) || 14383 (isa<LoadInst>(Data.front()) && NewIdx >= 0 && 14384 isa<LoadInst>(ReducedVals[NewIdx].front()) && 14385 getUnderlyingObject( 14386 cast<LoadInst>(Data.front())->getPointerOperand()) == 14387 getUnderlyingObject(cast<LoadInst>(ReducedVals[NewIdx].front()) 14388 ->getPointerOperand()))) { 14389 if (NewIdx < 0) { 14390 NewIdx = ReducedVals.size(); 14391 ReducedVals.emplace_back(); 14392 } 14393 if (DoNotReverseVals.contains(Data.front())) 14394 ReducedVals[NewIdx].append(Data.begin(), Data.end()); 14395 else 14396 ReducedVals[NewIdx].append(Data.rbegin(), Data.rend()); 14397 } else { 14398 ReducedVals.emplace_back().append(Data.rbegin(), Data.rend()); 14399 } 14400 } 14401 } 14402 // Sort the reduced values by number of same/alternate opcode and/or pointer 14403 // operand. 14404 stable_sort(ReducedVals, [](ArrayRef<Value *> P1, ArrayRef<Value *> P2) { 14405 return P1.size() > P2.size(); 14406 }); 14407 return true; 14408 } 14409 14410 /// Attempt to vectorize the tree found by matchAssociativeReduction. 14411 Value *tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI, 14412 const TargetLibraryInfo &TLI) { 14413 constexpr int ReductionLimit = 4; 14414 constexpr unsigned RegMaxNumber = 4; 14415 constexpr unsigned RedValsMaxNumber = 128; 14416 // If there are a sufficient number of reduction values, reduce 14417 // to a nearby power-of-2. We can safely generate oversized 14418 // vectors and rely on the backend to split them to legal sizes. 
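    // For example (hypothetical chain), x0 + x1 + ... + x6 yields
    // NumReducedVals = 7, which passes the ReductionLimit check of 4 below,
    // while a chain of only three ordinary values is normally left scalar.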
14419 unsigned NumReducedVals = 14420 std::accumulate(ReducedVals.begin(), ReducedVals.end(), 0, 14421 [](unsigned Num, ArrayRef<Value *> Vals) -> unsigned { 14422 if (!isGoodForReduction(Vals)) 14423 return Num; 14424 return Num + Vals.size(); 14425 }); 14426 if (NumReducedVals < ReductionLimit && 14427 (!AllowHorRdxIdenityOptimization || 14428 all_of(ReducedVals, [](ArrayRef<Value *> RedV) { 14429 return RedV.size() < 2 || !allConstant(RedV) || !isSplat(RedV); 14430 }))) { 14431 for (ReductionOpsType &RdxOps : ReductionOps) 14432 for (Value *RdxOp : RdxOps) 14433 V.analyzedReductionRoot(cast<Instruction>(RdxOp)); 14434 return nullptr; 14435 } 14436 14437 IRBuilder<> Builder(cast<Instruction>(ReductionRoot)); 14438 14439 // Track the reduced values in case if they are replaced by extractelement 14440 // because of the vectorization. 14441 DenseMap<Value *, WeakTrackingVH> TrackedVals( 14442 ReducedVals.size() * ReducedVals.front().size() + ExtraArgs.size()); 14443 BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues; 14444 SmallVector<std::pair<Value *, Value *>> ReplacedExternals; 14445 ExternallyUsedValues.reserve(ExtraArgs.size() + 1); 14446 // The same extra argument may be used several times, so log each attempt 14447 // to use it. 14448 for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) { 14449 assert(Pair.first && "DebugLoc must be set."); 14450 ExternallyUsedValues[Pair.second].push_back(Pair.first); 14451 TrackedVals.try_emplace(Pair.second, Pair.second); 14452 } 14453 14454 // The compare instruction of a min/max is the insertion point for new 14455 // instructions and may be replaced with a new compare instruction. 14456 auto &&GetCmpForMinMaxReduction = [](Instruction *RdxRootInst) { 14457 assert(isa<SelectInst>(RdxRootInst) && 14458 "Expected min/max reduction to have select root instruction"); 14459 Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition(); 14460 assert(isa<Instruction>(ScalarCond) && 14461 "Expected min/max reduction to have compare condition"); 14462 return cast<Instruction>(ScalarCond); 14463 }; 14464 14465 // Return new VectorizedTree, based on previous value. 14466 auto GetNewVectorizedTree = [&](Value *VectorizedTree, Value *Res) { 14467 if (VectorizedTree) { 14468 // Update the final value in the reduction. 14469 Builder.SetCurrentDebugLocation( 14470 cast<Instruction>(ReductionOps.front().front())->getDebugLoc()); 14471 if ((isa<PoisonValue>(VectorizedTree) && !isa<PoisonValue>(Res)) || 14472 (isGuaranteedNotToBePoison(Res) && 14473 !isGuaranteedNotToBePoison(VectorizedTree))) { 14474 auto It = ReducedValsToOps.find(Res); 14475 if (It != ReducedValsToOps.end() && 14476 any_of(It->getSecond(), 14477 [](Instruction *I) { return isBoolLogicOp(I); })) 14478 std::swap(VectorizedTree, Res); 14479 } 14480 14481 return createOp(Builder, RdxKind, VectorizedTree, Res, "op.rdx", 14482 ReductionOps); 14483 } 14484 // Initialize the final value in the reduction. 14485 return Res; 14486 }; 14487 bool AnyBoolLogicOp = 14488 any_of(ReductionOps.back(), [](Value *V) { 14489 return isBoolLogicOp(cast<Instruction>(V)); 14490 }); 14491 // The reduction root is used as the insertion point for new instructions, 14492 // so set it as externally used to prevent it from being deleted. 
14493 ExternallyUsedValues[ReductionRoot]; 14494 SmallDenseSet<Value *> IgnoreList(ReductionOps.size() * 14495 ReductionOps.front().size()); 14496 for (ReductionOpsType &RdxOps : ReductionOps) 14497 for (Value *RdxOp : RdxOps) { 14498 if (!RdxOp) 14499 continue; 14500 IgnoreList.insert(RdxOp); 14501 } 14502 // Intersect the fast-math-flags from all reduction operations. 14503 FastMathFlags RdxFMF; 14504 RdxFMF.set(); 14505 for (Value *U : IgnoreList) 14506 if (auto *FPMO = dyn_cast<FPMathOperator>(U)) 14507 RdxFMF &= FPMO->getFastMathFlags(); 14508 bool IsCmpSelMinMax = isCmpSelMinMax(cast<Instruction>(ReductionRoot)); 14509 14510 // Need to track reduced vals, they may be changed during vectorization of 14511 // subvectors. 14512 for (ArrayRef<Value *> Candidates : ReducedVals) 14513 for (Value *V : Candidates) 14514 TrackedVals.try_emplace(V, V); 14515 14516 DenseMap<Value *, unsigned> VectorizedVals(ReducedVals.size()); 14517 // List of the values that were reduced in other trees as part of gather 14518 // nodes and thus requiring extract if fully vectorized in other trees. 14519 SmallPtrSet<Value *, 4> RequiredExtract; 14520 Value *VectorizedTree = nullptr; 14521 bool CheckForReusedReductionOps = false; 14522 // Try to vectorize elements based on their type. 14523 for (unsigned I = 0, E = ReducedVals.size(); I < E; ++I) { 14524 ArrayRef<Value *> OrigReducedVals = ReducedVals[I]; 14525 InstructionsState S = getSameOpcode(OrigReducedVals, TLI); 14526 SmallVector<Value *> Candidates; 14527 Candidates.reserve(2 * OrigReducedVals.size()); 14528 DenseMap<Value *, Value *> TrackedToOrig(2 * OrigReducedVals.size()); 14529 for (unsigned Cnt = 0, Sz = OrigReducedVals.size(); Cnt < Sz; ++Cnt) { 14530 Value *RdxVal = TrackedVals.find(OrigReducedVals[Cnt])->second; 14531 // Check if the reduction value was not overriden by the extractelement 14532 // instruction because of the vectorization and exclude it, if it is not 14533 // compatible with other values. 14534 // Also check if the instruction was folded to constant/other value. 14535 auto *Inst = dyn_cast<Instruction>(RdxVal); 14536 if ((Inst && isVectorLikeInstWithConstOps(Inst) && 14537 (!S.getOpcode() || !S.isOpcodeOrAlt(Inst))) || 14538 (S.getOpcode() && !Inst)) 14539 continue; 14540 Candidates.push_back(RdxVal); 14541 TrackedToOrig.try_emplace(RdxVal, OrigReducedVals[Cnt]); 14542 } 14543 bool ShuffledExtracts = false; 14544 // Try to handle shuffled extractelements. 14545 if (S.getOpcode() == Instruction::ExtractElement && !S.isAltShuffle() && 14546 I + 1 < E) { 14547 InstructionsState NextS = getSameOpcode(ReducedVals[I + 1], TLI); 14548 if (NextS.getOpcode() == Instruction::ExtractElement && 14549 !NextS.isAltShuffle()) { 14550 SmallVector<Value *> CommonCandidates(Candidates); 14551 for (Value *RV : ReducedVals[I + 1]) { 14552 Value *RdxVal = TrackedVals.find(RV)->second; 14553 // Check if the reduction value was not overriden by the 14554 // extractelement instruction because of the vectorization and 14555 // exclude it, if it is not compatible with other values. 14556 if (auto *Inst = dyn_cast<Instruction>(RdxVal)) 14557 if (!NextS.getOpcode() || !NextS.isOpcodeOrAlt(Inst)) 14558 continue; 14559 CommonCandidates.push_back(RdxVal); 14560 TrackedToOrig.try_emplace(RdxVal, RV); 14561 } 14562 SmallVector<int> Mask; 14563 if (isFixedVectorShuffle(CommonCandidates, Mask)) { 14564 ++I; 14565 Candidates.swap(CommonCandidates); 14566 ShuffledExtracts = true; 14567 } 14568 } 14569 } 14570 14571 // Emit code for constant values. 
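      // For example, a candidate group of four identical constants
      // {2, 2, 2, 2} under an integer add reduction is folded here into a
      // chain of "const.rdx" adds (constant-folded by the builder), so no
      // vector tree is built for it.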
14572 if (AllowHorRdxIdenityOptimization && Candidates.size() > 1 && 14573 allConstant(Candidates)) { 14574 Value *Res = Candidates.front(); 14575 ++VectorizedVals.try_emplace(Candidates.front(), 0).first->getSecond(); 14576 for (Value *VC : ArrayRef(Candidates).drop_front()) { 14577 Res = createOp(Builder, RdxKind, Res, VC, "const.rdx", ReductionOps); 14578 ++VectorizedVals.try_emplace(VC, 0).first->getSecond(); 14579 if (auto *ResI = dyn_cast<Instruction>(Res)) 14580 V.analyzedReductionRoot(ResI); 14581 } 14582 VectorizedTree = GetNewVectorizedTree(VectorizedTree, Res); 14583 continue; 14584 } 14585 14586 unsigned NumReducedVals = Candidates.size(); 14587 if (NumReducedVals < ReductionLimit && 14588 (NumReducedVals < 2 || !AllowHorRdxIdenityOptimization || 14589 !isSplat(Candidates))) 14590 continue; 14591 14592 // Check if we support repeated scalar values processing (optimization of 14593 // original scalar identity operations on matched horizontal reductions). 14594 IsSupportedHorRdxIdentityOp = 14595 AllowHorRdxIdenityOptimization && RdxKind != RecurKind::Mul && 14596 RdxKind != RecurKind::FMul && RdxKind != RecurKind::FMulAdd; 14597 // Gather same values. 14598 MapVector<Value *, unsigned> SameValuesCounter; 14599 if (IsSupportedHorRdxIdentityOp) 14600 for (Value *V : Candidates) 14601 ++SameValuesCounter.insert(std::make_pair(V, 0)).first->second; 14602 // Used to check if the reduced values used same number of times. In this 14603 // case the compiler may produce better code. E.g. if reduced values are 14604 // aabbccdd (8 x values), then the first node of the tree will have a node 14605 // for 4 x abcd + shuffle <4 x abcd>, <0, 0, 1, 1, 2, 2, 3, 3>. 14606 // Plus, the final reduction will be performed on <8 x aabbccdd>. 14607 // Instead compiler may build <4 x abcd> tree immediately, + reduction (4 14608 // x abcd) * 2. 14609 // Currently it only handles add/fadd/xor. and/or/min/max do not require 14610 // this analysis, other operations may require an extra estimation of 14611 // the profitability. 14612 bool SameScaleFactor = false; 14613 bool OptReusedScalars = IsSupportedHorRdxIdentityOp && 14614 SameValuesCounter.size() != Candidates.size(); 14615 if (OptReusedScalars) { 14616 SameScaleFactor = 14617 (RdxKind == RecurKind::Add || RdxKind == RecurKind::FAdd || 14618 RdxKind == RecurKind::Xor) && 14619 all_of(drop_begin(SameValuesCounter), 14620 [&SameValuesCounter](const std::pair<Value *, unsigned> &P) { 14621 return P.second == SameValuesCounter.front().second; 14622 }); 14623 Candidates.resize(SameValuesCounter.size()); 14624 transform(SameValuesCounter, Candidates.begin(), 14625 [](const auto &P) { return P.first; }); 14626 NumReducedVals = Candidates.size(); 14627 // Have a reduction of the same element. 
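        // For example, a reduction of x + x + x + x collapses Candidates to
        // the single value x with a use count of 4; emitScaleForReusedOps
        // then emits a scalar rescaling of x (e.g. a multiply by 4 for an
        // integer add reduction) instead of building a vector tree.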
14628 if (NumReducedVals == 1) { 14629 Value *OrigV = TrackedToOrig.find(Candidates.front())->second; 14630 unsigned Cnt = SameValuesCounter.lookup(OrigV); 14631 Value *RedVal = 14632 emitScaleForReusedOps(Candidates.front(), Builder, Cnt); 14633 VectorizedTree = GetNewVectorizedTree(VectorizedTree, RedVal); 14634 VectorizedVals.try_emplace(OrigV, Cnt); 14635 continue; 14636 } 14637 } 14638 14639 unsigned MaxVecRegSize = V.getMaxVecRegSize(); 14640 unsigned EltSize = V.getVectorElementSize(Candidates[0]); 14641 unsigned MaxElts = 14642 RegMaxNumber * llvm::bit_floor(MaxVecRegSize / EltSize); 14643 14644 unsigned ReduxWidth = std::min<unsigned>( 14645 llvm::bit_floor(NumReducedVals), std::max(RedValsMaxNumber, MaxElts)); 14646 unsigned Start = 0; 14647 unsigned Pos = Start; 14648 // Restarts vectorization attempt with lower vector factor. 14649 unsigned PrevReduxWidth = ReduxWidth; 14650 bool CheckForReusedReductionOpsLocal = false; 14651 auto &&AdjustReducedVals = [&Pos, &Start, &ReduxWidth, NumReducedVals, 14652 &CheckForReusedReductionOpsLocal, 14653 &PrevReduxWidth, &V, 14654 &IgnoreList](bool IgnoreVL = false) { 14655 bool IsAnyRedOpGathered = !IgnoreVL && V.isAnyGathered(IgnoreList); 14656 if (!CheckForReusedReductionOpsLocal && PrevReduxWidth == ReduxWidth) { 14657 // Check if any of the reduction ops are gathered. If so, worth 14658 // trying again with less number of reduction ops. 14659 CheckForReusedReductionOpsLocal |= IsAnyRedOpGathered; 14660 } 14661 ++Pos; 14662 if (Pos < NumReducedVals - ReduxWidth + 1) 14663 return IsAnyRedOpGathered; 14664 Pos = Start; 14665 ReduxWidth /= 2; 14666 return IsAnyRedOpGathered; 14667 }; 14668 bool AnyVectorized = false; 14669 while (Pos < NumReducedVals - ReduxWidth + 1 && 14670 ReduxWidth >= ReductionLimit) { 14671 // Dependency in tree of the reduction ops - drop this attempt, try 14672 // later. 14673 if (CheckForReusedReductionOpsLocal && PrevReduxWidth != ReduxWidth && 14674 Start == 0) { 14675 CheckForReusedReductionOps = true; 14676 break; 14677 } 14678 PrevReduxWidth = ReduxWidth; 14679 ArrayRef<Value *> VL(std::next(Candidates.begin(), Pos), ReduxWidth); 14680 // Beeing analyzed already - skip. 14681 if (V.areAnalyzedReductionVals(VL)) { 14682 (void)AdjustReducedVals(/*IgnoreVL=*/true); 14683 continue; 14684 } 14685 // Early exit if any of the reduction values were deleted during 14686 // previous vectorization attempts. 14687 if (any_of(VL, [&V](Value *RedVal) { 14688 auto *RedValI = dyn_cast<Instruction>(RedVal); 14689 if (!RedValI) 14690 return false; 14691 return V.isDeleted(RedValI); 14692 })) 14693 break; 14694 V.buildTree(VL, IgnoreList); 14695 if (V.isTreeTinyAndNotFullyVectorizable(/*ForReduction=*/true)) { 14696 if (!AdjustReducedVals()) 14697 V.analyzedReductionVals(VL); 14698 continue; 14699 } 14700 if (V.isLoadCombineReductionCandidate(RdxKind)) { 14701 if (!AdjustReducedVals()) 14702 V.analyzedReductionVals(VL); 14703 continue; 14704 } 14705 V.reorderTopToBottom(); 14706 // No need to reorder the root node at all. 14707 V.reorderBottomToTop(/*IgnoreReorder=*/true); 14708 // Keep extracted other reduction values, if they are used in the 14709 // vectorization trees. 
14710 BoUpSLP::ExtraValueToDebugLocsMap LocalExternallyUsedValues( 14711 ExternallyUsedValues); 14712 for (unsigned Cnt = 0, Sz = ReducedVals.size(); Cnt < Sz; ++Cnt) { 14713 if (Cnt == I || (ShuffledExtracts && Cnt == I - 1)) 14714 continue; 14715 for (Value *V : ReducedVals[Cnt]) 14716 if (isa<Instruction>(V)) 14717 LocalExternallyUsedValues[TrackedVals[V]]; 14718 } 14719 if (!IsSupportedHorRdxIdentityOp) { 14720 // Number of uses of the candidates in the vector of values. 14721 assert(SameValuesCounter.empty() && 14722 "Reused values counter map is not empty"); 14723 for (unsigned Cnt = 0; Cnt < NumReducedVals; ++Cnt) { 14724 if (Cnt >= Pos && Cnt < Pos + ReduxWidth) 14725 continue; 14726 Value *V = Candidates[Cnt]; 14727 Value *OrigV = TrackedToOrig.find(V)->second; 14728 ++SameValuesCounter[OrigV]; 14729 } 14730 } 14731 SmallPtrSet<Value *, 4> VLScalars(VL.begin(), VL.end()); 14732 // Gather externally used values. 14733 SmallPtrSet<Value *, 4> Visited; 14734 for (unsigned Cnt = 0; Cnt < NumReducedVals; ++Cnt) { 14735 if (Cnt >= Pos && Cnt < Pos + ReduxWidth) 14736 continue; 14737 Value *RdxVal = Candidates[Cnt]; 14738 if (!Visited.insert(RdxVal).second) 14739 continue; 14740 // Check if the scalar was vectorized as part of the vectorization 14741 // tree but not the top node. 14742 if (!VLScalars.contains(RdxVal) && V.isVectorized(RdxVal)) { 14743 LocalExternallyUsedValues[RdxVal]; 14744 continue; 14745 } 14746 Value *OrigV = TrackedToOrig.find(RdxVal)->second; 14747 unsigned NumOps = 14748 VectorizedVals.lookup(RdxVal) + SameValuesCounter[OrigV]; 14749 if (NumOps != ReducedValsToOps.find(OrigV)->second.size()) 14750 LocalExternallyUsedValues[RdxVal]; 14751 } 14752 // Do not need the list of reused scalars in regular mode anymore. 14753 if (!IsSupportedHorRdxIdentityOp) 14754 SameValuesCounter.clear(); 14755 for (Value *RdxVal : VL) 14756 if (RequiredExtract.contains(RdxVal)) 14757 LocalExternallyUsedValues[RdxVal]; 14758 // Update LocalExternallyUsedValues for the scalar, replaced by 14759 // extractelement instructions. 14760 for (const std::pair<Value *, Value *> &Pair : ReplacedExternals) { 14761 auto *It = ExternallyUsedValues.find(Pair.first); 14762 if (It == ExternallyUsedValues.end()) 14763 continue; 14764 LocalExternallyUsedValues[Pair.second].append(It->second); 14765 } 14766 V.buildExternalUses(LocalExternallyUsedValues); 14767 14768 V.computeMinimumValueSizes(); 14769 14770 // Estimate cost. 14771 InstructionCost TreeCost = V.getTreeCost(VL); 14772 InstructionCost ReductionCost = 14773 getReductionCost(TTI, VL, IsCmpSelMinMax, ReduxWidth, RdxFMF); 14774 InstructionCost Cost = TreeCost + ReductionCost; 14775 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost 14776 << " for reduction\n"); 14777 if (!Cost.isValid()) 14778 return nullptr; 14779 if (Cost >= -SLPCostThreshold) { 14780 V.getORE()->emit([&]() { 14781 return OptimizationRemarkMissed( 14782 SV_NAME, "HorSLPNotBeneficial", 14783 ReducedValsToOps.find(VL[0])->second.front()) 14784 << "Vectorizing horizontal reduction is possible " 14785 << "but not beneficial with cost " << ore::NV("Cost", Cost) 14786 << " and threshold " 14787 << ore::NV("Threshold", -SLPCostThreshold); 14788 }); 14789 if (!AdjustReducedVals()) 14790 V.analyzedReductionVals(VL); 14791 continue; 14792 } 14793 14794 LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" 14795 << Cost << ". 
(HorRdx)\n"); 14796 V.getORE()->emit([&]() { 14797 return OptimizationRemark( 14798 SV_NAME, "VectorizedHorizontalReduction", 14799 ReducedValsToOps.find(VL[0])->second.front()) 14800 << "Vectorized horizontal reduction with cost " 14801 << ore::NV("Cost", Cost) << " and with tree size " 14802 << ore::NV("TreeSize", V.getTreeSize()); 14803 }); 14804 14805 Builder.setFastMathFlags(RdxFMF); 14806 14807 // Emit a reduction. If the root is a select (min/max idiom), the insert 14808 // point is the compare condition of that select. 14809 Instruction *RdxRootInst = cast<Instruction>(ReductionRoot); 14810 Instruction *InsertPt = RdxRootInst; 14811 if (IsCmpSelMinMax) 14812 InsertPt = GetCmpForMinMaxReduction(RdxRootInst); 14813 14814 // Vectorize a tree. 14815 Value *VectorizedRoot = V.vectorizeTree(LocalExternallyUsedValues, 14816 ReplacedExternals, InsertPt); 14817 14818 Builder.SetInsertPoint(InsertPt); 14819 14820 // To prevent poison from leaking across what used to be sequential, 14821 // safe, scalar boolean logic operations, the reduction operand must be 14822 // frozen. 14823 if ((isBoolLogicOp(RdxRootInst) || 14824 (AnyBoolLogicOp && VL.size() != TrackedVals.size())) && 14825 !isGuaranteedNotToBePoison(VectorizedRoot)) 14826 VectorizedRoot = Builder.CreateFreeze(VectorizedRoot); 14827 14828 // Emit code to correctly handle reused reduced values, if required. 14829 if (OptReusedScalars && !SameScaleFactor) { 14830 VectorizedRoot = 14831 emitReusedOps(VectorizedRoot, Builder, V.getRootNodeScalars(), 14832 SameValuesCounter, TrackedToOrig); 14833 } 14834 14835 Value *ReducedSubTree = 14836 emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI); 14837 if (ReducedSubTree->getType() != VL.front()->getType()) { 14838 ReducedSubTree = Builder.CreateIntCast( 14839 ReducedSubTree, VL.front()->getType(), any_of(VL, [&](Value *R) { 14840 KnownBits Known = computeKnownBits( 14841 R, cast<Instruction>(ReductionOps.front().front()) 14842 ->getModule() 14843 ->getDataLayout()); 14844 return !Known.isNonNegative(); 14845 })); 14846 } 14847 14848 // Improved analysis for add/fadd/xor reductions with same scale factor 14849 // for all operands of reductions. We can emit scalar ops for them 14850 // instead. 14851 if (OptReusedScalars && SameScaleFactor) 14852 ReducedSubTree = emitScaleForReusedOps( 14853 ReducedSubTree, Builder, SameValuesCounter.front().second); 14854 14855 VectorizedTree = GetNewVectorizedTree(VectorizedTree, ReducedSubTree); 14856 // Count vectorized reduced values to exclude them from final reduction. 
14857 for (Value *RdxVal : VL) { 14858 Value *OrigV = TrackedToOrig.find(RdxVal)->second; 14859 if (IsSupportedHorRdxIdentityOp) { 14860 VectorizedVals.try_emplace(OrigV, SameValuesCounter[RdxVal]); 14861 continue; 14862 } 14863 ++VectorizedVals.try_emplace(OrigV, 0).first->getSecond(); 14864 if (!V.isVectorized(RdxVal)) 14865 RequiredExtract.insert(RdxVal); 14866 } 14867 Pos += ReduxWidth; 14868 Start = Pos; 14869 ReduxWidth = llvm::bit_floor(NumReducedVals - Pos); 14870 AnyVectorized = true; 14871 } 14872 if (OptReusedScalars && !AnyVectorized) { 14873 for (const std::pair<Value *, unsigned> &P : SameValuesCounter) { 14874 Value *RedVal = emitScaleForReusedOps(P.first, Builder, P.second); 14875 VectorizedTree = GetNewVectorizedTree(VectorizedTree, RedVal); 14876 Value *OrigV = TrackedToOrig.find(P.first)->second; 14877 VectorizedVals.try_emplace(OrigV, P.second); 14878 } 14879 continue; 14880 } 14881 } 14882 if (VectorizedTree) { 14883 // Reorder operands of bool logical op in the natural order to avoid 14884 // possible problem with poison propagation. If not possible to reorder 14885 // (both operands are originally RHS), emit an extra freeze instruction 14886 // for the LHS operand. 14887 // I.e., if we have original code like this: 14888 // RedOp1 = select i1 ?, i1 LHS, i1 false 14889 // RedOp2 = select i1 RHS, i1 ?, i1 false 14890 14891 // Then, we swap LHS/RHS to create a new op that matches the poison 14892 // semantics of the original code. 14893 14894 // If we have original code like this and both values could be poison: 14895 // RedOp1 = select i1 ?, i1 LHS, i1 false 14896 // RedOp2 = select i1 ?, i1 RHS, i1 false 14897 14898 // Then, we must freeze LHS in the new op. 14899 auto FixBoolLogicalOps = [&, VectorizedTree](Value *&LHS, Value *&RHS, 14900 Instruction *RedOp1, 14901 Instruction *RedOp2, 14902 bool InitStep) { 14903 if (!AnyBoolLogicOp) 14904 return; 14905 if (isBoolLogicOp(RedOp1) && 14906 ((!InitStep && LHS == VectorizedTree) || 14907 getRdxOperand(RedOp1, 0) == LHS || isGuaranteedNotToBePoison(LHS))) 14908 return; 14909 if (isBoolLogicOp(RedOp2) && ((!InitStep && RHS == VectorizedTree) || 14910 getRdxOperand(RedOp2, 0) == RHS || 14911 isGuaranteedNotToBePoison(RHS))) { 14912 std::swap(LHS, RHS); 14913 return; 14914 } 14915 if (LHS != VectorizedTree) 14916 LHS = Builder.CreateFreeze(LHS); 14917 }; 14918 // Finish the reduction. 14919 // Need to add extra arguments and not vectorized possible reduction 14920 // values. 14921 // Try to avoid dependencies between the scalar remainders after 14922 // reductions. 14923 auto FinalGen = 14924 [&](ArrayRef<std::pair<Instruction *, Value *>> InstVals, 14925 bool InitStep) { 14926 unsigned Sz = InstVals.size(); 14927 SmallVector<std::pair<Instruction *, Value *>> ExtraReds(Sz / 2 + 14928 Sz % 2); 14929 for (unsigned I = 0, E = (Sz / 2) * 2; I < E; I += 2) { 14930 Instruction *RedOp = InstVals[I + 1].first; 14931 Builder.SetCurrentDebugLocation(RedOp->getDebugLoc()); 14932 Value *RdxVal1 = InstVals[I].second; 14933 Value *StableRdxVal1 = RdxVal1; 14934 auto It1 = TrackedVals.find(RdxVal1); 14935 if (It1 != TrackedVals.end()) 14936 StableRdxVal1 = It1->second; 14937 Value *RdxVal2 = InstVals[I + 1].second; 14938 Value *StableRdxVal2 = RdxVal2; 14939 auto It2 = TrackedVals.find(RdxVal2); 14940 if (It2 != TrackedVals.end()) 14941 StableRdxVal2 = It2->second; 14942 // To prevent poison from leaking across what used to be 14943 // sequential, safe, scalar boolean logic operations, the 14944 // reduction operand must be frozen. 
14945 FixBoolLogicalOps(StableRdxVal1, StableRdxVal2, InstVals[I].first, 14946 RedOp, InitStep); 14947 Value *ExtraRed = createOp(Builder, RdxKind, StableRdxVal1, 14948 StableRdxVal2, "op.rdx", ReductionOps); 14949 ExtraReds[I / 2] = std::make_pair(InstVals[I].first, ExtraRed); 14950 } 14951 if (Sz % 2 == 1) 14952 ExtraReds[Sz / 2] = InstVals.back(); 14953 return ExtraReds; 14954 }; 14955 SmallVector<std::pair<Instruction *, Value *>> ExtraReductions; 14956 ExtraReductions.emplace_back(cast<Instruction>(ReductionRoot), 14957 VectorizedTree); 14958 SmallPtrSet<Value *, 8> Visited; 14959 for (ArrayRef<Value *> Candidates : ReducedVals) { 14960 for (Value *RdxVal : Candidates) { 14961 if (!Visited.insert(RdxVal).second) 14962 continue; 14963 unsigned NumOps = VectorizedVals.lookup(RdxVal); 14964 for (Instruction *RedOp : 14965 ArrayRef(ReducedValsToOps.find(RdxVal)->second) 14966 .drop_back(NumOps)) 14967 ExtraReductions.emplace_back(RedOp, RdxVal); 14968 } 14969 } 14970 for (auto &Pair : ExternallyUsedValues) { 14971 // Add each externally used value to the final reduction. 14972 for (auto *I : Pair.second) 14973 ExtraReductions.emplace_back(I, Pair.first); 14974 } 14975 // Iterate through all not-vectorized reduction values/extra arguments. 14976 bool InitStep = true; 14977 while (ExtraReductions.size() > 1) { 14978 VectorizedTree = ExtraReductions.front().second; 14979 SmallVector<std::pair<Instruction *, Value *>> NewReds = 14980 FinalGen(ExtraReductions, InitStep); 14981 ExtraReductions.swap(NewReds); 14982 InitStep = false; 14983 } 14984 VectorizedTree = ExtraReductions.front().second; 14985 14986 ReductionRoot->replaceAllUsesWith(VectorizedTree); 14987 14988 // The original scalar reduction is expected to have no remaining 14989 // uses outside the reduction tree itself. Assert that we got this 14990 // correct, replace internal uses with undef, and mark for eventual 14991 // deletion. 14992 #ifndef NDEBUG 14993 SmallSet<Value *, 4> IgnoreSet; 14994 for (ArrayRef<Value *> RdxOps : ReductionOps) 14995 IgnoreSet.insert(RdxOps.begin(), RdxOps.end()); 14996 #endif 14997 for (ArrayRef<Value *> RdxOps : ReductionOps) { 14998 for (Value *Ignore : RdxOps) { 14999 if (!Ignore) 15000 continue; 15001 #ifndef NDEBUG 15002 for (auto *U : Ignore->users()) { 15003 assert(IgnoreSet.count(U) && 15004 "All users must be either in the reduction ops list."); 15005 } 15006 #endif 15007 if (!Ignore->use_empty()) { 15008 Value *Undef = UndefValue::get(Ignore->getType()); 15009 Ignore->replaceAllUsesWith(Undef); 15010 } 15011 V.eraseInstruction(cast<Instruction>(Ignore)); 15012 } 15013 } 15014 } else if (!CheckForReusedReductionOps) { 15015 for (ReductionOpsType &RdxOps : ReductionOps) 15016 for (Value *RdxOp : RdxOps) 15017 V.analyzedReductionRoot(cast<Instruction>(RdxOp)); 15018 } 15019 return VectorizedTree; 15020 } 15021 15022 private: 15023 /// Calculate the cost of a reduction. 15024 InstructionCost getReductionCost(TargetTransformInfo *TTI, 15025 ArrayRef<Value *> ReducedVals, 15026 bool IsCmpSelMinMax, unsigned ReduxWidth, 15027 FastMathFlags FMF) { 15028 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 15029 Type *ScalarTy = ReducedVals.front()->getType(); 15030 FixedVectorType *VectorTy = FixedVectorType::get(ScalarTy, ReduxWidth); 15031 InstructionCost VectorCost = 0, ScalarCost; 15032 // If all of the reduced values are constant, the vector cost is 0, since 15033 // the reduction value can be calculated at the compile time. 
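// In that case only the scalar side contributes to the returned
// VectorCost - ScalarCost difference.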
15034 bool AllConsts = allConstant(ReducedVals); 15035 auto EvaluateScalarCost = [&](function_ref<InstructionCost()> GenCostFn) { 15036 InstructionCost Cost = 0; 15037 // Scalar cost is repeated for N-1 elements. 15038 int Cnt = ReducedVals.size(); 15039 for (Value *RdxVal : ReducedVals) { 15040 if (Cnt == 1) 15041 break; 15042 --Cnt; 15043 if (RdxVal->hasNUsesOrMore(IsCmpSelMinMax ? 3 : 2)) { 15044 Cost += GenCostFn(); 15045 continue; 15046 } 15047 InstructionCost ScalarCost = 0; 15048 for (User *U : RdxVal->users()) { 15049 auto *RdxOp = cast<Instruction>(U); 15050 if (hasRequiredNumberOfUses(IsCmpSelMinMax, RdxOp)) { 15051 ScalarCost += TTI->getInstructionCost(RdxOp, CostKind); 15052 continue; 15053 } 15054 ScalarCost = InstructionCost::getInvalid(); 15055 break; 15056 } 15057 if (ScalarCost.isValid()) 15058 Cost += ScalarCost; 15059 else 15060 Cost += GenCostFn(); 15061 } 15062 return Cost; 15063 }; 15064 switch (RdxKind) { 15065 case RecurKind::Add: 15066 case RecurKind::Mul: 15067 case RecurKind::Or: 15068 case RecurKind::And: 15069 case RecurKind::Xor: 15070 case RecurKind::FAdd: 15071 case RecurKind::FMul: { 15072 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind); 15073 if (!AllConsts) 15074 VectorCost = 15075 TTI->getArithmeticReductionCost(RdxOpcode, VectorTy, FMF, CostKind); 15076 ScalarCost = EvaluateScalarCost([&]() { 15077 return TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy, CostKind); 15078 }); 15079 break; 15080 } 15081 case RecurKind::FMax: 15082 case RecurKind::FMin: 15083 case RecurKind::FMaximum: 15084 case RecurKind::FMinimum: 15085 case RecurKind::SMax: 15086 case RecurKind::SMin: 15087 case RecurKind::UMax: 15088 case RecurKind::UMin: { 15089 Intrinsic::ID Id = getMinMaxReductionIntrinsicOp(RdxKind); 15090 if (!AllConsts) 15091 VectorCost = TTI->getMinMaxReductionCost(Id, VectorTy, FMF, CostKind); 15092 ScalarCost = EvaluateScalarCost([&]() { 15093 IntrinsicCostAttributes ICA(Id, ScalarTy, {ScalarTy, ScalarTy}, FMF); 15094 return TTI->getIntrinsicInstrCost(ICA, CostKind); 15095 }); 15096 break; 15097 } 15098 default: 15099 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 15100 } 15101 15102 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost 15103 << " for reduction of " << shortBundleName(ReducedVals) 15104 << " (It is a splitting reduction)\n"); 15105 return VectorCost - ScalarCost; 15106 } 15107 15108 /// Emit a horizontal reduction of the vectorized value. 15109 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder, 15110 unsigned ReduxWidth, const TargetTransformInfo *TTI) { 15111 assert(VectorizedValue && "Need to have a vectorized tree node"); 15112 assert(isPowerOf2_32(ReduxWidth) && 15113 "We only handle power-of-two reductions for now"); 15114 assert(RdxKind != RecurKind::FMulAdd && 15115 "A call to the llvm.fmuladd intrinsic is not handled yet"); 15116 15117 ++NumVectorInstructions; 15118 return createSimpleTargetReduction(Builder, VectorizedValue, RdxKind); 15119 } 15120 15121 /// Emits optimized code for unique scalar value reused \p Cnt times. 
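/// E.g., for RecurKind::Add and \p Cnt == 4 a single multiply by 4 is emitted
/// instead of three extra scalar additions, and for RecurKind::Xor an even
/// repeat count folds the value to zero.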
15122 Value *emitScaleForReusedOps(Value *VectorizedValue, IRBuilderBase &Builder, 15123 unsigned Cnt) { 15124 assert(IsSupportedHorRdxIdentityOp && 15125 "The optimization of matched scalar identity horizontal reductions " 15126 "must be supported."); 15127 switch (RdxKind) { 15128 case RecurKind::Add: { 15129 // res = mul vv, n 15130 Value *Scale = ConstantInt::get(VectorizedValue->getType(), Cnt); 15131 LLVM_DEBUG(dbgs() << "SLP: Add (to-mul) " << Cnt << "of " 15132 << VectorizedValue << ". (HorRdx)\n"); 15133 return Builder.CreateMul(VectorizedValue, Scale); 15134 } 15135 case RecurKind::Xor: { 15136 // res = n % 2 ? 0 : vv 15137 LLVM_DEBUG(dbgs() << "SLP: Xor " << Cnt << "of " << VectorizedValue 15138 << ". (HorRdx)\n"); 15139 if (Cnt % 2 == 0) 15140 return Constant::getNullValue(VectorizedValue->getType()); 15141 return VectorizedValue; 15142 } 15143 case RecurKind::FAdd: { 15144 // res = fmul v, n 15145 Value *Scale = ConstantFP::get(VectorizedValue->getType(), Cnt); 15146 LLVM_DEBUG(dbgs() << "SLP: FAdd (to-fmul) " << Cnt << "of " 15147 << VectorizedValue << ". (HorRdx)\n"); 15148 return Builder.CreateFMul(VectorizedValue, Scale); 15149 } 15150 case RecurKind::And: 15151 case RecurKind::Or: 15152 case RecurKind::SMax: 15153 case RecurKind::SMin: 15154 case RecurKind::UMax: 15155 case RecurKind::UMin: 15156 case RecurKind::FMax: 15157 case RecurKind::FMin: 15158 case RecurKind::FMaximum: 15159 case RecurKind::FMinimum: 15160 // res = vv 15161 return VectorizedValue; 15162 case RecurKind::Mul: 15163 case RecurKind::FMul: 15164 case RecurKind::FMulAdd: 15165 case RecurKind::IAnyOf: 15166 case RecurKind::FAnyOf: 15167 case RecurKind::None: 15168 llvm_unreachable("Unexpected reduction kind for repeated scalar."); 15169 } 15170 return nullptr; 15171 } 15172 15173 /// Emits actual operation for the scalar identity values, found during 15174 /// horizontal reduction analysis. 15175 Value *emitReusedOps(Value *VectorizedValue, IRBuilderBase &Builder, 15176 ArrayRef<Value *> VL, 15177 const MapVector<Value *, unsigned> &SameValuesCounter, 15178 const DenseMap<Value *, Value *> &TrackedToOrig) { 15179 assert(IsSupportedHorRdxIdentityOp && 15180 "The optimization of matched scalar identity horizontal reductions " 15181 "must be supported."); 15182 switch (RdxKind) { 15183 case RecurKind::Add: { 15184 // root = mul prev_root, <1, 1, n, 1> 15185 SmallVector<Constant *> Vals; 15186 for (Value *V : VL) { 15187 unsigned Cnt = SameValuesCounter.lookup(TrackedToOrig.find(V)->second); 15188 Vals.push_back(ConstantInt::get(V->getType(), Cnt, /*IsSigned=*/false)); 15189 } 15190 auto *Scale = ConstantVector::get(Vals); 15191 LLVM_DEBUG(dbgs() << "SLP: Add (to-mul) " << Scale << "of " 15192 << VectorizedValue << ". (HorRdx)\n"); 15193 return Builder.CreateMul(VectorizedValue, Scale); 15194 } 15195 case RecurKind::And: 15196 case RecurKind::Or: 15197 // No need for multiple or/and(s). 15198 LLVM_DEBUG(dbgs() << "SLP: And/or of same " << VectorizedValue 15199 << ". (HorRdx)\n"); 15200 return VectorizedValue; 15201 case RecurKind::SMax: 15202 case RecurKind::SMin: 15203 case RecurKind::UMax: 15204 case RecurKind::UMin: 15205 case RecurKind::FMax: 15206 case RecurKind::FMin: 15207 case RecurKind::FMaximum: 15208 case RecurKind::FMinimum: 15209 // No need for multiple min/max(s) of the same value. 15210 LLVM_DEBUG(dbgs() << "SLP: Max/min of same " << VectorizedValue 15211 << ". 
(HorRdx)\n"); 15212 return VectorizedValue; 15213 case RecurKind::Xor: { 15214 // Replace values with even number of repeats with 0, since 15215 // x xor x = 0. 15216 // root = shuffle prev_root, zeroinitalizer, <0, 1, 2, vf, 4, vf, 5, 6, 15217 // 7>, if elements 4th and 6th elements have even number of repeats. 15218 SmallVector<int> Mask( 15219 cast<FixedVectorType>(VectorizedValue->getType())->getNumElements(), 15220 PoisonMaskElem); 15221 std::iota(Mask.begin(), Mask.end(), 0); 15222 bool NeedShuffle = false; 15223 for (unsigned I = 0, VF = VL.size(); I < VF; ++I) { 15224 Value *V = VL[I]; 15225 unsigned Cnt = SameValuesCounter.lookup(TrackedToOrig.find(V)->second); 15226 if (Cnt % 2 == 0) { 15227 Mask[I] = VF; 15228 NeedShuffle = true; 15229 } 15230 } 15231 LLVM_DEBUG(dbgs() << "SLP: Xor <"; for (int I 15232 : Mask) dbgs() 15233 << I << " "; 15234 dbgs() << "> of " << VectorizedValue << ". (HorRdx)\n"); 15235 if (NeedShuffle) 15236 VectorizedValue = Builder.CreateShuffleVector( 15237 VectorizedValue, 15238 ConstantVector::getNullValue(VectorizedValue->getType()), Mask); 15239 return VectorizedValue; 15240 } 15241 case RecurKind::FAdd: { 15242 // root = fmul prev_root, <1.0, 1.0, n.0, 1.0> 15243 SmallVector<Constant *> Vals; 15244 for (Value *V : VL) { 15245 unsigned Cnt = SameValuesCounter.lookup(TrackedToOrig.find(V)->second); 15246 Vals.push_back(ConstantFP::get(V->getType(), Cnt)); 15247 } 15248 auto *Scale = ConstantVector::get(Vals); 15249 return Builder.CreateFMul(VectorizedValue, Scale); 15250 } 15251 case RecurKind::Mul: 15252 case RecurKind::FMul: 15253 case RecurKind::FMulAdd: 15254 case RecurKind::IAnyOf: 15255 case RecurKind::FAnyOf: 15256 case RecurKind::None: 15257 llvm_unreachable("Unexpected reduction kind for reused scalars."); 15258 } 15259 return nullptr; 15260 } 15261 }; 15262 } // end anonymous namespace 15263 15264 static std::optional<unsigned> getAggregateSize(Instruction *InsertInst) { 15265 if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) 15266 return cast<FixedVectorType>(IE->getType())->getNumElements(); 15267 15268 unsigned AggregateSize = 1; 15269 auto *IV = cast<InsertValueInst>(InsertInst); 15270 Type *CurrentType = IV->getType(); 15271 do { 15272 if (auto *ST = dyn_cast<StructType>(CurrentType)) { 15273 for (auto *Elt : ST->elements()) 15274 if (Elt != ST->getElementType(0)) // check homogeneity 15275 return std::nullopt; 15276 AggregateSize *= ST->getNumElements(); 15277 CurrentType = ST->getElementType(0); 15278 } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) { 15279 AggregateSize *= AT->getNumElements(); 15280 CurrentType = AT->getElementType(); 15281 } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) { 15282 AggregateSize *= VT->getNumElements(); 15283 return AggregateSize; 15284 } else if (CurrentType->isSingleValueType()) { 15285 return AggregateSize; 15286 } else { 15287 return std::nullopt; 15288 } 15289 } while (true); 15290 } 15291 15292 static void findBuildAggregate_rec(Instruction *LastInsertInst, 15293 TargetTransformInfo *TTI, 15294 SmallVectorImpl<Value *> &BuildVectorOpds, 15295 SmallVectorImpl<Value *> &InsertElts, 15296 unsigned OperandOffset) { 15297 do { 15298 Value *InsertedOperand = LastInsertInst->getOperand(1); 15299 std::optional<unsigned> OperandIndex = 15300 getInsertIndex(LastInsertInst, OperandOffset); 15301 if (!OperandIndex) 15302 return; 15303 if (isa<InsertElementInst, InsertValueInst>(InsertedOperand)) { 15304 findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI, 15305 
BuildVectorOpds, InsertElts, *OperandIndex); 15306 15307 } else { 15308 BuildVectorOpds[*OperandIndex] = InsertedOperand; 15309 InsertElts[*OperandIndex] = LastInsertInst; 15310 } 15311 LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0)); 15312 } while (LastInsertInst != nullptr && 15313 isa<InsertValueInst, InsertElementInst>(LastInsertInst) && 15314 LastInsertInst->hasOneUse()); 15315 } 15316 15317 /// Recognize construction of vectors like 15318 /// %ra = insertelement <4 x float> poison, float %s0, i32 0 15319 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 15320 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 15321 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 15322 /// starting from the last insertelement or insertvalue instruction. 15323 /// 15324 /// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>}, 15325 /// {{float, float}, {float, float}}, [2 x {float, float}] and so on. 15326 /// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples. 15327 /// 15328 /// Assume LastInsertInst is of InsertElementInst or InsertValueInst type. 15329 /// 15330 /// \return true if it matches. 15331 static bool findBuildAggregate(Instruction *LastInsertInst, 15332 TargetTransformInfo *TTI, 15333 SmallVectorImpl<Value *> &BuildVectorOpds, 15334 SmallVectorImpl<Value *> &InsertElts) { 15335 15336 assert((isa<InsertElementInst>(LastInsertInst) || 15337 isa<InsertValueInst>(LastInsertInst)) && 15338 "Expected insertelement or insertvalue instruction!"); 15339 15340 assert((BuildVectorOpds.empty() && InsertElts.empty()) && 15341 "Expected empty result vectors!"); 15342 15343 std::optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst); 15344 if (!AggregateSize) 15345 return false; 15346 BuildVectorOpds.resize(*AggregateSize); 15347 InsertElts.resize(*AggregateSize); 15348 15349 findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts, 0); 15350 llvm::erase(BuildVectorOpds, nullptr); 15351 llvm::erase(InsertElts, nullptr); 15352 if (BuildVectorOpds.size() >= 2) 15353 return true; 15354 15355 return false; 15356 } 15357 15358 /// Try and get a reduction instruction from a phi node. 15359 /// 15360 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions 15361 /// if they come from either \p ParentBB or a containing loop latch. 15362 /// 15363 /// \returns A candidate reduction value if possible, or \code nullptr \endcode 15364 /// if not possible. 15365 static Instruction *getReductionInstr(const DominatorTree *DT, PHINode *P, 15366 BasicBlock *ParentBB, LoopInfo *LI) { 15367 // There are situations where the reduction value is not dominated by the 15368 // reduction phi. Vectorizing such cases has been reported to cause 15369 // miscompiles. See PR25787. 15370 auto DominatedReduxValue = [&](Value *R) { 15371 return isa<Instruction>(R) && 15372 DT->dominates(P->getParent(), cast<Instruction>(R)->getParent()); 15373 }; 15374 15375 Instruction *Rdx = nullptr; 15376 15377 // Return the incoming value if it comes from the same BB as the phi node. 15378 if (P->getIncomingBlock(0) == ParentBB) { 15379 Rdx = dyn_cast<Instruction>(P->getIncomingValue(0)); 15380 } else if (P->getIncomingBlock(1) == ParentBB) { 15381 Rdx = dyn_cast<Instruction>(P->getIncomingValue(1)); 15382 } 15383 15384 if (Rdx && DominatedReduxValue(Rdx)) 15385 return Rdx; 15386 15387 // Otherwise, check whether we have a loop latch to look at. 
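// This covers reduction phis placed in a loop header whose reduction value is
// produced in the loop latch rather than in ParentBB itself.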
15388 Loop *BBL = LI->getLoopFor(ParentBB); 15389 if (!BBL) 15390 return nullptr; 15391 BasicBlock *BBLatch = BBL->getLoopLatch(); 15392 if (!BBLatch) 15393 return nullptr; 15394 15395 // There is a loop latch, return the incoming value if it comes from 15396 // that. This reduction pattern occasionally turns up. 15397 if (P->getIncomingBlock(0) == BBLatch) { 15398 Rdx = dyn_cast<Instruction>(P->getIncomingValue(0)); 15399 } else if (P->getIncomingBlock(1) == BBLatch) { 15400 Rdx = dyn_cast<Instruction>(P->getIncomingValue(1)); 15401 } 15402 15403 if (Rdx && DominatedReduxValue(Rdx)) 15404 return Rdx; 15405 15406 return nullptr; 15407 } 15408 15409 static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) { 15410 if (match(I, m_BinOp(m_Value(V0), m_Value(V1)))) 15411 return true; 15412 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1)))) 15413 return true; 15414 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1)))) 15415 return true; 15416 if (match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(V0), m_Value(V1)))) 15417 return true; 15418 if (match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(V0), m_Value(V1)))) 15419 return true; 15420 if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1)))) 15421 return true; 15422 if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1)))) 15423 return true; 15424 if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1)))) 15425 return true; 15426 if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1)))) 15427 return true; 15428 return false; 15429 } 15430 15431 /// We could have an initial reduction that is not an add. 15432 /// r *= v1 + v2 + v3 + v4 15433 /// In such a case start looking for a tree rooted in the first '+'. 15434 /// \Returns the new root if found, which may be nullptr if not an instruction. 15435 static Instruction *tryGetSecondaryReductionRoot(PHINode *Phi, 15436 Instruction *Root) { 15437 assert((isa<BinaryOperator>(Root) || isa<SelectInst>(Root) || 15438 isa<IntrinsicInst>(Root)) && 15439 "Expected binop, select, or intrinsic for reduction matching"); 15440 Value *LHS = 15441 Root->getOperand(HorizontalReduction::getFirstOperandIndex(Root)); 15442 Value *RHS = 15443 Root->getOperand(HorizontalReduction::getFirstOperandIndex(Root) + 1); 15444 if (LHS == Phi) 15445 return dyn_cast<Instruction>(RHS); 15446 if (RHS == Phi) 15447 return dyn_cast<Instruction>(LHS); 15448 return nullptr; 15449 } 15450 15451 /// \p Returns the first operand of \p I that does not match \p Phi. If 15452 /// operand is not an instruction it returns nullptr. 15453 static Instruction *getNonPhiOperand(Instruction *I, PHINode *Phi) { 15454 Value *Op0 = nullptr; 15455 Value *Op1 = nullptr; 15456 if (!matchRdxBop(I, Op0, Op1)) 15457 return nullptr; 15458 return dyn_cast<Instruction>(Op0 == Phi ? Op1 : Op0); 15459 } 15460 15461 /// \Returns true if \p I is a candidate instruction for reduction vectorization. 
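/// Only binary operators, the min/max intrinsics matched by matchRdxBop, and
/// select instructions are accepted.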
15462 static bool isReductionCandidate(Instruction *I) { 15463 bool IsSelect = match(I, m_Select(m_Value(), m_Value(), m_Value())); 15464 Value *B0 = nullptr, *B1 = nullptr; 15465 bool IsBinop = matchRdxBop(I, B0, B1); 15466 return IsBinop || IsSelect; 15467 } 15468 15469 bool SLPVectorizerPass::vectorizeHorReduction( 15470 PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R, TargetTransformInfo *TTI, 15471 SmallVectorImpl<WeakTrackingVH> &PostponedInsts) { 15472 if (!ShouldVectorizeHor) 15473 return false; 15474 bool TryOperandsAsNewSeeds = P && isa<BinaryOperator>(Root); 15475 15476 if (Root->getParent() != BB || isa<PHINode>(Root)) 15477 return false; 15478 15479 // If we can find a secondary reduction root, use that instead. 15480 auto SelectRoot = [&]() { 15481 if (TryOperandsAsNewSeeds && isReductionCandidate(Root) && 15482 HorizontalReduction::getRdxKind(Root) != RecurKind::None) 15483 if (Instruction *NewRoot = tryGetSecondaryReductionRoot(P, Root)) 15484 return NewRoot; 15485 return Root; 15486 }; 15487 15488 // Start the analysis from the Root instruction. If a horizontal reduction is 15489 // found, try to vectorize it. If it is not a horizontal reduction or 15490 // vectorization is not possible or not effective, and the currently analyzed 15491 // instruction is a binary operation, try to vectorize the operands, using 15492 // pre-order DFS traversal order. If the operands were not vectorized, repeat 15493 // the same procedure considering each operand as a possible root of the 15494 // horizontal reduction. 15495 // Interrupt the process if the Root instruction itself was vectorized or all 15496 // sub-trees no deeper than RecursionMaxDepth were analyzed/vectorized. 15497 // If a horizontal reduction was not matched or vectorized, we collect 15498 // instructions for possible later vectorization attempts. 15499 std::queue<std::pair<Instruction *, unsigned>> Stack; 15500 Stack.emplace(SelectRoot(), 0); 15501 SmallPtrSet<Value *, 8> VisitedInstrs; 15502 bool Res = false; 15503 auto &&TryToReduce = [this, TTI, &R](Instruction *Inst) -> Value * { 15504 if (R.isAnalyzedReductionRoot(Inst)) 15505 return nullptr; 15506 if (!isReductionCandidate(Inst)) 15507 return nullptr; 15508 HorizontalReduction HorRdx; 15509 if (!HorRdx.matchAssociativeReduction(R, Inst, *SE, *DL, *TLI)) 15510 return nullptr; 15511 return HorRdx.tryToReduce(R, TTI, *TLI); 15512 }; 15513 auto TryAppendToPostponedInsts = [&](Instruction *FutureSeed) { 15514 if (TryOperandsAsNewSeeds && FutureSeed == Root) { 15515 FutureSeed = getNonPhiOperand(Root, P); 15516 if (!FutureSeed) 15517 return false; 15518 } 15519 // Do not collect CmpInst or InsertElementInst/InsertValueInst as their 15520 // analysis is done separately. 15521 if (!isa<CmpInst, InsertElementInst, InsertValueInst>(FutureSeed)) 15522 PostponedInsts.push_back(FutureSeed); 15523 return true; 15524 }; 15525 15526 while (!Stack.empty()) { 15527 Instruction *Inst; 15528 unsigned Level; 15529 std::tie(Inst, Level) = Stack.front(); 15530 Stack.pop(); 15531 // Do not try to analyze an instruction that has already been vectorized. 15532 // This may happen when we vectorize instruction operands on a previous 15533 // iteration while the stack was populated before that happened. 15534 if (R.isDeleted(Inst)) 15535 continue; 15536 if (Value *VectorizedV = TryToReduce(Inst)) { 15537 Res = true; 15538 if (auto *I = dyn_cast<Instruction>(VectorizedV)) { 15539 // Try to find another reduction.
15540 Stack.emplace(I, Level); 15541 continue; 15542 } 15543 } else { 15544 // We could not vectorize `Inst` so try to use it as a future seed. 15545 if (!TryAppendToPostponedInsts(Inst)) { 15546 assert(Stack.empty() && "Expected empty stack"); 15547 break; 15548 } 15549 } 15550 15551 // Try to vectorize operands. 15552 // Continue analysis for the instruction from the same basic block only to 15553 // save compile time. 15554 if (++Level < RecursionMaxDepth) 15555 for (auto *Op : Inst->operand_values()) 15556 if (VisitedInstrs.insert(Op).second) 15557 if (auto *I = dyn_cast<Instruction>(Op)) 15558 // Do not try to vectorize CmpInst operands, this is done 15559 // separately. 15560 if (!isa<PHINode, CmpInst, InsertElementInst, InsertValueInst>(I) && 15561 !R.isDeleted(I) && I->getParent() == BB) 15562 Stack.emplace(I, Level); 15563 } 15564 return Res; 15565 } 15566 15567 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Instruction *Root, 15568 BasicBlock *BB, BoUpSLP &R, 15569 TargetTransformInfo *TTI) { 15570 SmallVector<WeakTrackingVH> PostponedInsts; 15571 bool Res = vectorizeHorReduction(P, Root, BB, R, TTI, PostponedInsts); 15572 Res |= tryToVectorize(PostponedInsts, R); 15573 return Res; 15574 } 15575 15576 bool SLPVectorizerPass::tryToVectorize(ArrayRef<WeakTrackingVH> Insts, 15577 BoUpSLP &R) { 15578 bool Res = false; 15579 for (Value *V : Insts) 15580 if (auto *Inst = dyn_cast<Instruction>(V); Inst && !R.isDeleted(Inst)) 15581 Res |= tryToVectorize(Inst, R); 15582 return Res; 15583 } 15584 15585 bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI, 15586 BasicBlock *BB, BoUpSLP &R) { 15587 if (!R.canMapToVector(IVI->getType())) 15588 return false; 15589 15590 SmallVector<Value *, 16> BuildVectorOpds; 15591 SmallVector<Value *, 16> BuildVectorInsts; 15592 if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts)) 15593 return false; 15594 15595 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n"); 15596 // Aggregate value is unlikely to be processed in vector register. 15597 return tryToVectorizeList(BuildVectorOpds, R); 15598 } 15599 15600 bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI, 15601 BasicBlock *BB, BoUpSLP &R) { 15602 SmallVector<Value *, 16> BuildVectorInsts; 15603 SmallVector<Value *, 16> BuildVectorOpds; 15604 SmallVector<int> Mask; 15605 if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) || 15606 (llvm::all_of( 15607 BuildVectorOpds, 15608 [](Value *V) { return isa<ExtractElementInst, UndefValue>(V); }) && 15609 isFixedVectorShuffle(BuildVectorOpds, Mask))) 15610 return false; 15611 15612 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n"); 15613 return tryToVectorizeList(BuildVectorInsts, R); 15614 } 15615 15616 template <typename T> 15617 static bool tryToVectorizeSequence( 15618 SmallVectorImpl<T *> &Incoming, function_ref<bool(T *, T *)> Comparator, 15619 function_ref<bool(T *, T *)> AreCompatible, 15620 function_ref<bool(ArrayRef<T *>, bool)> TryToVectorizeHelper, 15621 bool MaxVFOnly, BoUpSLP &R) { 15622 bool Changed = false; 15623 // Sort by type, parent, operands. 15624 stable_sort(Incoming, Comparator); 15625 15626 // Try to vectorize elements base on their type. 15627 SmallVector<T *> Candidates; 15628 for (auto *IncIt = Incoming.begin(), *E = Incoming.end(); IncIt != E;) { 15629 // Look for the next elements with the same type, parent and operand 15630 // kinds. 
15631 auto *SameTypeIt = IncIt; 15632 while (SameTypeIt != E && AreCompatible(*SameTypeIt, *IncIt)) 15633 ++SameTypeIt; 15634 15635 // Try to vectorize them. 15636 unsigned NumElts = (SameTypeIt - IncIt); 15637 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at nodes (" 15638 << NumElts << ")\n"); 15639 // The vectorization is a 3-state attempt: 15640 // 1. Try to vectorize instructions with the same/alternate opcodes with the 15641 // size of maximal register at first. 15642 // 2. Try to vectorize remaining instructions with the same type, if 15643 // possible. This may produce better vectorization results than trying to 15644 // vectorize only instructions with the same/alternate opcodes. 15645 // 3. Final attempt to try to vectorize all instructions with the 15646 // same/alternate ops only; this may result in some extra final 15647 // vectorization. 15648 if (NumElts > 1 && 15649 TryToVectorizeHelper(ArrayRef(IncIt, NumElts), MaxVFOnly)) { 15650 // Success, start over because instructions might have been changed. 15651 Changed = true; 15652 } else { 15653 /// \Returns the minimum number of elements that we will attempt to 15654 /// vectorize. 15655 auto GetMinNumElements = [&R](Value *V) { 15656 unsigned EltSize = R.getVectorElementSize(V); 15657 return std::max(2U, R.getMaxVecRegSize() / EltSize); 15658 }; 15659 if (NumElts < GetMinNumElements(*IncIt) && 15660 (Candidates.empty() || 15661 Candidates.front()->getType() == (*IncIt)->getType())) { 15662 Candidates.append(IncIt, std::next(IncIt, NumElts)); 15663 } 15664 } 15665 // Final attempt to vectorize instructions with the same types. 15666 if (Candidates.size() > 1 && 15667 (SameTypeIt == E || (*SameTypeIt)->getType() != (*IncIt)->getType())) { 15668 if (TryToVectorizeHelper(Candidates, /*MaxVFOnly=*/false)) { 15669 // Success, start over because instructions might have been changed. 15670 Changed = true; 15671 } else if (MaxVFOnly) { 15672 // Try to vectorize using small vectors. 15673 for (auto *It = Candidates.begin(), *End = Candidates.end(); 15674 It != End;) { 15675 auto *SameTypeIt = It; 15676 while (SameTypeIt != End && AreCompatible(*SameTypeIt, *It)) 15677 ++SameTypeIt; 15678 unsigned NumElts = (SameTypeIt - It); 15679 if (NumElts > 1 && TryToVectorizeHelper(ArrayRef(It, NumElts), 15680 /*MaxVFOnly=*/false)) 15681 Changed = true; 15682 It = SameTypeIt; 15683 } 15684 } 15685 Candidates.clear(); 15686 } 15687 15688 // Start over at the next instruction of a different type (or the end). 15689 IncIt = SameTypeIt; 15690 } 15691 return Changed; 15692 } 15693 15694 /// Compare two cmp instructions. If IsCompatibility is true, the function 15695 /// returns true if the 2 cmps have same/swapped predicates and the most 15696 /// compatible corresponding operands. If IsCompatibility is false, the function 15697 /// implements a strict weak ordering relation between two cmp instructions, 15698 /// returning true if the first instruction is "less" than the second, i.e. its 15699 /// predicate is less than the predicate of the second or the operand IDs are 15700 /// less than the operand IDs of the second cmp instruction.
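/// The same routine therefore serves both as the compatibility predicate and
/// as the comparator passed to tryToVectorizeSequence for compare
/// instructions.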
15701 template <bool IsCompatibility> 15702 static bool compareCmp(Value *V, Value *V2, TargetLibraryInfo &TLI, 15703 const DominatorTree &DT) { 15704 assert(isValidElementType(V->getType()) && 15705 isValidElementType(V2->getType()) && 15706 "Expected valid element types only."); 15707 if (V == V2) 15708 return IsCompatibility; 15709 auto *CI1 = cast<CmpInst>(V); 15710 auto *CI2 = cast<CmpInst>(V2); 15711 if (CI1->getOperand(0)->getType()->getTypeID() < 15712 CI2->getOperand(0)->getType()->getTypeID()) 15713 return !IsCompatibility; 15714 if (CI1->getOperand(0)->getType()->getTypeID() > 15715 CI2->getOperand(0)->getType()->getTypeID()) 15716 return false; 15717 CmpInst::Predicate Pred1 = CI1->getPredicate(); 15718 CmpInst::Predicate Pred2 = CI2->getPredicate(); 15719 CmpInst::Predicate SwapPred1 = CmpInst::getSwappedPredicate(Pred1); 15720 CmpInst::Predicate SwapPred2 = CmpInst::getSwappedPredicate(Pred2); 15721 CmpInst::Predicate BasePred1 = std::min(Pred1, SwapPred1); 15722 CmpInst::Predicate BasePred2 = std::min(Pred2, SwapPred2); 15723 if (BasePred1 < BasePred2) 15724 return !IsCompatibility; 15725 if (BasePred1 > BasePred2) 15726 return false; 15727 // Compare operands. 15728 bool CI1Preds = Pred1 == BasePred1; 15729 bool CI2Preds = Pred2 == BasePred1; 15730 for (int I = 0, E = CI1->getNumOperands(); I < E; ++I) { 15731 auto *Op1 = CI1->getOperand(CI1Preds ? I : E - I - 1); 15732 auto *Op2 = CI2->getOperand(CI2Preds ? I : E - I - 1); 15733 if (Op1 == Op2) 15734 continue; 15735 if (Op1->getValueID() < Op2->getValueID()) 15736 return !IsCompatibility; 15737 if (Op1->getValueID() > Op2->getValueID()) 15738 return false; 15739 if (auto *I1 = dyn_cast<Instruction>(Op1)) 15740 if (auto *I2 = dyn_cast<Instruction>(Op2)) { 15741 if (IsCompatibility) { 15742 if (I1->getParent() != I2->getParent()) 15743 return false; 15744 } else { 15745 // Try to compare nodes with same parent. 15746 DomTreeNodeBase<BasicBlock> *NodeI1 = DT.getNode(I1->getParent()); 15747 DomTreeNodeBase<BasicBlock> *NodeI2 = DT.getNode(I2->getParent()); 15748 if (!NodeI1) 15749 return NodeI2 != nullptr; 15750 if (!NodeI2) 15751 return false; 15752 assert((NodeI1 == NodeI2) == 15753 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) && 15754 "Different nodes should have different DFS numbers"); 15755 if (NodeI1 != NodeI2) 15756 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn(); 15757 } 15758 InstructionsState S = getSameOpcode({I1, I2}, TLI); 15759 if (S.getOpcode() && (IsCompatibility || !S.isAltShuffle())) 15760 continue; 15761 if (IsCompatibility) 15762 return false; 15763 if (I1->getOpcode() != I2->getOpcode()) 15764 return I1->getOpcode() < I2->getOpcode(); 15765 } 15766 } 15767 return IsCompatibility; 15768 } 15769 15770 template <typename ItT> 15771 bool SLPVectorizerPass::vectorizeCmpInsts(iterator_range<ItT> CmpInsts, 15772 BasicBlock *BB, BoUpSLP &R) { 15773 bool Changed = false; 15774 // Try to find reductions first. 15775 for (CmpInst *I : CmpInsts) { 15776 if (R.isDeleted(I)) 15777 continue; 15778 for (Value *Op : I->operands()) 15779 if (auto *RootOp = dyn_cast<Instruction>(Op)) 15780 Changed |= vectorizeRootInstruction(nullptr, RootOp, BB, R, TTI); 15781 } 15782 // Try to vectorize operands as vector bundles. 15783 for (CmpInst *I : CmpInsts) { 15784 if (R.isDeleted(I)) 15785 continue; 15786 Changed |= tryToVectorize(I, R); 15787 } 15788 // Try to vectorize list of compares. 15789 // Sort by type, compare predicate, etc. 
15790 auto CompareSorter = [&](Value *V, Value *V2) { 15791 if (V == V2) 15792 return false; 15793 return compareCmp<false>(V, V2, *TLI, *DT); 15794 }; 15795 15796 auto AreCompatibleCompares = [&](Value *V1, Value *V2) { 15797 if (V1 == V2) 15798 return true; 15799 return compareCmp<true>(V1, V2, *TLI, *DT); 15800 }; 15801 15802 SmallVector<Value *> Vals; 15803 for (Instruction *V : CmpInsts) 15804 if (!R.isDeleted(V) && isValidElementType(V->getType())) 15805 Vals.push_back(V); 15806 if (Vals.size() <= 1) 15807 return Changed; 15808 Changed |= tryToVectorizeSequence<Value>( 15809 Vals, CompareSorter, AreCompatibleCompares, 15810 [this, &R](ArrayRef<Value *> Candidates, bool MaxVFOnly) { 15811 // Exclude possible reductions from other blocks. 15812 bool ArePossiblyReducedInOtherBlock = any_of(Candidates, [](Value *V) { 15813 return any_of(V->users(), [V](User *U) { 15814 auto *Select = dyn_cast<SelectInst>(U); 15815 return Select && 15816 Select->getParent() != cast<Instruction>(V)->getParent(); 15817 }); 15818 }); 15819 if (ArePossiblyReducedInOtherBlock) 15820 return false; 15821 return tryToVectorizeList(Candidates, R, MaxVFOnly); 15822 }, 15823 /*MaxVFOnly=*/true, R); 15824 return Changed; 15825 } 15826 15827 bool SLPVectorizerPass::vectorizeInserts(InstSetVector &Instructions, 15828 BasicBlock *BB, BoUpSLP &R) { 15829 assert(all_of(Instructions, 15830 [](auto *I) { 15831 return isa<InsertElementInst, InsertValueInst>(I); 15832 }) && 15833 "This function only accepts Insert instructions"); 15834 bool OpsChanged = false; 15835 SmallVector<WeakTrackingVH> PostponedInsts; 15836 // pass1 - try to vectorize reductions only 15837 for (auto *I : reverse(Instructions)) { 15838 if (R.isDeleted(I)) 15839 continue; 15840 OpsChanged |= vectorizeHorReduction(nullptr, I, BB, R, TTI, PostponedInsts); 15841 } 15842 // pass2 - try to match and vectorize a buildvector sequence. 15843 for (auto *I : reverse(Instructions)) { 15844 if (R.isDeleted(I) || isa<CmpInst>(I)) 15845 continue; 15846 if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) { 15847 OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R); 15848 } else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) { 15849 OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R); 15850 } 15851 } 15852 // Now try to vectorize postponed instructions. 15853 OpsChanged |= tryToVectorize(PostponedInsts, R); 15854 15855 Instructions.clear(); 15856 return OpsChanged; 15857 } 15858 15859 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { 15860 bool Changed = false; 15861 SmallVector<Value *, 4> Incoming; 15862 SmallPtrSet<Value *, 16> VisitedInstrs; 15863 // Maps phi nodes to the non-phi nodes found in the use tree for each phi 15864 // node. Allows better to identify the chains that can be vectorized in the 15865 // better way. 15866 DenseMap<Value *, SmallVector<Value *, 4>> PHIToOpcodes; 15867 auto PHICompare = [this, &PHIToOpcodes](Value *V1, Value *V2) { 15868 assert(isValidElementType(V1->getType()) && 15869 isValidElementType(V2->getType()) && 15870 "Expected vectorizable types only."); 15871 // It is fine to compare type IDs here, since we expect only vectorizable 15872 // types, like ints, floats and pointers, we don't care about other type. 
15873 if (V1->getType()->getTypeID() < V2->getType()->getTypeID()) 15874 return true; 15875 if (V1->getType()->getTypeID() > V2->getType()->getTypeID()) 15876 return false; 15877 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1]; 15878 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2]; 15879 if (Opcodes1.size() < Opcodes2.size()) 15880 return true; 15881 if (Opcodes1.size() > Opcodes2.size()) 15882 return false; 15883 for (int I = 0, E = Opcodes1.size(); I < E; ++I) { 15884 // Undefs are compatible with any other value. 15885 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I])) { 15886 if (isa<Instruction>(Opcodes1[I])) 15887 return true; 15888 if (isa<Instruction>(Opcodes2[I])) 15889 return false; 15890 if (isa<Constant>(Opcodes1[I]) && !isa<UndefValue>(Opcodes1[I])) 15891 return true; 15892 if (isa<Constant>(Opcodes2[I]) && !isa<UndefValue>(Opcodes2[I])) 15893 return false; 15894 if (isa<UndefValue>(Opcodes1[I]) && isa<UndefValue>(Opcodes2[I])) 15895 continue; 15896 return isa<UndefValue>(Opcodes2[I]); 15897 } 15898 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I])) 15899 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) { 15900 DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent()); 15901 DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent()); 15902 if (!NodeI1) 15903 return NodeI2 != nullptr; 15904 if (!NodeI2) 15905 return false; 15906 assert((NodeI1 == NodeI2) == 15907 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) && 15908 "Different nodes should have different DFS numbers"); 15909 if (NodeI1 != NodeI2) 15910 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn(); 15911 InstructionsState S = getSameOpcode({I1, I2}, *TLI); 15912 if (S.getOpcode() && !S.isAltShuffle()) 15913 continue; 15914 return I1->getOpcode() < I2->getOpcode(); 15915 } 15916 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I])) 15917 return Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID(); 15918 if (isa<Instruction>(Opcodes1[I])) 15919 return true; 15920 if (isa<Instruction>(Opcodes2[I])) 15921 return false; 15922 if (isa<Constant>(Opcodes1[I])) 15923 return true; 15924 if (isa<Constant>(Opcodes2[I])) 15925 return false; 15926 if (Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID()) 15927 return true; 15928 if (Opcodes1[I]->getValueID() > Opcodes2[I]->getValueID()) 15929 return false; 15930 } 15931 return false; 15932 }; 15933 auto AreCompatiblePHIs = [&PHIToOpcodes, this](Value *V1, Value *V2) { 15934 if (V1 == V2) 15935 return true; 15936 if (V1->getType() != V2->getType()) 15937 return false; 15938 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1]; 15939 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2]; 15940 if (Opcodes1.size() != Opcodes2.size()) 15941 return false; 15942 for (int I = 0, E = Opcodes1.size(); I < E; ++I) { 15943 // Undefs are compatible with any other value. 
15944 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I])) 15945 continue; 15946 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I])) 15947 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) { 15948 if (I1->getParent() != I2->getParent()) 15949 return false; 15950 InstructionsState S = getSameOpcode({I1, I2}, *TLI); 15951 if (S.getOpcode()) 15952 continue; 15953 return false; 15954 } 15955 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I])) 15956 continue; 15957 if (Opcodes1[I]->getValueID() != Opcodes2[I]->getValueID()) 15958 return false; 15959 } 15960 return true; 15961 }; 15962 15963 bool HaveVectorizedPhiNodes = false; 15964 do { 15965 // Collect the incoming values from the PHIs. 15966 Incoming.clear(); 15967 for (Instruction &I : *BB) { 15968 PHINode *P = dyn_cast<PHINode>(&I); 15969 if (!P) 15970 break; 15971 15972 // No need to analyze deleted, vectorized and non-vectorizable 15973 // instructions. 15974 if (!VisitedInstrs.count(P) && !R.isDeleted(P) && 15975 isValidElementType(P->getType())) 15976 Incoming.push_back(P); 15977 } 15978 15979 if (Incoming.size() <= 1) 15980 break; 15981 15982 // Find the corresponding non-phi nodes for better matching when trying to 15983 // build the tree. 15984 for (Value *V : Incoming) { 15985 SmallVectorImpl<Value *> &Opcodes = 15986 PHIToOpcodes.try_emplace(V).first->getSecond(); 15987 if (!Opcodes.empty()) 15988 continue; 15989 SmallVector<Value *, 4> Nodes(1, V); 15990 SmallPtrSet<Value *, 4> Visited; 15991 while (!Nodes.empty()) { 15992 auto *PHI = cast<PHINode>(Nodes.pop_back_val()); 15993 if (!Visited.insert(PHI).second) 15994 continue; 15995 for (Value *V : PHI->incoming_values()) { 15996 if (auto *PHI1 = dyn_cast<PHINode>((V))) { 15997 Nodes.push_back(PHI1); 15998 continue; 15999 } 16000 Opcodes.emplace_back(V); 16001 } 16002 } 16003 } 16004 16005 HaveVectorizedPhiNodes = tryToVectorizeSequence<Value>( 16006 Incoming, PHICompare, AreCompatiblePHIs, 16007 [this, &R](ArrayRef<Value *> Candidates, bool MaxVFOnly) { 16008 return tryToVectorizeList(Candidates, R, MaxVFOnly); 16009 }, 16010 /*MaxVFOnly=*/true, R); 16011 Changed |= HaveVectorizedPhiNodes; 16012 VisitedInstrs.insert(Incoming.begin(), Incoming.end()); 16013 } while (HaveVectorizedPhiNodes); 16014 16015 VisitedInstrs.clear(); 16016 16017 InstSetVector PostProcessInserts; 16018 SmallSetVector<CmpInst *, 8> PostProcessCmps; 16019 // Vectorizes Inserts in `PostProcessInserts` and if `VectorizeCmps` is true 16020 // also vectorizes `PostProcessCmps`. 16021 auto VectorizeInsertsAndCmps = [&](bool VectorizeCmps) { 16022 bool Changed = vectorizeInserts(PostProcessInserts, BB, R); 16023 if (VectorizeCmps) { 16024 Changed |= vectorizeCmpInsts(reverse(PostProcessCmps), BB, R); 16025 PostProcessCmps.clear(); 16026 } 16027 PostProcessInserts.clear(); 16028 return Changed; 16029 }; 16030 // Returns true if `I` is in `PostProcessInserts` or `PostProcessCmps`. 16031 auto IsInPostProcessInstrs = [&](Instruction *I) { 16032 if (auto *Cmp = dyn_cast<CmpInst>(I)) 16033 return PostProcessCmps.contains(Cmp); 16034 return isa<InsertElementInst, InsertValueInst>(I) && 16035 PostProcessInserts.contains(I); 16036 }; 16037 // Returns true if `I` is an instruction without users, such as a terminator, 16038 // a store, or a function call with an ignored return value. Unused 16039 // instructions that produce a non-void value are not treated as roots here, except for CallInst and InvokeInst.
16040 auto HasNoUsers = [](Instruction *I) { 16041 return I->use_empty() && 16042 (I->getType()->isVoidTy() || isa<CallInst, InvokeInst>(I)); 16043 }; 16044 for (BasicBlock::iterator It = BB->begin(), E = BB->end(); It != E; ++It) { 16045 // Skip instructions with scalable type. The number of elements is unknown 16046 // at compile time for scalable types. 16047 if (isa<ScalableVectorType>(It->getType())) 16048 continue; 16049 16050 // Skip instructions marked for deletion. 16051 if (R.isDeleted(&*It)) 16052 continue; 16053 // We may go through BB multiple times, so skip the ones we have already checked. 16054 if (!VisitedInstrs.insert(&*It).second) { 16055 if (HasNoUsers(&*It) && 16056 VectorizeInsertsAndCmps(/*VectorizeCmps=*/It->isTerminator())) { 16057 // We would like to start over since some instructions are deleted 16058 // and the iterator may become invalid. 16059 Changed = true; 16060 It = BB->begin(); 16061 E = BB->end(); 16062 } 16063 continue; 16064 } 16065 16066 if (isa<DbgInfoIntrinsic>(It)) 16067 continue; 16068 16069 // Try to vectorize reductions that use PHINodes. 16070 if (PHINode *P = dyn_cast<PHINode>(It)) { 16071 // Check that the PHI is a reduction PHI. 16072 if (P->getNumIncomingValues() == 2) { 16073 // Try to match and vectorize a horizontal reduction. 16074 Instruction *Root = getReductionInstr(DT, P, BB, LI); 16075 if (Root && vectorizeRootInstruction(P, Root, BB, R, TTI)) { 16076 Changed = true; 16077 It = BB->begin(); 16078 E = BB->end(); 16079 continue; 16080 } 16081 } 16082 // Try to vectorize the incoming values of the PHI, to catch reductions 16083 // that feed into PHIs. 16084 for (unsigned I = 0, E = P->getNumIncomingValues(); I != E; I++) { 16085 // Skip if the incoming block is the current BB for now. Also, bypass 16086 // unreachable IR for efficiency and to avoid crashing. 16087 // TODO: Collect the skipped incoming values and try to vectorize them 16088 // after processing BB. 16089 if (BB == P->getIncomingBlock(I) || 16090 !DT->isReachableFromEntry(P->getIncomingBlock(I))) 16091 continue; 16092 16093 // Postponed instructions should not be vectorized here, delay their 16094 // vectorization. 16095 if (auto *PI = dyn_cast<Instruction>(P->getIncomingValue(I)); 16096 PI && !IsInPostProcessInstrs(PI)) 16097 Changed |= vectorizeRootInstruction(nullptr, PI, 16098 P->getIncomingBlock(I), R, TTI); 16099 } 16100 continue; 16101 } 16102 16103 if (HasNoUsers(&*It)) { 16104 bool OpsChanged = false; 16105 auto *SI = dyn_cast<StoreInst>(It); 16106 bool TryToVectorizeRoot = ShouldStartVectorizeHorAtStore || !SI; 16107 if (SI) { 16108 auto *I = Stores.find(getUnderlyingObject(SI->getPointerOperand())); 16109 // Try to vectorize the chain in the store, if this is the only store to 16110 // the address in the block. 16111 // TODO: This is just a temporary solution to save compile time. Need 16112 // to investigate if we can safely turn on slp-vectorize-hor-store 16113 // instead to allow lookup for reduction chains in all non-vectorized 16114 // stores (need to check side effects and compile time). 16115 TryToVectorizeRoot |= (I == Stores.end() || I->second.size() == 1) && 16116 SI->getValueOperand()->hasOneUse(); 16117 } 16118 if (TryToVectorizeRoot) { 16119 for (auto *V : It->operand_values()) { 16120 // Postponed instructions should not be vectorized here, delay their 16121 // vectorization. 16122 if (auto *VI = dyn_cast<Instruction>(V); 16123 VI && !IsInPostProcessInstrs(VI)) 16124 // Try to match and vectorize a horizontal reduction.
16125 OpsChanged |= vectorizeRootInstruction(nullptr, VI, BB, R, TTI); 16126 } 16127 } 16128 // Start vectorization of the post-process list of instructions from the 16129 // top-tree instructions to try to vectorize as many instructions as 16130 // possible. 16131 OpsChanged |= 16132 VectorizeInsertsAndCmps(/*VectorizeCmps=*/It->isTerminator()); 16133 if (OpsChanged) { 16134 // We would like to start over since some instructions are deleted 16135 // and the iterator may become invalid. 16136 Changed = true; 16137 It = BB->begin(); 16138 E = BB->end(); 16139 continue; 16140 } 16141 } 16142 16143 if (isa<InsertElementInst, InsertValueInst>(It)) 16144 PostProcessInserts.insert(&*It); 16145 else if (isa<CmpInst>(It)) 16146 PostProcessCmps.insert(cast<CmpInst>(&*It)); 16147 } 16148 16149 return Changed; 16150 } 16151 16152 bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) { 16153 auto Changed = false; 16154 for (auto &Entry : GEPs) { 16155 // If the getelementptr list has fewer than two elements, there's nothing 16156 // to do. 16157 if (Entry.second.size() < 2) 16158 continue; 16159 16160 LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length " 16161 << Entry.second.size() << ".\n"); 16162 16163 // Process the GEP list in chunks suitable for the target's supported 16164 // vector size. If a vector register can't hold 1 element, we are done. We 16165 // are trying to vectorize the index computations, so the maximum number of 16166 // elements is based on the size of the index expression, rather than the 16167 // size of the GEP itself (the target's pointer size). 16168 unsigned MaxVecRegSize = R.getMaxVecRegSize(); 16169 unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin()); 16170 if (MaxVecRegSize < EltSize) 16171 continue; 16172 16173 unsigned MaxElts = MaxVecRegSize / EltSize; 16174 for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) { 16175 auto Len = std::min<unsigned>(BE - BI, MaxElts); 16176 ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len); 16177 16178 // Initialize a set of candidate getelementptrs. Note that we use a 16179 // SetVector here to preserve program order. If the index computations 16180 // are vectorizable and begin with loads, we want to minimize the chance 16181 // of having to reorder them later. 16182 SetVector<Value *> Candidates(GEPList.begin(), GEPList.end()); 16183 16184 // Some of the candidates may have already been vectorized after we 16185 // initially collected them. If so, they are marked as deleted, so remove 16186 // them from the set of candidates. 16187 Candidates.remove_if( 16188 [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); }); 16189 16190 // Remove from the set of candidates all pairs of getelementptrs with 16191 // constant differences. Such getelementptrs are likely not good 16192 // candidates for vectorization in a bottom-up phase since one can be 16193 // computed from the other. We also ensure all candidate getelementptr 16194 // indices are unique.
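// E.g. (illustrative), two getelementptrs of the same base with indices %i and
// %i + 4 have a constant SCEV difference, so the pair is dropped from the
// candidate set.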
      for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1;
           ++I) {
        auto *GEPI = GEPList[I];
        if (!Candidates.count(GEPI))
          continue;
        auto *SCEVI = SE->getSCEV(GEPList[I]);
        for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
          auto *GEPJ = GEPList[J];
          auto *SCEVJ = SE->getSCEV(GEPList[J]);
          if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
            Candidates.remove(GEPI);
            Candidates.remove(GEPJ);
          } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
            Candidates.remove(GEPJ);
          }
        }
      }

      // We break out of the above computation as soon as we know there are
      // fewer than two candidates remaining.
      if (Candidates.size() < 2)
        continue;

      // Add the single, non-constant index of each candidate to the bundle. We
      // ensured the indices met these constraints when we originally collected
      // the getelementptrs.
      SmallVector<Value *, 16> Bundle(Candidates.size());
      auto BundleIndex = 0u;
      for (auto *V : Candidates) {
        auto *GEP = cast<GetElementPtrInst>(V);
        auto *GEPIdx = GEP->idx_begin()->get();
        assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
        Bundle[BundleIndex++] = GEPIdx;
      }

      // Try to vectorize the indices. We are currently only interested in
      // gather-like cases of the form:
      //
      // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
      //
      // where the loads of "a", the loads of "b", and the subtractions can be
      // performed in parallel. It's likely that detecting this pattern in a
      // bottom-up phase will be simpler and less costly than building a
      // full-blown top-down phase beginning at the consecutive loads.
      Changed |= tryToVectorizeList(Bundle, R);
    }
  }
  return Changed;
}

bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Sort by type, base pointer, and value operand. Value operands must be
  // compatible (have the same opcode, same parent), otherwise it is
  // definitely not profitable to try to vectorize them.
  auto &&StoreSorter = [this](StoreInst *V, StoreInst *V2) {
    if (V->getValueOperand()->getType()->getTypeID() <
        V2->getValueOperand()->getType()->getTypeID())
      return true;
    if (V->getValueOperand()->getType()->getTypeID() >
        V2->getValueOperand()->getType()->getTypeID())
      return false;
    if (V->getPointerOperandType()->getTypeID() <
        V2->getPointerOperandType()->getTypeID())
      return true;
    if (V->getPointerOperandType()->getTypeID() >
        V2->getPointerOperandType()->getTypeID())
      return false;
    // UndefValues are compatible with all other values.
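    // Returning false in both directions makes an undef value operand compare
    // equal to any other value operand of the same type, so such stores stay
    // adjacent to potential partners after sorting.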
    if (isa<UndefValue>(V->getValueOperand()) ||
        isa<UndefValue>(V2->getValueOperand()))
      return false;
    if (auto *I1 = dyn_cast<Instruction>(V->getValueOperand()))
      if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
        DomTreeNodeBase<llvm::BasicBlock> *NodeI1 =
            DT->getNode(I1->getParent());
        DomTreeNodeBase<llvm::BasicBlock> *NodeI2 =
            DT->getNode(I2->getParent());
        assert(NodeI1 && "Should only process reachable instructions");
        assert(NodeI2 && "Should only process reachable instructions");
        assert((NodeI1 == NodeI2) ==
                   (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
               "Different nodes should have different DFS numbers");
        if (NodeI1 != NodeI2)
          return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
        InstructionsState S = getSameOpcode({I1, I2}, *TLI);
        if (S.getOpcode())
          return false;
        return I1->getOpcode() < I2->getOpcode();
      }
    if (isa<Constant>(V->getValueOperand()) &&
        isa<Constant>(V2->getValueOperand()))
      return false;
    return V->getValueOperand()->getValueID() <
           V2->getValueOperand()->getValueID();
  };

  auto &&AreCompatibleStores = [this](StoreInst *V1, StoreInst *V2) {
    if (V1 == V2)
      return true;
    if (V1->getValueOperand()->getType() != V2->getValueOperand()->getType())
      return false;
    if (V1->getPointerOperandType() != V2->getPointerOperandType())
      return false;
    // Undefs are compatible with any other value.
    if (isa<UndefValue>(V1->getValueOperand()) ||
        isa<UndefValue>(V2->getValueOperand()))
      return true;
    if (auto *I1 = dyn_cast<Instruction>(V1->getValueOperand()))
      if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
        if (I1->getParent() != I2->getParent())
          return false;
        InstructionsState S = getSameOpcode({I1, I2}, *TLI);
        return S.getOpcode() > 0;
      }
    if (isa<Constant>(V1->getValueOperand()) &&
        isa<Constant>(V2->getValueOperand()))
      return true;
    return V1->getValueOperand()->getValueID() ==
           V2->getValueOperand()->getValueID();
  };

  // Attempt to sort and vectorize each of the store-groups.
  for (auto &Pair : Stores) {
    if (Pair.second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                      << Pair.second.size() << ".\n");

    if (!isValidElementType(Pair.second.front()->getValueOperand()->getType()))
      continue;

    // Reverse the stores to do bottom-to-top analysis. This is important if
    // the same addresses are stored to several times; in that case we need to
    // follow the store order (reversed to meet the memory dependencies).
    SmallVector<StoreInst *> ReversedStores(Pair.second.rbegin(),
                                            Pair.second.rend());
    Changed |= tryToVectorizeSequence<StoreInst>(
        ReversedStores, StoreSorter, AreCompatibleStores,
        [this, &R](ArrayRef<StoreInst *> Candidates, bool) {
          return vectorizeStores(Candidates, R);
        },
        /*MaxVFOnly=*/false, R);
  }
  return Changed;
}
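
// Illustrative sketch only, not part of the pass: the sort-then-group shape
// that the StoreSorter/AreCompatibleStores pair above is written for.
// tryToVectorizeSequence (defined earlier in this file) is assumed to behave
// roughly like this -- stable-sort the candidates with the ordering
// predicate, then hand every maximal run of mutually compatible neighbors to
// the vectorization callback. The helper name and signature below are
// hypothetical, and the real helper is assumed to carry extra bookkeeping
// (e.g. the MaxVFOnly retry path seen in its call site above).
template <typename T, typename LessFn, typename CompatFn, typename TryFn>
static bool sortThenGroupSketch(SmallVectorImpl<T *> &Candidates, LessFn Less,
                                CompatFn AreCompatible, TryFn TryVectorize) {
  bool Changed = false;
  // Bring compatible elements next to each other.
  llvm::stable_sort(Candidates, Less);
  for (size_t Begin = 0, E = Candidates.size(); Begin < E;) {
    // Grow the run while each neighbor is compatible with the previous one.
    size_t End = Begin + 1;
    while (End < E && AreCompatible(Candidates[End - 1], Candidates[End]))
      ++End;
    // Only runs of at least two elements are worth a vectorization attempt.
    if (End - Begin >= 2)
      Changed |= TryVectorize(
          ArrayRef<T *>(Candidates.data() + Begin, End - Begin));
    Begin = End;
  }
  return Changed;
}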