1 //===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This pass implements the Bottom Up SLP vectorizer. It detects consecutive 10 // stores that can be put together into vector-stores. Next, it attempts to 11 // construct vectorizable tree using the use-def chains. If a profitable tree 12 // was found, the SLP vectorizer performs vectorization on the tree. 13 // 14 // The pass is inspired by the work described in the paper: 15 // "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks. 16 // 17 //===----------------------------------------------------------------------===// 18 19 #include "llvm/Transforms/Vectorize/SLPVectorizer.h" 20 #include "llvm/ADT/DenseMap.h" 21 #include "llvm/ADT/DenseSet.h" 22 #include "llvm/ADT/PriorityQueue.h" 23 #include "llvm/ADT/STLExtras.h" 24 #include "llvm/ADT/SetOperations.h" 25 #include "llvm/ADT/SetVector.h" 26 #include "llvm/ADT/SmallBitVector.h" 27 #include "llvm/ADT/SmallPtrSet.h" 28 #include "llvm/ADT/SmallSet.h" 29 #include "llvm/ADT/SmallString.h" 30 #include "llvm/ADT/Statistic.h" 31 #include "llvm/ADT/iterator.h" 32 #include "llvm/ADT/iterator_range.h" 33 #include "llvm/Analysis/AliasAnalysis.h" 34 #include "llvm/Analysis/AssumptionCache.h" 35 #include "llvm/Analysis/CodeMetrics.h" 36 #include "llvm/Analysis/ConstantFolding.h" 37 #include "llvm/Analysis/DemandedBits.h" 38 #include "llvm/Analysis/GlobalsModRef.h" 39 #include "llvm/Analysis/IVDescriptors.h" 40 #include "llvm/Analysis/LoopAccessAnalysis.h" 41 #include "llvm/Analysis/LoopInfo.h" 42 #include "llvm/Analysis/MemoryLocation.h" 43 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 44 #include "llvm/Analysis/ScalarEvolution.h" 45 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 46 #include "llvm/Analysis/TargetLibraryInfo.h" 47 #include "llvm/Analysis/TargetTransformInfo.h" 48 #include "llvm/Analysis/ValueTracking.h" 49 #include "llvm/Analysis/VectorUtils.h" 50 #include "llvm/IR/Attributes.h" 51 #include "llvm/IR/BasicBlock.h" 52 #include "llvm/IR/Constant.h" 53 #include "llvm/IR/Constants.h" 54 #include "llvm/IR/DataLayout.h" 55 #include "llvm/IR/DerivedTypes.h" 56 #include "llvm/IR/Dominators.h" 57 #include "llvm/IR/Function.h" 58 #include "llvm/IR/IRBuilder.h" 59 #include "llvm/IR/InstrTypes.h" 60 #include "llvm/IR/Instruction.h" 61 #include "llvm/IR/Instructions.h" 62 #include "llvm/IR/IntrinsicInst.h" 63 #include "llvm/IR/Intrinsics.h" 64 #include "llvm/IR/Module.h" 65 #include "llvm/IR/Operator.h" 66 #include "llvm/IR/PatternMatch.h" 67 #include "llvm/IR/Type.h" 68 #include "llvm/IR/Use.h" 69 #include "llvm/IR/User.h" 70 #include "llvm/IR/Value.h" 71 #include "llvm/IR/ValueHandle.h" 72 #ifdef EXPENSIVE_CHECKS 73 #include "llvm/IR/Verifier.h" 74 #endif 75 #include "llvm/Pass.h" 76 #include "llvm/Support/Casting.h" 77 #include "llvm/Support/CommandLine.h" 78 #include "llvm/Support/Compiler.h" 79 #include "llvm/Support/DOTGraphTraits.h" 80 #include "llvm/Support/Debug.h" 81 #include "llvm/Support/ErrorHandling.h" 82 #include "llvm/Support/GraphWriter.h" 83 #include "llvm/Support/InstructionCost.h" 84 #include "llvm/Support/KnownBits.h" 85 #include "llvm/Support/MathExtras.h" 86 #include "llvm/Support/raw_ostream.h" 87 #include 
"llvm/Transforms/Utils/InjectTLIMappings.h" 88 #include "llvm/Transforms/Utils/Local.h" 89 #include "llvm/Transforms/Utils/LoopUtils.h" 90 #include <algorithm> 91 #include <cassert> 92 #include <cstdint> 93 #include <iterator> 94 #include <memory> 95 #include <optional> 96 #include <set> 97 #include <string> 98 #include <tuple> 99 #include <utility> 100 101 using namespace llvm; 102 using namespace llvm::PatternMatch; 103 using namespace slpvectorizer; 104 105 #define SV_NAME "slp-vectorizer" 106 #define DEBUG_TYPE "SLP" 107 108 STATISTIC(NumVectorInstructions, "Number of vector instructions generated"); 109 110 static cl::opt<bool> 111 RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden, 112 cl::desc("Run the SLP vectorization passes")); 113 114 static cl::opt<int> 115 SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden, 116 cl::desc("Only vectorize if you gain more than this " 117 "number ")); 118 119 static cl::opt<bool> 120 ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden, 121 cl::desc("Attempt to vectorize horizontal reductions")); 122 123 static cl::opt<bool> ShouldStartVectorizeHorAtStore( 124 "slp-vectorize-hor-store", cl::init(false), cl::Hidden, 125 cl::desc( 126 "Attempt to vectorize horizontal reductions feeding into a store")); 127 128 // NOTE: If AllowHorRdxIdenityOptimization is true, the optimization will run 129 // even if we match a reduction but do not vectorize in the end. 130 static cl::opt<bool> AllowHorRdxIdenityOptimization( 131 "slp-optimize-identity-hor-reduction-ops", cl::init(true), cl::Hidden, 132 cl::desc("Allow optimization of original scalar identity operations on " 133 "matched horizontal reductions.")); 134 135 static cl::opt<int> 136 MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden, 137 cl::desc("Attempt to vectorize for this register size in bits")); 138 139 static cl::opt<unsigned> 140 MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden, 141 cl::desc("Maximum SLP vectorization factor (0=unlimited)")); 142 143 /// Limits the size of scheduling regions in a block. 144 /// It avoid long compile times for _very_ large blocks where vector 145 /// instructions are spread over a wide range. 146 /// This limit is way higher than needed by real-world functions. 147 static cl::opt<int> 148 ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden, 149 cl::desc("Limit the size of the SLP scheduling region per block")); 150 151 static cl::opt<int> MinVectorRegSizeOption( 152 "slp-min-reg-size", cl::init(128), cl::Hidden, 153 cl::desc("Attempt to vectorize for this register size in bits")); 154 155 static cl::opt<unsigned> RecursionMaxDepth( 156 "slp-recursion-max-depth", cl::init(12), cl::Hidden, 157 cl::desc("Limit the recursion depth when building a vectorizable tree")); 158 159 static cl::opt<unsigned> MinTreeSize( 160 "slp-min-tree-size", cl::init(3), cl::Hidden, 161 cl::desc("Only vectorize small trees if they are fully vectorizable")); 162 163 // The maximum depth that the look-ahead score heuristic will explore. 164 // The higher this value, the higher the compilation time overhead. 165 static cl::opt<int> LookAheadMaxDepth( 166 "slp-max-look-ahead-depth", cl::init(2), cl::Hidden, 167 cl::desc("The maximum look-ahead depth for operand reordering scores")); 168 169 // The maximum depth that the look-ahead score heuristic will explore 170 // when it probing among candidates for vectorization tree roots. 
171 // The higher this value, the higher the compilation time overhead but unlike 172 // similar limit for operands ordering this is less frequently used, hence 173 // impact of higher value is less noticeable. 174 static cl::opt<int> RootLookAheadMaxDepth( 175 "slp-max-root-look-ahead-depth", cl::init(2), cl::Hidden, 176 cl::desc("The maximum look-ahead depth for searching best rooting option")); 177 178 static cl::opt<bool> 179 ViewSLPTree("view-slp-tree", cl::Hidden, 180 cl::desc("Display the SLP trees with Graphviz")); 181 182 // Limit the number of alias checks. The limit is chosen so that 183 // it has no negative effect on the llvm benchmarks. 184 static const unsigned AliasedCheckLimit = 10; 185 186 // Another limit for the alias checks: The maximum distance between load/store 187 // instructions where alias checks are done. 188 // This limit is useful for very large basic blocks. 189 static const unsigned MaxMemDepDistance = 160; 190 191 /// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling 192 /// regions to be handled. 193 static const int MinScheduleRegionSize = 16; 194 195 /// Predicate for the element types that the SLP vectorizer supports. 196 /// 197 /// The most important thing to filter here are types which are invalid in LLVM 198 /// vectors. We also filter target specific types which have absolutely no 199 /// meaningful vectorization path such as x86_fp80 and ppc_f128. This just 200 /// avoids spending time checking the cost model and realizing that they will 201 /// be inevitably scalarized. 202 static bool isValidElementType(Type *Ty) { 203 return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() && 204 !Ty->isPPC_FP128Ty(); 205 } 206 207 /// \returns True if the value is a constant (but not globals/constant 208 /// expressions). 209 static bool isConstant(Value *V) { 210 return isa<Constant>(V) && !isa<ConstantExpr, GlobalValue>(V); 211 } 212 213 /// Checks if \p V is one of vector-like instructions, i.e. undef, 214 /// insertelement/extractelement with constant indices for fixed vector type or 215 /// extractvalue instruction. 216 static bool isVectorLikeInstWithConstOps(Value *V) { 217 if (!isa<InsertElementInst, ExtractElementInst>(V) && 218 !isa<ExtractValueInst, UndefValue>(V)) 219 return false; 220 auto *I = dyn_cast<Instruction>(V); 221 if (!I || isa<ExtractValueInst>(I)) 222 return true; 223 if (!isa<FixedVectorType>(I->getOperand(0)->getType())) 224 return false; 225 if (isa<ExtractElementInst>(I)) 226 return isConstant(I->getOperand(1)); 227 assert(isa<InsertElementInst>(V) && "Expected only insertelement."); 228 return isConstant(I->getOperand(2)); 229 } 230 231 #if !defined(NDEBUG) 232 /// Print a short descriptor of the instruction bundle suitable for debug output. 233 static std::string shortBundleName(ArrayRef<Value *> VL) { 234 std::string Result; 235 raw_string_ostream OS(Result); 236 OS << "n=" << VL.size() << " [" << *VL.front() << ", ..]"; 237 OS.flush(); 238 return Result; 239 } 240 #endif 241 242 /// \returns true if all of the instructions in \p VL are in the same block or 243 /// false otherwise. 
244 static bool allSameBlock(ArrayRef<Value *> VL) { 245 Instruction *I0 = dyn_cast<Instruction>(VL[0]); 246 if (!I0) 247 return false; 248 if (all_of(VL, isVectorLikeInstWithConstOps)) 249 return true; 250 251 BasicBlock *BB = I0->getParent(); 252 for (int I = 1, E = VL.size(); I < E; I++) { 253 auto *II = dyn_cast<Instruction>(VL[I]); 254 if (!II) 255 return false; 256 257 if (BB != II->getParent()) 258 return false; 259 } 260 return true; 261 } 262 263 /// \returns True if all of the values in \p VL are constants (but not 264 /// globals/constant expressions). 265 static bool allConstant(ArrayRef<Value *> VL) { 266 // Constant expressions and globals can't be vectorized like normal integer/FP 267 // constants. 268 return all_of(VL, isConstant); 269 } 270 271 /// \returns True if all of the values in \p VL are identical or some of them 272 /// are UndefValue. 273 static bool isSplat(ArrayRef<Value *> VL) { 274 Value *FirstNonUndef = nullptr; 275 for (Value *V : VL) { 276 if (isa<UndefValue>(V)) 277 continue; 278 if (!FirstNonUndef) { 279 FirstNonUndef = V; 280 continue; 281 } 282 if (V != FirstNonUndef) 283 return false; 284 } 285 return FirstNonUndef != nullptr; 286 } 287 288 /// \returns True if \p I is commutative, handles CmpInst and BinaryOperator. 289 static bool isCommutative(Instruction *I) { 290 if (auto *Cmp = dyn_cast<CmpInst>(I)) 291 return Cmp->isCommutative(); 292 if (auto *BO = dyn_cast<BinaryOperator>(I)) 293 return BO->isCommutative(); 294 // TODO: This should check for generic Instruction::isCommutative(), but 295 // we need to confirm that the caller code correctly handles Intrinsics 296 // for example (does not have 2 operands). 297 return false; 298 } 299 300 /// \returns inserting index of InsertElement or InsertValue instruction, 301 /// using Offset as base offset for index. 302 static std::optional<unsigned> getInsertIndex(const Value *InsertInst, 303 unsigned Offset = 0) { 304 int Index = Offset; 305 if (const auto *IE = dyn_cast<InsertElementInst>(InsertInst)) { 306 const auto *VT = dyn_cast<FixedVectorType>(IE->getType()); 307 if (!VT) 308 return std::nullopt; 309 const auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2)); 310 if (!CI) 311 return std::nullopt; 312 if (CI->getValue().uge(VT->getNumElements())) 313 return std::nullopt; 314 Index *= VT->getNumElements(); 315 Index += CI->getZExtValue(); 316 return Index; 317 } 318 319 const auto *IV = cast<InsertValueInst>(InsertInst); 320 Type *CurrentType = IV->getType(); 321 for (unsigned I : IV->indices()) { 322 if (const auto *ST = dyn_cast<StructType>(CurrentType)) { 323 Index *= ST->getNumElements(); 324 CurrentType = ST->getElementType(I); 325 } else if (const auto *AT = dyn_cast<ArrayType>(CurrentType)) { 326 Index *= AT->getNumElements(); 327 CurrentType = AT->getElementType(); 328 } else { 329 return std::nullopt; 330 } 331 Index += I; 332 } 333 return Index; 334 } 335 336 namespace { 337 /// Specifies the way the mask should be analyzed for undefs/poisonous elements 338 /// in the shuffle mask. 339 enum class UseMask { 340 FirstArg, ///< The mask is expected to be for permutation of 1-2 vectors, 341 ///< check for the mask elements for the first argument (mask 342 ///< indices are in range [0:VF)). 
343 SecondArg, ///< The mask is expected to be for permutation of 2 vectors, check 344 ///< for the mask elements for the second argument (mask indices 345 ///< are in range [VF:2*VF)) 346 UndefsAsMask ///< Consider undef mask elements (-1) as placeholders for 347 ///< future shuffle elements and mark them as ones as being used 348 ///< in future. Non-undef elements are considered as unused since 349 ///< they're already marked as used in the mask. 350 }; 351 } // namespace 352 353 /// Prepares a use bitset for the given mask either for the first argument or 354 /// for the second. 355 static SmallBitVector buildUseMask(int VF, ArrayRef<int> Mask, 356 UseMask MaskArg) { 357 SmallBitVector UseMask(VF, true); 358 for (auto [Idx, Value] : enumerate(Mask)) { 359 if (Value == PoisonMaskElem) { 360 if (MaskArg == UseMask::UndefsAsMask) 361 UseMask.reset(Idx); 362 continue; 363 } 364 if (MaskArg == UseMask::FirstArg && Value < VF) 365 UseMask.reset(Value); 366 else if (MaskArg == UseMask::SecondArg && Value >= VF) 367 UseMask.reset(Value - VF); 368 } 369 return UseMask; 370 } 371 372 /// Checks if the given value is actually an undefined constant vector. 373 /// Also, if the \p UseMask is not empty, tries to check if the non-masked 374 /// elements actually mask the insertelement buildvector, if any. 375 template <bool IsPoisonOnly = false> 376 static SmallBitVector isUndefVector(const Value *V, 377 const SmallBitVector &UseMask = {}) { 378 SmallBitVector Res(UseMask.empty() ? 1 : UseMask.size(), true); 379 using T = std::conditional_t<IsPoisonOnly, PoisonValue, UndefValue>; 380 if (isa<T>(V)) 381 return Res; 382 auto *VecTy = dyn_cast<FixedVectorType>(V->getType()); 383 if (!VecTy) 384 return Res.reset(); 385 auto *C = dyn_cast<Constant>(V); 386 if (!C) { 387 if (!UseMask.empty()) { 388 const Value *Base = V; 389 while (auto *II = dyn_cast<InsertElementInst>(Base)) { 390 Base = II->getOperand(0); 391 if (isa<T>(II->getOperand(1))) 392 continue; 393 std::optional<unsigned> Idx = getInsertIndex(II); 394 if (!Idx) { 395 Res.reset(); 396 return Res; 397 } 398 if (*Idx < UseMask.size() && !UseMask.test(*Idx)) 399 Res.reset(*Idx); 400 } 401 // TODO: Add analysis for shuffles here too. 
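      // If no insertelement chain was peeled off above, nothing is known about
      // the vector, so it is conservatively treated as fully defined;
      // otherwise the remaining base vector is analyzed with an all-unused
      // submask.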
402 if (V == Base) { 403 Res.reset(); 404 } else { 405 SmallBitVector SubMask(UseMask.size(), false); 406 Res &= isUndefVector<IsPoisonOnly>(Base, SubMask); 407 } 408 } else { 409 Res.reset(); 410 } 411 return Res; 412 } 413 for (unsigned I = 0, E = VecTy->getNumElements(); I != E; ++I) { 414 if (Constant *Elem = C->getAggregateElement(I)) 415 if (!isa<T>(Elem) && 416 (UseMask.empty() || (I < UseMask.size() && !UseMask.test(I)))) 417 Res.reset(I); 418 } 419 return Res; 420 } 421 422 /// Checks if the vector of instructions can be represented as a shuffle, like: 423 /// %x0 = extractelement <4 x i8> %x, i32 0 424 /// %x3 = extractelement <4 x i8> %x, i32 3 425 /// %y1 = extractelement <4 x i8> %y, i32 1 426 /// %y2 = extractelement <4 x i8> %y, i32 2 427 /// %x0x0 = mul i8 %x0, %x0 428 /// %x3x3 = mul i8 %x3, %x3 429 /// %y1y1 = mul i8 %y1, %y1 430 /// %y2y2 = mul i8 %y2, %y2 431 /// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0 432 /// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1 433 /// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2 434 /// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3 435 /// ret <4 x i8> %ins4 436 /// can be transformed into: 437 /// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5, 438 /// i32 6> 439 /// %2 = mul <4 x i8> %1, %1 440 /// ret <4 x i8> %2 441 /// Mask will return the Shuffle Mask equivalent to the extracted elements. 442 /// TODO: Can we split off and reuse the shuffle mask detection from 443 /// ShuffleVectorInst/getShuffleCost? 444 static std::optional<TargetTransformInfo::ShuffleKind> 445 isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) { 446 const auto *It = 447 find_if(VL, [](Value *V) { return isa<ExtractElementInst>(V); }); 448 if (It == VL.end()) 449 return std::nullopt; 450 auto *EI0 = cast<ExtractElementInst>(*It); 451 if (isa<ScalableVectorType>(EI0->getVectorOperandType())) 452 return std::nullopt; 453 unsigned Size = 454 cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements(); 455 Value *Vec1 = nullptr; 456 Value *Vec2 = nullptr; 457 enum ShuffleMode { Unknown, Select, Permute }; 458 ShuffleMode CommonShuffleMode = Unknown; 459 Mask.assign(VL.size(), PoisonMaskElem); 460 for (unsigned I = 0, E = VL.size(); I < E; ++I) { 461 // Undef can be represented as an undef element in a vector. 462 if (isa<UndefValue>(VL[I])) 463 continue; 464 auto *EI = cast<ExtractElementInst>(VL[I]); 465 if (isa<ScalableVectorType>(EI->getVectorOperandType())) 466 return std::nullopt; 467 auto *Vec = EI->getVectorOperand(); 468 // We can extractelement from undef or poison vector. 469 if (isUndefVector(Vec).all()) 470 continue; 471 // All vector operands must have the same number of vector elements. 472 if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size) 473 return std::nullopt; 474 if (isa<UndefValue>(EI->getIndexOperand())) 475 continue; 476 auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand()); 477 if (!Idx) 478 return std::nullopt; 479 // Undefined behavior if Idx is negative or >= Size. 480 if (Idx->getValue().uge(Size)) 481 continue; 482 unsigned IntIdx = Idx->getValue().getZExtValue(); 483 Mask[I] = IntIdx; 484 // For correct shuffling we have to have at most 2 different vector operands 485 // in all extractelement instructions. 
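    // E.g. extracts from %x and %y can still be modeled as one two-source
    // shuffle, but a third distinct source vector cannot be expressed with a
    // single shuffle mask, so we give up in that case.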
486 if (!Vec1 || Vec1 == Vec) { 487 Vec1 = Vec; 488 } else if (!Vec2 || Vec2 == Vec) { 489 Vec2 = Vec; 490 Mask[I] += Size; 491 } else { 492 return std::nullopt; 493 } 494 if (CommonShuffleMode == Permute) 495 continue; 496 // If the extract index is not the same as the operation number, it is a 497 // permutation. 498 if (IntIdx != I) { 499 CommonShuffleMode = Permute; 500 continue; 501 } 502 CommonShuffleMode = Select; 503 } 504 // If we're not crossing lanes in different vectors, consider it as blending. 505 if (CommonShuffleMode == Select && Vec2) 506 return TargetTransformInfo::SK_Select; 507 // If Vec2 was never used, we have a permutation of a single vector, otherwise 508 // we have permutation of 2 vectors. 509 return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc 510 : TargetTransformInfo::SK_PermuteSingleSrc; 511 } 512 513 /// \returns True if Extract{Value,Element} instruction extracts element Idx. 514 static std::optional<unsigned> getExtractIndex(Instruction *E) { 515 unsigned Opcode = E->getOpcode(); 516 assert((Opcode == Instruction::ExtractElement || 517 Opcode == Instruction::ExtractValue) && 518 "Expected extractelement or extractvalue instruction."); 519 if (Opcode == Instruction::ExtractElement) { 520 auto *CI = dyn_cast<ConstantInt>(E->getOperand(1)); 521 if (!CI) 522 return std::nullopt; 523 return CI->getZExtValue(); 524 } 525 auto *EI = cast<ExtractValueInst>(E); 526 if (EI->getNumIndices() != 1) 527 return std::nullopt; 528 return *EI->idx_begin(); 529 } 530 531 namespace { 532 533 /// Main data required for vectorization of instructions. 534 struct InstructionsState { 535 /// The very first instruction in the list with the main opcode. 536 Value *OpValue = nullptr; 537 538 /// The main/alternate instruction. 539 Instruction *MainOp = nullptr; 540 Instruction *AltOp = nullptr; 541 542 /// The main/alternate opcodes for the list of instructions. 543 unsigned getOpcode() const { 544 return MainOp ? MainOp->getOpcode() : 0; 545 } 546 547 unsigned getAltOpcode() const { 548 return AltOp ? AltOp->getOpcode() : 0; 549 } 550 551 /// Some of the instructions in the list have alternate opcodes. 552 bool isAltShuffle() const { return AltOp != MainOp; } 553 554 bool isOpcodeOrAlt(Instruction *I) const { 555 unsigned CheckedOpcode = I->getOpcode(); 556 return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode; 557 } 558 559 InstructionsState() = delete; 560 InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp) 561 : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {} 562 }; 563 564 } // end anonymous namespace 565 566 /// Chooses the correct key for scheduling data. If \p Op has the same (or 567 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p 568 /// OpValue. 569 static Value *isOneOf(const InstructionsState &S, Value *Op) { 570 auto *I = dyn_cast<Instruction>(Op); 571 if (I && S.isOpcodeOrAlt(I)) 572 return Op; 573 return S.OpValue; 574 } 575 576 /// \returns true if \p Opcode is allowed as part of the main/alternate 577 /// instruction for SLP vectorization. 578 /// 579 /// Example of unsupported opcode is SDIV that can potentially cause UB if the 580 /// "shuffled out" lane would result in division by zero. 
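/// (With an alternate opcode, both vector operations are executed across all
/// lanes and then blended together with a shuffle, so a lane that was
/// "shuffled out" in the scalar code still gets executed in vector form.)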
581 static bool isValidForAlternation(unsigned Opcode) { 582 if (Instruction::isIntDivRem(Opcode)) 583 return false; 584 585 return true; 586 } 587 588 static InstructionsState getSameOpcode(ArrayRef<Value *> VL, 589 const TargetLibraryInfo &TLI, 590 unsigned BaseIndex = 0); 591 592 /// Checks if the provided operands of 2 cmp instructions are compatible, i.e. 593 /// compatible instructions or constants, or just some other regular values. 594 static bool areCompatibleCmpOps(Value *BaseOp0, Value *BaseOp1, Value *Op0, 595 Value *Op1, const TargetLibraryInfo &TLI) { 596 return (isConstant(BaseOp0) && isConstant(Op0)) || 597 (isConstant(BaseOp1) && isConstant(Op1)) || 598 (!isa<Instruction>(BaseOp0) && !isa<Instruction>(Op0) && 599 !isa<Instruction>(BaseOp1) && !isa<Instruction>(Op1)) || 600 BaseOp0 == Op0 || BaseOp1 == Op1 || 601 getSameOpcode({BaseOp0, Op0}, TLI).getOpcode() || 602 getSameOpcode({BaseOp1, Op1}, TLI).getOpcode(); 603 } 604 605 /// \returns true if a compare instruction \p CI has similar "look" and 606 /// same predicate as \p BaseCI, "as is" or with its operands and predicate 607 /// swapped, false otherwise. 608 static bool isCmpSameOrSwapped(const CmpInst *BaseCI, const CmpInst *CI, 609 const TargetLibraryInfo &TLI) { 610 assert(BaseCI->getOperand(0)->getType() == CI->getOperand(0)->getType() && 611 "Assessing comparisons of different types?"); 612 CmpInst::Predicate BasePred = BaseCI->getPredicate(); 613 CmpInst::Predicate Pred = CI->getPredicate(); 614 CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(Pred); 615 616 Value *BaseOp0 = BaseCI->getOperand(0); 617 Value *BaseOp1 = BaseCI->getOperand(1); 618 Value *Op0 = CI->getOperand(0); 619 Value *Op1 = CI->getOperand(1); 620 621 return (BasePred == Pred && 622 areCompatibleCmpOps(BaseOp0, BaseOp1, Op0, Op1, TLI)) || 623 (BasePred == SwappedPred && 624 areCompatibleCmpOps(BaseOp0, BaseOp1, Op1, Op0, TLI)); 625 } 626 627 /// \returns analysis of the Instructions in \p VL described in 628 /// InstructionsState, the Opcode that we suppose the whole list 629 /// could be vectorized even if its structure is diverse. 630 static InstructionsState getSameOpcode(ArrayRef<Value *> VL, 631 const TargetLibraryInfo &TLI, 632 unsigned BaseIndex) { 633 // Make sure these are all Instructions. 634 if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); })) 635 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 636 637 bool IsCastOp = isa<CastInst>(VL[BaseIndex]); 638 bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]); 639 bool IsCmpOp = isa<CmpInst>(VL[BaseIndex]); 640 CmpInst::Predicate BasePred = 641 IsCmpOp ? cast<CmpInst>(VL[BaseIndex])->getPredicate() 642 : CmpInst::BAD_ICMP_PREDICATE; 643 unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode(); 644 unsigned AltOpcode = Opcode; 645 unsigned AltIndex = BaseIndex; 646 647 // Check for one alternate opcode from another BinaryOperator. 648 // TODO - generalize to support all operators (types, calls etc.). 
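  // For example, {add, sub, add, sub} is matched with MainOp 'add' and AltOp
  // 'sub'; such a node is later emitted as two vector instructions combined by
  // a shufflevector.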
649 auto *IBase = cast<Instruction>(VL[BaseIndex]); 650 Intrinsic::ID BaseID = 0; 651 SmallVector<VFInfo> BaseMappings; 652 if (auto *CallBase = dyn_cast<CallInst>(IBase)) { 653 BaseID = getVectorIntrinsicIDForCall(CallBase, &TLI); 654 BaseMappings = VFDatabase(*CallBase).getMappings(*CallBase); 655 if (!isTriviallyVectorizable(BaseID) && BaseMappings.empty()) 656 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 657 } 658 for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) { 659 auto *I = cast<Instruction>(VL[Cnt]); 660 unsigned InstOpcode = I->getOpcode(); 661 if (IsBinOp && isa<BinaryOperator>(I)) { 662 if (InstOpcode == Opcode || InstOpcode == AltOpcode) 663 continue; 664 if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) && 665 isValidForAlternation(Opcode)) { 666 AltOpcode = InstOpcode; 667 AltIndex = Cnt; 668 continue; 669 } 670 } else if (IsCastOp && isa<CastInst>(I)) { 671 Value *Op0 = IBase->getOperand(0); 672 Type *Ty0 = Op0->getType(); 673 Value *Op1 = I->getOperand(0); 674 Type *Ty1 = Op1->getType(); 675 if (Ty0 == Ty1) { 676 if (InstOpcode == Opcode || InstOpcode == AltOpcode) 677 continue; 678 if (Opcode == AltOpcode) { 679 assert(isValidForAlternation(Opcode) && 680 isValidForAlternation(InstOpcode) && 681 "Cast isn't safe for alternation, logic needs to be updated!"); 682 AltOpcode = InstOpcode; 683 AltIndex = Cnt; 684 continue; 685 } 686 } 687 } else if (auto *Inst = dyn_cast<CmpInst>(VL[Cnt]); Inst && IsCmpOp) { 688 auto *BaseInst = cast<CmpInst>(VL[BaseIndex]); 689 Type *Ty0 = BaseInst->getOperand(0)->getType(); 690 Type *Ty1 = Inst->getOperand(0)->getType(); 691 if (Ty0 == Ty1) { 692 assert(InstOpcode == Opcode && "Expected same CmpInst opcode."); 693 // Check for compatible operands. If the corresponding operands are not 694 // compatible - need to perform alternate vectorization. 
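        // (Operands are compatible when they are constants, the same values,
        // or instructions that getSameOpcode can match; see
        // areCompatibleCmpOps.)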
695 CmpInst::Predicate CurrentPred = Inst->getPredicate(); 696 CmpInst::Predicate SwappedCurrentPred = 697 CmpInst::getSwappedPredicate(CurrentPred); 698 699 if (E == 2 && 700 (BasePred == CurrentPred || BasePred == SwappedCurrentPred)) 701 continue; 702 703 if (isCmpSameOrSwapped(BaseInst, Inst, TLI)) 704 continue; 705 auto *AltInst = cast<CmpInst>(VL[AltIndex]); 706 if (AltIndex != BaseIndex) { 707 if (isCmpSameOrSwapped(AltInst, Inst, TLI)) 708 continue; 709 } else if (BasePred != CurrentPred) { 710 assert( 711 isValidForAlternation(InstOpcode) && 712 "CmpInst isn't safe for alternation, logic needs to be updated!"); 713 AltIndex = Cnt; 714 continue; 715 } 716 CmpInst::Predicate AltPred = AltInst->getPredicate(); 717 if (BasePred == CurrentPred || BasePred == SwappedCurrentPred || 718 AltPred == CurrentPred || AltPred == SwappedCurrentPred) 719 continue; 720 } 721 } else if (InstOpcode == Opcode || InstOpcode == AltOpcode) { 722 if (auto *Gep = dyn_cast<GetElementPtrInst>(I)) { 723 if (Gep->getNumOperands() != 2 || 724 Gep->getOperand(0)->getType() != IBase->getOperand(0)->getType()) 725 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 726 } else if (auto *EI = dyn_cast<ExtractElementInst>(I)) { 727 if (!isVectorLikeInstWithConstOps(EI)) 728 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 729 } else if (auto *LI = dyn_cast<LoadInst>(I)) { 730 auto *BaseLI = cast<LoadInst>(IBase); 731 if (!LI->isSimple() || !BaseLI->isSimple()) 732 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 733 } else if (auto *Call = dyn_cast<CallInst>(I)) { 734 auto *CallBase = cast<CallInst>(IBase); 735 if (Call->getCalledFunction() != CallBase->getCalledFunction()) 736 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 737 if (Call->hasOperandBundles() && 738 !std::equal(Call->op_begin() + Call->getBundleOperandsStartIndex(), 739 Call->op_begin() + Call->getBundleOperandsEndIndex(), 740 CallBase->op_begin() + 741 CallBase->getBundleOperandsStartIndex())) 742 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 743 Intrinsic::ID ID = getVectorIntrinsicIDForCall(Call, &TLI); 744 if (ID != BaseID) 745 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 746 if (!ID) { 747 SmallVector<VFInfo> Mappings = VFDatabase(*Call).getMappings(*Call); 748 if (Mappings.size() != BaseMappings.size() || 749 Mappings.front().ISA != BaseMappings.front().ISA || 750 Mappings.front().ScalarName != BaseMappings.front().ScalarName || 751 Mappings.front().VectorName != BaseMappings.front().VectorName || 752 Mappings.front().Shape.VF != BaseMappings.front().Shape.VF || 753 Mappings.front().Shape.Parameters != 754 BaseMappings.front().Shape.Parameters) 755 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 756 } 757 } 758 continue; 759 } 760 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 761 } 762 763 return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]), 764 cast<Instruction>(VL[AltIndex])); 765 } 766 767 /// \returns true if all of the values in \p VL have the same type or false 768 /// otherwise. 769 static bool allSameType(ArrayRef<Value *> VL) { 770 Type *Ty = VL.front()->getType(); 771 return all_of(VL.drop_front(), [&](Value *V) { return V->getType() == Ty; }); 772 } 773 774 /// \returns True if in-tree use also needs extract. This refers to 775 /// possible scalar operand in vectorized instruction. 
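/// For example, the pointer operand of a vectorized load/store, or an
/// intrinsic argument that must stay scalar (e.g. the exponent of llvm.powi),
/// still needs the original scalar value and therefore an extract.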
776 static bool doesInTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst, 777 TargetLibraryInfo *TLI) { 778 unsigned Opcode = UserInst->getOpcode(); 779 switch (Opcode) { 780 case Instruction::Load: { 781 LoadInst *LI = cast<LoadInst>(UserInst); 782 return (LI->getPointerOperand() == Scalar); 783 } 784 case Instruction::Store: { 785 StoreInst *SI = cast<StoreInst>(UserInst); 786 return (SI->getPointerOperand() == Scalar); 787 } 788 case Instruction::Call: { 789 CallInst *CI = cast<CallInst>(UserInst); 790 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 791 return any_of(enumerate(CI->args()), [&](auto &&Arg) { 792 return isVectorIntrinsicWithScalarOpAtArg(ID, Arg.index()) && 793 Arg.value().get() == Scalar; 794 }); 795 } 796 default: 797 return false; 798 } 799 } 800 801 /// \returns the AA location that is being access by the instruction. 802 static MemoryLocation getLocation(Instruction *I) { 803 if (StoreInst *SI = dyn_cast<StoreInst>(I)) 804 return MemoryLocation::get(SI); 805 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 806 return MemoryLocation::get(LI); 807 return MemoryLocation(); 808 } 809 810 /// \returns True if the instruction is not a volatile or atomic load/store. 811 static bool isSimple(Instruction *I) { 812 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 813 return LI->isSimple(); 814 if (StoreInst *SI = dyn_cast<StoreInst>(I)) 815 return SI->isSimple(); 816 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) 817 return !MI->isVolatile(); 818 return true; 819 } 820 821 /// Shuffles \p Mask in accordance with the given \p SubMask. 822 /// \param ExtendingManyInputs Supports reshuffling of the mask with not only 823 /// one but two input vectors. 824 static void addMask(SmallVectorImpl<int> &Mask, ArrayRef<int> SubMask, 825 bool ExtendingManyInputs = false) { 826 if (SubMask.empty()) 827 return; 828 assert( 829 (!ExtendingManyInputs || SubMask.size() > Mask.size() || 830 // Check if input scalars were extended to match the size of other node. 831 (SubMask.size() == Mask.size() && 832 std::all_of(std::next(Mask.begin(), Mask.size() / 2), Mask.end(), 833 [](int Idx) { return Idx == PoisonMaskElem; }))) && 834 "SubMask with many inputs support must be larger than the mask."); 835 if (Mask.empty()) { 836 Mask.append(SubMask.begin(), SubMask.end()); 837 return; 838 } 839 SmallVector<int> NewMask(SubMask.size(), PoisonMaskElem); 840 int TermValue = std::min(Mask.size(), SubMask.size()); 841 for (int I = 0, E = SubMask.size(); I < E; ++I) { 842 if (SubMask[I] == PoisonMaskElem || 843 (!ExtendingManyInputs && 844 (SubMask[I] >= TermValue || Mask[SubMask[I]] >= TermValue))) 845 continue; 846 NewMask[I] = Mask[SubMask[I]]; 847 } 848 Mask.swap(NewMask); 849 } 850 851 /// Order may have elements assigned special value (size) which is out of 852 /// bounds. Such indices only appear on places which correspond to undef values 853 /// (see canReuseExtract for details) and used in order to avoid undef values 854 /// have effect on operands ordering. 855 /// The first loop below simply finds all unused indices and then the next loop 856 /// nest assigns these indices for undef values positions. 
857 /// As an example below Order has two undef positions and they have assigned 858 /// values 3 and 7 respectively: 859 /// before: 6 9 5 4 9 2 1 0 860 /// after: 6 3 5 4 7 2 1 0 861 static void fixupOrderingIndices(SmallVectorImpl<unsigned> &Order) { 862 const unsigned Sz = Order.size(); 863 SmallBitVector UnusedIndices(Sz, /*t=*/true); 864 SmallBitVector MaskedIndices(Sz); 865 for (unsigned I = 0; I < Sz; ++I) { 866 if (Order[I] < Sz) 867 UnusedIndices.reset(Order[I]); 868 else 869 MaskedIndices.set(I); 870 } 871 if (MaskedIndices.none()) 872 return; 873 assert(UnusedIndices.count() == MaskedIndices.count() && 874 "Non-synced masked/available indices."); 875 int Idx = UnusedIndices.find_first(); 876 int MIdx = MaskedIndices.find_first(); 877 while (MIdx >= 0) { 878 assert(Idx >= 0 && "Indices must be synced."); 879 Order[MIdx] = Idx; 880 Idx = UnusedIndices.find_next(Idx); 881 MIdx = MaskedIndices.find_next(MIdx); 882 } 883 } 884 885 namespace llvm { 886 887 static void inversePermutation(ArrayRef<unsigned> Indices, 888 SmallVectorImpl<int> &Mask) { 889 Mask.clear(); 890 const unsigned E = Indices.size(); 891 Mask.resize(E, PoisonMaskElem); 892 for (unsigned I = 0; I < E; ++I) 893 Mask[Indices[I]] = I; 894 } 895 896 /// Reorders the list of scalars in accordance with the given \p Mask. 897 static void reorderScalars(SmallVectorImpl<Value *> &Scalars, 898 ArrayRef<int> Mask) { 899 assert(!Mask.empty() && "Expected non-empty mask."); 900 SmallVector<Value *> Prev(Scalars.size(), 901 UndefValue::get(Scalars.front()->getType())); 902 Prev.swap(Scalars); 903 for (unsigned I = 0, E = Prev.size(); I < E; ++I) 904 if (Mask[I] != PoisonMaskElem) 905 Scalars[Mask[I]] = Prev[I]; 906 } 907 908 /// Checks if the provided value does not require scheduling. It does not 909 /// require scheduling if this is not an instruction or it is an instruction 910 /// that does not read/write memory and all operands are either not instructions 911 /// or phi nodes or instructions from different blocks. 912 static bool areAllOperandsNonInsts(Value *V) { 913 auto *I = dyn_cast<Instruction>(V); 914 if (!I) 915 return true; 916 return !mayHaveNonDefUseDependency(*I) && 917 all_of(I->operands(), [I](Value *V) { 918 auto *IO = dyn_cast<Instruction>(V); 919 if (!IO) 920 return true; 921 return isa<PHINode>(IO) || IO->getParent() != I->getParent(); 922 }); 923 } 924 925 /// Checks if the provided value does not require scheduling. It does not 926 /// require scheduling if this is not an instruction or it is an instruction 927 /// that does not read/write memory and all users are phi nodes or instructions 928 /// from the different blocks. 929 static bool isUsedOutsideBlock(Value *V) { 930 auto *I = dyn_cast<Instruction>(V); 931 if (!I) 932 return true; 933 // Limits the number of uses to save compile time. 934 constexpr int UsesLimit = 8; 935 return !I->mayReadOrWriteMemory() && !I->hasNUsesOrMore(UsesLimit) && 936 all_of(I->users(), [I](User *U) { 937 auto *IU = dyn_cast<Instruction>(U); 938 if (!IU) 939 return true; 940 return IU->getParent() != I->getParent() || isa<PHINode>(IU); 941 }); 942 } 943 944 /// Checks if the specified value does not require scheduling. It does not 945 /// require scheduling if all operands and all users do not need to be scheduled 946 /// in the current basic block. 947 static bool doesNotNeedToBeScheduled(Value *V) { 948 return areAllOperandsNonInsts(V) && isUsedOutsideBlock(V); 949 } 950 951 /// Checks if the specified array of instructions does not require scheduling. 
/// This is the case if either all instructions have operands that do not
/// require scheduling, or all their users do not require scheduling because
/// they are phis or live in other basic blocks.
static bool doesNotNeedToSchedule(ArrayRef<Value *> VL) {
  return !VL.empty() &&
         (all_of(VL, isUsedOutsideBlock) || all_of(VL, areAllOperandsNonInsts));
}

namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
  struct TreeEntry;
  struct ScheduleData;
  class ShuffleCostEstimator;
  class ShuffleInstructionBuilder;

public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;
  using OrdersType = SmallVector<unsigned, 4>;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : BatchAA(*Aa), F(Func), SE(Se), TTI(Tti), TLI(TLi), LI(Li),
        DT(Dt), AC(AC), DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    // data type rather than just register size. For example, x86 AVX has
    // 256-bit registers, but it does not support integer operations
    // at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize =
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedValue();

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractelement instructions.
  /// \param ReplacedExternals contains the list of replaced external values
  /// {scalar, replace} after emitting extractelement for external uses.
  Value *
  vectorizeTree(const ExtraValueToDebugLocsMap &ExternallyUsedValues,
                SmallVectorImpl<std::pair<Value *, Value *>> &ReplacedExternals,
                Instruction *ReductionRoot = nullptr);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  InstructionCost getSpillCost() const;

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  InstructionCost getTreeCost(ArrayRef<Value *> VectorizedVals = std::nullopt);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
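  /// \p UserIgnoreLst typically holds the reduction operations when the tree
  /// is built for a horizontal reduction, so their scalar uses do not trigger
  /// extra extractelement instructions.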
1028 void buildTree(ArrayRef<Value *> Roots, 1029 const SmallDenseSet<Value *> &UserIgnoreLst); 1030 1031 /// Construct a vectorizable tree that starts at \p Roots. 1032 void buildTree(ArrayRef<Value *> Roots); 1033 1034 /// Returns whether the root node has in-tree uses. 1035 bool doesRootHaveInTreeUses() const { 1036 return !VectorizableTree.empty() && 1037 !VectorizableTree.front()->UserTreeIndices.empty(); 1038 } 1039 1040 /// Return the scalars of the root node. 1041 ArrayRef<Value *> getRootNodeScalars() const { 1042 assert(!VectorizableTree.empty() && "No graph to get the first node from"); 1043 return VectorizableTree.front()->Scalars; 1044 } 1045 1046 /// Builds external uses of the vectorized scalars, i.e. the list of 1047 /// vectorized scalars to be extracted, their lanes and their scalar users. \p 1048 /// ExternallyUsedValues contains additional list of external uses to handle 1049 /// vectorization of reductions. 1050 void 1051 buildExternalUses(const ExtraValueToDebugLocsMap &ExternallyUsedValues = {}); 1052 1053 /// Clear the internal data structures that are created by 'buildTree'. 1054 void deleteTree() { 1055 VectorizableTree.clear(); 1056 ScalarToTreeEntry.clear(); 1057 MultiNodeScalars.clear(); 1058 MustGather.clear(); 1059 EntryToLastInstruction.clear(); 1060 ExternalUses.clear(); 1061 for (auto &Iter : BlocksSchedules) { 1062 BlockScheduling *BS = Iter.second.get(); 1063 BS->clear(); 1064 } 1065 MinBWs.clear(); 1066 InstrElementSize.clear(); 1067 UserIgnoreList = nullptr; 1068 PostponedGathers.clear(); 1069 ValueToGatherNodes.clear(); 1070 } 1071 1072 unsigned getTreeSize() const { return VectorizableTree.size(); } 1073 1074 /// Perform LICM and CSE on the newly generated gather sequences. 1075 void optimizeGatherSequence(); 1076 1077 /// Checks if the specified gather tree entry \p TE can be represented as a 1078 /// shuffled vector entry + (possibly) permutation with other gathers. It 1079 /// implements the checks only for possibly ordered scalars (Loads, 1080 /// ExtractElement, ExtractValue), which can be part of the graph. 1081 std::optional<OrdersType> findReusedOrderedScalars(const TreeEntry &TE); 1082 1083 /// Sort loads into increasing pointers offsets to allow greater clustering. 1084 std::optional<OrdersType> findPartiallyOrderedLoads(const TreeEntry &TE); 1085 1086 /// Gets reordering data for the given tree entry. If the entry is vectorized 1087 /// - just return ReorderIndices, otherwise check if the scalars can be 1088 /// reordered and return the most optimal order. 1089 /// \return std::nullopt if ordering is not important, empty order, if 1090 /// identity order is important, or the actual order. 1091 /// \param TopToBottom If true, include the order of vectorized stores and 1092 /// insertelement nodes, otherwise skip them. 1093 std::optional<OrdersType> getReorderingData(const TreeEntry &TE, 1094 bool TopToBottom); 1095 1096 /// Reorders the current graph to the most profitable order starting from the 1097 /// root node to the leaf nodes. The best order is chosen only from the nodes 1098 /// of the same size (vectorization factor). Smaller nodes are considered 1099 /// parts of subgraph with smaller VF and they are reordered independently. We 1100 /// can make it because we still need to extend smaller nodes to the wider VF 1101 /// and we can merge reordering shuffles with the widening shuffles. 1102 void reorderTopToBottom(); 1103 1104 /// Reorders the current graph to the most profitable order starting from 1105 /// leaves to the root. 
  /// It allows rotating small subgraphs and reducing the
  /// number of reshuffles if the leaf nodes use the same order. In this case
  /// we can merge the orders and just shuffle the user node instead of
  /// shuffling its operands. Plus, even if the leaf nodes have different
  /// orders, it allows sinking the reordering in the graph closer to the root
  /// node and merging it later during analysis.
  void reorderBottomToTop(bool IgnoreReorder = false);

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by
  // cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  unsigned getMinVF(unsigned Sz) const {
    return std::max(2U, getMinVecRegSize() / Sz);
  }

  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
    unsigned MaxVF = MaxVFOption.getNumOccurrences() ?
      MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode);
    return MaxVF ? MaxVF : UINT_MAX;
  }

  /// Check if homogeneous aggregate is isomorphic to some VectorType.
  /// Accepts homogeneous multidimensional aggregate of scalars/vectors like
  /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> },
  /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable(bool ForReduction = false) const;

  /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  /// may not be necessary.
  bool isLoadCombineReductionCandidate(RecurKind RdxKind) const;

  /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  /// may not be necessary.
  bool isLoadCombineCandidate() const;

  OptimizationRemarkEmitter *getORE() { return ORE; }

  /// This structure holds any data we need about the edges being traversed
  /// during buildTree_rec(). We keep track of:
  /// (i) the user TreeEntry index, and
  /// (ii) the index of the edge.
  struct EdgeInfo {
    EdgeInfo() = default;
    EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
        : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
    /// The user TreeEntry.
    TreeEntry *UserTE = nullptr;
    /// The operand index of the use.
    unsigned EdgeIdx = UINT_MAX;
#ifndef NDEBUG
    friend inline raw_ostream &operator<<(raw_ostream &OS,
                                          const BoUpSLP::EdgeInfo &EI) {
      EI.dump(OS);
      return OS;
    }
    /// Debug print.
    void dump(raw_ostream &OS) const {
      OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
         << " EdgeIdx:" << EdgeIdx << "}";
    }
    LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
#endif
    bool operator == (const EdgeInfo &Other) const {
      return UserTE == Other.UserTE && EdgeIdx == Other.EdgeIdx;
    }
  };

  /// A helper class used for scoring candidates for two consecutive lanes.
  class LookAheadHeuristics {
    const TargetLibraryInfo &TLI;
    const DataLayout &DL;
    ScalarEvolution &SE;
    const BoUpSLP &R;
    int NumLanes; // Total number of lanes (aka vectorization factor).
    int MaxLevel; // The maximum recursion depth for accumulating score.

  public:
    LookAheadHeuristics(const TargetLibraryInfo &TLI, const DataLayout &DL,
                        ScalarEvolution &SE, const BoUpSLP &R, int NumLanes,
                        int MaxLevel)
        : TLI(TLI), DL(DL), SE(SE), R(R), NumLanes(NumLanes),
          MaxLevel(MaxLevel) {}

    // The hard-coded scores listed here are not very important, though they
    // should be higher for better matches to improve the resulting cost. When
    // computing the scores of matching one sub-tree with another, we are
    // basically counting the number of values that are matching. So even if
    // all scores are set to 1, we would still get a decent matching result.
    // However, sometimes we have to break ties. For example we may have to
    // choose between matching loads vs matching opcodes. This is what these
    // scores are helping us with: they provide the order of preference. Also,
    // this is important if the scalar is externally used or used in another
    // tree entry node in a different lane.

    /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
    static const int ScoreConsecutiveLoads = 4;
    /// The same load multiple times. This should have a better score than
    /// `ScoreSplat` because on x86, for a 2-lane vector, we can represent it
    /// with `movddup (%reg), xmm0`, which has a throughput of 0.5, versus 0.5
    /// for a vector load plus 1.0 for a broadcast.
    static const int ScoreSplatLoads = 3;
    /// Loads from reversed memory addresses, e.g. load(A[i+1]), load(A[i]).
    static const int ScoreReversedLoads = 3;
    /// A load candidate for masked gather.
    static const int ScoreMaskedGatherCandidate = 1;
    /// ExtractElementInst from same vector and consecutive indexes.
    static const int ScoreConsecutiveExtracts = 4;
    /// ExtractElementInst from same vector and reversed indices.
    static const int ScoreReversedExtracts = 3;
    /// Constants.
    static const int ScoreConstants = 2;
    /// Instructions with the same opcode.
    static const int ScoreSameOpcode = 2;
    /// Instructions with alt opcodes (e.g, add + sub).
    static const int ScoreAltOpcodes = 1;
    /// Identical instructions (a.k.a. splat or broadcast).
    static const int ScoreSplat = 1;
    /// Matching with an undef is preferable to failing.
    static const int ScoreUndef = 1;
    /// Score for failing to find a decent match.
    static const int ScoreFail = 0;
    /// Score if all users are vectorized.
    static const int ScoreAllUserVectorized = 1;

    /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
    /// \p U1 and \p U2 are the users of \p V1 and \p V2.
    /// Also, checks if \p V1 and \p V2 are compatible with instructions in \p
    /// MainAltOps.
    int getShallowScore(Value *V1, Value *V2, Instruction *U1, Instruction *U2,
                        ArrayRef<Value *> MainAltOps) const {
      if (!isValidElementType(V1->getType()) ||
          !isValidElementType(V2->getType()))
        return LookAheadHeuristics::ScoreFail;

      if (V1 == V2) {
        if (isa<LoadInst>(V1)) {
          // Returns true if the users of V1 and V2 won't need to be extracted.
          auto AllUsersAreInternal = [U1, U2, this](Value *V1, Value *V2) {
            // Bail out if we have too many uses to save compilation time.
            static constexpr unsigned Limit = 8;
            if (V1->hasNUsesOrMore(Limit) || V2->hasNUsesOrMore(Limit))
              return false;

            auto AllUsersVectorized = [U1, U2, this](Value *V) {
              return llvm::all_of(V->users(), [U1, U2, this](Value *U) {
                return U == U1 || U == U2 || R.getTreeEntry(U) != nullptr;
              });
            };
            return AllUsersVectorized(V1) && AllUsersVectorized(V2);
          };
          // A broadcast of a load can be cheaper on some targets.
          if (R.TTI->isLegalBroadcastLoad(V1->getType(),
                                          ElementCount::getFixed(NumLanes)) &&
              ((int)V1->getNumUses() == NumLanes ||
               AllUsersAreInternal(V1, V2)))
            return LookAheadHeuristics::ScoreSplatLoads;
        }
        return LookAheadHeuristics::ScoreSplat;
      }

      auto *LI1 = dyn_cast<LoadInst>(V1);
      auto *LI2 = dyn_cast<LoadInst>(V2);
      if (LI1 && LI2) {
        if (LI1->getParent() != LI2->getParent() || !LI1->isSimple() ||
            !LI2->isSimple())
          return LookAheadHeuristics::ScoreFail;

        std::optional<int> Dist = getPointersDiff(
            LI1->getType(), LI1->getPointerOperand(), LI2->getType(),
            LI2->getPointerOperand(), DL, SE, /*StrictCheck=*/true);
        if (!Dist || *Dist == 0) {
          if (getUnderlyingObject(LI1->getPointerOperand()) ==
                  getUnderlyingObject(LI2->getPointerOperand()) &&
              R.TTI->isLegalMaskedGather(
                  FixedVectorType::get(LI1->getType(), NumLanes),
                  LI1->getAlign()))
            return LookAheadHeuristics::ScoreMaskedGatherCandidate;
          return LookAheadHeuristics::ScoreFail;
        }
        // The distance is too large - still may be profitable to use masked
        // loads/gathers.
        if (std::abs(*Dist) > NumLanes / 2)
          return LookAheadHeuristics::ScoreMaskedGatherCandidate;
        // This still will detect consecutive loads, but we might have "holes"
        // in some cases. It is ok for non-power-2 vectorization and may
        // produce better results. It should not affect current vectorization.
        return (*Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveLoads
                           : LookAheadHeuristics::ScoreReversedLoads;
      }

      auto *C1 = dyn_cast<Constant>(V1);
      auto *C2 = dyn_cast<Constant>(V2);
      if (C1 && C2)
        return LookAheadHeuristics::ScoreConstants;

      // Extracts from consecutive indexes of the same vector get a better
      // score as the extracts could be optimized away.
      Value *EV1;
      ConstantInt *Ex1Idx;
      if (match(V1, m_ExtractElt(m_Value(EV1), m_ConstantInt(Ex1Idx)))) {
        // Undefs are always profitable for extractelements.
        // The compiler can easily combine poison and extractelement
        // <non-poison> or undef and extractelement <poison>. But combining
        // undef + extractelement <non-poison-but-may-produce-poison> requires
        // some extra operations.
        if (isa<UndefValue>(V2))
          return (isa<PoisonValue>(V2) || isUndefVector(EV1).all())
                     ? LookAheadHeuristics::ScoreConsecutiveExtracts
                     : LookAheadHeuristics::ScoreSameOpcode;
        Value *EV2 = nullptr;
        ConstantInt *Ex2Idx = nullptr;
        if (match(V2,
                  m_ExtractElt(m_Value(EV2), m_CombineOr(m_ConstantInt(Ex2Idx),
                                                         m_Undef())))) {
          // Undefs are always profitable for extractelements.
          if (!Ex2Idx)
            return LookAheadHeuristics::ScoreConsecutiveExtracts;
          if (isUndefVector(EV2).all() && EV2->getType() == EV1->getType())
            return LookAheadHeuristics::ScoreConsecutiveExtracts;
          if (EV2 == EV1) {
            int Idx1 = Ex1Idx->getZExtValue();
            int Idx2 = Ex2Idx->getZExtValue();
            int Dist = Idx2 - Idx1;
            // The distance is too large - still may be profitable to use
            // shuffles.
            if (std::abs(Dist) == 0)
              return LookAheadHeuristics::ScoreSplat;
            if (std::abs(Dist) > NumLanes / 2)
              return LookAheadHeuristics::ScoreSameOpcode;
            return (Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveExtracts
                              : LookAheadHeuristics::ScoreReversedExtracts;
          }
          return LookAheadHeuristics::ScoreAltOpcodes;
        }
        return LookAheadHeuristics::ScoreFail;
      }

      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (I1 && I2) {
        if (I1->getParent() != I2->getParent())
          return LookAheadHeuristics::ScoreFail;
        SmallVector<Value *, 4> Ops(MainAltOps.begin(), MainAltOps.end());
        Ops.push_back(I1);
        Ops.push_back(I2);
        InstructionsState S = getSameOpcode(Ops, TLI);
        // Note: Only consider instructions with <= 2 operands to avoid
        // complexity explosion.
        if (S.getOpcode() &&
            (S.MainOp->getNumOperands() <= 2 || !MainAltOps.empty() ||
             !S.isAltShuffle()) &&
            all_of(Ops, [&S](Value *V) {
              return cast<Instruction>(V)->getNumOperands() ==
                     S.MainOp->getNumOperands();
            }))
          return S.isAltShuffle() ? LookAheadHeuristics::ScoreAltOpcodes
                                  : LookAheadHeuristics::ScoreSameOpcode;
      }

      if (isa<UndefValue>(V2))
        return LookAheadHeuristics::ScoreUndef;

      return LookAheadHeuristics::ScoreFail;
    }

    /// Go through the operands of \p LHS and \p RHS recursively until
    /// MaxLevel, and return the cumulative score. \p U1 and \p U2 are
    /// the users of \p LHS and \p RHS (that is \p LHS and \p RHS are operands
    /// of \p U1 and \p U2), except at the beginning of the recursion where
    /// these are set to nullptr.
1409 /// 1410 /// For example: 1411 /// \verbatim 1412 /// A[0] B[0] A[1] B[1] C[0] D[0] B[1] A[1] 1413 /// \ / \ / \ / \ / 1414 /// + + + + 1415 /// G1 G2 G3 G4 1416 /// \endverbatim 1417 /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at 1418 /// each level recursively, accumulating the score. It starts from matching 1419 /// the additions at level 0, then moves on to the loads (level 1). The 1420 /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and 1421 /// {B[0],B[1]} match with LookAheadHeuristics::ScoreConsecutiveLoads, while 1422 /// {A[0],C[0]} has a score of LookAheadHeuristics::ScoreFail. 1423 /// Please note that the order of the operands does not matter, as we 1424 /// evaluate the score of all profitable combinations of operands. In 1425 /// other words the score of G1 and G4 is the same as G1 and G2. This 1426 /// heuristic is based on ideas described in: 1427 /// Look-ahead SLP: Auto-vectorization in the presence of commutative 1428 /// operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha, 1429 /// Luís F. W. Góes 1430 int getScoreAtLevelRec(Value *LHS, Value *RHS, Instruction *U1, 1431 Instruction *U2, int CurrLevel, 1432 ArrayRef<Value *> MainAltOps) const { 1433 1434 // Get the shallow score of V1 and V2. 1435 int ShallowScoreAtThisLevel = 1436 getShallowScore(LHS, RHS, U1, U2, MainAltOps); 1437 1438 // If reached MaxLevel, 1439 // or if V1 and V2 are not instructions, 1440 // or if they are SPLAT, 1441 // or if they are not consecutive, 1442 // or if profitable to vectorize loads or extractelements, early return 1443 // the current cost. 1444 auto *I1 = dyn_cast<Instruction>(LHS); 1445 auto *I2 = dyn_cast<Instruction>(RHS); 1446 if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 || 1447 ShallowScoreAtThisLevel == LookAheadHeuristics::ScoreFail || 1448 (((isa<LoadInst>(I1) && isa<LoadInst>(I2)) || 1449 (I1->getNumOperands() > 2 && I2->getNumOperands() > 2) || 1450 (isa<ExtractElementInst>(I1) && isa<ExtractElementInst>(I2))) && 1451 ShallowScoreAtThisLevel)) 1452 return ShallowScoreAtThisLevel; 1453 assert(I1 && I2 && "Should have early exited."); 1454 1455 // Contains the I2 operand indexes that got matched with I1 operands. 1456 SmallSet<unsigned, 4> Op2Used; 1457 1458 // Recursion towards the operands of I1 and I2. We are trying all possible 1459 // operand pairs, and keeping track of the best score. 1460 for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands(); 1461 OpIdx1 != NumOperands1; ++OpIdx1) { 1462 // Try to pair op1I with the best operand of I2. 1463 int MaxTmpScore = 0; 1464 unsigned MaxOpIdx2 = 0; 1465 bool FoundBest = false; 1466 // If I2 is commutative try all combinations. 1467 unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1; 1468 unsigned ToIdx = isCommutative(I2) 1469 ? I2->getNumOperands() 1470 : std::min(I2->getNumOperands(), OpIdx1 + 1); 1471 assert(FromIdx <= ToIdx && "Bad index"); 1472 for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) { 1473 // Skip operands already paired with OpIdx1. 1474 if (Op2Used.count(OpIdx2)) 1475 continue; 1476 // Recursively calculate the cost at each level 1477 int TmpScore = 1478 getScoreAtLevelRec(I1->getOperand(OpIdx1), I2->getOperand(OpIdx2), 1479 I1, I2, CurrLevel + 1, std::nullopt); 1480 // Look for the best score. 
1481 if (TmpScore > LookAheadHeuristics::ScoreFail && 1482 TmpScore > MaxTmpScore) { 1483 MaxTmpScore = TmpScore; 1484 MaxOpIdx2 = OpIdx2; 1485 FoundBest = true; 1486 } 1487 } 1488 if (FoundBest) { 1489 // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it. 1490 Op2Used.insert(MaxOpIdx2); 1491 ShallowScoreAtThisLevel += MaxTmpScore; 1492 } 1493 } 1494 return ShallowScoreAtThisLevel; 1495 } 1496 }; 1497 /// A helper data structure to hold the operands of a vector of instructions. 1498 /// This supports a fixed vector length for all operand vectors. 1499 class VLOperands { 1500 /// For each operand we need (i) the value, and (ii) the opcode that it 1501 /// would be attached to if the expression was in a left-linearized form. 1502 /// This is required to avoid illegal operand reordering. 1503 /// For example: 1504 /// \verbatim 1505 /// 0 Op1 1506 /// |/ 1507 /// Op1 Op2 Linearized + Op2 1508 /// \ / ----------> |/ 1509 /// - - 1510 /// 1511 /// Op1 - Op2 (0 + Op1) - Op2 1512 /// \endverbatim 1513 /// 1514 /// Value Op1 is attached to a '+' operation, and Op2 to a '-'. 1515 /// 1516 /// Another way to think of this is to track all the operations across the 1517 /// path from the operand all the way to the root of the tree and to 1518 /// calculate the operation that corresponds to this path. For example, the 1519 /// path from Op2 to the root crosses the RHS of the '-', therefore the 1520 /// corresponding operation is a '-' (which matches the one in the 1521 /// linearized tree, as shown above). 1522 /// 1523 /// For lack of a better term, we refer to this operation as Accumulated 1524 /// Path Operation (APO). 1525 struct OperandData { 1526 OperandData() = default; 1527 OperandData(Value *V, bool APO, bool IsUsed) 1528 : V(V), APO(APO), IsUsed(IsUsed) {} 1529 /// The operand value. 1530 Value *V = nullptr; 1531 /// TreeEntries only allow a single opcode, or an alternate sequence of 1532 /// them (e.g, +, -). Therefore, we can safely use a boolean value for the 1533 /// APO. It is set to 'true' if 'V' is attached to an inverse operation 1534 /// in the left-linearized form (e.g., Sub/Div), and 'false' otherwise 1535 /// (e.g., Add/Mul) 1536 bool APO = false; 1537 /// Helper data for the reordering function. 1538 bool IsUsed = false; 1539 }; 1540 1541 /// During operand reordering, we are trying to select the operand at lane 1542 /// that matches best with the operand at the neighboring lane. Our 1543 /// selection is based on the type of value we are looking for. For example, 1544 /// if the neighboring lane has a load, we need to look for a load that is 1545 /// accessing a consecutive address. These strategies are summarized in the 1546 /// 'ReorderingMode' enumerator. 1547 enum class ReorderingMode { 1548 Load, ///< Matching loads to consecutive memory addresses 1549 Opcode, ///< Matching instructions based on opcode (same or alternate) 1550 Constant, ///< Matching constants 1551 Splat, ///< Matching the same instruction multiple times (broadcast) 1552 Failed, ///< We failed to create a vectorizable group 1553 }; 1554 1555 using OperandDataVec = SmallVector<OperandData, 2>; 1556 1557 /// A vector of operand vectors. 1558 SmallVector<OperandDataVec, 4> OpsVec; 1559 1560 const TargetLibraryInfo &TLI; 1561 const DataLayout &DL; 1562 ScalarEvolution &SE; 1563 const BoUpSLP &R; 1564 1565 /// \returns the operand data at \p OpIdx and \p Lane. 
1566 OperandData &getData(unsigned OpIdx, unsigned Lane) {
1567 return OpsVec[OpIdx][Lane];
1568 }
1569
1570 /// \returns the operand data at \p OpIdx and \p Lane. Const version.
1571 const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
1572 return OpsVec[OpIdx][Lane];
1573 }
1574
1575 /// Clears the used flag for all entries.
1576 void clearUsed() {
1577 for (unsigned OpIdx = 0, NumOperands = getNumOperands();
1578 OpIdx != NumOperands; ++OpIdx)
1579 for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
1580 ++Lane)
1581 OpsVec[OpIdx][Lane].IsUsed = false;
1582 }
1583
1584 /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2.
1585 void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
1586 std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
1587 }
1588
1589 /// \param Lane lane of the operands under analysis.
1590 /// \param OpIdx operand index in lane \p Lane for which we're looking for
1591 /// the best candidate.
1592 /// \param Idx operand index of the current candidate value.
1593 /// \returns The additional score due to possible broadcasting of the
1594 /// elements in the lane. It is more profitable to have a power-of-2 number
1595 /// of unique elements in the lane, as it will be vectorized with higher
1596 /// probability after removing duplicates. Currently the SLP vectorizer
1597 /// supports only vectorization of a power-of-2 number of unique scalars.
1598 int getSplatScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const {
1599 Value *IdxLaneV = getData(Idx, Lane).V;
1600 if (!isa<Instruction>(IdxLaneV) || IdxLaneV == getData(OpIdx, Lane).V)
1601 return 0;
1602 SmallPtrSet<Value *, 4> Uniques;
1603 for (unsigned Ln = 0, E = getNumLanes(); Ln < E; ++Ln) {
1604 if (Ln == Lane)
1605 continue;
1606 Value *OpIdxLnV = getData(OpIdx, Ln).V;
1607 if (!isa<Instruction>(OpIdxLnV))
1608 return 0;
1609 Uniques.insert(OpIdxLnV);
1610 }
1611 int UniquesCount = Uniques.size();
1612 int UniquesCntWithIdxLaneV =
1613 Uniques.contains(IdxLaneV) ? UniquesCount : UniquesCount + 1;
1614 Value *OpIdxLaneV = getData(OpIdx, Lane).V;
1615 int UniquesCntWithOpIdxLaneV =
1616 Uniques.contains(OpIdxLaneV) ? UniquesCount : UniquesCount + 1;
1617 if (UniquesCntWithIdxLaneV == UniquesCntWithOpIdxLaneV)
1618 return 0;
1619 return (PowerOf2Ceil(UniquesCntWithOpIdxLaneV) -
1620 UniquesCntWithOpIdxLaneV) -
1621 (PowerOf2Ceil(UniquesCntWithIdxLaneV) - UniquesCntWithIdxLaneV);
1622 }
1623
1624 /// \param Lane lane of the operands under analysis.
1625 /// \param OpIdx operand index in lane \p Lane for which we're looking for
1626 /// the best candidate.
1627 /// \param Idx operand index of the current candidate value.
1628 /// \returns The additional score for a scalar whose users are all
1629 /// vectorized.
1630 int getExternalUseScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const {
1631 Value *IdxLaneV = getData(Idx, Lane).V;
1632 Value *OpIdxLaneV = getData(OpIdx, Lane).V;
1633 // Do not care about the number of uses for vector-like instructions
1634 // (extractelement/extractvalue with constant indices); they are extracts
1635 // themselves and are already externally used. Vectorization of such
1636 // instructions does not add an extra extractelement instruction, it just
1637 // may remove it.
1638 if (isVectorLikeInstWithConstOps(IdxLaneV) &&
1639 isVectorLikeInstWithConstOps(OpIdxLaneV))
1640 return LookAheadHeuristics::ScoreAllUserVectorized;
1641 auto *IdxLaneI = dyn_cast<Instruction>(IdxLaneV);
1642 if (!IdxLaneI || !isa<Instruction>(OpIdxLaneV))
1643 return 0;
1644 return R.areAllUsersVectorized(IdxLaneI)
1645 ? LookAheadHeuristics::ScoreAllUserVectorized
1646 : 0;
1647 }
1648
1649 /// Score scaling factor for fully compatible instructions but with a
1650 /// different number of external uses. Allows better selection of the
1651 /// instructions with fewer external uses.
1652 static const int ScoreScaleFactor = 10;
1653
1654 /// \returns the look-ahead score, which tells us how much the sub-trees
1655 /// rooted at \p LHS and \p RHS match; the more they match, the higher the
1656 /// score. This helps break ties in an informed way when we cannot decide on
1657 /// the order of the operands by just considering the immediate
1658 /// predecessors.
1659 int getLookAheadScore(Value *LHS, Value *RHS, ArrayRef<Value *> MainAltOps,
1660 int Lane, unsigned OpIdx, unsigned Idx,
1661 bool &IsUsed) {
1662 LookAheadHeuristics LookAhead(TLI, DL, SE, R, getNumLanes(),
1663 LookAheadMaxDepth);
1664 // Keep track of the instruction stack as we recurse into the operands
1665 // during the look-ahead score exploration.
1666 int Score =
1667 LookAhead.getScoreAtLevelRec(LHS, RHS, /*U1=*/nullptr, /*U2=*/nullptr,
1668 /*CurrLevel=*/1, MainAltOps);
1669 if (Score) {
1670 int SplatScore = getSplatScore(Lane, OpIdx, Idx);
1671 if (Score <= -SplatScore) {
1672 // Set the minimum score for splat-like sequence to avoid setting
1673 // failed state.
1674 Score = 1;
1675 } else {
1676 Score += SplatScore;
1677 // Scale the score to see the difference between different operands
1678 // and similar operands with all-vectorized versus not-all-vectorized
1679 // uses. It does not affect the actual selection of the best
1680 // compatible operand in general, it just allows selecting the
1681 // operand with all vectorized uses.
1682 Score *= ScoreScaleFactor;
1683 Score += getExternalUseScore(Lane, OpIdx, Idx);
1684 IsUsed = true;
1685 }
1686 }
1687 return Score;
1688 }
1689
1690 /// Best defined scores per lane between the passes. Used to choose the
1691 /// best operand (with the highest score) between the passes.
1692 /// The key - {Operand Index, Lane}.
1693 /// The value - the best score between the passes for the lane and the
1694 /// operand.
1695 SmallDenseMap<std::pair<unsigned, unsigned>, unsigned, 8>
1696 BestScoresPerLanes;
1697
1698 // Search all operands in Ops[*][Lane] for the one that best matches
1699 // Ops[OpIdx][LastLane] and return its operand index.
1700 // If no good match can be found, return std::nullopt.
1701 std::optional<unsigned>
1702 getBestOperand(unsigned OpIdx, int Lane, int LastLane,
1703 ArrayRef<ReorderingMode> ReorderingModes,
1704 ArrayRef<Value *> MainAltOps) {
1705 unsigned NumOperands = getNumOperands();
1706
1707 // The operand of the previous lane at OpIdx.
1708 Value *OpLastLane = getData(OpIdx, LastLane).V;
1709
1710 // Our strategy mode for OpIdx.
1711 ReorderingMode RMode = ReorderingModes[OpIdx];
1712 if (RMode == ReorderingMode::Failed)
1713 return std::nullopt;
1714
1715 // The linearized opcode of the operand at OpIdx, Lane.
1716 bool OpIdxAPO = getData(OpIdx, Lane).APO;
1717
1718 // The best operand index and its score.
1719 // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
1720 // are using the score to differentiate between the two.
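// Illustrative example of how the score differentiates candidates: a
// splat-like candidate clamped by getLookAheadScore() contributes only 1,
// while a candidate whose look-ahead score survives the splat adjustment is
// scaled by ScoreScaleFactor (10) and biased by the external-use score, so
// it wins the comparison against BestOp.Score below.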
1721 struct BestOpData { 1722 std::optional<unsigned> Idx; 1723 unsigned Score = 0; 1724 } BestOp; 1725 BestOp.Score = 1726 BestScoresPerLanes.try_emplace(std::make_pair(OpIdx, Lane), 0) 1727 .first->second; 1728 1729 // Track if the operand must be marked as used. If the operand is set to 1730 // Score 1 explicitly (because of non power-of-2 unique scalars, we may 1731 // want to reestimate the operands again on the following iterations). 1732 bool IsUsed = 1733 RMode == ReorderingMode::Splat || RMode == ReorderingMode::Constant; 1734 // Iterate through all unused operands and look for the best. 1735 for (unsigned Idx = 0; Idx != NumOperands; ++Idx) { 1736 // Get the operand at Idx and Lane. 1737 OperandData &OpData = getData(Idx, Lane); 1738 Value *Op = OpData.V; 1739 bool OpAPO = OpData.APO; 1740 1741 // Skip already selected operands. 1742 if (OpData.IsUsed) 1743 continue; 1744 1745 // Skip if we are trying to move the operand to a position with a 1746 // different opcode in the linearized tree form. This would break the 1747 // semantics. 1748 if (OpAPO != OpIdxAPO) 1749 continue; 1750 1751 // Look for an operand that matches the current mode. 1752 switch (RMode) { 1753 case ReorderingMode::Load: 1754 case ReorderingMode::Constant: 1755 case ReorderingMode::Opcode: { 1756 bool LeftToRight = Lane > LastLane; 1757 Value *OpLeft = (LeftToRight) ? OpLastLane : Op; 1758 Value *OpRight = (LeftToRight) ? Op : OpLastLane; 1759 int Score = getLookAheadScore(OpLeft, OpRight, MainAltOps, Lane, 1760 OpIdx, Idx, IsUsed); 1761 if (Score > static_cast<int>(BestOp.Score)) { 1762 BestOp.Idx = Idx; 1763 BestOp.Score = Score; 1764 BestScoresPerLanes[std::make_pair(OpIdx, Lane)] = Score; 1765 } 1766 break; 1767 } 1768 case ReorderingMode::Splat: 1769 if (Op == OpLastLane) 1770 BestOp.Idx = Idx; 1771 break; 1772 case ReorderingMode::Failed: 1773 llvm_unreachable("Not expected Failed reordering mode."); 1774 } 1775 } 1776 1777 if (BestOp.Idx) { 1778 getData(*BestOp.Idx, Lane).IsUsed = IsUsed; 1779 return BestOp.Idx; 1780 } 1781 // If we could not find a good match return std::nullopt. 1782 return std::nullopt; 1783 } 1784 1785 /// Helper for reorderOperandVecs. 1786 /// \returns the lane that we should start reordering from. This is the one 1787 /// which has the least number of operands that can freely move about or 1788 /// less profitable because it already has the most optimal set of operands. 1789 unsigned getBestLaneToStartReordering() const { 1790 unsigned Min = UINT_MAX; 1791 unsigned SameOpNumber = 0; 1792 // std::pair<unsigned, unsigned> is used to implement a simple voting 1793 // algorithm and choose the lane with the least number of operands that 1794 // can freely move about or less profitable because it already has the 1795 // most optimal set of operands. The first unsigned is a counter for 1796 // voting, the second unsigned is the counter of lanes with instructions 1797 // with same/alternate opcodes and same parent basic block. 1798 MapVector<unsigned, std::pair<unsigned, unsigned>> HashMap; 1799 // Try to be closer to the original results, if we have multiple lanes 1800 // with same cost. If 2 lanes have the same cost, use the one with the 1801 // lowest index. 1802 for (int I = getNumLanes(); I > 0; --I) { 1803 unsigned Lane = I - 1; 1804 OperandsOrderData NumFreeOpsHash = 1805 getMaxNumOperandsThatCanBeReordered(Lane); 1806 // Compare the number of operands that can move and choose the one with 1807 // the least number. 
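// Illustrative note: when several lanes tie on both NumOfAPOs and
// NumOpsWithSameOpcodeParent, lanes sharing an operand-order hash keep
// accumulating votes for that hash, and the loop at the end picks the hash
// with the fewest votes, i.e. the rarest operand ordering.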
1808 if (NumFreeOpsHash.NumOfAPOs < Min) { 1809 Min = NumFreeOpsHash.NumOfAPOs; 1810 SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent; 1811 HashMap.clear(); 1812 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane); 1813 } else if (NumFreeOpsHash.NumOfAPOs == Min && 1814 NumFreeOpsHash.NumOpsWithSameOpcodeParent < SameOpNumber) { 1815 // Select the most optimal lane in terms of number of operands that 1816 // should be moved around. 1817 SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent; 1818 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane); 1819 } else if (NumFreeOpsHash.NumOfAPOs == Min && 1820 NumFreeOpsHash.NumOpsWithSameOpcodeParent == SameOpNumber) { 1821 auto *It = HashMap.find(NumFreeOpsHash.Hash); 1822 if (It == HashMap.end()) 1823 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane); 1824 else 1825 ++It->second.first; 1826 } 1827 } 1828 // Select the lane with the minimum counter. 1829 unsigned BestLane = 0; 1830 unsigned CntMin = UINT_MAX; 1831 for (const auto &Data : reverse(HashMap)) { 1832 if (Data.second.first < CntMin) { 1833 CntMin = Data.second.first; 1834 BestLane = Data.second.second; 1835 } 1836 } 1837 return BestLane; 1838 } 1839 1840 /// Data structure that helps to reorder operands. 1841 struct OperandsOrderData { 1842 /// The best number of operands with the same APOs, which can be 1843 /// reordered. 1844 unsigned NumOfAPOs = UINT_MAX; 1845 /// Number of operands with the same/alternate instruction opcode and 1846 /// parent. 1847 unsigned NumOpsWithSameOpcodeParent = 0; 1848 /// Hash for the actual operands ordering. 1849 /// Used to count operands, actually their position id and opcode 1850 /// value. It is used in the voting mechanism to find the lane with the 1851 /// least number of operands that can freely move about or less profitable 1852 /// because it already has the most optimal set of operands. Can be 1853 /// replaced with SmallVector<unsigned> instead but hash code is faster 1854 /// and requires less memory. 1855 unsigned Hash = 0; 1856 }; 1857 /// \returns the maximum number of operands that are allowed to be reordered 1858 /// for \p Lane and the number of compatible instructions(with the same 1859 /// parent/opcode). This is used as a heuristic for selecting the first lane 1860 /// to start operand reordering. 1861 OperandsOrderData getMaxNumOperandsThatCanBeReordered(unsigned Lane) const { 1862 unsigned CntTrue = 0; 1863 unsigned NumOperands = getNumOperands(); 1864 // Operands with the same APO can be reordered. We therefore need to count 1865 // how many of them we have for each APO, like this: Cnt[APO] = x. 1866 // Since we only have two APOs, namely true and false, we can avoid using 1867 // a map. Instead we can simply count the number of operands that 1868 // correspond to one of them (in this case the 'true' APO), and calculate 1869 // the other by subtracting it from the total number of operands. 1870 // Operands with the same instruction opcode and parent are more 1871 // profitable since we don't need to move them in many cases, with a high 1872 // probability such lane already can be vectorized effectively. 
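// Illustrative example: for a lane 'A = B + C' both operands have APO ==
// false, so CntTrue == 0 and NumOfAPOs == max(0, 2) == 2; for 'A = B - C'
// the RHS operand has APO == true, giving NumOfAPOs == max(1, 1) == 1. The
// subtraction lane thus reports fewer freely reorderable operands and is
// preferred as the starting lane by getBestLaneToStartReordering().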
1873 bool AllUndefs = true;
1874 unsigned NumOpsWithSameOpcodeParent = 0;
1875 Instruction *OpcodeI = nullptr;
1876 BasicBlock *Parent = nullptr;
1877 unsigned Hash = 0;
1878 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1879 const OperandData &OpData = getData(OpIdx, Lane);
1880 if (OpData.APO)
1881 ++CntTrue;
1882 // Use Boyer-Moore majority voting for finding the majority opcode and
1883 // the number of times it occurs.
1884 if (auto *I = dyn_cast<Instruction>(OpData.V)) {
1885 if (!OpcodeI || !getSameOpcode({OpcodeI, I}, TLI).getOpcode() ||
1886 I->getParent() != Parent) {
1887 if (NumOpsWithSameOpcodeParent == 0) {
1888 NumOpsWithSameOpcodeParent = 1;
1889 OpcodeI = I;
1890 Parent = I->getParent();
1891 } else {
1892 --NumOpsWithSameOpcodeParent;
1893 }
1894 } else {
1895 ++NumOpsWithSameOpcodeParent;
1896 }
1897 }
1898 Hash = hash_combine(
1899 Hash, hash_value((OpIdx + 1) * (OpData.V->getValueID() + 1)));
1900 AllUndefs = AllUndefs && isa<UndefValue>(OpData.V);
1901 }
1902 if (AllUndefs)
1903 return {};
1904 OperandsOrderData Data;
1905 Data.NumOfAPOs = std::max(CntTrue, NumOperands - CntTrue);
1906 Data.NumOpsWithSameOpcodeParent = NumOpsWithSameOpcodeParent;
1907 Data.Hash = Hash;
1908 return Data;
1909 }
1910
1911 /// Go through the instructions in VL and append their operands.
1912 void appendOperandsOfVL(ArrayRef<Value *> VL) {
1913 assert(!VL.empty() && "Bad VL");
1914 assert((empty() || VL.size() == getNumLanes()) &&
1915 "Expected same number of lanes");
1916 assert(isa<Instruction>(VL[0]) && "Expected instruction");
1917 unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
1918 OpsVec.resize(NumOperands);
1919 unsigned NumLanes = VL.size();
1920 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1921 OpsVec[OpIdx].resize(NumLanes);
1922 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
1923 assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
1924 // Our tree has just 3 nodes: the root and two operands.
1925 // It is therefore trivial to get the APO. We only need to check the
1926 // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
1927 // RHS operand. The LHS operand of both add and sub is never attached
1928 // to an inverse operation in the linearized form, therefore its APO
1929 // is false. The RHS's APO is true only if VL[Lane] is an inverse operation.
1930
1931 // Since operand reordering is performed on groups of commutative
1932 // operations or alternating sequences (e.g., +, -), we can safely
1933 // tell the inverse operations by checking commutativity.
1934 bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
1935 bool APO = (OpIdx == 0) ? false : IsInverseOperation;
1936 OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
1937 APO, false};
1938 }
1939 }
1940 }
1941
1942 /// \returns the number of operands.
1943 unsigned getNumOperands() const { return OpsVec.size(); }
1944
1945 /// \returns the number of lanes.
1946 unsigned getNumLanes() const { return OpsVec[0].size(); }
1947
1948 /// \returns the operand value at \p OpIdx and \p Lane.
1949 Value *getValue(unsigned OpIdx, unsigned Lane) const {
1950 return getData(OpIdx, Lane).V;
1951 }
1952
1953 /// \returns true if the data structure is empty.
1954 bool empty() const { return OpsVec.empty(); }
1955
1956 /// Clears the data.
1957 void clear() { OpsVec.clear(); }
1958
1959 /// \returns true if there are enough operands identical to \p Op to fill
1960 /// the whole vector.
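/// For example, if every other lane still has an unused operand equal to
/// \p Op (with a matching APO) at some operand index, \p Op can be
/// broadcast across the whole vector.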
1961 /// Note: This modifies the 'IsUsed' flag, so a clearUsed() must follow.
1962 bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
1963 bool OpAPO = getData(OpIdx, Lane).APO;
1964 for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
1965 if (Ln == Lane)
1966 continue;
1967 // This is set to true if we found a candidate for broadcast in lane Ln.
1968 bool FoundCandidate = false;
1969 for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
1970 OperandData &Data = getData(OpI, Ln);
1971 if (Data.APO != OpAPO || Data.IsUsed)
1972 continue;
1973 if (Data.V == Op) {
1974 FoundCandidate = true;
1975 Data.IsUsed = true;
1976 break;
1977 }
1978 }
1979 if (!FoundCandidate)
1980 return false;
1981 }
1982 return true;
1983 }
1984
1985 public:
1986 /// Initialize with all the operands of the instruction vector \p RootVL.
1987 VLOperands(ArrayRef<Value *> RootVL, const TargetLibraryInfo &TLI,
1988 const DataLayout &DL, ScalarEvolution &SE, const BoUpSLP &R)
1989 : TLI(TLI), DL(DL), SE(SE), R(R) {
1990 // Append all the operands of RootVL.
1991 appendOperandsOfVL(RootVL);
1992 }
1993
1994 /// \returns a value vector with the operands across all lanes for the
1995 /// operand at \p OpIdx.
1996 ValueList getVL(unsigned OpIdx) const {
1997 ValueList OpVL(OpsVec[OpIdx].size());
1998 assert(OpsVec[OpIdx].size() == getNumLanes() &&
1999 "Expected same num of lanes across all operands");
2000 for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
2001 OpVL[Lane] = OpsVec[OpIdx][Lane].V;
2002 return OpVL;
2003 }
2004
2005 // Performs operand reordering for 2 or more operands.
2006 // The original operands are in OpsVec[OpIdx][Lane]; the reordered operands
2007 // are written back in place.
2008 void reorder() {
2009 unsigned NumOperands = getNumOperands();
2010 unsigned NumLanes = getNumLanes();
2011 // Each operand has its own mode. We are using this mode to help us select
2012 // the instructions for each lane, so that they match best with the ones
2013 // we have selected so far.
2014 SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);
2015
2016 // This is a greedy single-pass algorithm. We are going over each lane
2017 // once and deciding on the best order right away with no back-tracking.
2018 // However, in order to increase its effectiveness, we start with the lane
2019 // that has operands that can move the least. For example, given the
2020 // following lanes:
2021 // Lane 0 : A[0] = B[0] + C[0] // Visited 3rd
2022 // Lane 1 : A[1] = C[1] - B[1] // Visited 1st
2023 // Lane 2 : A[2] = B[2] + C[2] // Visited 2nd
2024 // Lane 3 : A[3] = C[3] - B[3] // Visited 4th
2025 // we will start at Lane 1, since the operands of the subtraction cannot
2026 // be reordered. Then we will visit the rest of the lanes in a circular
2027 // fashion. That is, Lane 2, then Lane 0, and finally Lane 3.
2028
2029 // Find the first lane that we will start our search from.
2030 unsigned FirstLane = getBestLaneToStartReordering();
2031
2032 // Initialize the modes.
2033 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
2034 Value *OpLane0 = getValue(OpIdx, FirstLane);
2035 // Keep track if we have instructions with all the same opcode on one
2036 // side.
2037 if (isa<LoadInst>(OpLane0))
2038 ReorderingModes[OpIdx] = ReorderingMode::Load;
2039 else if (isa<Instruction>(OpLane0)) {
2040 // Check if OpLane0 should be broadcast.
2041 if (shouldBroadcast(OpLane0, OpIdx, FirstLane))
2042 ReorderingModes[OpIdx] = ReorderingMode::Splat;
2043 else
2044 ReorderingModes[OpIdx] = ReorderingMode::Opcode;
2045 }
2046 else if (isa<Constant>(OpLane0))
2047 ReorderingModes[OpIdx] = ReorderingMode::Constant;
2048 else if (isa<Argument>(OpLane0))
2049 // Our best hope is a Splat. It may save some cost in some cases.
2050 ReorderingModes[OpIdx] = ReorderingMode::Splat;
2051 else
2052 // NOTE: This should be unreachable.
2053 ReorderingModes[OpIdx] = ReorderingMode::Failed;
2054 }
2055
2056 // Check that we don't have the same operands. There is no need to reorder
2057 // if the operands are just a perfect-diamond or shuffled-diamond match.
2058 // Do not skip reordering only for possible broadcasts or a non-power-of-2
2059 // number of scalars (just for now).
2060 auto &&SkipReordering = [this]() {
2061 SmallPtrSet<Value *, 4> UniqueValues;
2062 ArrayRef<OperandData> Op0 = OpsVec.front();
2063 for (const OperandData &Data : Op0)
2064 UniqueValues.insert(Data.V);
2065 for (ArrayRef<OperandData> Op : drop_begin(OpsVec, 1)) {
2066 if (any_of(Op, [&UniqueValues](const OperandData &Data) {
2067 return !UniqueValues.contains(Data.V);
2068 }))
2069 return false;
2070 }
2071 // TODO: Check if we can remove a check for non-power-2 number of
2072 // scalars after full support of non-power-2 vectorization.
2073 return UniqueValues.size() != 2 && isPowerOf2_32(UniqueValues.size());
2074 };
2075
2076 // If the initial strategy fails for any of the operand indexes, then we
2077 // perform reordering again in a second pass. This helps avoid assigning
2078 // high priority to the failed strategy, and should improve reordering for
2079 // the non-failed operand indexes.
2080 for (int Pass = 0; Pass != 2; ++Pass) {
2081 // Check if there is no need to reorder the operands since they are a
2082 // perfect or shuffled diamond match.
2083 // We need to do this to avoid extra external-use cost counting for
2084 // shuffled matches, which may cause regressions.
2085 if (SkipReordering())
2086 break;
2087 // Skip the second pass if the first pass did not fail.
2088 bool StrategyFailed = false;
2089 // Mark all operand data as free to use.
2090 clearUsed();
2091 // We keep the original operand order for the FirstLane, so reorder the
2092 // rest of the lanes. We are visiting the nodes in a circular fashion,
2093 // using FirstLane as the center point and increasing the radius
2094 // distance.
2095 SmallVector<SmallVector<Value *, 2>> MainAltOps(NumOperands);
2096 for (unsigned I = 0; I < NumOperands; ++I)
2097 MainAltOps[I].push_back(getData(I, FirstLane).V);
2098
2099 for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
2100 // Visit the lane on the right and then the lane on the left.
2101 for (int Direction : {+1, -1}) {
2102 int Lane = FirstLane + Direction * Distance;
2103 if (Lane < 0 || Lane >= (int)NumLanes)
2104 continue;
2105 int LastLane = Lane - Direction;
2106 assert(LastLane >= 0 && LastLane < (int)NumLanes &&
2107 "Out of bounds");
2108 // Look for a good match for each operand.
2109 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
2110 // Search for the operand that best matches the operand already
2111 // selected for OpIdx at LastLane.
2112 std::optional<unsigned> BestIdx = getBestOperand(
2113 OpIdx, Lane, LastLane, ReorderingModes, MainAltOps[OpIdx]);
2114 // By not selecting a value, we allow the operands that follow to
2115 // select a better matching value. We will get a non-null value in
2116 if (BestIdx) { 2117 // Swap the current operand with the one returned by 2118 // getBestOperand(). 2119 swap(OpIdx, *BestIdx, Lane); 2120 } else { 2121 // We failed to find a best operand, set mode to 'Failed'. 2122 ReorderingModes[OpIdx] = ReorderingMode::Failed; 2123 // Enable the second pass. 2124 StrategyFailed = true; 2125 } 2126 // Try to get the alternate opcode and follow it during analysis. 2127 if (MainAltOps[OpIdx].size() != 2) { 2128 OperandData &AltOp = getData(OpIdx, Lane); 2129 InstructionsState OpS = 2130 getSameOpcode({MainAltOps[OpIdx].front(), AltOp.V}, TLI); 2131 if (OpS.getOpcode() && OpS.isAltShuffle()) 2132 MainAltOps[OpIdx].push_back(AltOp.V); 2133 } 2134 } 2135 } 2136 } 2137 // Skip second pass if the strategy did not fail. 2138 if (!StrategyFailed) 2139 break; 2140 } 2141 } 2142 2143 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2144 LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) { 2145 switch (RMode) { 2146 case ReorderingMode::Load: 2147 return "Load"; 2148 case ReorderingMode::Opcode: 2149 return "Opcode"; 2150 case ReorderingMode::Constant: 2151 return "Constant"; 2152 case ReorderingMode::Splat: 2153 return "Splat"; 2154 case ReorderingMode::Failed: 2155 return "Failed"; 2156 } 2157 llvm_unreachable("Unimplemented Reordering Type"); 2158 } 2159 2160 LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode, 2161 raw_ostream &OS) { 2162 return OS << getModeStr(RMode); 2163 } 2164 2165 /// Debug print. 2166 LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) { 2167 printMode(RMode, dbgs()); 2168 } 2169 2170 friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) { 2171 return printMode(RMode, OS); 2172 } 2173 2174 LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const { 2175 const unsigned Indent = 2; 2176 unsigned Cnt = 0; 2177 for (const OperandDataVec &OpDataVec : OpsVec) { 2178 OS << "Operand " << Cnt++ << "\n"; 2179 for (const OperandData &OpData : OpDataVec) { 2180 OS.indent(Indent) << "{"; 2181 if (Value *V = OpData.V) 2182 OS << *V; 2183 else 2184 OS << "null"; 2185 OS << ", APO:" << OpData.APO << "}\n"; 2186 } 2187 OS << "\n"; 2188 } 2189 return OS; 2190 } 2191 2192 /// Debug print. 2193 LLVM_DUMP_METHOD void dump() const { print(dbgs()); } 2194 #endif 2195 }; 2196 2197 /// Evaluate each pair in \p Candidates and return index into \p Candidates 2198 /// for a pair which have highest score deemed to have best chance to form 2199 /// root of profitable tree to vectorize. Return std::nullopt if no candidate 2200 /// scored above the LookAheadHeuristics::ScoreFail. \param Limit Lower limit 2201 /// of the cost, considered to be good enough score. 2202 std::optional<int> 2203 findBestRootPair(ArrayRef<std::pair<Value *, Value *>> Candidates, 2204 int Limit = LookAheadHeuristics::ScoreFail) { 2205 LookAheadHeuristics LookAhead(*TLI, *DL, *SE, *this, /*NumLanes=*/2, 2206 RootLookAheadMaxDepth); 2207 int BestScore = Limit; 2208 std::optional<int> Index; 2209 for (int I : seq<int>(0, Candidates.size())) { 2210 int Score = LookAhead.getScoreAtLevelRec(Candidates[I].first, 2211 Candidates[I].second, 2212 /*U1=*/nullptr, /*U2=*/nullptr, 2213 /*Level=*/1, std::nullopt); 2214 if (Score > BestScore) { 2215 BestScore = Score; 2216 Index = I; 2217 } 2218 } 2219 return Index; 2220 } 2221 2222 /// Checks if the instruction is marked for deletion. 2223 bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); } 2224 2225 /// Removes an instruction from its block and eventually deletes it. 
2226 /// It's like Instruction::eraseFromParent() except that the actual deletion
2227 /// is delayed until BoUpSLP is destructed.
2228 void eraseInstruction(Instruction *I) {
2229 DeletedInstructions.insert(I);
2230 }
2231
2232 /// Checks if the instruction was already analyzed for being possible
2233 /// reduction root.
2234 bool isAnalyzedReductionRoot(Instruction *I) const {
2235 return AnalyzedReductionsRoots.count(I);
2236 }
2237 /// Register given instruction as already analyzed for being possible
2238 /// reduction root.
2239 void analyzedReductionRoot(Instruction *I) {
2240 AnalyzedReductionsRoots.insert(I);
2241 }
2242 /// Checks if the provided list of reduced values was already checked for
2243 /// vectorization.
2244 bool areAnalyzedReductionVals(ArrayRef<Value *> VL) const {
2245 return AnalyzedReductionVals.contains(hash_value(VL));
2246 }
2247 /// Adds the list of reduced values to the list of values already checked
2248 /// for vectorization.
2249 void analyzedReductionVals(ArrayRef<Value *> VL) {
2250 AnalyzedReductionVals.insert(hash_value(VL));
2251 }
2252 /// Clear the list of the analyzed reduction root instructions.
2253 void clearReductionData() {
2254 AnalyzedReductionsRoots.clear();
2255 AnalyzedReductionVals.clear();
2256 }
2257 /// Checks if the given value is gathered in one of the nodes.
2258 bool isAnyGathered(const SmallDenseSet<Value *> &Vals) const {
2259 return any_of(MustGather, [&](Value *V) { return Vals.contains(V); });
2260 }
2261
2262 /// Check if the value is vectorized in the tree.
2263 bool isVectorized(Value *V) const { return getTreeEntry(V); }
2264
2265 ~BoUpSLP();
2266
2267 private:
2268 /// Determine if a vectorized value \p V can be demoted to
2269 /// a smaller type with a truncation. We collect the values that will be
2270 /// demoted in ToDemote and additional roots that require investigating in
2271 /// Roots.
2272 /// \param DemotedConsts list of Instruction/OperandIndex pairs that are
2273 /// constant and to be demoted. Required to correctly identify constant nodes
2274 /// to be demoted.
2275 bool collectValuesToDemote(
2276 Value *V, SmallVectorImpl<Value *> &ToDemote,
2277 DenseMap<Instruction *, SmallVector<unsigned>> &DemotedConsts,
2278 SmallVectorImpl<Value *> &Roots, DenseSet<Value *> &Visited) const;
2279
2280 /// Check if the operands on the edges \p Edges of the \p UserTE allow
2281 /// reordering (i.e. the operands can be reordered because they have only one
2282 /// user and are reorderable).
2283 /// \param ReorderableGathers List of all gather nodes that require reordering
2284 /// (e.g., gather of extractelements or partially vectorizable loads).
2285 /// \param GatherOps List of gather operand nodes for \p UserTE that require
2286 /// reordering, subset of \p NonVectorized.
2287 bool
2288 canReorderOperands(TreeEntry *UserTE,
2289 SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges,
2290 ArrayRef<TreeEntry *> ReorderableGathers,
2291 SmallVectorImpl<TreeEntry *> &GatherOps);
2292
2293 /// Checks if the given \p TE is a gather node with clustered reused scalars
2294 /// and reorders it per given \p Mask.
2295 void reorderNodeWithReuses(TreeEntry &TE, ArrayRef<int> Mask) const;
2296
2297 /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
2298 /// if any. If it is not vectorized (gather node), returns nullptr.
2299 TreeEntry *getVectorizedOperand(TreeEntry *UserTE, unsigned OpIdx) {
2300 ArrayRef<Value *> VL = UserTE->getOperand(OpIdx);
2301 TreeEntry *TE = nullptr;
2302 const auto *It = find_if(VL, [&](Value *V) {
2303 TE = getTreeEntry(V);
2304 if (TE && is_contained(TE->UserTreeIndices, EdgeInfo(UserTE, OpIdx)))
2305 return true;
2306 auto It = MultiNodeScalars.find(V);
2307 if (It != MultiNodeScalars.end()) {
2308 for (TreeEntry *E : It->second) {
2309 if (is_contained(E->UserTreeIndices, EdgeInfo(UserTE, OpIdx))) {
2310 TE = E;
2311 return true;
2312 }
2313 }
2314 }
2315 return false;
2316 });
2317 if (It != VL.end()) {
2318 assert(TE->isSame(VL) && "Expected same scalars.");
2319 return TE;
2320 }
2321 return nullptr;
2322 }
2323
2324 /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
2325 /// if any. If it is not vectorized (gather node), returns nullptr.
2326 const TreeEntry *getVectorizedOperand(const TreeEntry *UserTE,
2327 unsigned OpIdx) const {
2328 return const_cast<BoUpSLP *>(this)->getVectorizedOperand(
2329 const_cast<TreeEntry *>(UserTE), OpIdx);
2330 }
2331
2332 /// Checks if all users of \p I are the part of the vectorization tree.
2333 bool areAllUsersVectorized(
2334 Instruction *I,
2335 const SmallDenseSet<Value *> *VectorizedVals = nullptr) const;
2336
2337 /// Return information about the vector formed for the specified index
2338 /// of a vector of (the same) instructions.
2339 TargetTransformInfo::OperandValueInfo getOperandInfo(ArrayRef<Value *> Ops);
2340
2341 /// \returns the graph entry for the \p Idx operand of the \p E entry.
2342 const TreeEntry *getOperandEntry(const TreeEntry *E, unsigned Idx) const;
2343
2344 /// \returns the cost of the vectorizable entry.
2345 InstructionCost getEntryCost(const TreeEntry *E,
2346 ArrayRef<Value *> VectorizedVals,
2347 SmallPtrSetImpl<Value *> &CheckedExtracts);
2348
2349 /// This is the recursive part of buildTree.
2350 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth,
2351 const EdgeInfo &EI);
2352
2353 /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can
2354 /// be vectorized to use the original vector (or aggregate "bitcast" to a
2355 /// vector) and sets \p CurrentOrder to the identity permutation; otherwise
2356 /// returns false, setting \p CurrentOrder to either an empty vector or a
2357 /// non-identity permutation that allows to reuse extract instructions.
2358 /// \param ResizeAllowed indicates whether it is allowed to handle subvector
2359 /// extract order.
2360 bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
2361 SmallVectorImpl<unsigned> &CurrentOrder,
2362 bool ResizeAllowed = false) const;
2363
2364 /// Vectorize a single entry in the tree.
2365 /// \param PostponedPHIs true if the emission of phi nodes needs to be
2366 /// postponed to avoid issues with def-use order.
2367 Value *vectorizeTree(TreeEntry *E, bool PostponedPHIs);
2368
2369 /// Vectorize a single entry in the tree, the \p NodeIdx-th operand of the
2370 /// entry \p E.
2371 /// \param PostponedPHIs true if the emission of phi nodes needs to be
2372 /// postponed to avoid issues with def-use order.
2373 Value *vectorizeOperand(TreeEntry *E, unsigned NodeIdx, bool PostponedPHIs);
2374
2375 /// Create a new vector from a list of scalar values. Produces a sequence
2376 /// which exploits values reused across lanes, and arranges the inserts
2377 /// for ease of later optimization.
2378 template <typename BVTy, typename ResTy, typename...
Args> 2379 ResTy processBuildVector(const TreeEntry *E, Args &...Params); 2380 2381 /// Create a new vector from a list of scalar values. Produces a sequence 2382 /// which exploits values reused across lanes, and arranges the inserts 2383 /// for ease of later optimization. 2384 Value *createBuildVector(const TreeEntry *E); 2385 2386 /// Returns the instruction in the bundle, which can be used as a base point 2387 /// for scheduling. Usually it is the last instruction in the bundle, except 2388 /// for the case when all operands are external (in this case, it is the first 2389 /// instruction in the list). 2390 Instruction &getLastInstructionInBundle(const TreeEntry *E); 2391 2392 /// Tries to find extractelement instructions with constant indices from fixed 2393 /// vector type and gather such instructions into a bunch, which highly likely 2394 /// might be detected as a shuffle of 1 or 2 input vectors. If this attempt 2395 /// was successful, the matched scalars are replaced by poison values in \p VL 2396 /// for future analysis. 2397 std::optional<TargetTransformInfo::ShuffleKind> 2398 tryToGatherSingleRegisterExtractElements(MutableArrayRef<Value *> VL, 2399 SmallVectorImpl<int> &Mask) const; 2400 2401 /// Tries to find extractelement instructions with constant indices from fixed 2402 /// vector type and gather such instructions into a bunch, which highly likely 2403 /// might be detected as a shuffle of 1 or 2 input vectors. If this attempt 2404 /// was successful, the matched scalars are replaced by poison values in \p VL 2405 /// for future analysis. 2406 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> 2407 tryToGatherExtractElements(SmallVectorImpl<Value *> &VL, 2408 SmallVectorImpl<int> &Mask, 2409 unsigned NumParts) const; 2410 2411 /// Checks if the gathered \p VL can be represented as a single register 2412 /// shuffle(s) of previous tree entries. 2413 /// \param TE Tree entry checked for permutation. 2414 /// \param VL List of scalars (a subset of the TE scalar), checked for 2415 /// permutations. Must form single-register vector. 2416 /// \returns ShuffleKind, if gathered values can be represented as shuffles of 2417 /// previous tree entries. \p Part of \p Mask is filled with the shuffle mask. 2418 std::optional<TargetTransformInfo::ShuffleKind> 2419 isGatherShuffledSingleRegisterEntry( 2420 const TreeEntry *TE, ArrayRef<Value *> VL, MutableArrayRef<int> Mask, 2421 SmallVectorImpl<const TreeEntry *> &Entries, unsigned Part); 2422 2423 /// Checks if the gathered \p VL can be represented as multi-register 2424 /// shuffle(s) of previous tree entries. 2425 /// \param TE Tree entry checked for permutation. 2426 /// \param VL List of scalars (a subset of the TE scalar), checked for 2427 /// permutations. 2428 /// \returns per-register series of ShuffleKind, if gathered values can be 2429 /// represented as shuffles of previous tree entries. \p Mask is filled with 2430 /// the shuffle mask (also on per-register base). 2431 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> 2432 isGatherShuffledEntry( 2433 const TreeEntry *TE, ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask, 2434 SmallVectorImpl<SmallVector<const TreeEntry *>> &Entries, 2435 unsigned NumParts); 2436 2437 /// \returns the scalarization cost for this list of values. Assuming that 2438 /// this subtree gets vectorized, we may need to extract the values from the 2439 /// roots. This method calculates the cost of extracting the values. 
2440 /// \param ForPoisonSrc true if initial vector is poison, false otherwise.
2441 InstructionCost getGatherCost(ArrayRef<Value *> VL, bool ForPoisonSrc) const;
2442
2443 /// Set the Builder insert point to one after the last instruction in
2444 /// the bundle.
2445 void setInsertPointAfterBundle(const TreeEntry *E);
2446
2447 /// \returns a vector from a collection of scalars in \p VL. If \p Root is not
2448 /// specified, the starting vector value is poison.
2449 Value *gather(ArrayRef<Value *> VL, Value *Root);
2450
2451 /// \returns whether the VectorizableTree is fully vectorizable and will
2452 /// be beneficial even if the tree height is tiny.
2453 bool isFullyVectorizableTinyTree(bool ForReduction) const;
2454
2455 /// Reorder commutative or alt operands to get better probability of
2456 /// generating vectorized code.
2457 static void reorderInputsAccordingToOpcode(
2458 ArrayRef<Value *> VL, SmallVectorImpl<Value *> &Left,
2459 SmallVectorImpl<Value *> &Right, const TargetLibraryInfo &TLI,
2460 const DataLayout &DL, ScalarEvolution &SE, const BoUpSLP &R);
2461
2462 /// Helper for `findExternalStoreUsersReorderIndices()`. It iterates over the
2463 /// users of \p TE and collects the stores. It returns the map from the store
2464 /// pointers to the collected stores.
2465 DenseMap<Value *, SmallVector<StoreInst *>>
2466 collectUserStores(const BoUpSLP::TreeEntry *TE) const;
2467
2468 /// Helper for `findExternalStoreUsersReorderIndices()`. It checks if the
2469 /// stores in \p StoresVec can form a vector instruction. If so it returns
2470 /// true and populates \p ReorderIndices with the shuffle indices of the
2471 /// stores when compared to the sorted vector.
2472 bool canFormVector(ArrayRef<StoreInst *> StoresVec,
2473 OrdersType &ReorderIndices) const;
2474
2475 /// Iterates through the users of \p TE, looking for scalar stores that can be
2476 /// potentially vectorized in a future SLP-tree. If found, it keeps track of
2477 /// their order and builds an order index vector for each store bundle. It
2478 /// returns all of the order vectors found.
2479 /// We run this after the tree has formed, otherwise we may come across user
2480 /// instructions that are not yet in the tree.
2481 SmallVector<OrdersType, 1>
2482 findExternalStoreUsersReorderIndices(TreeEntry *TE) const;
2483
2484 struct TreeEntry {
2485 using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>;
2486 TreeEntry(VecTreeTy &Container) : Container(Container) {}
2487
2488 /// \returns Common mask for reorder indices and reused scalars.
2489 SmallVector<int> getCommonMask() const {
2490 SmallVector<int> Mask;
2491 inversePermutation(ReorderIndices, Mask);
2492 ::addMask(Mask, ReuseShuffleIndices);
2493 return Mask;
2494 }
2495
2496 /// \returns true if the scalars in VL are equal to this entry.
2497 bool isSame(ArrayRef<Value *> VL) const {
2498 auto &&IsSame = [VL](ArrayRef<Value *> Scalars, ArrayRef<int> Mask) {
2499 if (Mask.size() != VL.size() && VL.size() == Scalars.size())
2500 return std::equal(VL.begin(), VL.end(), Scalars.begin());
2501 return VL.size() == Mask.size() &&
2502 std::equal(VL.begin(), VL.end(), Mask.begin(),
2503 [Scalars](Value *V, int Idx) {
2504 return (isa<UndefValue>(V) &&
2505 Idx == PoisonMaskElem) ||
2506 (Idx != PoisonMaskElem && V == Scalars[Idx]);
2507 });
2508 };
2509 if (!ReorderIndices.empty()) {
2510 // TODO: implement matching if the nodes are just reordered; we can still
2511 // treat the vector as the same if the list of scalars matches VL
2512 // directly, without reordering.
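// Illustrative example (assuming the usual inverse-permutation semantics,
// Mask[ReorderIndices[I]] == I): ReorderIndices == {2, 0, 1} produces
// Mask == {1, 2, 0}, which may then be combined with ReuseShuffleIndices
// below.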
2513 SmallVector<int> Mask; 2514 inversePermutation(ReorderIndices, Mask); 2515 if (VL.size() == Scalars.size()) 2516 return IsSame(Scalars, Mask); 2517 if (VL.size() == ReuseShuffleIndices.size()) { 2518 ::addMask(Mask, ReuseShuffleIndices); 2519 return IsSame(Scalars, Mask); 2520 } 2521 return false; 2522 } 2523 return IsSame(Scalars, ReuseShuffleIndices); 2524 } 2525 2526 bool isOperandGatherNode(const EdgeInfo &UserEI) const { 2527 return State == TreeEntry::NeedToGather && 2528 UserTreeIndices.front().EdgeIdx == UserEI.EdgeIdx && 2529 UserTreeIndices.front().UserTE == UserEI.UserTE; 2530 } 2531 2532 /// \returns true if current entry has same operands as \p TE. 2533 bool hasEqualOperands(const TreeEntry &TE) const { 2534 if (TE.getNumOperands() != getNumOperands()) 2535 return false; 2536 SmallBitVector Used(getNumOperands()); 2537 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) { 2538 unsigned PrevCount = Used.count(); 2539 for (unsigned K = 0; K < E; ++K) { 2540 if (Used.test(K)) 2541 continue; 2542 if (getOperand(K) == TE.getOperand(I)) { 2543 Used.set(K); 2544 break; 2545 } 2546 } 2547 // Check if we actually found the matching operand. 2548 if (PrevCount == Used.count()) 2549 return false; 2550 } 2551 return true; 2552 } 2553 2554 /// \return Final vectorization factor for the node. Defined by the total 2555 /// number of vectorized scalars, including those, used several times in the 2556 /// entry and counted in the \a ReuseShuffleIndices, if any. 2557 unsigned getVectorFactor() const { 2558 if (!ReuseShuffleIndices.empty()) 2559 return ReuseShuffleIndices.size(); 2560 return Scalars.size(); 2561 }; 2562 2563 /// A vector of scalars. 2564 ValueList Scalars; 2565 2566 /// The Scalars are vectorized into this value. It is initialized to Null. 2567 WeakTrackingVH VectorizedValue = nullptr; 2568 2569 /// New vector phi instructions emitted for the vectorized phi nodes. 2570 PHINode *PHI = nullptr; 2571 2572 /// Do we need to gather this sequence or vectorize it 2573 /// (either with vector instruction or with scatter/gather 2574 /// intrinsics for store/load)? 2575 enum EntryState { 2576 Vectorize, 2577 ScatterVectorize, 2578 PossibleStridedVectorize, 2579 NeedToGather 2580 }; 2581 EntryState State; 2582 2583 /// Does this sequence require some shuffling? 2584 SmallVector<int, 4> ReuseShuffleIndices; 2585 2586 /// Does this entry require reordering? 2587 SmallVector<unsigned, 4> ReorderIndices; 2588 2589 /// Points back to the VectorizableTree. 2590 /// 2591 /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has 2592 /// to be a pointer and needs to be able to initialize the child iterator. 2593 /// Thus we need a reference back to the container to translate the indices 2594 /// to entries. 2595 VecTreeTy &Container; 2596 2597 /// The TreeEntry index containing the user of this entry. We can actually 2598 /// have multiple users so the data structure is not truly a tree. 2599 SmallVector<EdgeInfo, 1> UserTreeIndices; 2600 2601 /// The index of this treeEntry in VectorizableTree. 2602 int Idx = -1; 2603 2604 private: 2605 /// The operands of each instruction in each lane Operands[op_index][lane]. 2606 /// Note: This helps avoid the replication of the code that performs the 2607 /// reordering of operands during buildTree_rec() and vectorizeTree(). 2608 SmallVector<ValueList, 2> Operands; 2609 2610 /// The main/alternate instruction. 
2611 Instruction *MainOp = nullptr; 2612 Instruction *AltOp = nullptr; 2613 2614 public: 2615 /// Set this bundle's \p OpIdx'th operand to \p OpVL. 2616 void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) { 2617 if (Operands.size() < OpIdx + 1) 2618 Operands.resize(OpIdx + 1); 2619 assert(Operands[OpIdx].empty() && "Already resized?"); 2620 assert(OpVL.size() <= Scalars.size() && 2621 "Number of operands is greater than the number of scalars."); 2622 Operands[OpIdx].resize(OpVL.size()); 2623 copy(OpVL, Operands[OpIdx].begin()); 2624 } 2625 2626 /// Set the operands of this bundle in their original order. 2627 void setOperandsInOrder() { 2628 assert(Operands.empty() && "Already initialized?"); 2629 auto *I0 = cast<Instruction>(Scalars[0]); 2630 Operands.resize(I0->getNumOperands()); 2631 unsigned NumLanes = Scalars.size(); 2632 for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands(); 2633 OpIdx != NumOperands; ++OpIdx) { 2634 Operands[OpIdx].resize(NumLanes); 2635 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) { 2636 auto *I = cast<Instruction>(Scalars[Lane]); 2637 assert(I->getNumOperands() == NumOperands && 2638 "Expected same number of operands"); 2639 Operands[OpIdx][Lane] = I->getOperand(OpIdx); 2640 } 2641 } 2642 } 2643 2644 /// Reorders operands of the node to the given mask \p Mask. 2645 void reorderOperands(ArrayRef<int> Mask) { 2646 for (ValueList &Operand : Operands) 2647 reorderScalars(Operand, Mask); 2648 } 2649 2650 /// \returns the \p OpIdx operand of this TreeEntry. 2651 ValueList &getOperand(unsigned OpIdx) { 2652 assert(OpIdx < Operands.size() && "Off bounds"); 2653 return Operands[OpIdx]; 2654 } 2655 2656 /// \returns the \p OpIdx operand of this TreeEntry. 2657 ArrayRef<Value *> getOperand(unsigned OpIdx) const { 2658 assert(OpIdx < Operands.size() && "Off bounds"); 2659 return Operands[OpIdx]; 2660 } 2661 2662 /// \returns the number of operands. 2663 unsigned getNumOperands() const { return Operands.size(); } 2664 2665 /// \return the single \p OpIdx operand. 2666 Value *getSingleOperand(unsigned OpIdx) const { 2667 assert(OpIdx < Operands.size() && "Off bounds"); 2668 assert(!Operands[OpIdx].empty() && "No operand available"); 2669 return Operands[OpIdx][0]; 2670 } 2671 2672 /// Some of the instructions in the list have alternate opcodes. 2673 bool isAltShuffle() const { return MainOp != AltOp; } 2674 2675 bool isOpcodeOrAlt(Instruction *I) const { 2676 unsigned CheckedOpcode = I->getOpcode(); 2677 return (getOpcode() == CheckedOpcode || 2678 getAltOpcode() == CheckedOpcode); 2679 } 2680 2681 /// Chooses the correct key for scheduling data. If \p Op has the same (or 2682 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is 2683 /// \p OpValue. 2684 Value *isOneOf(Value *Op) const { 2685 auto *I = dyn_cast<Instruction>(Op); 2686 if (I && isOpcodeOrAlt(I)) 2687 return Op; 2688 return MainOp; 2689 } 2690 2691 void setOperations(const InstructionsState &S) { 2692 MainOp = S.MainOp; 2693 AltOp = S.AltOp; 2694 } 2695 2696 Instruction *getMainOp() const { 2697 return MainOp; 2698 } 2699 2700 Instruction *getAltOp() const { 2701 return AltOp; 2702 } 2703 2704 /// The main/alternate opcodes for the list of instructions. 2705 unsigned getOpcode() const { 2706 return MainOp ? MainOp->getOpcode() : 0; 2707 } 2708 2709 unsigned getAltOpcode() const { 2710 return AltOp ? AltOp->getOpcode() : 0; 2711 } 2712 2713 /// When ReuseReorderShuffleIndices is empty it just returns position of \p 2714 /// V within vector of Scalars. 
Otherwise, try to remap on its reuse index. 2715 int findLaneForValue(Value *V) const { 2716 unsigned FoundLane = std::distance(Scalars.begin(), find(Scalars, V)); 2717 assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); 2718 if (!ReorderIndices.empty()) 2719 FoundLane = ReorderIndices[FoundLane]; 2720 assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); 2721 if (!ReuseShuffleIndices.empty()) { 2722 FoundLane = std::distance(ReuseShuffleIndices.begin(), 2723 find(ReuseShuffleIndices, FoundLane)); 2724 } 2725 return FoundLane; 2726 } 2727 2728 /// Build a shuffle mask for graph entry which represents a merge of main 2729 /// and alternate operations. 2730 void 2731 buildAltOpShuffleMask(const function_ref<bool(Instruction *)> IsAltOp, 2732 SmallVectorImpl<int> &Mask, 2733 SmallVectorImpl<Value *> *OpScalars = nullptr, 2734 SmallVectorImpl<Value *> *AltScalars = nullptr) const; 2735 2736 #ifndef NDEBUG 2737 /// Debug printer. 2738 LLVM_DUMP_METHOD void dump() const { 2739 dbgs() << Idx << ".\n"; 2740 for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) { 2741 dbgs() << "Operand " << OpI << ":\n"; 2742 for (const Value *V : Operands[OpI]) 2743 dbgs().indent(2) << *V << "\n"; 2744 } 2745 dbgs() << "Scalars: \n"; 2746 for (Value *V : Scalars) 2747 dbgs().indent(2) << *V << "\n"; 2748 dbgs() << "State: "; 2749 switch (State) { 2750 case Vectorize: 2751 dbgs() << "Vectorize\n"; 2752 break; 2753 case ScatterVectorize: 2754 dbgs() << "ScatterVectorize\n"; 2755 break; 2756 case PossibleStridedVectorize: 2757 dbgs() << "PossibleStridedVectorize\n"; 2758 break; 2759 case NeedToGather: 2760 dbgs() << "NeedToGather\n"; 2761 break; 2762 } 2763 dbgs() << "MainOp: "; 2764 if (MainOp) 2765 dbgs() << *MainOp << "\n"; 2766 else 2767 dbgs() << "NULL\n"; 2768 dbgs() << "AltOp: "; 2769 if (AltOp) 2770 dbgs() << *AltOp << "\n"; 2771 else 2772 dbgs() << "NULL\n"; 2773 dbgs() << "VectorizedValue: "; 2774 if (VectorizedValue) 2775 dbgs() << *VectorizedValue << "\n"; 2776 else 2777 dbgs() << "NULL\n"; 2778 dbgs() << "ReuseShuffleIndices: "; 2779 if (ReuseShuffleIndices.empty()) 2780 dbgs() << "Empty"; 2781 else 2782 for (int ReuseIdx : ReuseShuffleIndices) 2783 dbgs() << ReuseIdx << ", "; 2784 dbgs() << "\n"; 2785 dbgs() << "ReorderIndices: "; 2786 for (unsigned ReorderIdx : ReorderIndices) 2787 dbgs() << ReorderIdx << ", "; 2788 dbgs() << "\n"; 2789 dbgs() << "UserTreeIndices: "; 2790 for (const auto &EInfo : UserTreeIndices) 2791 dbgs() << EInfo << ", "; 2792 dbgs() << "\n"; 2793 } 2794 #endif 2795 }; 2796 2797 #ifndef NDEBUG 2798 void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost, 2799 InstructionCost VecCost, InstructionCost ScalarCost, 2800 StringRef Banner) const { 2801 dbgs() << "SLP: " << Banner << ":\n"; 2802 E->dump(); 2803 dbgs() << "SLP: Costs:\n"; 2804 dbgs() << "SLP: ReuseShuffleCost = " << ReuseShuffleCost << "\n"; 2805 dbgs() << "SLP: VectorCost = " << VecCost << "\n"; 2806 dbgs() << "SLP: ScalarCost = " << ScalarCost << "\n"; 2807 dbgs() << "SLP: ReuseShuffleCost + VecCost - ScalarCost = " 2808 << ReuseShuffleCost + VecCost - ScalarCost << "\n"; 2809 } 2810 #endif 2811 2812 /// Create a new VectorizableTree entry. 2813 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, 2814 std::optional<ScheduleData *> Bundle, 2815 const InstructionsState &S, 2816 const EdgeInfo &UserTreeIdx, 2817 ArrayRef<int> ReuseShuffleIndices = std::nullopt, 2818 ArrayRef<unsigned> ReorderIndices = std::nullopt) { 2819 TreeEntry::EntryState EntryState = 2820 Bundle ? 
TreeEntry::Vectorize : TreeEntry::NeedToGather; 2821 return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx, 2822 ReuseShuffleIndices, ReorderIndices); 2823 } 2824 2825 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, 2826 TreeEntry::EntryState EntryState, 2827 std::optional<ScheduleData *> Bundle, 2828 const InstructionsState &S, 2829 const EdgeInfo &UserTreeIdx, 2830 ArrayRef<int> ReuseShuffleIndices = std::nullopt, 2831 ArrayRef<unsigned> ReorderIndices = std::nullopt) { 2832 assert(((!Bundle && EntryState == TreeEntry::NeedToGather) || 2833 (Bundle && EntryState != TreeEntry::NeedToGather)) && 2834 "Need to vectorize gather entry?"); 2835 VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree)); 2836 TreeEntry *Last = VectorizableTree.back().get(); 2837 Last->Idx = VectorizableTree.size() - 1; 2838 Last->State = EntryState; 2839 Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(), 2840 ReuseShuffleIndices.end()); 2841 if (ReorderIndices.empty()) { 2842 Last->Scalars.assign(VL.begin(), VL.end()); 2843 Last->setOperations(S); 2844 } else { 2845 // Reorder scalars and build final mask. 2846 Last->Scalars.assign(VL.size(), nullptr); 2847 transform(ReorderIndices, Last->Scalars.begin(), 2848 [VL](unsigned Idx) -> Value * { 2849 if (Idx >= VL.size()) 2850 return UndefValue::get(VL.front()->getType()); 2851 return VL[Idx]; 2852 }); 2853 InstructionsState S = getSameOpcode(Last->Scalars, *TLI); 2854 Last->setOperations(S); 2855 Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end()); 2856 } 2857 if (Last->State != TreeEntry::NeedToGather) { 2858 for (Value *V : VL) { 2859 const TreeEntry *TE = getTreeEntry(V); 2860 assert((!TE || TE == Last || doesNotNeedToBeScheduled(V)) && 2861 "Scalar already in tree!"); 2862 if (TE) { 2863 if (TE != Last) 2864 MultiNodeScalars.try_emplace(V).first->getSecond().push_back(Last); 2865 continue; 2866 } 2867 ScalarToTreeEntry[V] = Last; 2868 } 2869 // Update the scheduler bundle to point to this TreeEntry. 2870 ScheduleData *BundleMember = *Bundle; 2871 assert((BundleMember || isa<PHINode>(S.MainOp) || 2872 isVectorLikeInstWithConstOps(S.MainOp) || 2873 doesNotNeedToSchedule(VL)) && 2874 "Bundle and VL out of sync"); 2875 if (BundleMember) { 2876 for (Value *V : VL) { 2877 if (doesNotNeedToBeScheduled(V)) 2878 continue; 2879 if (!BundleMember) 2880 continue; 2881 BundleMember->TE = Last; 2882 BundleMember = BundleMember->NextInBundle; 2883 } 2884 } 2885 assert(!BundleMember && "Bundle and VL out of sync"); 2886 } else { 2887 MustGather.insert(VL.begin(), VL.end()); 2888 // Build a map for gathered scalars to the nodes where they are used. 2889 for (Value *V : VL) 2890 if (!isConstant(V)) 2891 ValueToGatherNodes.try_emplace(V).first->getSecond().insert(Last); 2892 } 2893 2894 if (UserTreeIdx.UserTE) 2895 Last->UserTreeIndices.push_back(UserTreeIdx); 2896 2897 return Last; 2898 } 2899 2900 /// -- Vectorization State -- 2901 /// Holds all of the tree entries. 2902 TreeEntry::VecTreeTy VectorizableTree; 2903 2904 #ifndef NDEBUG 2905 /// Debug printer. 
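/// Dumps every entry in VectorizableTree, in the order the entries were created.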
2906 LLVM_DUMP_METHOD void dumpVectorizableTree() const { 2907 for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) { 2908 VectorizableTree[Id]->dump(); 2909 dbgs() << "\n"; 2910 } 2911 } 2912 #endif 2913 2914 TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); } 2915 2916 const TreeEntry *getTreeEntry(Value *V) const { 2917 return ScalarToTreeEntry.lookup(V); 2918 } 2919 2920 /// Checks if the specified list of the instructions/values can be vectorized 2921 /// and fills required data before actual scheduling of the instructions. 2922 TreeEntry::EntryState getScalarsVectorizationState( 2923 InstructionsState &S, ArrayRef<Value *> VL, bool IsScatterVectorizeUserTE, 2924 OrdersType &CurrentOrder, SmallVectorImpl<Value *> &PointerOps) const; 2925 2926 /// Maps a specific scalar to its tree entry. 2927 SmallDenseMap<Value *, TreeEntry *> ScalarToTreeEntry; 2928 2929 /// List of scalars, used in several vectorize nodes, and the list of the 2930 /// nodes. 2931 SmallDenseMap<Value *, SmallVector<TreeEntry *>> MultiNodeScalars; 2932 2933 /// Maps a value to the proposed vectorizable size. 2934 SmallDenseMap<Value *, unsigned> InstrElementSize; 2935 2936 /// A list of scalars that we found that we need to keep as scalars. 2937 ValueSet MustGather; 2938 2939 /// A map between the vectorized entries and the last instructions in the 2940 /// bundles. The bundles are built in use order, not in the def order of the 2941 /// instructions. So, we cannot rely directly on the last instruction in the 2942 /// bundle being the last instruction in the program order during 2943 /// vectorization process since the basic blocks are affected, need to 2944 /// pre-gather them before. 2945 DenseMap<const TreeEntry *, Instruction *> EntryToLastInstruction; 2946 2947 /// List of gather nodes, depending on other gather/vector nodes, which should 2948 /// be emitted after the vector instruction emission process to correctly 2949 /// handle order of the vector instructions and shuffles. 2950 SetVector<const TreeEntry *> PostponedGathers; 2951 2952 using ValueToGatherNodesMap = 2953 DenseMap<Value *, SmallPtrSet<const TreeEntry *, 4>>; 2954 ValueToGatherNodesMap ValueToGatherNodes; 2955 2956 /// This POD struct describes one external user in the vectorized tree. 2957 struct ExternalUser { 2958 ExternalUser(Value *S, llvm::User *U, int L) 2959 : Scalar(S), User(U), Lane(L) {} 2960 2961 // Which scalar in our function. 2962 Value *Scalar; 2963 2964 // Which user that uses the scalar. 2965 llvm::User *User; 2966 2967 // Which lane does the scalar belong to. 2968 int Lane; 2969 }; 2970 using UserList = SmallVector<ExternalUser, 16>; 2971 2972 /// Checks if two instructions may access the same memory. 2973 /// 2974 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it 2975 /// is invariant in the calling loop. 2976 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1, 2977 Instruction *Inst2) { 2978 if (!Loc1.Ptr || !isSimple(Inst1) || !isSimple(Inst2)) 2979 return true; 2980 // First check if the result is already in the cache. 2981 AliasCacheKey Key = std::make_pair(Inst1, Inst2); 2982 auto It = AliasCache.find(Key); 2983 if (It != AliasCache.end()) 2984 return It->second; 2985 bool Aliased = isModOrRefSet(BatchAA.getModRefInfo(Inst2, Loc1)); 2986 // Store the result in the cache. 
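// Record the result under both key orders so the reverse query (Inst2, Inst1) hits the cache as well.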
2987 AliasCache.try_emplace(Key, Aliased); 2988 AliasCache.try_emplace(std::make_pair(Inst2, Inst1), Aliased); 2989 return Aliased; 2990 } 2991 2992 using AliasCacheKey = std::pair<Instruction *, Instruction *>; 2993 2994 /// Cache for alias results. 2995 /// TODO: consider moving this to the AliasAnalysis itself. 2996 DenseMap<AliasCacheKey, bool> AliasCache; 2997 2998 // Cache for pointerMayBeCaptured calls inside AA. This is preserved 2999 // globally through SLP because we don't perform any action which 3000 // invalidates capture results. 3001 BatchAAResults BatchAA; 3002 3003 /// Temporary store for deleted instructions. Instructions will be deleted 3004 /// eventually when the BoUpSLP is destructed. The deferral is required to 3005 /// ensure that there are no incorrect collisions in the AliasCache, which 3006 /// can happen if a new instruction is allocated at the same address as a 3007 /// previously deleted instruction. 3008 DenseSet<Instruction *> DeletedInstructions; 3009 3010 /// Set of instructions already analyzed for reductions. 3011 SmallPtrSet<Instruction *, 16> AnalyzedReductionsRoots; 3012 3013 /// Set of hashes for the list of reduction values already being analyzed. 3014 DenseSet<size_t> AnalyzedReductionVals; 3015 3016 /// A list of values that need to be extracted out of the tree. 3017 /// This list holds pairs of (Internal Scalar : External User). External User 3018 /// can be nullptr; it means that this Internal Scalar will be used later, 3019 /// after vectorization. 3020 UserList ExternalUses; 3021 3022 /// Values used only by @llvm.assume calls. 3023 SmallPtrSet<const Value *, 32> EphValues; 3024 3025 /// Holds all of the instructions that we gathered, shuffle instructions and 3026 /// extractelements. 3027 SetVector<Instruction *> GatherShuffleExtractSeq; 3028 3029 /// A list of blocks that we are going to CSE. 3030 DenseSet<BasicBlock *> CSEBlocks; 3031 3032 /// Contains all scheduling-relevant data for an instruction. 3033 /// A ScheduleData either represents a single instruction or a member of an 3034 /// instruction bundle (= a group of instructions which is combined into a 3035 /// vector instruction). 3036 struct ScheduleData { 3037 // The initial value for the dependency counters. It means that the 3038 // dependencies are not calculated yet.
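// Until calculateDependencies() runs, both Dependencies and UnscheduledDeps keep this InvalidDeps value.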
3039 enum { InvalidDeps = -1 }; 3040 3041 ScheduleData() = default; 3042 3043 void init(int BlockSchedulingRegionID, Value *OpVal) { 3044 FirstInBundle = this; 3045 NextInBundle = nullptr; 3046 NextLoadStore = nullptr; 3047 IsScheduled = false; 3048 SchedulingRegionID = BlockSchedulingRegionID; 3049 clearDependencies(); 3050 OpValue = OpVal; 3051 TE = nullptr; 3052 } 3053 3054 /// Verify basic self-consistency properties. 3055 void verify() { 3056 if (hasValidDependencies()) { 3057 assert(UnscheduledDeps <= Dependencies && "invariant"); 3058 } else { 3059 assert(UnscheduledDeps == Dependencies && "invariant"); 3060 } 3061 3062 if (IsScheduled) { 3063 assert(isSchedulingEntity() && 3064 "unexpected scheduled state"); 3065 for (const ScheduleData *BundleMember = this; BundleMember; 3066 BundleMember = BundleMember->NextInBundle) { 3067 assert(BundleMember->hasValidDependencies() && 3068 BundleMember->UnscheduledDeps == 0 && 3069 "unexpected scheduled state"); 3070 assert((BundleMember == this || !BundleMember->IsScheduled) && 3071 "only bundle is marked scheduled"); 3072 } 3073 } 3074 3075 assert(Inst->getParent() == FirstInBundle->Inst->getParent() && 3076 "all bundle members must be in same basic block"); 3077 } 3078 3079 /// Returns true if the dependency information has been calculated. 3080 /// Note that dependency validity can vary between instructions within 3081 /// a single bundle. 3082 bool hasValidDependencies() const { return Dependencies != InvalidDeps; } 3083 3084 /// Returns true for single instructions and for bundle representatives 3085 /// (= the head of a bundle). 3086 bool isSchedulingEntity() const { return FirstInBundle == this; } 3087 3088 /// Returns true if it represents an instruction bundle and not only a 3089 /// single instruction. 3090 bool isPartOfBundle() const { 3091 return NextInBundle != nullptr || FirstInBundle != this || TE; 3092 } 3093 3094 /// Returns true if it is ready for scheduling, i.e. it has no more 3095 /// unscheduled dependent instructions/bundles. 3096 bool isReady() const { 3097 assert(isSchedulingEntity() && 3098 "can't consider non-scheduling entity for ready list"); 3099 return unscheduledDepsInBundle() == 0 && !IsScheduled; 3100 } 3101 3102 /// Modifies the number of unscheduled dependencies for this instruction, 3103 /// and returns the number of remaining dependencies for the containing 3104 /// bundle. 3105 int incrementUnscheduledDeps(int Incr) { 3106 assert(hasValidDependencies() && 3107 "increment of unscheduled deps would be meaningless"); 3108 UnscheduledDeps += Incr; 3109 return FirstInBundle->unscheduledDepsInBundle(); 3110 } 3111 3112 /// Sets the number of unscheduled dependencies to the number of 3113 /// dependencies. 3114 void resetUnscheduledDeps() { 3115 UnscheduledDeps = Dependencies; 3116 } 3117 3118 /// Clears all dependency information.
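/// After this call hasValidDependencies() returns false until the dependencies are recalculated.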
3119 void clearDependencies() { 3120 Dependencies = InvalidDeps; 3121 resetUnscheduledDeps(); 3122 MemoryDependencies.clear(); 3123 ControlDependencies.clear(); 3124 } 3125 3126 int unscheduledDepsInBundle() const { 3127 assert(isSchedulingEntity() && "only meaningful on the bundle"); 3128 int Sum = 0; 3129 for (const ScheduleData *BundleMember = this; BundleMember; 3130 BundleMember = BundleMember->NextInBundle) { 3131 if (BundleMember->UnscheduledDeps == InvalidDeps) 3132 return InvalidDeps; 3133 Sum += BundleMember->UnscheduledDeps; 3134 } 3135 return Sum; 3136 } 3137 3138 void dump(raw_ostream &os) const { 3139 if (!isSchedulingEntity()) { 3140 os << "/ " << *Inst; 3141 } else if (NextInBundle) { 3142 os << '[' << *Inst; 3143 ScheduleData *SD = NextInBundle; 3144 while (SD) { 3145 os << ';' << *SD->Inst; 3146 SD = SD->NextInBundle; 3147 } 3148 os << ']'; 3149 } else { 3150 os << *Inst; 3151 } 3152 } 3153 3154 Instruction *Inst = nullptr; 3155 3156 /// Opcode of the current instruction in the schedule data. 3157 Value *OpValue = nullptr; 3158 3159 /// The TreeEntry that this instruction corresponds to. 3160 TreeEntry *TE = nullptr; 3161 3162 /// Points to the head in an instruction bundle (and always to this for 3163 /// single instructions). 3164 ScheduleData *FirstInBundle = nullptr; 3165 3166 /// Single linked list of all instructions in a bundle. Null if it is a 3167 /// single instruction. 3168 ScheduleData *NextInBundle = nullptr; 3169 3170 /// Single linked list of all memory instructions (e.g. load, store, call) 3171 /// in the block - until the end of the scheduling region. 3172 ScheduleData *NextLoadStore = nullptr; 3173 3174 /// The dependent memory instructions. 3175 /// This list is derived on demand in calculateDependencies(). 3176 SmallVector<ScheduleData *, 4> MemoryDependencies; 3177 3178 /// List of instructions which this instruction could be control dependent 3179 /// on. Allowing such nodes to be scheduled below this one could introduce 3180 /// a runtime fault which didn't exist in the original program. 3181 /// ex: this is a load or udiv following a readonly call which inf loops 3182 SmallVector<ScheduleData *, 4> ControlDependencies; 3183 3184 /// This ScheduleData is in the current scheduling region if this matches 3185 /// the current SchedulingRegionID of BlockScheduling. 3186 int SchedulingRegionID = 0; 3187 3188 /// Used for getting a "good" final ordering of instructions. 3189 int SchedulingPriority = 0; 3190 3191 /// The number of dependencies. Constitutes of the number of users of the 3192 /// instruction plus the number of dependent memory instructions (if any). 3193 /// This value is calculated on demand. 3194 /// If InvalidDeps, the number of dependencies is not calculated yet. 3195 int Dependencies = InvalidDeps; 3196 3197 /// The number of dependencies minus the number of dependencies of scheduled 3198 /// instructions. As soon as this is zero, the instruction/bundle gets ready 3199 /// for scheduling. 3200 /// Note that this is negative as long as Dependencies is not calculated. 3201 int UnscheduledDeps = InvalidDeps; 3202 3203 /// True if this instruction is scheduled (or considered as scheduled in the 3204 /// dry-run). 
3205 bool IsScheduled = false; 3206 }; 3207 3208 #ifndef NDEBUG 3209 friend inline raw_ostream &operator<<(raw_ostream &os, 3210 const BoUpSLP::ScheduleData &SD) { 3211 SD.dump(os); 3212 return os; 3213 } 3214 #endif 3215 3216 friend struct GraphTraits<BoUpSLP *>; 3217 friend struct DOTGraphTraits<BoUpSLP *>; 3218 3219 /// Contains all scheduling data for a basic block. 3220 /// It does not schedule instructions that are not memory read/write 3221 /// instructions and whose operands are either constants, arguments, 3222 /// phis, or instructions from other blocks, or whose users are phis or are in 3223 /// other blocks. The resulting vector instructions can be placed at the 3224 /// beginning of the basic block without scheduling (if the operands do not need 3225 /// to be scheduled) or at the end of the block (if the users are outside of the 3226 /// block). This saves some compile time and memory used by the 3227 /// compiler. 3228 /// ScheduleData is assigned to each instruction between the boundaries of 3229 /// the tree entry, even to those which are not part of the graph. It is 3230 /// required to correctly follow the dependencies between the instructions and 3231 /// to schedule them correctly. ScheduleData is not allocated for 3232 /// instructions which do not require scheduling, like phis, nodes with 3233 /// extractelements/insertelements only, or nodes whose instructions have 3234 /// uses/operands outside of the block. 3235 struct BlockScheduling { 3236 BlockScheduling(BasicBlock *BB) 3237 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {} 3238 3239 void clear() { 3240 ReadyInsts.clear(); 3241 ScheduleStart = nullptr; 3242 ScheduleEnd = nullptr; 3243 FirstLoadStoreInRegion = nullptr; 3244 LastLoadStoreInRegion = nullptr; 3245 RegionHasStackSave = false; 3246 3247 // Reduce the maximum schedule region size by the size of the 3248 // previous scheduling run. 3249 ScheduleRegionSizeLimit -= ScheduleRegionSize; 3250 if (ScheduleRegionSizeLimit < MinScheduleRegionSize) 3251 ScheduleRegionSizeLimit = MinScheduleRegionSize; 3252 ScheduleRegionSize = 0; 3253 3254 // Make a new scheduling region, i.e. all existing ScheduleData is not 3255 // in the new region yet. 3256 ++SchedulingRegionID; 3257 } 3258 3259 ScheduleData *getScheduleData(Instruction *I) { 3260 if (BB != I->getParent()) 3261 // Avoid lookup if can't possibly be in map. 3262 return nullptr; 3263 ScheduleData *SD = ScheduleDataMap.lookup(I); 3264 if (SD && isInSchedulingRegion(SD)) 3265 return SD; 3266 return nullptr; 3267 } 3268 3269 ScheduleData *getScheduleData(Value *V) { 3270 if (auto *I = dyn_cast<Instruction>(V)) 3271 return getScheduleData(I); 3272 return nullptr; 3273 } 3274 3275 ScheduleData *getScheduleData(Value *V, Value *Key) { 3276 if (V == Key) 3277 return getScheduleData(V); 3278 auto I = ExtraScheduleDataMap.find(V); 3279 if (I != ExtraScheduleDataMap.end()) { 3280 ScheduleData *SD = I->second.lookup(Key); 3281 if (SD && isInSchedulingRegion(SD)) 3282 return SD; 3283 } 3284 return nullptr; 3285 } 3286 3287 bool isInSchedulingRegion(ScheduleData *SD) const { 3288 return SD->SchedulingRegionID == SchedulingRegionID; 3289 } 3290 3291 /// Marks an instruction as scheduled and puts all dependent ready 3292 /// instructions into the ready-list.
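/// \p ReadyList can be any container that provides insert(ScheduleData *); the dry run uses the ReadyInsts SetVector declared below.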
3293 template <typename ReadyListType> 3294 void schedule(ScheduleData *SD, ReadyListType &ReadyList) { 3295 SD->IsScheduled = true; 3296 LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n"); 3297 3298 for (ScheduleData *BundleMember = SD; BundleMember; 3299 BundleMember = BundleMember->NextInBundle) { 3300 if (BundleMember->Inst != BundleMember->OpValue) 3301 continue; 3302 3303 // Handle the def-use chain dependencies. 3304 3305 // Decrement the unscheduled counter and insert to ready list if ready. 3306 auto &&DecrUnsched = [this, &ReadyList](Instruction *I) { 3307 doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) { 3308 if (OpDef && OpDef->hasValidDependencies() && 3309 OpDef->incrementUnscheduledDeps(-1) == 0) { 3310 // There are no more unscheduled dependencies after 3311 // decrementing, so we can put the dependent instruction 3312 // into the ready list. 3313 ScheduleData *DepBundle = OpDef->FirstInBundle; 3314 assert(!DepBundle->IsScheduled && 3315 "already scheduled bundle gets ready"); 3316 ReadyList.insert(DepBundle); 3317 LLVM_DEBUG(dbgs() 3318 << "SLP: gets ready (def): " << *DepBundle << "\n"); 3319 } 3320 }); 3321 }; 3322 3323 // If BundleMember is a vector bundle, its operands may have been 3324 // reordered during buildTree(). We therefore need to get its operands 3325 // through the TreeEntry. 3326 if (TreeEntry *TE = BundleMember->TE) { 3327 // Need to search for the lane since the tree entry can be reordered. 3328 int Lane = std::distance(TE->Scalars.begin(), 3329 find(TE->Scalars, BundleMember->Inst)); 3330 assert(Lane >= 0 && "Lane not set"); 3331 3332 // Since vectorization tree is being built recursively this assertion 3333 // ensures that the tree entry has all operands set before reaching 3334 // this code. Couple of exceptions known at the moment are extracts 3335 // where their second (immediate) operand is not added. Since 3336 // immediates do not affect scheduler behavior this is considered 3337 // okay. 3338 auto *In = BundleMember->Inst; 3339 assert(In && 3340 (isa<ExtractValueInst, ExtractElementInst>(In) || 3341 In->getNumOperands() == TE->getNumOperands()) && 3342 "Missed TreeEntry operands?"); 3343 (void)In; // fake use to avoid build failure when assertions disabled 3344 3345 for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands(); 3346 OpIdx != NumOperands; ++OpIdx) 3347 if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane])) 3348 DecrUnsched(I); 3349 } else { 3350 // If BundleMember is a stand-alone instruction, no operand reordering 3351 // has taken place, so we directly access its operands. 3352 for (Use &U : BundleMember->Inst->operands()) 3353 if (auto *I = dyn_cast<Instruction>(U.get())) 3354 DecrUnsched(I); 3355 } 3356 // Handle the memory dependencies. 3357 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) { 3358 if (MemoryDepSD->hasValidDependencies() && 3359 MemoryDepSD->incrementUnscheduledDeps(-1) == 0) { 3360 // There are no more unscheduled dependencies after decrementing, 3361 // so we can put the dependent instruction into the ready list. 3362 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle; 3363 assert(!DepBundle->IsScheduled && 3364 "already scheduled bundle gets ready"); 3365 ReadyList.insert(DepBundle); 3366 LLVM_DEBUG(dbgs() 3367 << "SLP: gets ready (mem): " << *DepBundle << "\n"); 3368 } 3369 } 3370 // Handle the control dependencies. 
3371 for (ScheduleData *DepSD : BundleMember->ControlDependencies) { 3372 if (DepSD->incrementUnscheduledDeps(-1) == 0) { 3373 // There are no more unscheduled dependencies after decrementing, 3374 // so we can put the dependent instruction into the ready list. 3375 ScheduleData *DepBundle = DepSD->FirstInBundle; 3376 assert(!DepBundle->IsScheduled && 3377 "already scheduled bundle gets ready"); 3378 ReadyList.insert(DepBundle); 3379 LLVM_DEBUG(dbgs() 3380 << "SLP: gets ready (ctl): " << *DepBundle << "\n"); 3381 } 3382 } 3383 } 3384 } 3385 3386 /// Verify basic self consistency properties of the data structure. 3387 void verify() { 3388 if (!ScheduleStart) 3389 return; 3390 3391 assert(ScheduleStart->getParent() == ScheduleEnd->getParent() && 3392 ScheduleStart->comesBefore(ScheduleEnd) && 3393 "Not a valid scheduling region?"); 3394 3395 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 3396 auto *SD = getScheduleData(I); 3397 if (!SD) 3398 continue; 3399 assert(isInSchedulingRegion(SD) && 3400 "primary schedule data not in window?"); 3401 assert(isInSchedulingRegion(SD->FirstInBundle) && 3402 "entire bundle in window!"); 3403 (void)SD; 3404 doForAllOpcodes(I, [](ScheduleData *SD) { SD->verify(); }); 3405 } 3406 3407 for (auto *SD : ReadyInsts) { 3408 assert(SD->isSchedulingEntity() && SD->isReady() && 3409 "item in ready list not ready?"); 3410 (void)SD; 3411 } 3412 } 3413 3414 void doForAllOpcodes(Value *V, 3415 function_ref<void(ScheduleData *SD)> Action) { 3416 if (ScheduleData *SD = getScheduleData(V)) 3417 Action(SD); 3418 auto I = ExtraScheduleDataMap.find(V); 3419 if (I != ExtraScheduleDataMap.end()) 3420 for (auto &P : I->second) 3421 if (isInSchedulingRegion(P.second)) 3422 Action(P.second); 3423 } 3424 3425 /// Put all instructions into the ReadyList which are ready for scheduling. 3426 template <typename ReadyListType> 3427 void initialFillReadyList(ReadyListType &ReadyList) { 3428 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 3429 doForAllOpcodes(I, [&](ScheduleData *SD) { 3430 if (SD->isSchedulingEntity() && SD->hasValidDependencies() && 3431 SD->isReady()) { 3432 ReadyList.insert(SD); 3433 LLVM_DEBUG(dbgs() 3434 << "SLP: initially in ready list: " << *SD << "\n"); 3435 } 3436 }); 3437 } 3438 } 3439 3440 /// Build a bundle from the ScheduleData nodes corresponding to the 3441 /// scalar instruction for each lane. 3442 ScheduleData *buildBundle(ArrayRef<Value *> VL); 3443 3444 /// Checks if a bundle of instructions can be scheduled, i.e. has no 3445 /// cyclic dependencies. This is only a dry-run, no instructions are 3446 /// actually moved at this stage. 3447 /// \returns the scheduling bundle. The returned Optional value is not 3448 /// std::nullopt if \p VL is allowed to be scheduled. 3449 std::optional<ScheduleData *> 3450 tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 3451 const InstructionsState &S); 3452 3453 /// Un-bundles a group of instructions. 3454 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue); 3455 3456 /// Allocates schedule data chunk. 3457 ScheduleData *allocateScheduleDataChunks(); 3458 3459 /// Extends the scheduling region so that V is inside the region. 3460 /// \returns true if the region size is within the limit. 3461 bool extendSchedulingRegion(Value *V, const InstructionsState &S); 3462 3463 /// Initialize the ScheduleData structures for new instructions in the 3464 /// scheduling region. 
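/// The new instructions form the half-open range [FromI, ToI); presumably the memory accesses among them are linked into the load/store chain bracketed by \p PrevLoadStore and \p NextLoadStore.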
3465 void initScheduleData(Instruction *FromI, Instruction *ToI, 3466 ScheduleData *PrevLoadStore, 3467 ScheduleData *NextLoadStore); 3468 3469 /// Updates the dependency information of a bundle and of all instructions/ 3470 /// bundles which depend on the original bundle. 3471 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList, 3472 BoUpSLP *SLP); 3473 3474 /// Sets all instruction in the scheduling region to un-scheduled. 3475 void resetSchedule(); 3476 3477 BasicBlock *BB; 3478 3479 /// Simple memory allocation for ScheduleData. 3480 SmallVector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks; 3481 3482 /// The size of a ScheduleData array in ScheduleDataChunks. 3483 int ChunkSize; 3484 3485 /// The allocator position in the current chunk, which is the last entry 3486 /// of ScheduleDataChunks. 3487 int ChunkPos; 3488 3489 /// Attaches ScheduleData to Instruction. 3490 /// Note that the mapping survives during all vectorization iterations, i.e. 3491 /// ScheduleData structures are recycled. 3492 DenseMap<Instruction *, ScheduleData *> ScheduleDataMap; 3493 3494 /// Attaches ScheduleData to Instruction with the leading key. 3495 DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>> 3496 ExtraScheduleDataMap; 3497 3498 /// The ready-list for scheduling (only used for the dry-run). 3499 SetVector<ScheduleData *> ReadyInsts; 3500 3501 /// The first instruction of the scheduling region. 3502 Instruction *ScheduleStart = nullptr; 3503 3504 /// The first instruction _after_ the scheduling region. 3505 Instruction *ScheduleEnd = nullptr; 3506 3507 /// The first memory accessing instruction in the scheduling region 3508 /// (can be null). 3509 ScheduleData *FirstLoadStoreInRegion = nullptr; 3510 3511 /// The last memory accessing instruction in the scheduling region 3512 /// (can be null). 3513 ScheduleData *LastLoadStoreInRegion = nullptr; 3514 3515 /// Is there an llvm.stacksave or llvm.stackrestore in the scheduling 3516 /// region? Used to optimize the dependence calculation for the 3517 /// common case where there isn't. 3518 bool RegionHasStackSave = false; 3519 3520 /// The current size of the scheduling region. 3521 int ScheduleRegionSize = 0; 3522 3523 /// The maximum size allowed for the scheduling region. 3524 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget; 3525 3526 /// The ID of the scheduling region. For a new vectorization iteration this 3527 /// is incremented which "removes" all ScheduleData from the region. 3528 /// Make sure that the initial SchedulingRegionID is greater than the 3529 /// initial SchedulingRegionID in ScheduleData (which is 0). 3530 int SchedulingRegionID = 1; 3531 }; 3532 3533 /// Attaches the BlockScheduling structures to basic blocks. 3534 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules; 3535 3536 /// Performs the "real" scheduling. Done before vectorization is actually 3537 /// performed in a basic block. 3538 void scheduleBlock(BlockScheduling *BS); 3539 3540 /// List of users to ignore during scheduling and that don't need extracting. 3541 const SmallDenseSet<Value *> *UserIgnoreList = nullptr; 3542 3543 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of 3544 /// sorted SmallVectors of unsigned. 
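/// The empty and tombstone keys are single-element vectors holding the ~1U and ~2U sentinels, which never appear in a real ordering.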
3545 struct OrdersTypeDenseMapInfo { 3546 static OrdersType getEmptyKey() { 3547 OrdersType V; 3548 V.push_back(~1U); 3549 return V; 3550 } 3551 3552 static OrdersType getTombstoneKey() { 3553 OrdersType V; 3554 V.push_back(~2U); 3555 return V; 3556 } 3557 3558 static unsigned getHashValue(const OrdersType &V) { 3559 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end())); 3560 } 3561 3562 static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) { 3563 return LHS == RHS; 3564 } 3565 }; 3566 3567 // Analysis and block reference. 3568 Function *F; 3569 ScalarEvolution *SE; 3570 TargetTransformInfo *TTI; 3571 TargetLibraryInfo *TLI; 3572 LoopInfo *LI; 3573 DominatorTree *DT; 3574 AssumptionCache *AC; 3575 DemandedBits *DB; 3576 const DataLayout *DL; 3577 OptimizationRemarkEmitter *ORE; 3578 3579 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt. 3580 unsigned MinVecRegSize; // Set by cl::opt (default: 128). 3581 3582 /// Instruction builder to construct the vectorized tree. 3583 IRBuilder<> Builder; 3584 3585 /// A map of scalar integer values to the smallest bit width with which they 3586 /// can legally be represented. The values map to (width, signed) pairs, 3587 /// where "width" indicates the minimum bit width and "signed" is True if the 3588 /// value must be signed-extended, rather than zero-extended, back to its 3589 /// original width. 3590 DenseMap<const TreeEntry *, std::pair<uint64_t, bool>> MinBWs; 3591 }; 3592 3593 } // end namespace slpvectorizer 3594 3595 template <> struct GraphTraits<BoUpSLP *> { 3596 using TreeEntry = BoUpSLP::TreeEntry; 3597 3598 /// NodeRef has to be a pointer per the GraphWriter. 3599 using NodeRef = TreeEntry *; 3600 3601 using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy; 3602 3603 /// Add the VectorizableTree to the index iterator to be able to return 3604 /// TreeEntry pointers. 3605 struct ChildIteratorType 3606 : public iterator_adaptor_base< 3607 ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> { 3608 ContainerTy &VectorizableTree; 3609 3610 ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W, 3611 ContainerTy &VT) 3612 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {} 3613 3614 NodeRef operator*() { return I->UserTE; } 3615 }; 3616 3617 static NodeRef getEntryNode(BoUpSLP &R) { 3618 return R.VectorizableTree[0].get(); 3619 } 3620 3621 static ChildIteratorType child_begin(NodeRef N) { 3622 return {N->UserTreeIndices.begin(), N->Container}; 3623 } 3624 3625 static ChildIteratorType child_end(NodeRef N) { 3626 return {N->UserTreeIndices.end(), N->Container}; 3627 } 3628 3629 /// For the node iterator we just need to turn the TreeEntry iterator into a 3630 /// TreeEntry* iterator so that it dereferences to NodeRef. 
3631 class nodes_iterator { 3632 using ItTy = ContainerTy::iterator; 3633 ItTy It; 3634 3635 public: 3636 nodes_iterator(const ItTy &It2) : It(It2) {} 3637 NodeRef operator*() { return It->get(); } 3638 nodes_iterator operator++() { 3639 ++It; 3640 return *this; 3641 } 3642 bool operator!=(const nodes_iterator &N2) const { return N2.It != It; } 3643 }; 3644 3645 static nodes_iterator nodes_begin(BoUpSLP *R) { 3646 return nodes_iterator(R->VectorizableTree.begin()); 3647 } 3648 3649 static nodes_iterator nodes_end(BoUpSLP *R) { 3650 return nodes_iterator(R->VectorizableTree.end()); 3651 } 3652 3653 static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); } 3654 }; 3655 3656 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits { 3657 using TreeEntry = BoUpSLP::TreeEntry; 3658 3659 DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {} 3660 3661 std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) { 3662 std::string Str; 3663 raw_string_ostream OS(Str); 3664 OS << Entry->Idx << ".\n"; 3665 if (isSplat(Entry->Scalars)) 3666 OS << "<splat> "; 3667 for (auto *V : Entry->Scalars) { 3668 OS << *V; 3669 if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) { 3670 return EU.Scalar == V; 3671 })) 3672 OS << " <extract>"; 3673 OS << "\n"; 3674 } 3675 return Str; 3676 } 3677 3678 static std::string getNodeAttributes(const TreeEntry *Entry, 3679 const BoUpSLP *) { 3680 if (Entry->State == TreeEntry::NeedToGather) 3681 return "color=red"; 3682 if (Entry->State == TreeEntry::ScatterVectorize || 3683 Entry->State == TreeEntry::PossibleStridedVectorize) 3684 return "color=blue"; 3685 return ""; 3686 } 3687 }; 3688 3689 } // end namespace llvm 3690 3691 BoUpSLP::~BoUpSLP() { 3692 SmallVector<WeakTrackingVH> DeadInsts; 3693 for (auto *I : DeletedInstructions) { 3694 for (Use &U : I->operands()) { 3695 auto *Op = dyn_cast<Instruction>(U.get()); 3696 if (Op && !DeletedInstructions.count(Op) && Op->hasOneUser() && 3697 wouldInstructionBeTriviallyDead(Op, TLI)) 3698 DeadInsts.emplace_back(Op); 3699 } 3700 I->dropAllReferences(); 3701 } 3702 for (auto *I : DeletedInstructions) { 3703 assert(I->use_empty() && 3704 "trying to erase instruction with users."); 3705 I->eraseFromParent(); 3706 } 3707 3708 // Cleanup any dead scalar code feeding the vectorized instructions 3709 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI); 3710 3711 #ifdef EXPENSIVE_CHECKS 3712 // If we could guarantee that this call is not extremely slow, we could 3713 // remove the ifdef limitation (see PR47712). 3714 assert(!verifyFunction(*F, &dbgs())); 3715 #endif 3716 } 3717 3718 /// Reorders the given \p Reuses mask according to the given \p Mask. \p Reuses 3719 /// contains original mask for the scalars reused in the node. Procedure 3720 /// transform this mask in accordance with the given \p Mask. 3721 static void reorderReuses(SmallVectorImpl<int> &Reuses, ArrayRef<int> Mask) { 3722 assert(!Mask.empty() && Reuses.size() == Mask.size() && 3723 "Expected non-empty mask."); 3724 SmallVector<int> Prev(Reuses.begin(), Reuses.end()); 3725 Prev.swap(Reuses); 3726 for (unsigned I = 0, E = Prev.size(); I < E; ++I) 3727 if (Mask[I] != PoisonMaskElem) 3728 Reuses[Mask[I]] = Prev[I]; 3729 } 3730 3731 /// Reorders the given \p Order according to the given \p Mask. \p Order - is 3732 /// the original order of the scalars. Procedure transforms the provided order 3733 /// in accordance with the given \p Mask. 
If the resulting \p Order is just an 3734 /// identity order, \p Order is cleared. 3735 static void reorderOrder(SmallVectorImpl<unsigned> &Order, ArrayRef<int> Mask) { 3736 assert(!Mask.empty() && "Expected non-empty mask."); 3737 SmallVector<int> MaskOrder; 3738 if (Order.empty()) { 3739 MaskOrder.resize(Mask.size()); 3740 std::iota(MaskOrder.begin(), MaskOrder.end(), 0); 3741 } else { 3742 inversePermutation(Order, MaskOrder); 3743 } 3744 reorderReuses(MaskOrder, Mask); 3745 if (ShuffleVectorInst::isIdentityMask(MaskOrder, MaskOrder.size())) { 3746 Order.clear(); 3747 return; 3748 } 3749 Order.assign(Mask.size(), Mask.size()); 3750 for (unsigned I = 0, E = Mask.size(); I < E; ++I) 3751 if (MaskOrder[I] != PoisonMaskElem) 3752 Order[MaskOrder[I]] = I; 3753 fixupOrderingIndices(Order); 3754 } 3755 3756 std::optional<BoUpSLP::OrdersType> 3757 BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) { 3758 assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only."); 3759 unsigned NumScalars = TE.Scalars.size(); 3760 OrdersType CurrentOrder(NumScalars, NumScalars); 3761 SmallVector<int> Positions; 3762 SmallBitVector UsedPositions(NumScalars); 3763 const TreeEntry *STE = nullptr; 3764 // Try to find all gathered scalars that are gets vectorized in other 3765 // vectorize node. Here we can have only one single tree vector node to 3766 // correctly identify order of the gathered scalars. 3767 for (unsigned I = 0; I < NumScalars; ++I) { 3768 Value *V = TE.Scalars[I]; 3769 if (!isa<LoadInst, ExtractElementInst, ExtractValueInst>(V)) 3770 continue; 3771 if (const auto *LocalSTE = getTreeEntry(V)) { 3772 if (!STE) 3773 STE = LocalSTE; 3774 else if (STE != LocalSTE) 3775 // Take the order only from the single vector node. 3776 return std::nullopt; 3777 unsigned Lane = 3778 std::distance(STE->Scalars.begin(), find(STE->Scalars, V)); 3779 if (Lane >= NumScalars) 3780 return std::nullopt; 3781 if (CurrentOrder[Lane] != NumScalars) { 3782 if (Lane != I) 3783 continue; 3784 UsedPositions.reset(CurrentOrder[Lane]); 3785 } 3786 // The partial identity (where only some elements of the gather node are 3787 // in the identity order) is good. 3788 CurrentOrder[Lane] = I; 3789 UsedPositions.set(I); 3790 } 3791 } 3792 // Need to keep the order if we have a vector entry and at least 2 scalars or 3793 // the vectorized entry has just 2 scalars. 3794 if (STE && (UsedPositions.count() > 1 || STE->Scalars.size() == 2)) { 3795 auto &&IsIdentityOrder = [NumScalars](ArrayRef<unsigned> CurrentOrder) { 3796 for (unsigned I = 0; I < NumScalars; ++I) 3797 if (CurrentOrder[I] != I && CurrentOrder[I] != NumScalars) 3798 return false; 3799 return true; 3800 }; 3801 if (IsIdentityOrder(CurrentOrder)) 3802 return OrdersType(); 3803 auto *It = CurrentOrder.begin(); 3804 for (unsigned I = 0; I < NumScalars;) { 3805 if (UsedPositions.test(I)) { 3806 ++I; 3807 continue; 3808 } 3809 if (*It == NumScalars) { 3810 *It = I; 3811 ++I; 3812 } 3813 ++It; 3814 } 3815 return std::move(CurrentOrder); 3816 } 3817 return std::nullopt; 3818 } 3819 3820 namespace { 3821 /// Tracks the state we can represent the loads in the given sequence. 
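/// Gather: no profitable vector form was found. Vectorize: the loads are consecutive. ScatterVectorize: lower to a masked gather. PossibleStridedVectorize: the accesses look strided.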
3822 enum class LoadsState { 3823 Gather, 3824 Vectorize, 3825 ScatterVectorize, 3826 PossibleStridedVectorize 3827 }; 3828 } // anonymous namespace 3829 3830 static bool arePointersCompatible(Value *Ptr1, Value *Ptr2, 3831 const TargetLibraryInfo &TLI, 3832 bool CompareOpcodes = true) { 3833 if (getUnderlyingObject(Ptr1) != getUnderlyingObject(Ptr2)) 3834 return false; 3835 auto *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1); 3836 if (!GEP1) 3837 return false; 3838 auto *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2); 3839 if (!GEP2) 3840 return false; 3841 return GEP1->getNumOperands() == 2 && GEP2->getNumOperands() == 2 && 3842 ((isConstant(GEP1->getOperand(1)) && 3843 isConstant(GEP2->getOperand(1))) || 3844 !CompareOpcodes || 3845 getSameOpcode({GEP1->getOperand(1), GEP2->getOperand(1)}, TLI) 3846 .getOpcode()); 3847 } 3848 3849 /// Checks if the given array of loads can be represented as a vectorized, 3850 /// scatter or just simple gather. 3851 static LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0, 3852 const TargetTransformInfo &TTI, 3853 const DataLayout &DL, ScalarEvolution &SE, 3854 LoopInfo &LI, const TargetLibraryInfo &TLI, 3855 SmallVectorImpl<unsigned> &Order, 3856 SmallVectorImpl<Value *> &PointerOps) { 3857 // Check that a vectorized load would load the same memory as a scalar 3858 // load. For example, we don't want to vectorize loads that are smaller 3859 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 3860 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 3861 // from such a struct, we read/write packed bits disagreeing with the 3862 // unvectorized version. 3863 Type *ScalarTy = VL0->getType(); 3864 3865 if (DL.getTypeSizeInBits(ScalarTy) != DL.getTypeAllocSizeInBits(ScalarTy)) 3866 return LoadsState::Gather; 3867 3868 // Make sure all loads in the bundle are simple - we can't vectorize 3869 // atomic or volatile loads. 3870 PointerOps.clear(); 3871 PointerOps.resize(VL.size()); 3872 auto *POIter = PointerOps.begin(); 3873 for (Value *V : VL) { 3874 auto *L = cast<LoadInst>(V); 3875 if (!L->isSimple()) 3876 return LoadsState::Gather; 3877 *POIter = L->getPointerOperand(); 3878 ++POIter; 3879 } 3880 3881 Order.clear(); 3882 // Check the order of pointer operands or that all pointers are the same. 3883 bool IsSorted = sortPtrAccesses(PointerOps, ScalarTy, DL, SE, Order); 3884 if (IsSorted || all_of(PointerOps, [&](Value *P) { 3885 return arePointersCompatible(P, PointerOps.front(), TLI); 3886 })) { 3887 bool IsPossibleStrided = false; 3888 if (IsSorted) { 3889 Value *Ptr0; 3890 Value *PtrN; 3891 if (Order.empty()) { 3892 Ptr0 = PointerOps.front(); 3893 PtrN = PointerOps.back(); 3894 } else { 3895 Ptr0 = PointerOps[Order.front()]; 3896 PtrN = PointerOps[Order.back()]; 3897 } 3898 std::optional<int> Diff = 3899 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, DL, SE); 3900 // Check that the sorted loads are consecutive. 3901 if (static_cast<unsigned>(*Diff) == VL.size() - 1) 3902 return LoadsState::Vectorize; 3903 // Simple check if not a strided access - clear order. 3904 IsPossibleStrided = *Diff % (VL.size() - 1) == 0; 3905 } 3906 // TODO: need to improve analysis of the pointers, if not all of them are 3907 // GEPs or have > 2 operands, we end up with a gather node, which just 3908 // increases the cost. 
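// Heuristic: gathering the pointers is considered profitable only when there are more than two lanes and at most half of the pointers are loop-invariant.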
3909 Loop *L = LI.getLoopFor(cast<LoadInst>(VL0)->getParent()); 3910 bool ProfitableGatherPointers = 3911 static_cast<unsigned>(count_if(PointerOps, [L](Value *V) { 3912 return L && L->isLoopInvariant(V); 3913 })) <= VL.size() / 2 && VL.size() > 2; 3914 if (ProfitableGatherPointers || all_of(PointerOps, [IsSorted](Value *P) { 3915 auto *GEP = dyn_cast<GetElementPtrInst>(P); 3916 return (IsSorted && !GEP && doesNotNeedToBeScheduled(P)) || 3917 (GEP && GEP->getNumOperands() == 2); 3918 })) { 3919 Align CommonAlignment = cast<LoadInst>(VL0)->getAlign(); 3920 for (Value *V : VL) 3921 CommonAlignment = 3922 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign()); 3923 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 3924 if (TTI.isLegalMaskedGather(VecTy, CommonAlignment) && 3925 !TTI.forceScalarizeMaskedGather(VecTy, CommonAlignment)) 3926 return IsPossibleStrided ? LoadsState::PossibleStridedVectorize 3927 : LoadsState::ScatterVectorize; 3928 } 3929 } 3930 3931 return LoadsState::Gather; 3932 } 3933 3934 static bool clusterSortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy, 3935 const DataLayout &DL, ScalarEvolution &SE, 3936 SmallVectorImpl<unsigned> &SortedIndices) { 3937 assert(llvm::all_of( 3938 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) && 3939 "Expected list of pointer operands."); 3940 // Map from bases to a vector of (Ptr, Offset, OrigIdx), which we insert each 3941 // Ptr into, sort and return the sorted indices with values next to one 3942 // another. 3943 MapVector<Value *, SmallVector<std::tuple<Value *, int, unsigned>>> Bases; 3944 Bases[VL[0]].push_back(std::make_tuple(VL[0], 0U, 0U)); 3945 3946 unsigned Cnt = 1; 3947 for (Value *Ptr : VL.drop_front()) { 3948 bool Found = any_of(Bases, [&](auto &Base) { 3949 std::optional<int> Diff = 3950 getPointersDiff(ElemTy, Base.first, ElemTy, Ptr, DL, SE, 3951 /*StrictCheck=*/true); 3952 if (!Diff) 3953 return false; 3954 3955 Base.second.emplace_back(Ptr, *Diff, Cnt++); 3956 return true; 3957 }); 3958 3959 if (!Found) { 3960 // If we haven't found enough to usefully cluster, return early. 3961 if (Bases.size() > VL.size() / 2 - 1) 3962 return false; 3963 3964 // Not found already - add a new Base 3965 Bases[Ptr].emplace_back(Ptr, 0, Cnt++); 3966 } 3967 } 3968 3969 // For each of the bases sort the pointers by Offset and check if any of the 3970 // base become consecutively allocated. 3971 bool AnyConsecutive = false; 3972 for (auto &Base : Bases) { 3973 auto &Vec = Base.second; 3974 if (Vec.size() > 1) { 3975 llvm::stable_sort(Vec, [](const std::tuple<Value *, int, unsigned> &X, 3976 const std::tuple<Value *, int, unsigned> &Y) { 3977 return std::get<1>(X) < std::get<1>(Y); 3978 }); 3979 int InitialOffset = std::get<1>(Vec[0]); 3980 AnyConsecutive |= all_of(enumerate(Vec), [InitialOffset](const auto &P) { 3981 return std::get<1>(P.value()) == int(P.index()) + InitialOffset; 3982 }); 3983 } 3984 } 3985 3986 // Fill SortedIndices array only if it looks worth-while to sort the ptrs. 
3987 SortedIndices.clear(); 3988 if (!AnyConsecutive) 3989 return false; 3990 3991 for (auto &Base : Bases) { 3992 for (auto &T : Base.second) 3993 SortedIndices.push_back(std::get<2>(T)); 3994 } 3995 3996 assert(SortedIndices.size() == VL.size() && 3997 "Expected SortedIndices to be the size of VL"); 3998 return true; 3999 } 4000 4001 std::optional<BoUpSLP::OrdersType> 4002 BoUpSLP::findPartiallyOrderedLoads(const BoUpSLP::TreeEntry &TE) { 4003 assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only."); 4004 Type *ScalarTy = TE.Scalars[0]->getType(); 4005 4006 SmallVector<Value *> Ptrs; 4007 Ptrs.reserve(TE.Scalars.size()); 4008 for (Value *V : TE.Scalars) { 4009 auto *L = dyn_cast<LoadInst>(V); 4010 if (!L || !L->isSimple()) 4011 return std::nullopt; 4012 Ptrs.push_back(L->getPointerOperand()); 4013 } 4014 4015 BoUpSLP::OrdersType Order; 4016 if (clusterSortPtrAccesses(Ptrs, ScalarTy, *DL, *SE, Order)) 4017 return std::move(Order); 4018 return std::nullopt; 4019 } 4020 4021 /// Check if two insertelement instructions are from the same buildvector. 4022 static bool areTwoInsertFromSameBuildVector( 4023 InsertElementInst *VU, InsertElementInst *V, 4024 function_ref<Value *(InsertElementInst *)> GetBaseOperand) { 4025 // Instructions must be from the same basic blocks. 4026 if (VU->getParent() != V->getParent()) 4027 return false; 4028 // Checks if 2 insertelements are from the same buildvector. 4029 if (VU->getType() != V->getType()) 4030 return false; 4031 // Multiple used inserts are separate nodes. 4032 if (!VU->hasOneUse() && !V->hasOneUse()) 4033 return false; 4034 auto *IE1 = VU; 4035 auto *IE2 = V; 4036 std::optional<unsigned> Idx1 = getInsertIndex(IE1); 4037 std::optional<unsigned> Idx2 = getInsertIndex(IE2); 4038 if (Idx1 == std::nullopt || Idx2 == std::nullopt) 4039 return false; 4040 // Go through the vector operand of insertelement instructions trying to find 4041 // either VU as the original vector for IE2 or V as the original vector for 4042 // IE1. 4043 SmallBitVector ReusedIdx( 4044 cast<VectorType>(VU->getType())->getElementCount().getKnownMinValue()); 4045 bool IsReusedIdx = false; 4046 do { 4047 if (IE2 == VU && !IE1) 4048 return VU->hasOneUse(); 4049 if (IE1 == V && !IE2) 4050 return V->hasOneUse(); 4051 if (IE1 && IE1 != V) { 4052 unsigned Idx1 = getInsertIndex(IE1).value_or(*Idx2); 4053 IsReusedIdx |= ReusedIdx.test(Idx1); 4054 ReusedIdx.set(Idx1); 4055 if ((IE1 != VU && !IE1->hasOneUse()) || IsReusedIdx) 4056 IE1 = nullptr; 4057 else 4058 IE1 = dyn_cast_or_null<InsertElementInst>(GetBaseOperand(IE1)); 4059 } 4060 if (IE2 && IE2 != VU) { 4061 unsigned Idx2 = getInsertIndex(IE2).value_or(*Idx1); 4062 IsReusedIdx |= ReusedIdx.test(Idx2); 4063 ReusedIdx.set(Idx2); 4064 if ((IE2 != V && !IE2->hasOneUse()) || IsReusedIdx) 4065 IE2 = nullptr; 4066 else 4067 IE2 = dyn_cast_or_null<InsertElementInst>(GetBaseOperand(IE2)); 4068 } 4069 } while (!IsReusedIdx && (IE1 || IE2)); 4070 return false; 4071 } 4072 4073 std::optional<BoUpSLP::OrdersType> 4074 BoUpSLP::getReorderingData(const TreeEntry &TE, bool TopToBottom) { 4075 // No need to reorder if need to shuffle reuses, still need to shuffle the 4076 // node. 4077 if (!TE.ReuseShuffleIndices.empty()) { 4078 // Check if reuse shuffle indices can be improved by reordering. 4079 // For this, check that reuse mask is "clustered", i.e. each scalar values 4080 // is used once in each submask of size <number_of_scalars>. 4081 // Example: 4 scalar values. 
4082 // ReuseShuffleIndices mask: 0, 1, 2, 3, 3, 2, 0, 1 - clustered. 4083 // 0, 1, 2, 3, 3, 3, 1, 0 - not clustered, because 4084 // element 3 is used twice in the second submask. 4085 unsigned Sz = TE.Scalars.size(); 4086 if (!ShuffleVectorInst::isOneUseSingleSourceMask(TE.ReuseShuffleIndices, 4087 Sz)) 4088 return std::nullopt; 4089 unsigned VF = TE.getVectorFactor(); 4090 // Try build correct order for extractelement instructions. 4091 SmallVector<int> ReusedMask(TE.ReuseShuffleIndices.begin(), 4092 TE.ReuseShuffleIndices.end()); 4093 if (TE.getOpcode() == Instruction::ExtractElement && !TE.isAltShuffle() && 4094 all_of(TE.Scalars, [Sz](Value *V) { 4095 std::optional<unsigned> Idx = getExtractIndex(cast<Instruction>(V)); 4096 return Idx && *Idx < Sz; 4097 })) { 4098 SmallVector<int> ReorderMask(Sz, PoisonMaskElem); 4099 if (TE.ReorderIndices.empty()) 4100 std::iota(ReorderMask.begin(), ReorderMask.end(), 0); 4101 else 4102 inversePermutation(TE.ReorderIndices, ReorderMask); 4103 for (unsigned I = 0; I < VF; ++I) { 4104 int &Idx = ReusedMask[I]; 4105 if (Idx == PoisonMaskElem) 4106 continue; 4107 Value *V = TE.Scalars[ReorderMask[Idx]]; 4108 std::optional<unsigned> EI = getExtractIndex(cast<Instruction>(V)); 4109 Idx = std::distance(ReorderMask.begin(), find(ReorderMask, *EI)); 4110 } 4111 } 4112 // Build the order of the VF size, need to reorder reuses shuffles, they are 4113 // always of VF size. 4114 OrdersType ResOrder(VF); 4115 std::iota(ResOrder.begin(), ResOrder.end(), 0); 4116 auto *It = ResOrder.begin(); 4117 for (unsigned K = 0; K < VF; K += Sz) { 4118 OrdersType CurrentOrder(TE.ReorderIndices); 4119 SmallVector<int> SubMask{ArrayRef(ReusedMask).slice(K, Sz)}; 4120 if (SubMask.front() == PoisonMaskElem) 4121 std::iota(SubMask.begin(), SubMask.end(), 0); 4122 reorderOrder(CurrentOrder, SubMask); 4123 transform(CurrentOrder, It, [K](unsigned Pos) { return Pos + K; }); 4124 std::advance(It, Sz); 4125 } 4126 if (all_of(enumerate(ResOrder), 4127 [](const auto &Data) { return Data.index() == Data.value(); })) 4128 return std::nullopt; // No need to reorder. 
4129 return std::move(ResOrder); 4130 } 4131 if ((TE.State == TreeEntry::Vectorize || 4132 TE.State == TreeEntry::PossibleStridedVectorize) && 4133 (isa<LoadInst, ExtractElementInst, ExtractValueInst>(TE.getMainOp()) || 4134 (TopToBottom && isa<StoreInst, InsertElementInst>(TE.getMainOp()))) && 4135 !TE.isAltShuffle()) 4136 return TE.ReorderIndices; 4137 if (TE.State == TreeEntry::Vectorize && TE.getOpcode() == Instruction::PHI) { 4138 auto PHICompare = [&](unsigned I1, unsigned I2) { 4139 Value *V1 = TE.Scalars[I1]; 4140 Value *V2 = TE.Scalars[I2]; 4141 if (V1 == V2) 4142 return false; 4143 if (!V1->hasOneUse() || !V2->hasOneUse()) 4144 return false; 4145 auto *FirstUserOfPhi1 = cast<Instruction>(*V1->user_begin()); 4146 auto *FirstUserOfPhi2 = cast<Instruction>(*V2->user_begin()); 4147 if (auto *IE1 = dyn_cast<InsertElementInst>(FirstUserOfPhi1)) 4148 if (auto *IE2 = dyn_cast<InsertElementInst>(FirstUserOfPhi2)) { 4149 if (!areTwoInsertFromSameBuildVector( 4150 IE1, IE2, 4151 [](InsertElementInst *II) { return II->getOperand(0); })) 4152 return false; 4153 std::optional<unsigned> Idx1 = getInsertIndex(IE1); 4154 std::optional<unsigned> Idx2 = getInsertIndex(IE2); 4155 if (Idx1 == std::nullopt || Idx2 == std::nullopt) 4156 return false; 4157 return *Idx1 < *Idx2; 4158 } 4159 if (auto *EE1 = dyn_cast<ExtractElementInst>(FirstUserOfPhi1)) 4160 if (auto *EE2 = dyn_cast<ExtractElementInst>(FirstUserOfPhi2)) { 4161 if (EE1->getOperand(0) != EE2->getOperand(0)) 4162 return false; 4163 std::optional<unsigned> Idx1 = getExtractIndex(EE1); 4164 std::optional<unsigned> Idx2 = getExtractIndex(EE2); 4165 if (Idx1 == std::nullopt || Idx2 == std::nullopt) 4166 return false; 4167 return *Idx1 < *Idx2; 4168 } 4169 return false; 4170 }; 4171 auto IsIdentityOrder = [](const OrdersType &Order) { 4172 for (unsigned Idx : seq<unsigned>(0, Order.size())) 4173 if (Idx != Order[Idx]) 4174 return false; 4175 return true; 4176 }; 4177 if (!TE.ReorderIndices.empty()) 4178 return TE.ReorderIndices; 4179 DenseMap<unsigned, unsigned> PhiToId; 4180 SmallVector<unsigned> Phis(TE.Scalars.size()); 4181 std::iota(Phis.begin(), Phis.end(), 0); 4182 OrdersType ResOrder(TE.Scalars.size()); 4183 for (unsigned Id = 0, Sz = TE.Scalars.size(); Id < Sz; ++Id) 4184 PhiToId[Id] = Id; 4185 stable_sort(Phis, PHICompare); 4186 for (unsigned Id = 0, Sz = Phis.size(); Id < Sz; ++Id) 4187 ResOrder[Id] = PhiToId[Phis[Id]]; 4188 if (IsIdentityOrder(ResOrder)) 4189 return std::nullopt; // No need to reorder. 4190 return std::move(ResOrder); 4191 } 4192 if (TE.State == TreeEntry::NeedToGather) { 4193 // TODO: add analysis of other gather nodes with extractelement 4194 // instructions and other values/instructions, not only undefs. 4195 if (((TE.getOpcode() == Instruction::ExtractElement && 4196 !TE.isAltShuffle()) || 4197 (all_of(TE.Scalars, 4198 [](Value *V) { 4199 return isa<UndefValue, ExtractElementInst>(V); 4200 }) && 4201 any_of(TE.Scalars, 4202 [](Value *V) { return isa<ExtractElementInst>(V); }))) && 4203 all_of(TE.Scalars, 4204 [](Value *V) { 4205 auto *EE = dyn_cast<ExtractElementInst>(V); 4206 return !EE || isa<FixedVectorType>(EE->getVectorOperandType()); 4207 }) && 4208 allSameType(TE.Scalars)) { 4209 // Check that gather of extractelements can be represented as 4210 // just a shuffle of a single vector. 
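// For example, gathering {extractelement %v, 1; extractelement %v, 0} can be emitted as shufflevector %v, poison, <1, 0>.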
4211 OrdersType CurrentOrder; 4212 bool Reuse = canReuseExtract(TE.Scalars, TE.getMainOp(), CurrentOrder, 4213 /*ResizeAllowed=*/true); 4214 if (Reuse || !CurrentOrder.empty()) { 4215 if (!CurrentOrder.empty()) 4216 fixupOrderingIndices(CurrentOrder); 4217 return std::move(CurrentOrder); 4218 } 4219 } 4220 // If the gather node is <undef, v, .., poison> and 4221 // insertelement poison, v, 0 [+ permute] 4222 // is cheaper than 4223 // insertelement poison, v, n - try to reorder. 4224 // If rotating the whole graph, exclude the permute cost, the whole graph 4225 // might be transformed. 4226 int Sz = TE.Scalars.size(); 4227 if (isSplat(TE.Scalars) && !allConstant(TE.Scalars) && 4228 count_if(TE.Scalars, UndefValue::classof) == Sz - 1) { 4229 const auto *It = 4230 find_if(TE.Scalars, [](Value *V) { return !isConstant(V); }); 4231 if (It == TE.Scalars.begin()) 4232 return OrdersType(); 4233 auto *Ty = FixedVectorType::get(TE.Scalars.front()->getType(), Sz); 4234 if (It != TE.Scalars.end()) { 4235 OrdersType Order(Sz, Sz); 4236 unsigned Idx = std::distance(TE.Scalars.begin(), It); 4237 Order[Idx] = 0; 4238 fixupOrderingIndices(Order); 4239 SmallVector<int> Mask; 4240 inversePermutation(Order, Mask); 4241 InstructionCost PermuteCost = 4242 TopToBottom 4243 ? 0 4244 : TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty, Mask); 4245 InstructionCost InsertFirstCost = TTI->getVectorInstrCost( 4246 Instruction::InsertElement, Ty, TTI::TCK_RecipThroughput, 0, 4247 PoisonValue::get(Ty), *It); 4248 InstructionCost InsertIdxCost = TTI->getVectorInstrCost( 4249 Instruction::InsertElement, Ty, TTI::TCK_RecipThroughput, Idx, 4250 PoisonValue::get(Ty), *It); 4251 if (InsertFirstCost + PermuteCost < InsertIdxCost) 4252 return std::move(Order); 4253 } 4254 } 4255 if (std::optional<OrdersType> CurrentOrder = findReusedOrderedScalars(TE)) 4256 return CurrentOrder; 4257 if (TE.Scalars.size() >= 4) 4258 if (std::optional<OrdersType> Order = findPartiallyOrderedLoads(TE)) 4259 return Order; 4260 } 4261 return std::nullopt; 4262 } 4263 4264 /// Checks if the given mask is a "clustered" mask with the same clusters of 4265 /// size \p Sz, which are not identity submasks. 4266 static bool isRepeatedNonIdentityClusteredMask(ArrayRef<int> Mask, 4267 unsigned Sz) { 4268 ArrayRef<int> FirstCluster = Mask.slice(0, Sz); 4269 if (ShuffleVectorInst::isIdentityMask(FirstCluster, Sz)) 4270 return false; 4271 for (unsigned I = Sz, E = Mask.size(); I < E; I += Sz) { 4272 ArrayRef<int> Cluster = Mask.slice(I, Sz); 4273 if (Cluster != FirstCluster) 4274 return false; 4275 } 4276 return true; 4277 } 4278 4279 void BoUpSLP::reorderNodeWithReuses(TreeEntry &TE, ArrayRef<int> Mask) const { 4280 // Reorder reuses mask. 4281 reorderReuses(TE.ReuseShuffleIndices, Mask); 4282 const unsigned Sz = TE.Scalars.size(); 4283 // For vectorized and non-clustered reused no need to do anything else. 4284 if (TE.State != TreeEntry::NeedToGather || 4285 !ShuffleVectorInst::isOneUseSingleSourceMask(TE.ReuseShuffleIndices, 4286 Sz) || 4287 !isRepeatedNonIdentityClusteredMask(TE.ReuseShuffleIndices, Sz)) 4288 return; 4289 SmallVector<int> NewMask; 4290 inversePermutation(TE.ReorderIndices, NewMask); 4291 addMask(NewMask, TE.ReuseShuffleIndices); 4292 // Clear reorder since it is going to be applied to the new mask. 4293 TE.ReorderIndices.clear(); 4294 // Try to improve gathered nodes with clustered reuses, if possible. 
4295 ArrayRef<int> Slice = ArrayRef(NewMask).slice(0, Sz); 4296 SmallVector<unsigned> NewOrder(Slice.begin(), Slice.end()); 4297 inversePermutation(NewOrder, NewMask); 4298 reorderScalars(TE.Scalars, NewMask); 4299 // Fill the reuses mask with the identity submasks. 4300 for (auto *It = TE.ReuseShuffleIndices.begin(), 4301 *End = TE.ReuseShuffleIndices.end(); 4302 It != End; std::advance(It, Sz)) 4303 std::iota(It, std::next(It, Sz), 0); 4304 } 4305 4306 void BoUpSLP::reorderTopToBottom() { 4307 // Maps VF to the graph nodes. 4308 DenseMap<unsigned, SetVector<TreeEntry *>> VFToOrderedEntries; 4309 // ExtractElement gather nodes which can be vectorized and need to handle 4310 // their ordering. 4311 DenseMap<const TreeEntry *, OrdersType> GathersToOrders; 4312 4313 // Phi nodes can have preferred ordering based on their result users 4314 DenseMap<const TreeEntry *, OrdersType> PhisToOrders; 4315 4316 // AltShuffles can also have a preferred ordering that leads to fewer 4317 // instructions, e.g., the addsub instruction in x86. 4318 DenseMap<const TreeEntry *, OrdersType> AltShufflesToOrders; 4319 4320 // Maps a TreeEntry to the reorder indices of external users. 4321 DenseMap<const TreeEntry *, SmallVector<OrdersType, 1>> 4322 ExternalUserReorderMap; 4323 // FIXME: Workaround for syntax error reported by MSVC buildbots. 4324 TargetTransformInfo &TTIRef = *TTI; 4325 // Find all reorderable nodes with the given VF. 4326 // Currently the are vectorized stores,loads,extracts + some gathering of 4327 // extracts. 4328 for_each(VectorizableTree, [this, &TTIRef, &VFToOrderedEntries, 4329 &GathersToOrders, &ExternalUserReorderMap, 4330 &AltShufflesToOrders, &PhisToOrders]( 4331 const std::unique_ptr<TreeEntry> &TE) { 4332 // Look for external users that will probably be vectorized. 4333 SmallVector<OrdersType, 1> ExternalUserReorderIndices = 4334 findExternalStoreUsersReorderIndices(TE.get()); 4335 if (!ExternalUserReorderIndices.empty()) { 4336 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get()); 4337 ExternalUserReorderMap.try_emplace(TE.get(), 4338 std::move(ExternalUserReorderIndices)); 4339 } 4340 4341 // Patterns like [fadd,fsub] can be combined into a single instruction in 4342 // x86. Reordering them into [fsub,fadd] blocks this pattern. So we need 4343 // to take into account their order when looking for the most used order. 4344 if (TE->isAltShuffle()) { 4345 VectorType *VecTy = 4346 FixedVectorType::get(TE->Scalars[0]->getType(), TE->Scalars.size()); 4347 unsigned Opcode0 = TE->getOpcode(); 4348 unsigned Opcode1 = TE->getAltOpcode(); 4349 // The opcode mask selects between the two opcodes. 4350 SmallBitVector OpcodeMask(TE->Scalars.size(), false); 4351 for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size())) 4352 if (cast<Instruction>(TE->Scalars[Lane])->getOpcode() == Opcode1) 4353 OpcodeMask.set(Lane); 4354 // If this pattern is supported by the target then we consider the order. 4355 if (TTIRef.isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask)) { 4356 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get()); 4357 AltShufflesToOrders.try_emplace(TE.get(), OrdersType()); 4358 } 4359 // TODO: Check the reverse order too. 4360 } 4361 4362 if (std::optional<OrdersType> CurrentOrder = 4363 getReorderingData(*TE, /*TopToBottom=*/true)) { 4364 // Do not include ordering for nodes used in the alt opcode vectorization, 4365 // better to reorder them during bottom-to-top stage. 
      // If we follow the order here, it causes reordering of the whole graph,
      // though actually it is profitable just to reorder the subgraph that
      // starts from the alternate opcode vectorization node. Such nodes
      // already end up with a shuffle instruction, and it is enough to change
      // that shuffle rather than rotate the scalars for the whole graph.
      unsigned Cnt = 0;
      const TreeEntry *UserTE = TE.get();
      while (UserTE && Cnt < RecursionMaxDepth) {
        if (UserTE->UserTreeIndices.size() != 1)
          break;
        if (all_of(UserTE->UserTreeIndices, [](const EdgeInfo &EI) {
              return EI.UserTE->State == TreeEntry::Vectorize &&
                     EI.UserTE->isAltShuffle() && EI.UserTE->Idx != 0;
            }))
          return;
        UserTE = UserTE->UserTreeIndices.back().UserTE;
        ++Cnt;
      }
      VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get());
      if (!(TE->State == TreeEntry::Vectorize ||
            TE->State == TreeEntry::PossibleStridedVectorize) ||
          !TE->ReuseShuffleIndices.empty())
        GathersToOrders.try_emplace(TE.get(), *CurrentOrder);
      if (TE->State == TreeEntry::Vectorize &&
          TE->getOpcode() == Instruction::PHI)
        PhisToOrders.try_emplace(TE.get(), *CurrentOrder);
    }
  });

  // Reorder the graph nodes according to their vectorization factor.
  for (unsigned VF = VectorizableTree.front()->getVectorFactor(); VF > 1;
       VF /= 2) {
    auto It = VFToOrderedEntries.find(VF);
    if (It == VFToOrderedEntries.end())
      continue;
    // Try to find the most profitable order. We simply look for the most used
    // order and reorder the scalar elements in the nodes according to it.
    ArrayRef<TreeEntry *> OrderedEntries = It->second.getArrayRef();
    // All operands are reordered and used only in this node - propagate the
    // most used order to the user node.
    MapVector<OrdersType, unsigned,
              DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>>
        OrdersUses;
    // Last chance orders - scatter vectorize. Try to use their orders if no
    // other orders or the order is counted already.
    SmallVector<OrdersType> StridedVectorizeOrders;
    SmallPtrSet<const TreeEntry *, 4> VisitedOps;
    for (const TreeEntry *OpTE : OrderedEntries) {
      // No need to reorder these nodes; we still need to extend them and use a
      // shuffle, just merging the reordering shuffle and the reuse shuffle.
      if (!OpTE->ReuseShuffleIndices.empty() && !GathersToOrders.count(OpTE))
        continue;
      // Count the number of uses of each order.
      const auto &Order = [OpTE, &GathersToOrders, &AltShufflesToOrders,
                           &PhisToOrders]() -> const OrdersType & {
        if (OpTE->State == TreeEntry::NeedToGather ||
            !OpTE->ReuseShuffleIndices.empty()) {
          auto It = GathersToOrders.find(OpTE);
          if (It != GathersToOrders.end())
            return It->second;
        }
        if (OpTE->isAltShuffle()) {
          auto It = AltShufflesToOrders.find(OpTE);
          if (It != AltShufflesToOrders.end())
            return It->second;
        }
        if (OpTE->State == TreeEntry::Vectorize &&
            OpTE->getOpcode() == Instruction::PHI) {
          auto It = PhisToOrders.find(OpTE);
          if (It != PhisToOrders.end())
            return It->second;
        }
        return OpTE->ReorderIndices;
      }();
      // First consider the order of the external scalar users.
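      // (Illustrative note: if, e.g., two external store groups consume this
      // entry's scalars in the order {1,0,3,2}, that order is counted twice
      // in OrdersUses below and can outweigh the entry's own ReorderIndices,
      // assuming the entry's vector factor matches its number of scalars.)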
4442 auto It = ExternalUserReorderMap.find(OpTE); 4443 if (It != ExternalUserReorderMap.end()) { 4444 const auto &ExternalUserReorderIndices = It->second; 4445 // If the OpTE vector factor != number of scalars - use natural order, 4446 // it is an attempt to reorder node with reused scalars but with 4447 // external uses. 4448 if (OpTE->getVectorFactor() != OpTE->Scalars.size()) { 4449 OrdersUses.insert(std::make_pair(OrdersType(), 0)).first->second += 4450 ExternalUserReorderIndices.size(); 4451 } else { 4452 for (const OrdersType &ExtOrder : ExternalUserReorderIndices) 4453 ++OrdersUses.insert(std::make_pair(ExtOrder, 0)).first->second; 4454 } 4455 // No other useful reorder data in this entry. 4456 if (Order.empty()) 4457 continue; 4458 } 4459 // Postpone scatter orders. 4460 if (OpTE->State == TreeEntry::PossibleStridedVectorize) { 4461 StridedVectorizeOrders.push_back(Order); 4462 continue; 4463 } 4464 // Stores actually store the mask, not the order, need to invert. 4465 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() && 4466 OpTE->getOpcode() == Instruction::Store && !Order.empty()) { 4467 SmallVector<int> Mask; 4468 inversePermutation(Order, Mask); 4469 unsigned E = Order.size(); 4470 OrdersType CurrentOrder(E, E); 4471 transform(Mask, CurrentOrder.begin(), [E](int Idx) { 4472 return Idx == PoisonMaskElem ? E : static_cast<unsigned>(Idx); 4473 }); 4474 fixupOrderingIndices(CurrentOrder); 4475 ++OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second; 4476 } else { 4477 ++OrdersUses.insert(std::make_pair(Order, 0)).first->second; 4478 } 4479 } 4480 // Set order of the user node. 4481 if (OrdersUses.empty()) { 4482 if (StridedVectorizeOrders.empty()) 4483 continue; 4484 // Add (potentially!) strided vectorize orders. 4485 for (OrdersType &Order : StridedVectorizeOrders) 4486 ++OrdersUses.insert(std::make_pair(Order, 0)).first->second; 4487 } else { 4488 // Account (potentially!) strided vectorize orders only if it was used 4489 // already. 4490 for (OrdersType &Order : StridedVectorizeOrders) { 4491 auto *It = OrdersUses.find(Order); 4492 if (It != OrdersUses.end()) 4493 ++It->second; 4494 } 4495 } 4496 // Choose the most used order. 4497 ArrayRef<unsigned> BestOrder = OrdersUses.front().first; 4498 unsigned Cnt = OrdersUses.front().second; 4499 for (const auto &Pair : drop_begin(OrdersUses)) { 4500 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) { 4501 BestOrder = Pair.first; 4502 Cnt = Pair.second; 4503 } 4504 } 4505 // Set order of the user node. 4506 if (BestOrder.empty()) 4507 continue; 4508 SmallVector<int> Mask; 4509 inversePermutation(BestOrder, Mask); 4510 SmallVector<int> MaskOrder(BestOrder.size(), PoisonMaskElem); 4511 unsigned E = BestOrder.size(); 4512 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) { 4513 return I < E ? static_cast<int>(I) : PoisonMaskElem; 4514 }); 4515 // Do an actual reordering, if profitable. 4516 for (std::unique_ptr<TreeEntry> &TE : VectorizableTree) { 4517 // Just do the reordering for the nodes with the given VF. 4518 if (TE->Scalars.size() != VF) { 4519 if (TE->ReuseShuffleIndices.size() == VF) { 4520 // Need to reorder the reuses masks of the operands with smaller VF to 4521 // be able to find the match between the graph nodes and scalar 4522 // operands of the given node during vectorization/cost estimation. 
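          // (Roughly: such an operand keeps its smaller set of scalars, but
          // its ReuseShuffleIndices already have the user's VF, so permuting
          // that reuse mask with the user's mask is what is done here.)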
4523 assert(all_of(TE->UserTreeIndices, 4524 [VF, &TE](const EdgeInfo &EI) { 4525 return EI.UserTE->Scalars.size() == VF || 4526 EI.UserTE->Scalars.size() == 4527 TE->Scalars.size(); 4528 }) && 4529 "All users must be of VF size."); 4530 // Update ordering of the operands with the smaller VF than the given 4531 // one. 4532 reorderNodeWithReuses(*TE, Mask); 4533 } 4534 continue; 4535 } 4536 if ((TE->State == TreeEntry::Vectorize || 4537 TE->State == TreeEntry::PossibleStridedVectorize) && 4538 isa<ExtractElementInst, ExtractValueInst, LoadInst, StoreInst, 4539 InsertElementInst>(TE->getMainOp()) && 4540 !TE->isAltShuffle()) { 4541 // Build correct orders for extract{element,value}, loads and 4542 // stores. 4543 reorderOrder(TE->ReorderIndices, Mask); 4544 if (isa<InsertElementInst, StoreInst>(TE->getMainOp())) 4545 TE->reorderOperands(Mask); 4546 } else { 4547 // Reorder the node and its operands. 4548 TE->reorderOperands(Mask); 4549 assert(TE->ReorderIndices.empty() && 4550 "Expected empty reorder sequence."); 4551 reorderScalars(TE->Scalars, Mask); 4552 } 4553 if (!TE->ReuseShuffleIndices.empty()) { 4554 // Apply reversed order to keep the original ordering of the reused 4555 // elements to avoid extra reorder indices shuffling. 4556 OrdersType CurrentOrder; 4557 reorderOrder(CurrentOrder, MaskOrder); 4558 SmallVector<int> NewReuses; 4559 inversePermutation(CurrentOrder, NewReuses); 4560 addMask(NewReuses, TE->ReuseShuffleIndices); 4561 TE->ReuseShuffleIndices.swap(NewReuses); 4562 } 4563 } 4564 } 4565 } 4566 4567 bool BoUpSLP::canReorderOperands( 4568 TreeEntry *UserTE, SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges, 4569 ArrayRef<TreeEntry *> ReorderableGathers, 4570 SmallVectorImpl<TreeEntry *> &GatherOps) { 4571 for (unsigned I = 0, E = UserTE->getNumOperands(); I < E; ++I) { 4572 if (any_of(Edges, [I](const std::pair<unsigned, TreeEntry *> &OpData) { 4573 return OpData.first == I && 4574 OpData.second->State == TreeEntry::Vectorize; 4575 })) 4576 continue; 4577 if (TreeEntry *TE = getVectorizedOperand(UserTE, I)) { 4578 // FIXME: Do not reorder (possible!) strided vectorized nodes, they 4579 // require reordering of the operands, which is not implemented yet. 4580 if (TE->State == TreeEntry::PossibleStridedVectorize) 4581 return false; 4582 // Do not reorder if operand node is used by many user nodes. 4583 if (any_of(TE->UserTreeIndices, 4584 [UserTE](const EdgeInfo &EI) { return EI.UserTE != UserTE; })) 4585 return false; 4586 // Add the node to the list of the ordered nodes with the identity 4587 // order. 4588 Edges.emplace_back(I, TE); 4589 // Add ScatterVectorize nodes to the list of operands, where just 4590 // reordering of the scalars is required. Similar to the gathers, so 4591 // simply add to the list of gathered ops. 4592 // If there are reused scalars, process this node as a regular vectorize 4593 // node, just reorder reuses mask. 
4594 if (TE->State != TreeEntry::Vectorize && 4595 TE->ReuseShuffleIndices.empty() && TE->ReorderIndices.empty()) 4596 GatherOps.push_back(TE); 4597 continue; 4598 } 4599 TreeEntry *Gather = nullptr; 4600 if (count_if(ReorderableGathers, 4601 [&Gather, UserTE, I](TreeEntry *TE) { 4602 assert(TE->State != TreeEntry::Vectorize && 4603 "Only non-vectorized nodes are expected."); 4604 if (any_of(TE->UserTreeIndices, 4605 [UserTE, I](const EdgeInfo &EI) { 4606 return EI.UserTE == UserTE && EI.EdgeIdx == I; 4607 })) { 4608 assert(TE->isSame(UserTE->getOperand(I)) && 4609 "Operand entry does not match operands."); 4610 Gather = TE; 4611 return true; 4612 } 4613 return false; 4614 }) > 1 && 4615 !allConstant(UserTE->getOperand(I))) 4616 return false; 4617 if (Gather) 4618 GatherOps.push_back(Gather); 4619 } 4620 return true; 4621 } 4622 4623 void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) { 4624 SetVector<TreeEntry *> OrderedEntries; 4625 DenseMap<const TreeEntry *, OrdersType> GathersToOrders; 4626 // Find all reorderable leaf nodes with the given VF. 4627 // Currently the are vectorized loads,extracts without alternate operands + 4628 // some gathering of extracts. 4629 SmallVector<TreeEntry *> NonVectorized; 4630 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) { 4631 if (TE->State != TreeEntry::Vectorize && 4632 TE->State != TreeEntry::PossibleStridedVectorize) 4633 NonVectorized.push_back(TE.get()); 4634 if (std::optional<OrdersType> CurrentOrder = 4635 getReorderingData(*TE, /*TopToBottom=*/false)) { 4636 OrderedEntries.insert(TE.get()); 4637 if (!(TE->State == TreeEntry::Vectorize || 4638 TE->State == TreeEntry::PossibleStridedVectorize) || 4639 !TE->ReuseShuffleIndices.empty()) 4640 GathersToOrders.try_emplace(TE.get(), *CurrentOrder); 4641 } 4642 } 4643 4644 // 1. Propagate order to the graph nodes, which use only reordered nodes. 4645 // I.e., if the node has operands, that are reordered, try to make at least 4646 // one operand order in the natural order and reorder others + reorder the 4647 // user node itself. 4648 SmallPtrSet<const TreeEntry *, 4> Visited; 4649 while (!OrderedEntries.empty()) { 4650 // 1. Filter out only reordered nodes. 4651 // 2. If the entry has multiple uses - skip it and jump to the next node. 4652 DenseMap<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>> Users; 4653 SmallVector<TreeEntry *> Filtered; 4654 for (TreeEntry *TE : OrderedEntries) { 4655 if (!(TE->State == TreeEntry::Vectorize || 4656 TE->State == TreeEntry::PossibleStridedVectorize || 4657 (TE->State == TreeEntry::NeedToGather && 4658 GathersToOrders.count(TE))) || 4659 TE->UserTreeIndices.empty() || !TE->ReuseShuffleIndices.empty() || 4660 !all_of(drop_begin(TE->UserTreeIndices), 4661 [TE](const EdgeInfo &EI) { 4662 return EI.UserTE == TE->UserTreeIndices.front().UserTE; 4663 }) || 4664 !Visited.insert(TE).second) { 4665 Filtered.push_back(TE); 4666 continue; 4667 } 4668 // Build a map between user nodes and their operands order to speedup 4669 // search. The graph currently does not provide this dependency directly. 4670 for (EdgeInfo &EI : TE->UserTreeIndices) { 4671 TreeEntry *UserTE = EI.UserTE; 4672 auto It = Users.find(UserTE); 4673 if (It == Users.end()) 4674 It = Users.insert({UserTE, {}}).first; 4675 It->second.emplace_back(EI.EdgeIdx, TE); 4676 } 4677 } 4678 // Erase filtered entries. 
    for (TreeEntry *TE : Filtered)
      OrderedEntries.remove(TE);
    SmallVector<
        std::pair<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>>>
        UsersVec(Users.begin(), Users.end());
    sort(UsersVec, [](const auto &Data1, const auto &Data2) {
      return Data1.first->Idx > Data2.first->Idx;
    });
    for (auto &Data : UsersVec) {
      // Check that operands are used only in the User node.
      SmallVector<TreeEntry *> GatherOps;
      if (!canReorderOperands(Data.first, Data.second, NonVectorized,
                              GatherOps)) {
        for (const std::pair<unsigned, TreeEntry *> &Op : Data.second)
          OrderedEntries.remove(Op.second);
        continue;
      }
      // All operands are reordered and used only in this node - propagate the
      // most used order to the user node.
      MapVector<OrdersType, unsigned,
                DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>>
          OrdersUses;
      // Last chance orders - scatter vectorize. Try to use their orders if no
      // other orders or the order is counted already.
      SmallVector<std::pair<OrdersType, unsigned>> StridedVectorizeOrders;
      // Do the analysis for each tree entry only once, otherwise the order of
      // the same node may be considered several times, though it might not be
      // profitable.
      SmallPtrSet<const TreeEntry *, 4> VisitedOps;
      SmallPtrSet<const TreeEntry *, 4> VisitedUsers;
      for (const auto &Op : Data.second) {
        TreeEntry *OpTE = Op.second;
        if (!VisitedOps.insert(OpTE).second)
          continue;
        if (!OpTE->ReuseShuffleIndices.empty() && !GathersToOrders.count(OpTE))
          continue;
        const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & {
          if (OpTE->State == TreeEntry::NeedToGather ||
              !OpTE->ReuseShuffleIndices.empty())
            return GathersToOrders.find(OpTE)->second;
          return OpTE->ReorderIndices;
        }();
        unsigned NumOps = count_if(
            Data.second, [OpTE](const std::pair<unsigned, TreeEntry *> &P) {
              return P.second == OpTE;
            });
        // Postpone scatter orders.
        if (OpTE->State == TreeEntry::PossibleStridedVectorize) {
          StridedVectorizeOrders.emplace_back(Order, NumOps);
          continue;
        }
        // Stores actually store the mask, not the order, need to invert.
        if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() &&
            OpTE->getOpcode() == Instruction::Store && !Order.empty()) {
          SmallVector<int> Mask;
          inversePermutation(Order, Mask);
          unsigned E = Order.size();
          OrdersType CurrentOrder(E, E);
          transform(Mask, CurrentOrder.begin(), [E](int Idx) {
            return Idx == PoisonMaskElem ?
E : static_cast<unsigned>(Idx); 4739 }); 4740 fixupOrderingIndices(CurrentOrder); 4741 OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second += 4742 NumOps; 4743 } else { 4744 OrdersUses.insert(std::make_pair(Order, 0)).first->second += NumOps; 4745 } 4746 auto Res = OrdersUses.insert(std::make_pair(OrdersType(), 0)); 4747 const auto &&AllowsReordering = [IgnoreReorder, &GathersToOrders]( 4748 const TreeEntry *TE) { 4749 if (!TE->ReorderIndices.empty() || !TE->ReuseShuffleIndices.empty() || 4750 (TE->State == TreeEntry::Vectorize && TE->isAltShuffle()) || 4751 (IgnoreReorder && TE->Idx == 0)) 4752 return true; 4753 if (TE->State == TreeEntry::NeedToGather) { 4754 auto It = GathersToOrders.find(TE); 4755 if (It != GathersToOrders.end()) 4756 return !It->second.empty(); 4757 return true; 4758 } 4759 return false; 4760 }; 4761 for (const EdgeInfo &EI : OpTE->UserTreeIndices) { 4762 TreeEntry *UserTE = EI.UserTE; 4763 if (!VisitedUsers.insert(UserTE).second) 4764 continue; 4765 // May reorder user node if it requires reordering, has reused 4766 // scalars, is an alternate op vectorize node or its op nodes require 4767 // reordering. 4768 if (AllowsReordering(UserTE)) 4769 continue; 4770 // Check if users allow reordering. 4771 // Currently look up just 1 level of operands to avoid increase of 4772 // the compile time. 4773 // Profitable to reorder if definitely more operands allow 4774 // reordering rather than those with natural order. 4775 ArrayRef<std::pair<unsigned, TreeEntry *>> Ops = Users[UserTE]; 4776 if (static_cast<unsigned>(count_if( 4777 Ops, [UserTE, &AllowsReordering]( 4778 const std::pair<unsigned, TreeEntry *> &Op) { 4779 return AllowsReordering(Op.second) && 4780 all_of(Op.second->UserTreeIndices, 4781 [UserTE](const EdgeInfo &EI) { 4782 return EI.UserTE == UserTE; 4783 }); 4784 })) <= Ops.size() / 2) 4785 ++Res.first->second; 4786 } 4787 } 4788 // If no orders - skip current nodes and jump to the next one, if any. 4789 if (OrdersUses.empty()) { 4790 if (StridedVectorizeOrders.empty() || 4791 (Data.first->ReorderIndices.empty() && 4792 Data.first->ReuseShuffleIndices.empty() && 4793 !(IgnoreReorder && 4794 Data.first == VectorizableTree.front().get()))) { 4795 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) 4796 OrderedEntries.remove(Op.second); 4797 continue; 4798 } 4799 // Add (potentially!) strided vectorize orders. 4800 for (std::pair<OrdersType, unsigned> &Pair : StridedVectorizeOrders) 4801 OrdersUses.insert(std::make_pair(Pair.first, 0)).first->second += 4802 Pair.second; 4803 } else { 4804 // Account (potentially!) strided vectorize orders only if it was used 4805 // already. 4806 for (std::pair<OrdersType, unsigned> &Pair : StridedVectorizeOrders) { 4807 auto *It = OrdersUses.find(Pair.first); 4808 if (It != OrdersUses.end()) 4809 It->second += Pair.second; 4810 } 4811 } 4812 // Choose the best order. 4813 ArrayRef<unsigned> BestOrder = OrdersUses.front().first; 4814 unsigned Cnt = OrdersUses.front().second; 4815 for (const auto &Pair : drop_begin(OrdersUses)) { 4816 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) { 4817 BestOrder = Pair.first; 4818 Cnt = Pair.second; 4819 } 4820 } 4821 // Set order of the user node (reordering of operands and user nodes). 4822 if (BestOrder.empty()) { 4823 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) 4824 OrderedEntries.remove(Op.second); 4825 continue; 4826 } 4827 // Erase operands from OrderedEntries list and adjust their orders. 
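      // (Illustrative: inversePermutation below builds the shuffle mask that
      // realizes BestOrder, e.g. for BestOrder {2,0,1} the resulting Mask is
      // {1,2,0}, while MaskOrder keeps BestOrder itself with unset (== E)
      // entries turned into poison lanes.)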
4828 VisitedOps.clear(); 4829 SmallVector<int> Mask; 4830 inversePermutation(BestOrder, Mask); 4831 SmallVector<int> MaskOrder(BestOrder.size(), PoisonMaskElem); 4832 unsigned E = BestOrder.size(); 4833 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) { 4834 return I < E ? static_cast<int>(I) : PoisonMaskElem; 4835 }); 4836 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) { 4837 TreeEntry *TE = Op.second; 4838 OrderedEntries.remove(TE); 4839 if (!VisitedOps.insert(TE).second) 4840 continue; 4841 if (TE->ReuseShuffleIndices.size() == BestOrder.size()) { 4842 reorderNodeWithReuses(*TE, Mask); 4843 continue; 4844 } 4845 // Gathers are processed separately. 4846 if (TE->State != TreeEntry::Vectorize && 4847 TE->State != TreeEntry::PossibleStridedVectorize && 4848 (TE->State != TreeEntry::ScatterVectorize || 4849 TE->ReorderIndices.empty())) 4850 continue; 4851 assert((BestOrder.size() == TE->ReorderIndices.size() || 4852 TE->ReorderIndices.empty()) && 4853 "Non-matching sizes of user/operand entries."); 4854 reorderOrder(TE->ReorderIndices, Mask); 4855 if (IgnoreReorder && TE == VectorizableTree.front().get()) 4856 IgnoreReorder = false; 4857 } 4858 // For gathers just need to reorder its scalars. 4859 for (TreeEntry *Gather : GatherOps) { 4860 assert(Gather->ReorderIndices.empty() && 4861 "Unexpected reordering of gathers."); 4862 if (!Gather->ReuseShuffleIndices.empty()) { 4863 // Just reorder reuses indices. 4864 reorderReuses(Gather->ReuseShuffleIndices, Mask); 4865 continue; 4866 } 4867 reorderScalars(Gather->Scalars, Mask); 4868 OrderedEntries.remove(Gather); 4869 } 4870 // Reorder operands of the user node and set the ordering for the user 4871 // node itself. 4872 if (Data.first->State != TreeEntry::Vectorize || 4873 !isa<ExtractElementInst, ExtractValueInst, LoadInst>( 4874 Data.first->getMainOp()) || 4875 Data.first->isAltShuffle()) 4876 Data.first->reorderOperands(Mask); 4877 if (!isa<InsertElementInst, StoreInst>(Data.first->getMainOp()) || 4878 Data.first->isAltShuffle() || 4879 Data.first->State == TreeEntry::PossibleStridedVectorize) { 4880 reorderScalars(Data.first->Scalars, Mask); 4881 reorderOrder(Data.first->ReorderIndices, MaskOrder); 4882 if (Data.first->ReuseShuffleIndices.empty() && 4883 !Data.first->ReorderIndices.empty() && 4884 !Data.first->isAltShuffle()) { 4885 // Insert user node to the list to try to sink reordering deeper in 4886 // the graph. 4887 OrderedEntries.insert(Data.first); 4888 } 4889 } else { 4890 reorderOrder(Data.first->ReorderIndices, Mask); 4891 } 4892 } 4893 } 4894 // If the reordering is unnecessary, just remove the reorder. 4895 if (IgnoreReorder && !VectorizableTree.front()->ReorderIndices.empty() && 4896 VectorizableTree.front()->ReuseShuffleIndices.empty()) 4897 VectorizableTree.front()->ReorderIndices.clear(); 4898 } 4899 4900 void BoUpSLP::buildExternalUses( 4901 const ExtraValueToDebugLocsMap &ExternallyUsedValues) { 4902 // Collect the values that we need to extract from the tree. 4903 for (auto &TEPtr : VectorizableTree) { 4904 TreeEntry *Entry = TEPtr.get(); 4905 4906 // No need to handle users of gathered values. 4907 if (Entry->State == TreeEntry::NeedToGather) 4908 continue; 4909 4910 // For each lane: 4911 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 4912 Value *Scalar = Entry->Scalars[Lane]; 4913 if (!isa<Instruction>(Scalar)) 4914 continue; 4915 int FoundLane = Entry->findLaneForValue(Scalar); 4916 4917 // Check if the scalar is externally used as an extra arg. 
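      // (Roughly, an "extra arg" is a value the caller asked to keep
      // available, e.g. the initial value of a horizontal reduction; it gets
      // an ExternalUses entry with a null user so an extract is emitted for
      // it unconditionally.)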
4918 const auto *ExtI = ExternallyUsedValues.find(Scalar); 4919 if (ExtI != ExternallyUsedValues.end()) { 4920 LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " 4921 << Lane << " from " << *Scalar << ".\n"); 4922 ExternalUses.emplace_back(Scalar, nullptr, FoundLane); 4923 } 4924 for (User *U : Scalar->users()) { 4925 LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n"); 4926 4927 Instruction *UserInst = dyn_cast<Instruction>(U); 4928 if (!UserInst || isDeleted(UserInst)) 4929 continue; 4930 4931 // Ignore users in the user ignore list. 4932 if (UserIgnoreList && UserIgnoreList->contains(UserInst)) 4933 continue; 4934 4935 // Skip in-tree scalars that become vectors 4936 if (TreeEntry *UseEntry = getTreeEntry(U)) { 4937 // Some in-tree scalars will remain as scalar in vectorized 4938 // instructions. If that is the case, the one in FoundLane will 4939 // be used. 4940 if (UseEntry->State == TreeEntry::ScatterVectorize || 4941 UseEntry->State == TreeEntry::PossibleStridedVectorize || 4942 !doesInTreeUserNeedToExtract( 4943 Scalar, cast<Instruction>(UseEntry->Scalars.front()), TLI)) { 4944 LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U 4945 << ".\n"); 4946 assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state"); 4947 continue; 4948 } 4949 U = nullptr; 4950 } 4951 4952 LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *UserInst 4953 << " from lane " << Lane << " from " << *Scalar 4954 << ".\n"); 4955 ExternalUses.emplace_back(Scalar, U, FoundLane); 4956 } 4957 } 4958 } 4959 } 4960 4961 DenseMap<Value *, SmallVector<StoreInst *>> 4962 BoUpSLP::collectUserStores(const BoUpSLP::TreeEntry *TE) const { 4963 DenseMap<Value *, SmallVector<StoreInst *>> PtrToStoresMap; 4964 for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size())) { 4965 Value *V = TE->Scalars[Lane]; 4966 // To save compilation time we don't visit if we have too many users. 4967 static constexpr unsigned UsersLimit = 4; 4968 if (V->hasNUsesOrMore(UsersLimit)) 4969 break; 4970 4971 // Collect stores per pointer object. 4972 for (User *U : V->users()) { 4973 auto *SI = dyn_cast<StoreInst>(U); 4974 if (SI == nullptr || !SI->isSimple() || 4975 !isValidElementType(SI->getValueOperand()->getType())) 4976 continue; 4977 // Skip entry if already 4978 if (getTreeEntry(U)) 4979 continue; 4980 4981 Value *Ptr = getUnderlyingObject(SI->getPointerOperand()); 4982 auto &StoresVec = PtrToStoresMap[Ptr]; 4983 // For now just keep one store per pointer object per lane. 4984 // TODO: Extend this to support multiple stores per pointer per lane 4985 if (StoresVec.size() > Lane) 4986 continue; 4987 // Skip if in different BBs. 4988 if (!StoresVec.empty() && 4989 SI->getParent() != StoresVec.back()->getParent()) 4990 continue; 4991 // Make sure that the stores are of the same type. 4992 if (!StoresVec.empty() && 4993 SI->getValueOperand()->getType() != 4994 StoresVec.back()->getValueOperand()->getType()) 4995 continue; 4996 StoresVec.push_back(SI); 4997 } 4998 } 4999 return PtrToStoresMap; 5000 } 5001 5002 bool BoUpSLP::canFormVector(ArrayRef<StoreInst *> StoresVec, 5003 OrdersType &ReorderIndices) const { 5004 // We check whether the stores in StoreVec can form a vector by sorting them 5005 // and checking whether they are consecutive. 5006 5007 // To avoid calling getPointersDiff() while sorting we create a vector of 5008 // pairs {store, offset from first} and sort this instead. 
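  // (Illustrative: for stores in lane order to p+2, p+0 and p+1, the offsets
  // relative to the first store are {0,-2,-1}; after sorting they are the
  // consecutive -2,-1,0, so the group is accepted and the resulting
  // ReorderIndices come out as {2,0,1}.)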
5009 SmallVector<std::pair<StoreInst *, int>> StoreOffsetVec(StoresVec.size()); 5010 StoreInst *S0 = StoresVec[0]; 5011 StoreOffsetVec[0] = {S0, 0}; 5012 Type *S0Ty = S0->getValueOperand()->getType(); 5013 Value *S0Ptr = S0->getPointerOperand(); 5014 for (unsigned Idx : seq<unsigned>(1, StoresVec.size())) { 5015 StoreInst *SI = StoresVec[Idx]; 5016 std::optional<int> Diff = 5017 getPointersDiff(S0Ty, S0Ptr, SI->getValueOperand()->getType(), 5018 SI->getPointerOperand(), *DL, *SE, 5019 /*StrictCheck=*/true); 5020 // We failed to compare the pointers so just abandon this StoresVec. 5021 if (!Diff) 5022 return false; 5023 StoreOffsetVec[Idx] = {StoresVec[Idx], *Diff}; 5024 } 5025 5026 // Sort the vector based on the pointers. We create a copy because we may 5027 // need the original later for calculating the reorder (shuffle) indices. 5028 stable_sort(StoreOffsetVec, [](const std::pair<StoreInst *, int> &Pair1, 5029 const std::pair<StoreInst *, int> &Pair2) { 5030 int Offset1 = Pair1.second; 5031 int Offset2 = Pair2.second; 5032 return Offset1 < Offset2; 5033 }); 5034 5035 // Check if the stores are consecutive by checking if their difference is 1. 5036 for (unsigned Idx : seq<unsigned>(1, StoreOffsetVec.size())) 5037 if (StoreOffsetVec[Idx].second != StoreOffsetVec[Idx - 1].second + 1) 5038 return false; 5039 5040 // Calculate the shuffle indices according to their offset against the sorted 5041 // StoreOffsetVec. 5042 ReorderIndices.reserve(StoresVec.size()); 5043 for (StoreInst *SI : StoresVec) { 5044 unsigned Idx = find_if(StoreOffsetVec, 5045 [SI](const std::pair<StoreInst *, int> &Pair) { 5046 return Pair.first == SI; 5047 }) - 5048 StoreOffsetVec.begin(); 5049 ReorderIndices.push_back(Idx); 5050 } 5051 // Identity order (e.g., {0,1,2,3}) is modeled as an empty OrdersType in 5052 // reorderTopToBottom() and reorderBottomToTop(), so we are following the 5053 // same convention here. 5054 auto IsIdentityOrder = [](const OrdersType &Order) { 5055 for (unsigned Idx : seq<unsigned>(0, Order.size())) 5056 if (Idx != Order[Idx]) 5057 return false; 5058 return true; 5059 }; 5060 if (IsIdentityOrder(ReorderIndices)) 5061 ReorderIndices.clear(); 5062 5063 return true; 5064 } 5065 5066 #ifndef NDEBUG 5067 LLVM_DUMP_METHOD static void dumpOrder(const BoUpSLP::OrdersType &Order) { 5068 for (unsigned Idx : Order) 5069 dbgs() << Idx << ", "; 5070 dbgs() << "\n"; 5071 } 5072 #endif 5073 5074 SmallVector<BoUpSLP::OrdersType, 1> 5075 BoUpSLP::findExternalStoreUsersReorderIndices(TreeEntry *TE) const { 5076 unsigned NumLanes = TE->Scalars.size(); 5077 5078 DenseMap<Value *, SmallVector<StoreInst *>> PtrToStoresMap = 5079 collectUserStores(TE); 5080 5081 // Holds the reorder indices for each candidate store vector that is a user of 5082 // the current TreeEntry. 5083 SmallVector<OrdersType, 1> ExternalReorderIndices; 5084 5085 // Now inspect the stores collected per pointer and look for vectorization 5086 // candidates. For each candidate calculate the reorder index vector and push 5087 // it into `ExternalReorderIndices` 5088 for (const auto &Pair : PtrToStoresMap) { 5089 auto &StoresVec = Pair.second; 5090 // If we have fewer than NumLanes stores, then we can't form a vector. 5091 if (StoresVec.size() != NumLanes) 5092 continue; 5093 5094 // If the stores are not consecutive then abandon this StoresVec. 
5095 OrdersType ReorderIndices; 5096 if (!canFormVector(StoresVec, ReorderIndices)) 5097 continue; 5098 5099 // We now know that the scalars in StoresVec can form a vector instruction, 5100 // so set the reorder indices. 5101 ExternalReorderIndices.push_back(ReorderIndices); 5102 } 5103 return ExternalReorderIndices; 5104 } 5105 5106 void BoUpSLP::buildTree(ArrayRef<Value *> Roots, 5107 const SmallDenseSet<Value *> &UserIgnoreLst) { 5108 deleteTree(); 5109 UserIgnoreList = &UserIgnoreLst; 5110 if (!allSameType(Roots)) 5111 return; 5112 buildTree_rec(Roots, 0, EdgeInfo()); 5113 } 5114 5115 void BoUpSLP::buildTree(ArrayRef<Value *> Roots) { 5116 deleteTree(); 5117 if (!allSameType(Roots)) 5118 return; 5119 buildTree_rec(Roots, 0, EdgeInfo()); 5120 } 5121 5122 /// \return true if the specified list of values has only one instruction that 5123 /// requires scheduling, false otherwise. 5124 #ifndef NDEBUG 5125 static bool needToScheduleSingleInstruction(ArrayRef<Value *> VL) { 5126 Value *NeedsScheduling = nullptr; 5127 for (Value *V : VL) { 5128 if (doesNotNeedToBeScheduled(V)) 5129 continue; 5130 if (!NeedsScheduling) { 5131 NeedsScheduling = V; 5132 continue; 5133 } 5134 return false; 5135 } 5136 return NeedsScheduling; 5137 } 5138 #endif 5139 5140 /// Generates key/subkey pair for the given value to provide effective sorting 5141 /// of the values and better detection of the vectorizable values sequences. The 5142 /// keys/subkeys can be used for better sorting of the values themselves (keys) 5143 /// and in values subgroups (subkeys). 5144 static std::pair<size_t, size_t> generateKeySubkey( 5145 Value *V, const TargetLibraryInfo *TLI, 5146 function_ref<hash_code(size_t, LoadInst *)> LoadsSubkeyGenerator, 5147 bool AllowAlternate) { 5148 hash_code Key = hash_value(V->getValueID() + 2); 5149 hash_code SubKey = hash_value(0); 5150 // Sort the loads by the distance between the pointers. 5151 if (auto *LI = dyn_cast<LoadInst>(V)) { 5152 Key = hash_combine(LI->getType(), hash_value(Instruction::Load), Key); 5153 if (LI->isSimple()) 5154 SubKey = hash_value(LoadsSubkeyGenerator(Key, LI)); 5155 else 5156 Key = SubKey = hash_value(LI); 5157 } else if (isVectorLikeInstWithConstOps(V)) { 5158 // Sort extracts by the vector operands. 5159 if (isa<ExtractElementInst, UndefValue>(V)) 5160 Key = hash_value(Value::UndefValueVal + 1); 5161 if (auto *EI = dyn_cast<ExtractElementInst>(V)) { 5162 if (!isUndefVector(EI->getVectorOperand()).all() && 5163 !isa<UndefValue>(EI->getIndexOperand())) 5164 SubKey = hash_value(EI->getVectorOperand()); 5165 } 5166 } else if (auto *I = dyn_cast<Instruction>(V)) { 5167 // Sort other instructions just by the opcodes except for CMPInst. 5168 // For CMP also sort by the predicate kind. 5169 if ((isa<BinaryOperator, CastInst>(I)) && 5170 isValidForAlternation(I->getOpcode())) { 5171 if (AllowAlternate) 5172 Key = hash_value(isa<BinaryOperator>(I) ? 1 : 0); 5173 else 5174 Key = hash_combine(hash_value(I->getOpcode()), Key); 5175 SubKey = hash_combine( 5176 hash_value(I->getOpcode()), hash_value(I->getType()), 5177 hash_value(isa<BinaryOperator>(I) 5178 ? I->getType() 5179 : cast<CastInst>(I)->getOperand(0)->getType())); 5180 // For casts, look through the only operand to improve compile time. 
5181 if (isa<CastInst>(I)) { 5182 std::pair<size_t, size_t> OpVals = 5183 generateKeySubkey(I->getOperand(0), TLI, LoadsSubkeyGenerator, 5184 /*AllowAlternate=*/true); 5185 Key = hash_combine(OpVals.first, Key); 5186 SubKey = hash_combine(OpVals.first, SubKey); 5187 } 5188 } else if (auto *CI = dyn_cast<CmpInst>(I)) { 5189 CmpInst::Predicate Pred = CI->getPredicate(); 5190 if (CI->isCommutative()) 5191 Pred = std::min(Pred, CmpInst::getInversePredicate(Pred)); 5192 CmpInst::Predicate SwapPred = CmpInst::getSwappedPredicate(Pred); 5193 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Pred), 5194 hash_value(SwapPred), 5195 hash_value(CI->getOperand(0)->getType())); 5196 } else if (auto *Call = dyn_cast<CallInst>(I)) { 5197 Intrinsic::ID ID = getVectorIntrinsicIDForCall(Call, TLI); 5198 if (isTriviallyVectorizable(ID)) { 5199 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(ID)); 5200 } else if (!VFDatabase(*Call).getMappings(*Call).empty()) { 5201 SubKey = hash_combine(hash_value(I->getOpcode()), 5202 hash_value(Call->getCalledFunction())); 5203 } else { 5204 Key = hash_combine(hash_value(Call), Key); 5205 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Call)); 5206 } 5207 for (const CallBase::BundleOpInfo &Op : Call->bundle_op_infos()) 5208 SubKey = hash_combine(hash_value(Op.Begin), hash_value(Op.End), 5209 hash_value(Op.Tag), SubKey); 5210 } else if (auto *Gep = dyn_cast<GetElementPtrInst>(I)) { 5211 if (Gep->getNumOperands() == 2 && isa<ConstantInt>(Gep->getOperand(1))) 5212 SubKey = hash_value(Gep->getPointerOperand()); 5213 else 5214 SubKey = hash_value(Gep); 5215 } else if (BinaryOperator::isIntDivRem(I->getOpcode()) && 5216 !isa<ConstantInt>(I->getOperand(1))) { 5217 // Do not try to vectorize instructions with potentially high cost. 5218 SubKey = hash_value(I); 5219 } else { 5220 SubKey = hash_value(I->getOpcode()); 5221 } 5222 Key = hash_combine(hash_value(I->getParent()), Key); 5223 } 5224 return std::make_pair(Key, SubKey); 5225 } 5226 5227 /// Checks if the specified instruction \p I is an alternate operation for 5228 /// the given \p MainOp and \p AltOp instructions. 5229 static bool isAlternateInstruction(const Instruction *I, 5230 const Instruction *MainOp, 5231 const Instruction *AltOp, 5232 const TargetLibraryInfo &TLI); 5233 5234 BoUpSLP::TreeEntry::EntryState BoUpSLP::getScalarsVectorizationState( 5235 InstructionsState &S, ArrayRef<Value *> VL, bool IsScatterVectorizeUserTE, 5236 OrdersType &CurrentOrder, SmallVectorImpl<Value *> &PointerOps) const { 5237 assert(S.MainOp && "Expected instructions with same/alternate opcodes only."); 5238 5239 unsigned ShuffleOrOp = 5240 S.isAltShuffle() ? (unsigned)Instruction::ShuffleVector : S.getOpcode(); 5241 auto *VL0 = cast<Instruction>(S.OpValue); 5242 switch (ShuffleOrOp) { 5243 case Instruction::PHI: { 5244 // Check for terminator values (e.g. invoke). 
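    // (Roughly: an incoming value produced by an invoke has no insertion
    // point after it in its block, so such PHI bundles are gathered instead
    // of vectorized.)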
5245 for (Value *V : VL) 5246 for (Value *Incoming : cast<PHINode>(V)->incoming_values()) { 5247 Instruction *Term = dyn_cast<Instruction>(Incoming); 5248 if (Term && Term->isTerminator()) { 5249 LLVM_DEBUG(dbgs() 5250 << "SLP: Need to swizzle PHINodes (terminator use).\n"); 5251 return TreeEntry::NeedToGather; 5252 } 5253 } 5254 5255 return TreeEntry::Vectorize; 5256 } 5257 case Instruction::ExtractValue: 5258 case Instruction::ExtractElement: { 5259 bool Reuse = canReuseExtract(VL, VL0, CurrentOrder); 5260 if (Reuse || !CurrentOrder.empty()) 5261 return TreeEntry::Vectorize; 5262 LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n"); 5263 return TreeEntry::NeedToGather; 5264 } 5265 case Instruction::InsertElement: { 5266 // Check that we have a buildvector and not a shuffle of 2 or more 5267 // different vectors. 5268 ValueSet SourceVectors; 5269 for (Value *V : VL) { 5270 SourceVectors.insert(cast<Instruction>(V)->getOperand(0)); 5271 assert(getInsertIndex(V) != std::nullopt && 5272 "Non-constant or undef index?"); 5273 } 5274 5275 if (count_if(VL, [&SourceVectors](Value *V) { 5276 return !SourceVectors.contains(V); 5277 }) >= 2) { 5278 // Found 2nd source vector - cancel. 5279 LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with " 5280 "different source vectors.\n"); 5281 return TreeEntry::NeedToGather; 5282 } 5283 5284 return TreeEntry::Vectorize; 5285 } 5286 case Instruction::Load: { 5287 // Check that a vectorized load would load the same memory as a scalar 5288 // load. For example, we don't want to vectorize loads that are smaller 5289 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 5290 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 5291 // from such a struct, we read/write packed bits disagreeing with the 5292 // unvectorized version. 
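    // (Illustrative: an i2 has a type size of 2 bits but an alloc size of 8
    // bits, so a vector load of i2s would not touch the same bytes as the
    // scalar loads do; the debug output below reports this case when such
    // loads end up being gathered.)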
5293 switch (canVectorizeLoads(VL, VL0, *TTI, *DL, *SE, *LI, *TLI, CurrentOrder, 5294 PointerOps)) { 5295 case LoadsState::Vectorize: 5296 return TreeEntry::Vectorize; 5297 case LoadsState::ScatterVectorize: 5298 return TreeEntry::ScatterVectorize; 5299 case LoadsState::PossibleStridedVectorize: 5300 return TreeEntry::PossibleStridedVectorize; 5301 case LoadsState::Gather: 5302 #ifndef NDEBUG 5303 Type *ScalarTy = VL0->getType(); 5304 if (DL->getTypeSizeInBits(ScalarTy) != 5305 DL->getTypeAllocSizeInBits(ScalarTy)) 5306 LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n"); 5307 else if (any_of(VL, 5308 [](Value *V) { return !cast<LoadInst>(V)->isSimple(); })) 5309 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n"); 5310 else 5311 LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n"); 5312 #endif // NDEBUG 5313 return TreeEntry::NeedToGather; 5314 } 5315 llvm_unreachable("Unexpected state of loads"); 5316 } 5317 case Instruction::ZExt: 5318 case Instruction::SExt: 5319 case Instruction::FPToUI: 5320 case Instruction::FPToSI: 5321 case Instruction::FPExt: 5322 case Instruction::PtrToInt: 5323 case Instruction::IntToPtr: 5324 case Instruction::SIToFP: 5325 case Instruction::UIToFP: 5326 case Instruction::Trunc: 5327 case Instruction::FPTrunc: 5328 case Instruction::BitCast: { 5329 Type *SrcTy = VL0->getOperand(0)->getType(); 5330 for (Value *V : VL) { 5331 Type *Ty = cast<Instruction>(V)->getOperand(0)->getType(); 5332 if (Ty != SrcTy || !isValidElementType(Ty)) { 5333 LLVM_DEBUG( 5334 dbgs() << "SLP: Gathering casts with different src types.\n"); 5335 return TreeEntry::NeedToGather; 5336 } 5337 } 5338 return TreeEntry::Vectorize; 5339 } 5340 case Instruction::ICmp: 5341 case Instruction::FCmp: { 5342 // Check that all of the compares have the same predicate. 5343 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 5344 CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0); 5345 Type *ComparedTy = VL0->getOperand(0)->getType(); 5346 for (Value *V : VL) { 5347 CmpInst *Cmp = cast<CmpInst>(V); 5348 if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) || 5349 Cmp->getOperand(0)->getType() != ComparedTy) { 5350 LLVM_DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n"); 5351 return TreeEntry::NeedToGather; 5352 } 5353 } 5354 return TreeEntry::Vectorize; 5355 } 5356 case Instruction::Select: 5357 case Instruction::FNeg: 5358 case Instruction::Add: 5359 case Instruction::FAdd: 5360 case Instruction::Sub: 5361 case Instruction::FSub: 5362 case Instruction::Mul: 5363 case Instruction::FMul: 5364 case Instruction::UDiv: 5365 case Instruction::SDiv: 5366 case Instruction::FDiv: 5367 case Instruction::URem: 5368 case Instruction::SRem: 5369 case Instruction::FRem: 5370 case Instruction::Shl: 5371 case Instruction::LShr: 5372 case Instruction::AShr: 5373 case Instruction::And: 5374 case Instruction::Or: 5375 case Instruction::Xor: 5376 return TreeEntry::Vectorize; 5377 case Instruction::GetElementPtr: { 5378 // We don't combine GEPs with complicated (nested) indexing. 5379 for (Value *V : VL) { 5380 auto *I = dyn_cast<GetElementPtrInst>(V); 5381 if (!I) 5382 continue; 5383 if (I->getNumOperands() != 2) { 5384 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n"); 5385 return TreeEntry::NeedToGather; 5386 } 5387 } 5388 5389 // We can't combine several GEPs into one vector if they operate on 5390 // different types. 
5391 Type *Ty0 = cast<GEPOperator>(VL0)->getSourceElementType(); 5392 for (Value *V : VL) { 5393 auto *GEP = dyn_cast<GEPOperator>(V); 5394 if (!GEP) 5395 continue; 5396 Type *CurTy = GEP->getSourceElementType(); 5397 if (Ty0 != CurTy) { 5398 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n"); 5399 return TreeEntry::NeedToGather; 5400 } 5401 } 5402 5403 // We don't combine GEPs with non-constant indexes. 5404 Type *Ty1 = VL0->getOperand(1)->getType(); 5405 for (Value *V : VL) { 5406 auto *I = dyn_cast<GetElementPtrInst>(V); 5407 if (!I) 5408 continue; 5409 auto *Op = I->getOperand(1); 5410 if ((!IsScatterVectorizeUserTE && !isa<ConstantInt>(Op)) || 5411 (Op->getType() != Ty1 && 5412 ((IsScatterVectorizeUserTE && !isa<ConstantInt>(Op)) || 5413 Op->getType()->getScalarSizeInBits() > 5414 DL->getIndexSizeInBits( 5415 V->getType()->getPointerAddressSpace())))) { 5416 LLVM_DEBUG( 5417 dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n"); 5418 return TreeEntry::NeedToGather; 5419 } 5420 } 5421 5422 return TreeEntry::Vectorize; 5423 } 5424 case Instruction::Store: { 5425 // Check if the stores are consecutive or if we need to swizzle them. 5426 llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType(); 5427 // Avoid types that are padded when being allocated as scalars, while 5428 // being packed together in a vector (such as i1). 5429 if (DL->getTypeSizeInBits(ScalarTy) != 5430 DL->getTypeAllocSizeInBits(ScalarTy)) { 5431 LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n"); 5432 return TreeEntry::NeedToGather; 5433 } 5434 // Make sure all stores in the bundle are simple - we can't vectorize 5435 // atomic or volatile stores. 5436 for (Value *V : VL) { 5437 auto *SI = cast<StoreInst>(V); 5438 if (!SI->isSimple()) { 5439 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n"); 5440 return TreeEntry::NeedToGather; 5441 } 5442 PointerOps.push_back(SI->getPointerOperand()); 5443 } 5444 5445 // Check the order of pointer operands. 5446 if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) { 5447 Value *Ptr0; 5448 Value *PtrN; 5449 if (CurrentOrder.empty()) { 5450 Ptr0 = PointerOps.front(); 5451 PtrN = PointerOps.back(); 5452 } else { 5453 Ptr0 = PointerOps[CurrentOrder.front()]; 5454 PtrN = PointerOps[CurrentOrder.back()]; 5455 } 5456 std::optional<int> Dist = 5457 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE); 5458 // Check that the sorted pointer operands are consecutive. 5459 if (static_cast<unsigned>(*Dist) == VL.size() - 1) 5460 return TreeEntry::Vectorize; 5461 } 5462 5463 LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n"); 5464 return TreeEntry::NeedToGather; 5465 } 5466 case Instruction::Call: { 5467 // Check if the calls are all to the same vectorizable intrinsic or 5468 // library function. 
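    // (Illustrative: four calls to llvm.fabs.f32 can become one call to
    // llvm.fabs.v4f32, provided any scalar-only operands and operand bundles
    // match across lanes, which is what the checks below verify.)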
5469 CallInst *CI = cast<CallInst>(VL0); 5470 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 5471 5472 VFShape Shape = VFShape::get( 5473 CI->getFunctionType(), 5474 ElementCount::getFixed(static_cast<unsigned int>(VL.size())), 5475 false /*HasGlobalPred*/); 5476 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 5477 5478 if (!VecFunc && !isTriviallyVectorizable(ID)) { 5479 LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n"); 5480 return TreeEntry::NeedToGather; 5481 } 5482 Function *F = CI->getCalledFunction(); 5483 unsigned NumArgs = CI->arg_size(); 5484 SmallVector<Value *, 4> ScalarArgs(NumArgs, nullptr); 5485 for (unsigned J = 0; J != NumArgs; ++J) 5486 if (isVectorIntrinsicWithScalarOpAtArg(ID, J)) 5487 ScalarArgs[J] = CI->getArgOperand(J); 5488 for (Value *V : VL) { 5489 CallInst *CI2 = dyn_cast<CallInst>(V); 5490 if (!CI2 || CI2->getCalledFunction() != F || 5491 getVectorIntrinsicIDForCall(CI2, TLI) != ID || 5492 (VecFunc && 5493 VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) || 5494 !CI->hasIdenticalOperandBundleSchema(*CI2)) { 5495 LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V 5496 << "\n"); 5497 return TreeEntry::NeedToGather; 5498 } 5499 // Some intrinsics have scalar arguments and should be same in order for 5500 // them to be vectorized. 5501 for (unsigned J = 0; J != NumArgs; ++J) { 5502 if (isVectorIntrinsicWithScalarOpAtArg(ID, J)) { 5503 Value *A1J = CI2->getArgOperand(J); 5504 if (ScalarArgs[J] != A1J) { 5505 LLVM_DEBUG(dbgs() 5506 << "SLP: mismatched arguments in call:" << *CI 5507 << " argument " << ScalarArgs[J] << "!=" << A1J << "\n"); 5508 return TreeEntry::NeedToGather; 5509 } 5510 } 5511 } 5512 // Verify that the bundle operands are identical between the two calls. 5513 if (CI->hasOperandBundles() && 5514 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(), 5515 CI->op_begin() + CI->getBundleOperandsEndIndex(), 5516 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) { 5517 LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI 5518 << "!=" << *V << '\n'); 5519 return TreeEntry::NeedToGather; 5520 } 5521 } 5522 5523 return TreeEntry::Vectorize; 5524 } 5525 case Instruction::ShuffleVector: { 5526 // If this is not an alternate sequence of opcode like add-sub 5527 // then do not vectorize this instruction. 5528 if (!S.isAltShuffle()) { 5529 LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n"); 5530 return TreeEntry::NeedToGather; 5531 } 5532 return TreeEntry::Vectorize; 5533 } 5534 default: 5535 LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n"); 5536 return TreeEntry::NeedToGather; 5537 } 5538 } 5539 5540 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, 5541 const EdgeInfo &UserTreeIdx) { 5542 assert((allConstant(VL) || allSameType(VL)) && "Invalid types!"); 5543 5544 SmallVector<int> ReuseShuffleIndicies; 5545 SmallVector<Value *> UniqueValues; 5546 SmallVector<Value *> NonUniqueValueVL; 5547 auto TryToFindDuplicates = [&](const InstructionsState &S, 5548 bool DoNotFail = false) { 5549 // Check that every instruction appears once in this bundle. 5550 DenseMap<Value *, unsigned> UniquePositions(VL.size()); 5551 for (Value *V : VL) { 5552 if (isConstant(V)) { 5553 ReuseShuffleIndicies.emplace_back( 5554 isa<UndefValue>(V) ? 
PoisonMaskElem : UniqueValues.size()); 5555 UniqueValues.emplace_back(V); 5556 continue; 5557 } 5558 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 5559 ReuseShuffleIndicies.emplace_back(Res.first->second); 5560 if (Res.second) 5561 UniqueValues.emplace_back(V); 5562 } 5563 size_t NumUniqueScalarValues = UniqueValues.size(); 5564 if (NumUniqueScalarValues == VL.size()) { 5565 ReuseShuffleIndicies.clear(); 5566 } else { 5567 LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n"); 5568 if (NumUniqueScalarValues <= 1 || 5569 (UniquePositions.size() == 1 && all_of(UniqueValues, 5570 [](Value *V) { 5571 return isa<UndefValue>(V) || 5572 !isConstant(V); 5573 })) || 5574 !llvm::has_single_bit<uint32_t>(NumUniqueScalarValues)) { 5575 if (DoNotFail && UniquePositions.size() > 1 && 5576 NumUniqueScalarValues > 1 && S.MainOp->isSafeToRemove() && 5577 all_of(UniqueValues, [=](Value *V) { 5578 return isa<ExtractElementInst>(V) || 5579 areAllUsersVectorized(cast<Instruction>(V), 5580 UserIgnoreList); 5581 })) { 5582 unsigned PWSz = PowerOf2Ceil(UniqueValues.size()); 5583 if (PWSz == VL.size()) { 5584 ReuseShuffleIndicies.clear(); 5585 } else { 5586 NonUniqueValueVL.assign(UniqueValues.begin(), UniqueValues.end()); 5587 NonUniqueValueVL.append(PWSz - UniqueValues.size(), 5588 UniqueValues.back()); 5589 VL = NonUniqueValueVL; 5590 } 5591 return true; 5592 } 5593 LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); 5594 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5595 return false; 5596 } 5597 VL = UniqueValues; 5598 } 5599 return true; 5600 }; 5601 5602 InstructionsState S = getSameOpcode(VL, *TLI); 5603 5604 // Don't vectorize ephemeral values. 5605 if (!EphValues.empty()) { 5606 for (Value *V : VL) { 5607 if (EphValues.count(V)) { 5608 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 5609 << ") is ephemeral.\n"); 5610 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5611 return; 5612 } 5613 } 5614 } 5615 5616 // Gather if we hit the RecursionMaxDepth, unless this is a load (or z/sext of 5617 // a load), in which case peek through to include it in the tree, without 5618 // ballooning over-budget. 5619 if (Depth >= RecursionMaxDepth && 5620 !(S.MainOp && isa<Instruction>(S.MainOp) && S.MainOp == S.AltOp && 5621 VL.size() >= 4 && 5622 (match(S.MainOp, m_Load(m_Value())) || all_of(VL, [&S](const Value *I) { 5623 return match(I, 5624 m_OneUse(m_ZExtOrSExt(m_OneUse(m_Load(m_Value()))))) && 5625 cast<Instruction>(I)->getOpcode() == 5626 cast<Instruction>(S.MainOp)->getOpcode(); 5627 })))) { 5628 LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n"); 5629 if (TryToFindDuplicates(S)) 5630 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5631 ReuseShuffleIndicies); 5632 return; 5633 } 5634 5635 // Don't handle scalable vectors 5636 if (S.getOpcode() == Instruction::ExtractElement && 5637 isa<ScalableVectorType>( 5638 cast<ExtractElementInst>(S.OpValue)->getVectorOperandType())) { 5639 LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n"); 5640 if (TryToFindDuplicates(S)) 5641 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5642 ReuseShuffleIndicies); 5643 return; 5644 } 5645 5646 // Don't handle vectors. 
5647 if (S.OpValue->getType()->isVectorTy() && 5648 !isa<InsertElementInst>(S.OpValue)) { 5649 LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n"); 5650 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5651 return; 5652 } 5653 5654 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) 5655 if (SI->getValueOperand()->getType()->isVectorTy()) { 5656 LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n"); 5657 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5658 return; 5659 } 5660 5661 // If all of the operands are identical or constant we have a simple solution. 5662 // If we deal with insert/extract instructions, they all must have constant 5663 // indices, otherwise we should gather them, not try to vectorize. 5664 // If alternate op node with 2 elements with gathered operands - do not 5665 // vectorize. 5666 auto &&NotProfitableForVectorization = [&S, this, 5667 Depth](ArrayRef<Value *> VL) { 5668 if (!S.getOpcode() || !S.isAltShuffle() || VL.size() > 2) 5669 return false; 5670 if (VectorizableTree.size() < MinTreeSize) 5671 return false; 5672 if (Depth >= RecursionMaxDepth - 1) 5673 return true; 5674 // Check if all operands are extracts, part of vector node or can build a 5675 // regular vectorize node. 5676 SmallVector<unsigned, 2> InstsCount(VL.size(), 0); 5677 for (Value *V : VL) { 5678 auto *I = cast<Instruction>(V); 5679 InstsCount.push_back(count_if(I->operand_values(), [](Value *Op) { 5680 return isa<Instruction>(Op) || isVectorLikeInstWithConstOps(Op); 5681 })); 5682 } 5683 bool IsCommutative = isCommutative(S.MainOp) || isCommutative(S.AltOp); 5684 if ((IsCommutative && 5685 std::accumulate(InstsCount.begin(), InstsCount.end(), 0) < 2) || 5686 (!IsCommutative && 5687 all_of(InstsCount, [](unsigned ICnt) { return ICnt < 2; }))) 5688 return true; 5689 assert(VL.size() == 2 && "Expected only 2 alternate op instructions."); 5690 SmallVector<SmallVector<std::pair<Value *, Value *>>> Candidates; 5691 auto *I1 = cast<Instruction>(VL.front()); 5692 auto *I2 = cast<Instruction>(VL.back()); 5693 for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op) 5694 Candidates.emplace_back().emplace_back(I1->getOperand(Op), 5695 I2->getOperand(Op)); 5696 if (static_cast<unsigned>(count_if( 5697 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) { 5698 return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat); 5699 })) >= S.MainOp->getNumOperands() / 2) 5700 return false; 5701 if (S.MainOp->getNumOperands() > 2) 5702 return true; 5703 if (IsCommutative) { 5704 // Check permuted operands. 
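      // (Illustrative: for a commutative pair like {a + b, c + d} this also
      // tries matching operand 0 of the first instruction against operand 1
      // of the second and vice versa, since the scalars may simply be written
      // with their operands swapped.)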
5705 Candidates.clear(); 5706 for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op) 5707 Candidates.emplace_back().emplace_back(I1->getOperand(Op), 5708 I2->getOperand((Op + 1) % E)); 5709 if (any_of( 5710 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) { 5711 return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat); 5712 })) 5713 return false; 5714 } 5715 return true; 5716 }; 5717 SmallVector<unsigned> SortedIndices; 5718 BasicBlock *BB = nullptr; 5719 bool IsScatterVectorizeUserTE = 5720 UserTreeIdx.UserTE && 5721 (UserTreeIdx.UserTE->State == TreeEntry::ScatterVectorize || 5722 UserTreeIdx.UserTE->State == TreeEntry::PossibleStridedVectorize); 5723 bool AreAllSameInsts = 5724 (S.getOpcode() && allSameBlock(VL)) || 5725 (S.OpValue->getType()->isPointerTy() && IsScatterVectorizeUserTE && 5726 VL.size() > 2 && 5727 all_of(VL, 5728 [&BB](Value *V) { 5729 auto *I = dyn_cast<GetElementPtrInst>(V); 5730 if (!I) 5731 return doesNotNeedToBeScheduled(V); 5732 if (!BB) 5733 BB = I->getParent(); 5734 return BB == I->getParent() && I->getNumOperands() == 2; 5735 }) && 5736 BB && 5737 sortPtrAccesses(VL, UserTreeIdx.UserTE->getMainOp()->getType(), *DL, *SE, 5738 SortedIndices)); 5739 if (!AreAllSameInsts || allConstant(VL) || isSplat(VL) || 5740 (isa<InsertElementInst, ExtractValueInst, ExtractElementInst>( 5741 S.OpValue) && 5742 !all_of(VL, isVectorLikeInstWithConstOps)) || 5743 NotProfitableForVectorization(VL)) { 5744 LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O, small shuffle. \n"); 5745 if (TryToFindDuplicates(S)) 5746 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5747 ReuseShuffleIndicies); 5748 return; 5749 } 5750 5751 // We now know that this is a vector of instructions of the same type from 5752 // the same block. 5753 5754 // Check if this is a duplicate of another entry. 5755 if (TreeEntry *E = getTreeEntry(S.OpValue)) { 5756 LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n"); 5757 if (!E->isSame(VL)) { 5758 auto It = MultiNodeScalars.find(S.OpValue); 5759 if (It != MultiNodeScalars.end()) { 5760 auto *TEIt = find_if(It->getSecond(), 5761 [&](TreeEntry *ME) { return ME->isSame(VL); }); 5762 if (TEIt != It->getSecond().end()) 5763 E = *TEIt; 5764 else 5765 E = nullptr; 5766 } else { 5767 E = nullptr; 5768 } 5769 } 5770 if (!E) { 5771 if (!doesNotNeedToBeScheduled(S.OpValue)) { 5772 LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n"); 5773 if (TryToFindDuplicates(S)) 5774 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5775 ReuseShuffleIndicies); 5776 return; 5777 } 5778 } else { 5779 // Record the reuse of the tree node. FIXME, currently this is only used 5780 // to properly draw the graph rather than for the actual vectorization. 5781 E->UserTreeIndices.push_back(UserTreeIdx); 5782 LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue 5783 << ".\n"); 5784 return; 5785 } 5786 } 5787 5788 // Check that none of the instructions in the bundle are already in the tree. 5789 for (Value *V : VL) { 5790 if ((!IsScatterVectorizeUserTE && !isa<Instruction>(V)) || 5791 doesNotNeedToBeScheduled(V)) 5792 continue; 5793 if (getTreeEntry(V)) { 5794 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 5795 << ") is already in tree.\n"); 5796 if (TryToFindDuplicates(S)) 5797 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5798 ReuseShuffleIndicies); 5799 return; 5800 } 5801 } 5802 5803 // The reduction nodes (stored in UserIgnoreList) also should stay scalar. 
5804 if (UserIgnoreList && !UserIgnoreList->empty()) { 5805 for (Value *V : VL) { 5806 if (UserIgnoreList && UserIgnoreList->contains(V)) { 5807 LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n"); 5808 if (TryToFindDuplicates(S)) 5809 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5810 ReuseShuffleIndicies); 5811 return; 5812 } 5813 } 5814 } 5815 5816 // Special processing for sorted pointers for ScatterVectorize node with 5817 // constant indeces only. 5818 if (AreAllSameInsts && UserTreeIdx.UserTE && 5819 (UserTreeIdx.UserTE->State == TreeEntry::ScatterVectorize || 5820 UserTreeIdx.UserTE->State == TreeEntry::PossibleStridedVectorize) && 5821 !(S.getOpcode() && allSameBlock(VL))) { 5822 assert(S.OpValue->getType()->isPointerTy() && 5823 count_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); }) >= 5824 2 && 5825 "Expected pointers only."); 5826 // Reset S to make it GetElementPtr kind of node. 5827 const auto *It = find_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); }); 5828 assert(It != VL.end() && "Expected at least one GEP."); 5829 S = getSameOpcode(*It, *TLI); 5830 } 5831 5832 // Check that all of the users of the scalars that we want to vectorize are 5833 // schedulable. 5834 auto *VL0 = cast<Instruction>(S.OpValue); 5835 BB = VL0->getParent(); 5836 5837 if (!DT->isReachableFromEntry(BB)) { 5838 // Don't go into unreachable blocks. They may contain instructions with 5839 // dependency cycles which confuse the final scheduling. 5840 LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n"); 5841 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5842 return; 5843 } 5844 5845 // Don't go into catchswitch blocks, which can happen with PHIs. 5846 // Such blocks can only have PHIs and the catchswitch. There is no 5847 // place to insert a shuffle if we need to, so just avoid that issue. 5848 if (isa<CatchSwitchInst>(BB->getTerminator())) { 5849 LLVM_DEBUG(dbgs() << "SLP: bundle in catchswitch block.\n"); 5850 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5851 return; 5852 } 5853 5854 // Check that every instruction appears once in this bundle. 5855 if (!TryToFindDuplicates(S, /*DoNotFail=*/true)) 5856 return; 5857 5858 // Perform specific checks for each particular instruction kind. 5859 OrdersType CurrentOrder; 5860 SmallVector<Value *> PointerOps; 5861 TreeEntry::EntryState State = getScalarsVectorizationState( 5862 S, VL, IsScatterVectorizeUserTE, CurrentOrder, PointerOps); 5863 if (State == TreeEntry::NeedToGather) { 5864 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5865 ReuseShuffleIndicies); 5866 return; 5867 } 5868 5869 auto &BSRef = BlocksSchedules[BB]; 5870 if (!BSRef) 5871 BSRef = std::make_unique<BlockScheduling>(BB); 5872 5873 BlockScheduling &BS = *BSRef; 5874 5875 std::optional<ScheduleData *> Bundle = 5876 BS.tryScheduleBundle(UniqueValues, this, S); 5877 #ifdef EXPENSIVE_CHECKS 5878 // Make sure we didn't break any internal invariants 5879 BS.verify(); 5880 #endif 5881 if (!Bundle) { 5882 LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n"); 5883 assert((!BS.getScheduleData(VL0) || 5884 !BS.getScheduleData(VL0)->isPartOfBundle()) && 5885 "tryScheduleBundle should cancelScheduling on failure"); 5886 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5887 ReuseShuffleIndicies); 5888 return; 5889 } 5890 LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); 5891 5892 unsigned ShuffleOrOp = S.isAltShuffle() ? 
5893 (unsigned) Instruction::ShuffleVector : S.getOpcode(); 5894 switch (ShuffleOrOp) { 5895 case Instruction::PHI: { 5896 auto *PH = cast<PHINode>(VL0); 5897 5898 TreeEntry *TE = 5899 newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies); 5900 LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); 5901 5902 // Keeps the reordered operands to avoid code duplication. 5903 SmallVector<ValueList, 2> OperandsVec; 5904 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) { 5905 if (!DT->isReachableFromEntry(PH->getIncomingBlock(I))) { 5906 ValueList Operands(VL.size(), PoisonValue::get(PH->getType())); 5907 TE->setOperand(I, Operands); 5908 OperandsVec.push_back(Operands); 5909 continue; 5910 } 5911 ValueList Operands; 5912 // Prepare the operand vector. 5913 for (Value *V : VL) 5914 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock( 5915 PH->getIncomingBlock(I))); 5916 TE->setOperand(I, Operands); 5917 OperandsVec.push_back(Operands); 5918 } 5919 for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx) 5920 buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx}); 5921 return; 5922 } 5923 case Instruction::ExtractValue: 5924 case Instruction::ExtractElement: { 5925 if (CurrentOrder.empty()) { 5926 LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n"); 5927 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 5928 ReuseShuffleIndicies); 5929 // This is a special case, as it does not gather, but at the same time 5930 // we are not extending buildTree_rec() towards the operands. 5931 ValueList Op0; 5932 Op0.assign(VL.size(), VL0->getOperand(0)); 5933 VectorizableTree.back()->setOperand(0, Op0); 5934 return; 5935 } 5936 LLVM_DEBUG({ 5937 dbgs() << "SLP: Reusing or shuffling of reordered extract sequence " 5938 "with order"; 5939 for (unsigned Idx : CurrentOrder) 5940 dbgs() << " " << Idx; 5941 dbgs() << "\n"; 5942 }); 5943 fixupOrderingIndices(CurrentOrder); 5944 // Insert new order with initial value 0, if it does not exist, 5945 // otherwise return the iterator to the existing one. 5946 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 5947 ReuseShuffleIndicies, CurrentOrder); 5948 // This is a special case, as it does not gather, but at the same time 5949 // we are not extending buildTree_rec() towards the operands. 
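// For example (illustrative), for a bundle of extracts from one source vector
// \code
//   %x0 = extractelement <4 x float> %v, i32 0
//   %x1 = extractelement <4 x float> %v, i32 1
// \endcode
// operand 0 below is simply VL.size() copies of %v, so there is nothing useful
// to recurse into.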
5950 ValueList Op0; 5951 Op0.assign(VL.size(), VL0->getOperand(0)); 5952 VectorizableTree.back()->setOperand(0, Op0); 5953 return; 5954 } 5955 case Instruction::InsertElement: { 5956 assert(ReuseShuffleIndicies.empty() && "All inserts should be unique"); 5957 5958 auto OrdCompare = [](const std::pair<int, int> &P1, 5959 const std::pair<int, int> &P2) { 5960 return P1.first > P2.first; 5961 }; 5962 PriorityQueue<std::pair<int, int>, SmallVector<std::pair<int, int>>, 5963 decltype(OrdCompare)> 5964 Indices(OrdCompare); 5965 for (int I = 0, E = VL.size(); I < E; ++I) { 5966 unsigned Idx = *getInsertIndex(VL[I]); 5967 Indices.emplace(Idx, I); 5968 } 5969 OrdersType CurrentOrder(VL.size(), VL.size()); 5970 bool IsIdentity = true; 5971 for (int I = 0, E = VL.size(); I < E; ++I) { 5972 CurrentOrder[Indices.top().second] = I; 5973 IsIdentity &= Indices.top().second == I; 5974 Indices.pop(); 5975 } 5976 if (IsIdentity) 5977 CurrentOrder.clear(); 5978 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 5979 std::nullopt, CurrentOrder); 5980 LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n"); 5981 5982 constexpr int NumOps = 2; 5983 ValueList VectorOperands[NumOps]; 5984 for (int I = 0; I < NumOps; ++I) { 5985 for (Value *V : VL) 5986 VectorOperands[I].push_back(cast<Instruction>(V)->getOperand(I)); 5987 5988 TE->setOperand(I, VectorOperands[I]); 5989 } 5990 buildTree_rec(VectorOperands[NumOps - 1], Depth + 1, {TE, NumOps - 1}); 5991 return; 5992 } 5993 case Instruction::Load: { 5994 // Check that a vectorized load would load the same memory as a scalar 5995 // load. For example, we don't want to vectorize loads that are smaller 5996 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 5997 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 5998 // from such a struct, we read/write packed bits disagreeing with the 5999 // unvectorized version. 6000 TreeEntry *TE = nullptr; 6001 fixupOrderingIndices(CurrentOrder); 6002 switch (State) { 6003 case TreeEntry::Vectorize: 6004 if (CurrentOrder.empty()) { 6005 // Original loads are consecutive and does not require reordering. 6006 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6007 ReuseShuffleIndicies); 6008 LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n"); 6009 } else { 6010 // Need to reorder. 6011 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6012 ReuseShuffleIndicies, CurrentOrder); 6013 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n"); 6014 } 6015 TE->setOperandsInOrder(); 6016 break; 6017 case TreeEntry::PossibleStridedVectorize: 6018 // Vectorizing non-consecutive loads with `llvm.masked.gather`. 6019 if (CurrentOrder.empty()) { 6020 TE = newTreeEntry(VL, TreeEntry::PossibleStridedVectorize, Bundle, S, 6021 UserTreeIdx, ReuseShuffleIndicies); 6022 } else { 6023 TE = newTreeEntry(VL, TreeEntry::PossibleStridedVectorize, Bundle, S, 6024 UserTreeIdx, ReuseShuffleIndicies, CurrentOrder); 6025 } 6026 TE->setOperandsInOrder(); 6027 buildTree_rec(PointerOps, Depth + 1, {TE, 0}); 6028 LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n"); 6029 break; 6030 case TreeEntry::ScatterVectorize: 6031 // Vectorizing non-consecutive loads with `llvm.masked.gather`. 
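// Schematically (an illustrative example, not emitted verbatim by this code
// path), such a bundle ends up as something like
// \code
//   %ptrs = getelementptr inbounds i32, ptr %base, <4 x i64> <i64 0, i64 7, i64 3, i64 42>
//   %vals = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4,
//               <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> poison)
// \endcode
// with the pointer bundle handled by the recursive buildTree_rec call below.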
6032 TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S, 6033 UserTreeIdx, ReuseShuffleIndicies); 6034 TE->setOperandsInOrder(); 6035 buildTree_rec(PointerOps, Depth + 1, {TE, 0}); 6036 LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n"); 6037 break; 6038 case TreeEntry::NeedToGather: 6039 llvm_unreachable("Unexpected loads state."); 6040 } 6041 return; 6042 } 6043 case Instruction::ZExt: 6044 case Instruction::SExt: 6045 case Instruction::FPToUI: 6046 case Instruction::FPToSI: 6047 case Instruction::FPExt: 6048 case Instruction::PtrToInt: 6049 case Instruction::IntToPtr: 6050 case Instruction::SIToFP: 6051 case Instruction::UIToFP: 6052 case Instruction::Trunc: 6053 case Instruction::FPTrunc: 6054 case Instruction::BitCast: { 6055 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6056 ReuseShuffleIndicies); 6057 LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n"); 6058 6059 TE->setOperandsInOrder(); 6060 for (unsigned I : seq<unsigned>(0, VL0->getNumOperands())) { 6061 ValueList Operands; 6062 // Prepare the operand vector. 6063 for (Value *V : VL) 6064 Operands.push_back(cast<Instruction>(V)->getOperand(I)); 6065 6066 buildTree_rec(Operands, Depth + 1, {TE, I}); 6067 } 6068 return; 6069 } 6070 case Instruction::ICmp: 6071 case Instruction::FCmp: { 6072 // Check that all of the compares have the same predicate. 6073 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 6074 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6075 ReuseShuffleIndicies); 6076 LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n"); 6077 6078 ValueList Left, Right; 6079 if (cast<CmpInst>(VL0)->isCommutative()) { 6080 // Commutative predicate - collect + sort operands of the instructions 6081 // so that each side is more likely to have the same opcode. 6082 assert(P0 == CmpInst::getSwappedPredicate(P0) && 6083 "Commutative Predicate mismatch"); 6084 reorderInputsAccordingToOpcode(VL, Left, Right, *TLI, *DL, *SE, *this); 6085 } else { 6086 // Collect operands - commute if it uses the swapped predicate. 6087 for (Value *V : VL) { 6088 auto *Cmp = cast<CmpInst>(V); 6089 Value *LHS = Cmp->getOperand(0); 6090 Value *RHS = Cmp->getOperand(1); 6091 if (Cmp->getPredicate() != P0) 6092 std::swap(LHS, RHS); 6093 Left.push_back(LHS); 6094 Right.push_back(RHS); 6095 } 6096 } 6097 TE->setOperand(0, Left); 6098 TE->setOperand(1, Right); 6099 buildTree_rec(Left, Depth + 1, {TE, 0}); 6100 buildTree_rec(Right, Depth + 1, {TE, 1}); 6101 return; 6102 } 6103 case Instruction::Select: 6104 case Instruction::FNeg: 6105 case Instruction::Add: 6106 case Instruction::FAdd: 6107 case Instruction::Sub: 6108 case Instruction::FSub: 6109 case Instruction::Mul: 6110 case Instruction::FMul: 6111 case Instruction::UDiv: 6112 case Instruction::SDiv: 6113 case Instruction::FDiv: 6114 case Instruction::URem: 6115 case Instruction::SRem: 6116 case Instruction::FRem: 6117 case Instruction::Shl: 6118 case Instruction::LShr: 6119 case Instruction::AShr: 6120 case Instruction::And: 6121 case Instruction::Or: 6122 case Instruction::Xor: { 6123 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6124 ReuseShuffleIndicies); 6125 LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n"); 6126 6127 // Sort operands of the instructions so that each side is more likely to 6128 // have the same opcode. 
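// For example (illustrative), given two commutative adds
// \code
//   %a0 = add i32 %x0, %m0   ; %m0 = mul ...
//   %a1 = add i32 %m1, %x1   ; %m1 = mul ...
// \endcode
// reorderInputsAccordingToOpcode may put {%m0, %m1} on one side and
// {%x0, %x1} on the other, so the mul results form a single operand bundle
// instead of being split across both operands.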
6129 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) { 6130 ValueList Left, Right; 6131 reorderInputsAccordingToOpcode(VL, Left, Right, *TLI, *DL, *SE, *this); 6132 TE->setOperand(0, Left); 6133 TE->setOperand(1, Right); 6134 buildTree_rec(Left, Depth + 1, {TE, 0}); 6135 buildTree_rec(Right, Depth + 1, {TE, 1}); 6136 return; 6137 } 6138 6139 TE->setOperandsInOrder(); 6140 for (unsigned I : seq<unsigned>(0, VL0->getNumOperands())) { 6141 ValueList Operands; 6142 // Prepare the operand vector. 6143 for (Value *V : VL) 6144 Operands.push_back(cast<Instruction>(V)->getOperand(I)); 6145 6146 buildTree_rec(Operands, Depth + 1, {TE, I}); 6147 } 6148 return; 6149 } 6150 case Instruction::GetElementPtr: { 6151 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6152 ReuseShuffleIndicies); 6153 LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n"); 6154 SmallVector<ValueList, 2> Operands(2); 6155 // Prepare the operand vector for pointer operands. 6156 for (Value *V : VL) { 6157 auto *GEP = dyn_cast<GetElementPtrInst>(V); 6158 if (!GEP) { 6159 Operands.front().push_back(V); 6160 continue; 6161 } 6162 Operands.front().push_back(GEP->getPointerOperand()); 6163 } 6164 TE->setOperand(0, Operands.front()); 6165 // Need to cast all indices to the same type before vectorization to 6166 // avoid crash. 6167 // Required to be able to find correct matches between different gather 6168 // nodes and reuse the vectorized values rather than trying to gather them 6169 // again. 6170 int IndexIdx = 1; 6171 Type *VL0Ty = VL0->getOperand(IndexIdx)->getType(); 6172 Type *Ty = all_of(VL, 6173 [VL0Ty, IndexIdx](Value *V) { 6174 auto *GEP = dyn_cast<GetElementPtrInst>(V); 6175 if (!GEP) 6176 return true; 6177 return VL0Ty == GEP->getOperand(IndexIdx)->getType(); 6178 }) 6179 ? VL0Ty 6180 : DL->getIndexType(cast<GetElementPtrInst>(VL0) 6181 ->getPointerOperandType() 6182 ->getScalarType()); 6183 // Prepare the operand vector. 6184 for (Value *V : VL) { 6185 auto *I = dyn_cast<GetElementPtrInst>(V); 6186 if (!I) { 6187 Operands.back().push_back( 6188 ConstantInt::get(Ty, 0, /*isSigned=*/false)); 6189 continue; 6190 } 6191 auto *Op = I->getOperand(IndexIdx); 6192 auto *CI = dyn_cast<ConstantInt>(Op); 6193 if (!CI) 6194 Operands.back().push_back(Op); 6195 else 6196 Operands.back().push_back(ConstantFoldIntegerCast( 6197 CI, Ty, CI->getValue().isSignBitSet(), *DL)); 6198 } 6199 TE->setOperand(IndexIdx, Operands.back()); 6200 6201 for (unsigned I = 0, Ops = Operands.size(); I < Ops; ++I) 6202 buildTree_rec(Operands[I], Depth + 1, {TE, I}); 6203 return; 6204 } 6205 case Instruction::Store: { 6206 // Check if the stores are consecutive or if we need to swizzle them. 6207 ValueList Operands(VL.size()); 6208 auto *OIter = Operands.begin(); 6209 for (Value *V : VL) { 6210 auto *SI = cast<StoreInst>(V); 6211 *OIter = SI->getValueOperand(); 6212 ++OIter; 6213 } 6214 // Check that the sorted pointer operands are consecutive. 6215 if (CurrentOrder.empty()) { 6216 // Original stores are consecutive and does not require reordering. 
6217 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6218 ReuseShuffleIndicies); 6219 TE->setOperandsInOrder(); 6220 buildTree_rec(Operands, Depth + 1, {TE, 0}); 6221 LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n"); 6222 } else { 6223 fixupOrderingIndices(CurrentOrder); 6224 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6225 ReuseShuffleIndicies, CurrentOrder); 6226 TE->setOperandsInOrder(); 6227 buildTree_rec(Operands, Depth + 1, {TE, 0}); 6228 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n"); 6229 } 6230 return; 6231 } 6232 case Instruction::Call: { 6233 // Check if the calls are all to the same vectorizable intrinsic or 6234 // library function. 6235 CallInst *CI = cast<CallInst>(VL0); 6236 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6237 6238 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6239 ReuseShuffleIndicies); 6240 TE->setOperandsInOrder(); 6241 for (unsigned I : seq<unsigned>(0, CI->arg_size())) { 6242 // For scalar operands no need to create an entry since no need to 6243 // vectorize it. 6244 if (isVectorIntrinsicWithScalarOpAtArg(ID, I)) 6245 continue; 6246 ValueList Operands; 6247 // Prepare the operand vector. 6248 for (Value *V : VL) { 6249 auto *CI2 = cast<CallInst>(V); 6250 Operands.push_back(CI2->getArgOperand(I)); 6251 } 6252 buildTree_rec(Operands, Depth + 1, {TE, I}); 6253 } 6254 return; 6255 } 6256 case Instruction::ShuffleVector: { 6257 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6258 ReuseShuffleIndicies); 6259 LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n"); 6260 6261 // Reorder operands if reordering would enable vectorization. 6262 auto *CI = dyn_cast<CmpInst>(VL0); 6263 if (isa<BinaryOperator>(VL0) || CI) { 6264 ValueList Left, Right; 6265 if (!CI || all_of(VL, [](Value *V) { 6266 return cast<CmpInst>(V)->isCommutative(); 6267 })) { 6268 reorderInputsAccordingToOpcode(VL, Left, Right, *TLI, *DL, *SE, 6269 *this); 6270 } else { 6271 auto *MainCI = cast<CmpInst>(S.MainOp); 6272 auto *AltCI = cast<CmpInst>(S.AltOp); 6273 CmpInst::Predicate MainP = MainCI->getPredicate(); 6274 CmpInst::Predicate AltP = AltCI->getPredicate(); 6275 assert(MainP != AltP && 6276 "Expected different main/alternate predicates."); 6277 // Collect operands - commute if it uses the swapped predicate or 6278 // alternate operation. 6279 for (Value *V : VL) { 6280 auto *Cmp = cast<CmpInst>(V); 6281 Value *LHS = Cmp->getOperand(0); 6282 Value *RHS = Cmp->getOperand(1); 6283 6284 if (isAlternateInstruction(Cmp, MainCI, AltCI, *TLI)) { 6285 if (AltP == CmpInst::getSwappedPredicate(Cmp->getPredicate())) 6286 std::swap(LHS, RHS); 6287 } else { 6288 if (MainP == CmpInst::getSwappedPredicate(Cmp->getPredicate())) 6289 std::swap(LHS, RHS); 6290 } 6291 Left.push_back(LHS); 6292 Right.push_back(RHS); 6293 } 6294 } 6295 TE->setOperand(0, Left); 6296 TE->setOperand(1, Right); 6297 buildTree_rec(Left, Depth + 1, {TE, 0}); 6298 buildTree_rec(Right, Depth + 1, {TE, 1}); 6299 return; 6300 } 6301 6302 TE->setOperandsInOrder(); 6303 for (unsigned I : seq<unsigned>(0, VL0->getNumOperands())) { 6304 ValueList Operands; 6305 // Prepare the operand vector. 
6306 for (Value *V : VL) 6307 Operands.push_back(cast<Instruction>(V)->getOperand(I)); 6308 6309 buildTree_rec(Operands, Depth + 1, {TE, I}); 6310 } 6311 return; 6312 } 6313 default: 6314 break; 6315 } 6316 llvm_unreachable("Unexpected vectorization of the instructions."); 6317 } 6318 6319 unsigned BoUpSLP::canMapToVector(Type *T) const { 6320 unsigned N = 1; 6321 Type *EltTy = T; 6322 6323 while (isa<StructType, ArrayType, FixedVectorType>(EltTy)) { 6324 if (auto *ST = dyn_cast<StructType>(EltTy)) { 6325 // Check that struct is homogeneous. 6326 for (const auto *Ty : ST->elements()) 6327 if (Ty != *ST->element_begin()) 6328 return 0; 6329 N *= ST->getNumElements(); 6330 EltTy = *ST->element_begin(); 6331 } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) { 6332 N *= AT->getNumElements(); 6333 EltTy = AT->getElementType(); 6334 } else { 6335 auto *VT = cast<FixedVectorType>(EltTy); 6336 N *= VT->getNumElements(); 6337 EltTy = VT->getElementType(); 6338 } 6339 } 6340 6341 if (!isValidElementType(EltTy)) 6342 return 0; 6343 uint64_t VTSize = DL->getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N)); 6344 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || 6345 VTSize != DL->getTypeStoreSizeInBits(T)) 6346 return 0; 6347 return N; 6348 } 6349 6350 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 6351 SmallVectorImpl<unsigned> &CurrentOrder, 6352 bool ResizeAllowed) const { 6353 const auto *It = find_if(VL, [](Value *V) { 6354 return isa<ExtractElementInst, ExtractValueInst>(V); 6355 }); 6356 assert(It != VL.end() && "Expected at least one extract instruction."); 6357 auto *E0 = cast<Instruction>(*It); 6358 assert(all_of(VL, 6359 [](Value *V) { 6360 return isa<UndefValue, ExtractElementInst, ExtractValueInst>( 6361 V); 6362 }) && 6363 "Invalid opcode"); 6364 // Check if all of the extracts come from the same vector and from the 6365 // correct offset. 6366 Value *Vec = E0->getOperand(0); 6367 6368 CurrentOrder.clear(); 6369 6370 // We have to extract from a vector/aggregate with the same number of elements. 6371 unsigned NElts; 6372 if (E0->getOpcode() == Instruction::ExtractValue) { 6373 NElts = canMapToVector(Vec->getType()); 6374 if (!NElts) 6375 return false; 6376 // Check if load can be rewritten as load of vector. 6377 LoadInst *LI = dyn_cast<LoadInst>(Vec); 6378 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size())) 6379 return false; 6380 } else { 6381 NElts = cast<FixedVectorType>(Vec->getType())->getNumElements(); 6382 } 6383 6384 unsigned E = VL.size(); 6385 if (!ResizeAllowed && NElts != E) 6386 return false; 6387 SmallVector<int> Indices(E, PoisonMaskElem); 6388 unsigned MinIdx = NElts, MaxIdx = 0; 6389 for (auto [I, V] : enumerate(VL)) { 6390 auto *Inst = dyn_cast<Instruction>(V); 6391 if (!Inst) 6392 continue; 6393 if (Inst->getOperand(0) != Vec) 6394 return false; 6395 if (auto *EE = dyn_cast<ExtractElementInst>(Inst)) 6396 if (isa<UndefValue>(EE->getIndexOperand())) 6397 continue; 6398 std::optional<unsigned> Idx = getExtractIndex(Inst); 6399 if (!Idx) 6400 return false; 6401 const unsigned ExtIdx = *Idx; 6402 if (ExtIdx >= NElts) 6403 continue; 6404 Indices[I] = ExtIdx; 6405 if (MinIdx > ExtIdx) 6406 MinIdx = ExtIdx; 6407 if (MaxIdx < ExtIdx) 6408 MaxIdx = ExtIdx; 6409 } 6410 if (MaxIdx - MinIdx + 1 > E) 6411 return false; 6412 if (MaxIdx + 1 <= E) 6413 MinIdx = 0; 6414 6415 // Check that all of the indices extract from the correct offset. 
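// A small example of the invariant checked below (illustrative):
// \code
//   extract lanes: 1 0 3 2  ->  CurrentOrder = {1, 0, 3, 2}  (caller must reorder)
//   extract lanes: 0 1 2 3  ->  ShouldKeepOrder, CurrentOrder cleared
// \endcode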
6416 bool ShouldKeepOrder = true; 6417 // Assign to all items the initial value E so we can check if the extract 6418 // instruction index was used already. 6419 // Also, later we can check that all the indices are used and we have a 6420 // consecutive access in the extract instructions, by checking that no 6421 // element of CurrentOrder still has value E. 6422 CurrentOrder.assign(E, E); 6423 for (unsigned I = 0; I < E; ++I) { 6424 if (Indices[I] == PoisonMaskElem) 6425 continue; 6426 const unsigned ExtIdx = Indices[I] - MinIdx; 6427 if (CurrentOrder[ExtIdx] != E) { 6428 CurrentOrder.clear(); 6429 return false; 6430 } 6431 ShouldKeepOrder &= ExtIdx == I; 6432 CurrentOrder[ExtIdx] = I; 6433 } 6434 if (ShouldKeepOrder) 6435 CurrentOrder.clear(); 6436 6437 return ShouldKeepOrder; 6438 } 6439 6440 bool BoUpSLP::areAllUsersVectorized( 6441 Instruction *I, const SmallDenseSet<Value *> *VectorizedVals) const { 6442 return (I->hasOneUse() && (!VectorizedVals || VectorizedVals->contains(I))) || 6443 all_of(I->users(), [this](User *U) { 6444 return ScalarToTreeEntry.contains(U) || 6445 isVectorLikeInstWithConstOps(U) || 6446 (isa<ExtractElementInst>(U) && MustGather.contains(U)); 6447 }); 6448 } 6449 6450 static std::pair<InstructionCost, InstructionCost> 6451 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy, 6452 TargetTransformInfo *TTI, TargetLibraryInfo *TLI) { 6453 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6454 6455 // Calculate the cost of the scalar and vector calls. 6456 SmallVector<Type *, 4> VecTys; 6457 for (Use &Arg : CI->args()) 6458 VecTys.push_back( 6459 FixedVectorType::get(Arg->getType(), VecTy->getNumElements())); 6460 FastMathFlags FMF; 6461 if (auto *FPCI = dyn_cast<FPMathOperator>(CI)) 6462 FMF = FPCI->getFastMathFlags(); 6463 SmallVector<const Value *> Arguments(CI->args()); 6464 IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, VecTys, FMF, 6465 dyn_cast<IntrinsicInst>(CI)); 6466 auto IntrinsicCost = 6467 TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput); 6468 6469 auto Shape = VFShape::get(CI->getFunctionType(), 6470 ElementCount::getFixed(VecTy->getNumElements()), 6471 false /*HasGlobalPred*/); 6472 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 6473 auto LibCost = IntrinsicCost; 6474 if (!CI->isNoBuiltin() && VecFunc) { 6475 // Calculate the cost of the vector library call. 6476 // If the corresponding vector call is cheaper, return its cost.
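// For example (an illustrative mapping; the actual names depend on the
// configured vector library): a scalar call to sinf at VF 4 may map both to
// the llvm.sin.v4f32 intrinsic and to a library routine such as
// _ZGVbN4v_sinf; both costs are computed and the caller picks the cheaper
// alternative.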
6477 LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys, 6478 TTI::TCK_RecipThroughput); 6479 } 6480 return {IntrinsicCost, LibCost}; 6481 } 6482 6483 void BoUpSLP::TreeEntry::buildAltOpShuffleMask( 6484 const function_ref<bool(Instruction *)> IsAltOp, SmallVectorImpl<int> &Mask, 6485 SmallVectorImpl<Value *> *OpScalars, 6486 SmallVectorImpl<Value *> *AltScalars) const { 6487 unsigned Sz = Scalars.size(); 6488 Mask.assign(Sz, PoisonMaskElem); 6489 SmallVector<int> OrderMask; 6490 if (!ReorderIndices.empty()) 6491 inversePermutation(ReorderIndices, OrderMask); 6492 for (unsigned I = 0; I < Sz; ++I) { 6493 unsigned Idx = I; 6494 if (!ReorderIndices.empty()) 6495 Idx = OrderMask[I]; 6496 auto *OpInst = cast<Instruction>(Scalars[Idx]); 6497 if (IsAltOp(OpInst)) { 6498 Mask[I] = Sz + Idx; 6499 if (AltScalars) 6500 AltScalars->push_back(OpInst); 6501 } else { 6502 Mask[I] = Idx; 6503 if (OpScalars) 6504 OpScalars->push_back(OpInst); 6505 } 6506 } 6507 if (!ReuseShuffleIndices.empty()) { 6508 SmallVector<int> NewMask(ReuseShuffleIndices.size(), PoisonMaskElem); 6509 transform(ReuseShuffleIndices, NewMask.begin(), [&Mask](int Idx) { 6510 return Idx != PoisonMaskElem ? Mask[Idx] : PoisonMaskElem; 6511 }); 6512 Mask.swap(NewMask); 6513 } 6514 } 6515 6516 static bool isAlternateInstruction(const Instruction *I, 6517 const Instruction *MainOp, 6518 const Instruction *AltOp, 6519 const TargetLibraryInfo &TLI) { 6520 if (auto *MainCI = dyn_cast<CmpInst>(MainOp)) { 6521 auto *AltCI = cast<CmpInst>(AltOp); 6522 CmpInst::Predicate MainP = MainCI->getPredicate(); 6523 CmpInst::Predicate AltP = AltCI->getPredicate(); 6524 assert(MainP != AltP && "Expected different main/alternate predicates."); 6525 auto *CI = cast<CmpInst>(I); 6526 if (isCmpSameOrSwapped(MainCI, CI, TLI)) 6527 return false; 6528 if (isCmpSameOrSwapped(AltCI, CI, TLI)) 6529 return true; 6530 CmpInst::Predicate P = CI->getPredicate(); 6531 CmpInst::Predicate SwappedP = CmpInst::getSwappedPredicate(P); 6532 6533 assert((MainP == P || AltP == P || MainP == SwappedP || AltP == SwappedP) && 6534 "CmpInst expected to match either main or alternate predicate or " 6535 "their swap."); 6536 (void)AltP; 6537 return MainP != P && MainP != SwappedP; 6538 } 6539 return I->getOpcode() == AltOp->getOpcode(); 6540 } 6541 6542 TTI::OperandValueInfo BoUpSLP::getOperandInfo(ArrayRef<Value *> Ops) { 6543 assert(!Ops.empty()); 6544 const auto *Op0 = Ops.front(); 6545 6546 const bool IsConstant = all_of(Ops, [](Value *V) { 6547 // TODO: We should allow undef elements here 6548 return isConstant(V) && !isa<UndefValue>(V); 6549 }); 6550 const bool IsUniform = all_of(Ops, [=](Value *V) { 6551 // TODO: We should allow undef elements here 6552 return V == Op0; 6553 }); 6554 const bool IsPowerOfTwo = all_of(Ops, [](Value *V) { 6555 // TODO: We should allow undef elements here 6556 if (auto *CI = dyn_cast<ConstantInt>(V)) 6557 return CI->getValue().isPowerOf2(); 6558 return false; 6559 }); 6560 const bool IsNegatedPowerOfTwo = all_of(Ops, [](Value *V) { 6561 // TODO: We should allow undef elements here 6562 if (auto *CI = dyn_cast<ConstantInt>(V)) 6563 return CI->getValue().isNegatedPowerOf2(); 6564 return false; 6565 }); 6566 6567 TTI::OperandValueKind VK = TTI::OK_AnyValue; 6568 if (IsConstant && IsUniform) 6569 VK = TTI::OK_UniformConstantValue; 6570 else if (IsConstant) 6571 VK = TTI::OK_NonUniformConstantValue; 6572 else if (IsUniform) 6573 VK = TTI::OK_UniformValue; 6574 6575 TTI::OperandValueProperties VP = TTI::OP_None; 6576 VP = IsPowerOfTwo ? 
TTI::OP_PowerOf2 : VP; 6577 VP = IsNegatedPowerOfTwo ? TTI::OP_NegatedPowerOf2 : VP; 6578 6579 return {VK, VP}; 6580 } 6581 6582 namespace { 6583 /// The base class for shuffle instruction emission and shuffle cost estimation. 6584 class BaseShuffleAnalysis { 6585 protected: 6586 /// Checks if the mask is an identity mask. 6587 /// \param IsStrict if is true the function returns false if mask size does 6588 /// not match vector size. 6589 static bool isIdentityMask(ArrayRef<int> Mask, const FixedVectorType *VecTy, 6590 bool IsStrict) { 6591 int Limit = Mask.size(); 6592 int VF = VecTy->getNumElements(); 6593 int Index = -1; 6594 if (VF == Limit && ShuffleVectorInst::isIdentityMask(Mask, Limit)) 6595 return true; 6596 if (!IsStrict) { 6597 // Consider extract subvector starting from index 0. 6598 if (ShuffleVectorInst::isExtractSubvectorMask(Mask, VF, Index) && 6599 Index == 0) 6600 return true; 6601 // All VF-size submasks are identity (e.g. 6602 // <poison,poison,poison,poison,0,1,2,poison,poison,1,2,3> etc. for VF 4). 6603 if (Limit % VF == 0 && all_of(seq<int>(0, Limit / VF), [=](int Idx) { 6604 ArrayRef<int> Slice = Mask.slice(Idx * VF, VF); 6605 return all_of(Slice, [](int I) { return I == PoisonMaskElem; }) || 6606 ShuffleVectorInst::isIdentityMask(Slice, VF); 6607 })) 6608 return true; 6609 } 6610 return false; 6611 } 6612 6613 /// Tries to combine 2 different masks into single one. 6614 /// \param LocalVF Vector length of the permuted input vector. \p Mask may 6615 /// change the size of the vector, \p LocalVF is the original size of the 6616 /// shuffled vector. 6617 static void combineMasks(unsigned LocalVF, SmallVectorImpl<int> &Mask, 6618 ArrayRef<int> ExtMask) { 6619 unsigned VF = Mask.size(); 6620 SmallVector<int> NewMask(ExtMask.size(), PoisonMaskElem); 6621 for (int I = 0, Sz = ExtMask.size(); I < Sz; ++I) { 6622 if (ExtMask[I] == PoisonMaskElem) 6623 continue; 6624 int MaskedIdx = Mask[ExtMask[I] % VF]; 6625 NewMask[I] = 6626 MaskedIdx == PoisonMaskElem ? PoisonMaskElem : MaskedIdx % LocalVF; 6627 } 6628 Mask.swap(NewMask); 6629 } 6630 6631 /// Looks through shuffles trying to reduce final number of shuffles in the 6632 /// code. The function looks through the previously emitted shuffle 6633 /// instructions and properly mark indices in mask as undef. 6634 /// For example, given the code 6635 /// \code 6636 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0> 6637 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0> 6638 /// \endcode 6639 /// and if need to emit shuffle of %s1 and %s2 with mask <1, 0, 3, 2>, it will 6640 /// look through %s1 and %s2 and select vectors %0 and %1 with mask 6641 /// <0, 1, 2, 3> for the shuffle. 6642 /// If 2 operands are of different size, the smallest one will be resized and 6643 /// the mask recalculated properly. 6644 /// For example, given the code 6645 /// \code 6646 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0, 1, 0> 6647 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0, 1, 0> 6648 /// \endcode 6649 /// and if need to emit shuffle of %s1 and %s2 with mask <1, 0, 5, 4>, it will 6650 /// look through %s1 and %s2 and select vectors %0 and %1 with mask 6651 /// <0, 1, 2, 3> for the shuffle. 6652 /// So, it tries to transform permutations to simple vector merge, if 6653 /// possible. 6654 /// \param V The input vector which must be shuffled using the given \p Mask. 6655 /// If the better candidate is found, \p V is set to this best candidate 6656 /// vector. 6657 /// \param Mask The input mask for the shuffle. 
If the best candidate is found 6658 /// during looking-through-shuffles attempt, it is updated accordingly. 6659 /// \param SinglePermute true if the shuffle operation is originally a 6660 /// single-value-permutation. In this case the look-through-shuffles procedure 6661 /// may look for resizing shuffles as the best candidates. 6662 /// \return true if the shuffle results in the non-resizing identity shuffle 6663 /// (and thus can be ignored), false - otherwise. 6664 static bool peekThroughShuffles(Value *&V, SmallVectorImpl<int> &Mask, 6665 bool SinglePermute) { 6666 Value *Op = V; 6667 ShuffleVectorInst *IdentityOp = nullptr; 6668 SmallVector<int> IdentityMask; 6669 while (auto *SV = dyn_cast<ShuffleVectorInst>(Op)) { 6670 // Exit if not a fixed vector type or changing size shuffle. 6671 auto *SVTy = dyn_cast<FixedVectorType>(SV->getType()); 6672 if (!SVTy) 6673 break; 6674 // Remember the identity or broadcast mask, if it is not a resizing 6675 // shuffle. If no better candidates are found, this Op and Mask will be 6676 // used in the final shuffle. 6677 if (isIdentityMask(Mask, SVTy, /*IsStrict=*/false)) { 6678 if (!IdentityOp || !SinglePermute || 6679 (isIdentityMask(Mask, SVTy, /*IsStrict=*/true) && 6680 !ShuffleVectorInst::isZeroEltSplatMask(IdentityMask, 6681 IdentityMask.size()))) { 6682 IdentityOp = SV; 6683 // Store current mask in the IdentityMask so later we did not lost 6684 // this info if IdentityOp is selected as the best candidate for the 6685 // permutation. 6686 IdentityMask.assign(Mask); 6687 } 6688 } 6689 // Remember the broadcast mask. If no better candidates are found, this Op 6690 // and Mask will be used in the final shuffle. 6691 // Zero splat can be used as identity too, since it might be used with 6692 // mask <0, 1, 2, ...>, i.e. identity mask without extra reshuffling. 6693 // E.g. if need to shuffle the vector with the mask <3, 1, 2, 0>, which is 6694 // expensive, the analysis founds out, that the source vector is just a 6695 // broadcast, this original mask can be transformed to identity mask <0, 6696 // 1, 2, 3>. 6697 // \code 6698 // %0 = shuffle %v, poison, zeroinitalizer 6699 // %res = shuffle %0, poison, <3, 1, 2, 0> 6700 // \endcode 6701 // may be transformed to 6702 // \code 6703 // %0 = shuffle %v, poison, zeroinitalizer 6704 // %res = shuffle %0, poison, <0, 1, 2, 3> 6705 // \endcode 6706 if (SV->isZeroEltSplat()) { 6707 IdentityOp = SV; 6708 IdentityMask.assign(Mask); 6709 } 6710 int LocalVF = Mask.size(); 6711 if (auto *SVOpTy = 6712 dyn_cast<FixedVectorType>(SV->getOperand(0)->getType())) 6713 LocalVF = SVOpTy->getNumElements(); 6714 SmallVector<int> ExtMask(Mask.size(), PoisonMaskElem); 6715 for (auto [Idx, I] : enumerate(Mask)) { 6716 if (I == PoisonMaskElem || 6717 static_cast<unsigned>(I) >= SV->getShuffleMask().size()) 6718 continue; 6719 ExtMask[Idx] = SV->getMaskValue(I); 6720 } 6721 bool IsOp1Undef = 6722 isUndefVector(SV->getOperand(0), 6723 buildUseMask(LocalVF, ExtMask, UseMask::FirstArg)) 6724 .all(); 6725 bool IsOp2Undef = 6726 isUndefVector(SV->getOperand(1), 6727 buildUseMask(LocalVF, ExtMask, UseMask::SecondArg)) 6728 .all(); 6729 if (!IsOp1Undef && !IsOp2Undef) { 6730 // Update mask and mark undef elems. 
6731 for (int &I : Mask) { 6732 if (I == PoisonMaskElem) 6733 continue; 6734 if (SV->getMaskValue(I % SV->getShuffleMask().size()) == 6735 PoisonMaskElem) 6736 I = PoisonMaskElem; 6737 } 6738 break; 6739 } 6740 SmallVector<int> ShuffleMask(SV->getShuffleMask().begin(), 6741 SV->getShuffleMask().end()); 6742 combineMasks(LocalVF, ShuffleMask, Mask); 6743 Mask.swap(ShuffleMask); 6744 if (IsOp2Undef) 6745 Op = SV->getOperand(0); 6746 else 6747 Op = SV->getOperand(1); 6748 } 6749 if (auto *OpTy = dyn_cast<FixedVectorType>(Op->getType()); 6750 !OpTy || !isIdentityMask(Mask, OpTy, SinglePermute) || 6751 ShuffleVectorInst::isZeroEltSplatMask(Mask, Mask.size())) { 6752 if (IdentityOp) { 6753 V = IdentityOp; 6754 assert(Mask.size() == IdentityMask.size() && 6755 "Expected masks of same sizes."); 6756 // Clear known poison elements. 6757 for (auto [I, Idx] : enumerate(Mask)) 6758 if (Idx == PoisonMaskElem) 6759 IdentityMask[I] = PoisonMaskElem; 6760 Mask.swap(IdentityMask); 6761 auto *Shuffle = dyn_cast<ShuffleVectorInst>(V); 6762 return SinglePermute && 6763 (isIdentityMask(Mask, cast<FixedVectorType>(V->getType()), 6764 /*IsStrict=*/true) || 6765 (Shuffle && Mask.size() == Shuffle->getShuffleMask().size() && 6766 Shuffle->isZeroEltSplat() && 6767 ShuffleVectorInst::isZeroEltSplatMask(Mask, Mask.size()))); 6768 } 6769 V = Op; 6770 return false; 6771 } 6772 V = Op; 6773 return true; 6774 } 6775 6776 /// Smart shuffle instruction emission, walks through shuffles trees and 6777 /// tries to find the best matching vector for the actual shuffle 6778 /// instruction. 6779 template <typename T, typename ShuffleBuilderTy> 6780 static T createShuffle(Value *V1, Value *V2, ArrayRef<int> Mask, 6781 ShuffleBuilderTy &Builder) { 6782 assert(V1 && "Expected at least one vector value."); 6783 if (V2) 6784 Builder.resizeToMatch(V1, V2); 6785 int VF = Mask.size(); 6786 if (auto *FTy = dyn_cast<FixedVectorType>(V1->getType())) 6787 VF = FTy->getNumElements(); 6788 if (V2 && 6789 !isUndefVector(V2, buildUseMask(VF, Mask, UseMask::SecondArg)).all()) { 6790 // Peek through shuffles. 6791 Value *Op1 = V1; 6792 Value *Op2 = V2; 6793 int VF = 6794 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue(); 6795 SmallVector<int> CombinedMask1(Mask.size(), PoisonMaskElem); 6796 SmallVector<int> CombinedMask2(Mask.size(), PoisonMaskElem); 6797 for (int I = 0, E = Mask.size(); I < E; ++I) { 6798 if (Mask[I] < VF) 6799 CombinedMask1[I] = Mask[I]; 6800 else 6801 CombinedMask2[I] = Mask[I] - VF; 6802 } 6803 Value *PrevOp1; 6804 Value *PrevOp2; 6805 do { 6806 PrevOp1 = Op1; 6807 PrevOp2 = Op2; 6808 (void)peekThroughShuffles(Op1, CombinedMask1, /*SinglePermute=*/false); 6809 (void)peekThroughShuffles(Op2, CombinedMask2, /*SinglePermute=*/false); 6810 // Check if we have 2 resizing shuffles - need to peek through operands 6811 // again. 
6812 if (auto *SV1 = dyn_cast<ShuffleVectorInst>(Op1)) 6813 if (auto *SV2 = dyn_cast<ShuffleVectorInst>(Op2)) { 6814 SmallVector<int> ExtMask1(Mask.size(), PoisonMaskElem); 6815 for (auto [Idx, I] : enumerate(CombinedMask1)) { 6816 if (I == PoisonMaskElem) 6817 continue; 6818 ExtMask1[Idx] = SV1->getMaskValue(I); 6819 } 6820 SmallBitVector UseMask1 = buildUseMask( 6821 cast<FixedVectorType>(SV1->getOperand(1)->getType()) 6822 ->getNumElements(), 6823 ExtMask1, UseMask::SecondArg); 6824 SmallVector<int> ExtMask2(CombinedMask2.size(), PoisonMaskElem); 6825 for (auto [Idx, I] : enumerate(CombinedMask2)) { 6826 if (I == PoisonMaskElem) 6827 continue; 6828 ExtMask2[Idx] = SV2->getMaskValue(I); 6829 } 6830 SmallBitVector UseMask2 = buildUseMask( 6831 cast<FixedVectorType>(SV2->getOperand(1)->getType()) 6832 ->getNumElements(), 6833 ExtMask2, UseMask::SecondArg); 6834 if (SV1->getOperand(0)->getType() == 6835 SV2->getOperand(0)->getType() && 6836 SV1->getOperand(0)->getType() != SV1->getType() && 6837 isUndefVector(SV1->getOperand(1), UseMask1).all() && 6838 isUndefVector(SV2->getOperand(1), UseMask2).all()) { 6839 Op1 = SV1->getOperand(0); 6840 Op2 = SV2->getOperand(0); 6841 SmallVector<int> ShuffleMask1(SV1->getShuffleMask().begin(), 6842 SV1->getShuffleMask().end()); 6843 int LocalVF = ShuffleMask1.size(); 6844 if (auto *FTy = dyn_cast<FixedVectorType>(Op1->getType())) 6845 LocalVF = FTy->getNumElements(); 6846 combineMasks(LocalVF, ShuffleMask1, CombinedMask1); 6847 CombinedMask1.swap(ShuffleMask1); 6848 SmallVector<int> ShuffleMask2(SV2->getShuffleMask().begin(), 6849 SV2->getShuffleMask().end()); 6850 LocalVF = ShuffleMask2.size(); 6851 if (auto *FTy = dyn_cast<FixedVectorType>(Op2->getType())) 6852 LocalVF = FTy->getNumElements(); 6853 combineMasks(LocalVF, ShuffleMask2, CombinedMask2); 6854 CombinedMask2.swap(ShuffleMask2); 6855 } 6856 } 6857 } while (PrevOp1 != Op1 || PrevOp2 != Op2); 6858 Builder.resizeToMatch(Op1, Op2); 6859 VF = std::max(cast<VectorType>(Op1->getType()) 6860 ->getElementCount() 6861 .getKnownMinValue(), 6862 cast<VectorType>(Op2->getType()) 6863 ->getElementCount() 6864 .getKnownMinValue()); 6865 for (int I = 0, E = Mask.size(); I < E; ++I) { 6866 if (CombinedMask2[I] != PoisonMaskElem) { 6867 assert(CombinedMask1[I] == PoisonMaskElem && 6868 "Expected undefined mask element"); 6869 CombinedMask1[I] = CombinedMask2[I] + (Op1 == Op2 ? 0 : VF); 6870 } 6871 } 6872 if (Op1 == Op2 && 6873 (ShuffleVectorInst::isIdentityMask(CombinedMask1, VF) || 6874 (ShuffleVectorInst::isZeroEltSplatMask(CombinedMask1, VF) && 6875 isa<ShuffleVectorInst>(Op1) && 6876 cast<ShuffleVectorInst>(Op1)->getShuffleMask() == 6877 ArrayRef(CombinedMask1)))) 6878 return Builder.createIdentity(Op1); 6879 return Builder.createShuffleVector( 6880 Op1, Op1 == Op2 ? PoisonValue::get(Op1->getType()) : Op2, 6881 CombinedMask1); 6882 } 6883 if (isa<PoisonValue>(V1)) 6884 return Builder.createPoison( 6885 cast<VectorType>(V1->getType())->getElementType(), Mask.size()); 6886 SmallVector<int> NewMask(Mask.begin(), Mask.end()); 6887 bool IsIdentity = peekThroughShuffles(V1, NewMask, /*SinglePermute=*/true); 6888 assert(V1 && "Expected non-null value after looking through shuffles."); 6889 6890 if (!IsIdentity) 6891 return Builder.createShuffleVector(V1, NewMask); 6892 return Builder.createIdentity(V1); 6893 } 6894 }; 6895 } // namespace 6896 6897 /// Merges shuffle masks and emits final shuffle instruction, if required. It 6898 /// supports shuffling of 2 input vectors. 
It implements lazy shuffle emission: 6899 /// the actual shuffle instruction is generated only if it is actually 6900 /// required. Otherwise, the shuffle instruction emission is delayed until the 6901 /// end of the process, to reduce the number of emitted instructions and to 6902 /// simplify further analysis/transformations. 6903 class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis { 6904 bool IsFinalized = false; 6905 SmallVector<int> CommonMask; 6906 SmallVector<PointerUnion<Value *, const TreeEntry *>, 2> InVectors; 6907 const TargetTransformInfo &TTI; 6908 InstructionCost Cost = 0; 6909 SmallDenseSet<Value *> VectorizedVals; 6910 BoUpSLP &R; 6911 SmallPtrSetImpl<Value *> &CheckedExtracts; 6912 constexpr static TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6913 /// While set, we are still trying to estimate the cost for the same nodes and 6914 /// can delay the actual cost estimation (virtual shuffle instruction emission). 6915 /// This may help to better estimate the cost if the same nodes must be permuted 6916 /// and allows moving most of the cost estimation for long shuffles to TTI. 6917 bool SameNodesEstimated = true; 6918 6919 static Constant *getAllOnesValue(const DataLayout &DL, Type *Ty) { 6920 if (Ty->getScalarType()->isPointerTy()) { 6921 Constant *Res = ConstantExpr::getIntToPtr( 6922 ConstantInt::getAllOnesValue( 6923 IntegerType::get(Ty->getContext(), 6924 DL.getTypeStoreSizeInBits(Ty->getScalarType()))), 6925 Ty->getScalarType()); 6926 if (auto *VTy = dyn_cast<VectorType>(Ty)) 6927 Res = ConstantVector::getSplat(VTy->getElementCount(), Res); 6928 return Res; 6929 } 6930 return Constant::getAllOnesValue(Ty); 6931 } 6932 6933 InstructionCost getBuildVectorCost(ArrayRef<Value *> VL, Value *Root) { 6934 if ((!Root && allConstant(VL)) || all_of(VL, UndefValue::classof)) 6935 return TTI::TCC_Free; 6936 auto *VecTy = FixedVectorType::get(VL.front()->getType(), VL.size()); 6937 InstructionCost GatherCost = 0; 6938 SmallVector<Value *> Gathers(VL.begin(), VL.end()); 6939 // Improve the gather cost for a gather of loads, if we can group some of the 6940 // loads into vector loads.
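// Illustrative example (assuming the target can actually vectorize the
// slice): a gather of
// \code
//   %l0 = load i32, ptr %p0
//   %l1 = load i32, ptr %p1   ; %p0..%p3 are consecutive
//   %l2 = load i32, ptr %p2
//   %l3 = load i32, ptr %p3
// \endcode
// is costed below as a single <4 x i32> load (plus the cost of any leftover
// gathered scalars) rather than as four insertelements.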
6941 InstructionsState S = getSameOpcode(VL, *R.TLI); 6942 const unsigned Sz = R.DL->getTypeSizeInBits(VL.front()->getType()); 6943 unsigned MinVF = R.getMinVF(2 * Sz); 6944 if (VL.size() > 2 && 6945 ((S.getOpcode() == Instruction::Load && !S.isAltShuffle()) || 6946 (InVectors.empty() && 6947 any_of(seq<unsigned>(0, VL.size() / MinVF), 6948 [&](unsigned Idx) { 6949 ArrayRef<Value *> SubVL = VL.slice(Idx * MinVF, MinVF); 6950 InstructionsState S = getSameOpcode(SubVL, *R.TLI); 6951 return S.getOpcode() == Instruction::Load && 6952 !S.isAltShuffle(); 6953 }))) && 6954 !all_of(Gathers, [&](Value *V) { return R.getTreeEntry(V); }) && 6955 !isSplat(Gathers)) { 6956 SetVector<Value *> VectorizedLoads; 6957 SmallVector<LoadInst *> VectorizedStarts; 6958 SmallVector<std::pair<unsigned, unsigned>> ScatterVectorized; 6959 unsigned StartIdx = 0; 6960 unsigned VF = VL.size() / 2; 6961 for (; VF >= MinVF; VF /= 2) { 6962 for (unsigned Cnt = StartIdx, End = VL.size(); Cnt + VF <= End; 6963 Cnt += VF) { 6964 ArrayRef<Value *> Slice = VL.slice(Cnt, VF); 6965 if (S.getOpcode() != Instruction::Load || S.isAltShuffle()) { 6966 InstructionsState SliceS = getSameOpcode(Slice, *R.TLI); 6967 if (SliceS.getOpcode() != Instruction::Load || 6968 SliceS.isAltShuffle()) 6969 continue; 6970 } 6971 if (!VectorizedLoads.count(Slice.front()) && 6972 !VectorizedLoads.count(Slice.back()) && allSameBlock(Slice)) { 6973 SmallVector<Value *> PointerOps; 6974 OrdersType CurrentOrder; 6975 LoadsState LS = 6976 canVectorizeLoads(Slice, Slice.front(), TTI, *R.DL, *R.SE, 6977 *R.LI, *R.TLI, CurrentOrder, PointerOps); 6978 switch (LS) { 6979 case LoadsState::Vectorize: 6980 case LoadsState::ScatterVectorize: 6981 case LoadsState::PossibleStridedVectorize: 6982 // Mark the vectorized loads so that we don't vectorize them 6983 // again. 6984 // TODO: better handling of loads with reorders. 6985 if (LS == LoadsState::Vectorize && CurrentOrder.empty()) 6986 VectorizedStarts.push_back(cast<LoadInst>(Slice.front())); 6987 else 6988 ScatterVectorized.emplace_back(Cnt, VF); 6989 VectorizedLoads.insert(Slice.begin(), Slice.end()); 6990 // If we vectorized initial block, no need to try to vectorize 6991 // it again. 6992 if (Cnt == StartIdx) 6993 StartIdx += VF; 6994 break; 6995 case LoadsState::Gather: 6996 break; 6997 } 6998 } 6999 } 7000 // Check if the whole array was vectorized already - exit. 7001 if (StartIdx >= VL.size()) 7002 break; 7003 // Found vectorizable parts - exit. 7004 if (!VectorizedLoads.empty()) 7005 break; 7006 } 7007 if (!VectorizedLoads.empty()) { 7008 unsigned NumParts = TTI.getNumberOfParts(VecTy); 7009 bool NeedInsertSubvectorAnalysis = 7010 !NumParts || (VL.size() / VF) > NumParts; 7011 // Get the cost for gathered loads. 7012 for (unsigned I = 0, End = VL.size(); I < End; I += VF) { 7013 if (VectorizedLoads.contains(VL[I])) 7014 continue; 7015 GatherCost += getBuildVectorCost(VL.slice(I, VF), Root); 7016 } 7017 // Exclude potentially vectorized loads from list of gathered 7018 // scalars. 7019 Gathers.assign(Gathers.size(), PoisonValue::get(VL.front()->getType())); 7020 // The cost for vectorized loads. 
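// Illustrative arithmetic (the unit costs are an assumption; real numbers
// come from TTI): if a group of four scalar loads costs 4 in total and the
// single <4 x i32> load replacing them costs 1, the code below adds 1 to
// GatherCost and later subtracts ScalarsCost == 4, crediting a net saving of
// 3 to this gather node.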
7021 InstructionCost ScalarsCost = 0; 7022 for (Value *V : VectorizedLoads) { 7023 auto *LI = cast<LoadInst>(V); 7024 ScalarsCost += 7025 TTI.getMemoryOpCost(Instruction::Load, LI->getType(), 7026 LI->getAlign(), LI->getPointerAddressSpace(), 7027 CostKind, TTI::OperandValueInfo(), LI); 7028 } 7029 auto *LoadTy = FixedVectorType::get(VL.front()->getType(), VF); 7030 for (LoadInst *LI : VectorizedStarts) { 7031 Align Alignment = LI->getAlign(); 7032 GatherCost += 7033 TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, 7034 LI->getPointerAddressSpace(), CostKind, 7035 TTI::OperandValueInfo(), LI); 7036 } 7037 for (std::pair<unsigned, unsigned> P : ScatterVectorized) { 7038 auto *LI0 = cast<LoadInst>(VL[P.first]); 7039 Align CommonAlignment = LI0->getAlign(); 7040 for (Value *V : VL.slice(P.first + 1, VF - 1)) 7041 CommonAlignment = 7042 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign()); 7043 GatherCost += TTI.getGatherScatterOpCost( 7044 Instruction::Load, LoadTy, LI0->getPointerOperand(), 7045 /*VariableMask=*/false, CommonAlignment, CostKind, LI0); 7046 } 7047 if (NeedInsertSubvectorAnalysis) { 7048 // Add the cost for the subvectors insert. 7049 for (int I = VF, E = VL.size(); I < E; I += VF) 7050 GatherCost += TTI.getShuffleCost(TTI::SK_InsertSubvector, VecTy, 7051 std::nullopt, CostKind, I, LoadTy); 7052 } 7053 GatherCost -= ScalarsCost; 7054 } 7055 } else if (!Root && isSplat(VL)) { 7056 // Found the broadcasting of the single scalar, calculate the cost as 7057 // the broadcast. 7058 const auto *It = 7059 find_if(VL, [](Value *V) { return !isa<UndefValue>(V); }); 7060 assert(It != VL.end() && "Expected at least one non-undef value."); 7061 // Add broadcast for non-identity shuffle only. 7062 bool NeedShuffle = 7063 count(VL, *It) > 1 && 7064 (VL.front() != *It || !all_of(VL.drop_front(), UndefValue::classof)); 7065 InstructionCost InsertCost = TTI.getVectorInstrCost( 7066 Instruction::InsertElement, VecTy, CostKind, 7067 NeedShuffle ? 0 : std::distance(VL.begin(), It), 7068 PoisonValue::get(VecTy), *It); 7069 return InsertCost + 7070 (NeedShuffle ? TTI.getShuffleCost( 7071 TargetTransformInfo::SK_Broadcast, VecTy, 7072 /*Mask=*/std::nullopt, CostKind, /*Index=*/0, 7073 /*SubTp=*/nullptr, /*Args=*/*It) 7074 : TTI::TCC_Free); 7075 } 7076 return GatherCost + 7077 (all_of(Gathers, UndefValue::classof) 7078 ? TTI::TCC_Free 7079 : R.getGatherCost(Gathers, !Root && VL.equals(Gathers))); 7080 }; 7081 7082 /// Compute the cost of creating a vector containing the extracted values from 7083 /// \p VL. 7084 InstructionCost 7085 computeExtractCost(ArrayRef<Value *> VL, ArrayRef<int> Mask, 7086 ArrayRef<std::optional<TTI::ShuffleKind>> ShuffleKinds, 7087 unsigned NumParts) { 7088 assert(VL.size() > NumParts && "Unexpected scalarized shuffle."); 7089 unsigned NumElts = 7090 std::accumulate(VL.begin(), VL.end(), 0, [](unsigned Sz, Value *V) { 7091 auto *EE = dyn_cast<ExtractElementInst>(V); 7092 if (!EE) 7093 return Sz; 7094 auto *VecTy = cast<FixedVectorType>(EE->getVectorOperandType()); 7095 return std::max(Sz, VecTy->getNumElements()); 7096 }); 7097 unsigned NumSrcRegs = TTI.getNumberOfParts( 7098 FixedVectorType::get(VL.front()->getType(), NumElts)); 7099 if (NumSrcRegs == 0) 7100 NumSrcRegs = 1; 7101 // FIXME: this must be moved to TTI for better estimation. 
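// Worked example (illustrative numbers): with VL.size() == 8 extracts taken
// from an 8-element source vector that the target splits into two registers
// (NumElts == 8, NumSrcRegs == 2, NumParts == 2), EltsPerVector below becomes
// PowerOf2Ceil(max(4, 4)) == 4; each 4-wide mask slice is then costed as a
// single-register permute (free if it is an identity) or as a two-register
// permute, depending on how many source registers its indices touch.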
7102 unsigned EltsPerVector = PowerOf2Ceil(std::max( 7103 divideCeil(VL.size(), NumParts), divideCeil(NumElts, NumSrcRegs))); 7104 auto CheckPerRegistersShuffle = 7105 [&](MutableArrayRef<int> Mask) -> std::optional<TTI::ShuffleKind> { 7106 DenseSet<int> RegIndices; 7107 // Check that if trying to permute same single/2 input vectors. 7108 TTI::ShuffleKind ShuffleKind = TTI::SK_PermuteSingleSrc; 7109 int FirstRegId = -1; 7110 for (int &I : Mask) { 7111 if (I == PoisonMaskElem) 7112 continue; 7113 int RegId = (I / NumElts) * NumParts + (I % NumElts) / EltsPerVector; 7114 if (FirstRegId < 0) 7115 FirstRegId = RegId; 7116 RegIndices.insert(RegId); 7117 if (RegIndices.size() > 2) 7118 return std::nullopt; 7119 if (RegIndices.size() == 2) 7120 ShuffleKind = TTI::SK_PermuteTwoSrc; 7121 I = (I % NumElts) % EltsPerVector + 7122 (RegId == FirstRegId ? 0 : EltsPerVector); 7123 } 7124 return ShuffleKind; 7125 }; 7126 InstructionCost Cost = 0; 7127 7128 // Process extracts in blocks of EltsPerVector to check if the source vector 7129 // operand can be re-used directly. If not, add the cost of creating a 7130 // shuffle to extract the values into a vector register. 7131 for (unsigned Part = 0; Part < NumParts; ++Part) { 7132 if (!ShuffleKinds[Part]) 7133 continue; 7134 ArrayRef<int> MaskSlice = 7135 Mask.slice(Part * EltsPerVector, 7136 (Part == NumParts - 1 && Mask.size() % EltsPerVector != 0) 7137 ? Mask.size() % EltsPerVector 7138 : EltsPerVector); 7139 SmallVector<int> SubMask(EltsPerVector, PoisonMaskElem); 7140 copy(MaskSlice, SubMask.begin()); 7141 std::optional<TTI::ShuffleKind> RegShuffleKind = 7142 CheckPerRegistersShuffle(SubMask); 7143 if (!RegShuffleKind) { 7144 Cost += TTI.getShuffleCost( 7145 *ShuffleKinds[Part], 7146 FixedVectorType::get(VL.front()->getType(), NumElts), MaskSlice); 7147 continue; 7148 } 7149 if (*RegShuffleKind != TTI::SK_PermuteSingleSrc || 7150 !ShuffleVectorInst::isIdentityMask(SubMask, EltsPerVector)) { 7151 Cost += TTI.getShuffleCost( 7152 *RegShuffleKind, 7153 FixedVectorType::get(VL.front()->getType(), EltsPerVector), 7154 SubMask); 7155 } 7156 } 7157 return Cost; 7158 } 7159 /// Transforms mask \p CommonMask per given \p Mask to make proper set after 7160 /// shuffle emission. 7161 static void transformMaskAfterShuffle(MutableArrayRef<int> CommonMask, 7162 ArrayRef<int> Mask) { 7163 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 7164 if (Mask[Idx] != PoisonMaskElem) 7165 CommonMask[Idx] = Idx; 7166 } 7167 /// Adds the cost of reshuffling \p E1 and \p E2 (if present), using given 7168 /// mask \p Mask, register number \p Part, that includes \p SliceSize 7169 /// elements. 7170 void estimateNodesPermuteCost(const TreeEntry &E1, const TreeEntry *E2, 7171 ArrayRef<int> Mask, unsigned Part, 7172 unsigned SliceSize) { 7173 if (SameNodesEstimated) { 7174 // Delay the cost estimation if the same nodes are reshuffling. 7175 // If we already requested the cost of reshuffling of E1 and E2 before, no 7176 // need to estimate another cost with the sub-Mask, instead include this 7177 // sub-Mask into the CommonMask to estimate it later and avoid double cost 7178 // estimation. 
7179 if ((InVectors.size() == 2 && 7180 InVectors.front().get<const TreeEntry *>() == &E1 && 7181 InVectors.back().get<const TreeEntry *>() == E2) || 7182 (!E2 && InVectors.front().get<const TreeEntry *>() == &E1)) { 7183 assert(all_of(ArrayRef(CommonMask).slice(Part * SliceSize, SliceSize), 7184 [](int Idx) { return Idx == PoisonMaskElem; }) && 7185 "Expected all poisoned elements."); 7186 ArrayRef<int> SubMask = 7187 ArrayRef(Mask).slice(Part * SliceSize, SliceSize); 7188 copy(SubMask, std::next(CommonMask.begin(), SliceSize * Part)); 7189 return; 7190 } 7191 // Found non-matching nodes - need to estimate the cost for the matched 7192 // and transform mask. 7193 Cost += createShuffle(InVectors.front(), 7194 InVectors.size() == 1 ? nullptr : InVectors.back(), 7195 CommonMask); 7196 transformMaskAfterShuffle(CommonMask, CommonMask); 7197 } 7198 SameNodesEstimated = false; 7199 Cost += createShuffle(&E1, E2, Mask); 7200 transformMaskAfterShuffle(CommonMask, Mask); 7201 } 7202 7203 class ShuffleCostBuilder { 7204 const TargetTransformInfo &TTI; 7205 7206 static bool isEmptyOrIdentity(ArrayRef<int> Mask, unsigned VF) { 7207 int Index = -1; 7208 return Mask.empty() || 7209 (VF == Mask.size() && 7210 ShuffleVectorInst::isIdentityMask(Mask, VF)) || 7211 (ShuffleVectorInst::isExtractSubvectorMask(Mask, VF, Index) && 7212 Index == 0); 7213 } 7214 7215 public: 7216 ShuffleCostBuilder(const TargetTransformInfo &TTI) : TTI(TTI) {} 7217 ~ShuffleCostBuilder() = default; 7218 InstructionCost createShuffleVector(Value *V1, Value *, 7219 ArrayRef<int> Mask) const { 7220 // Empty mask or identity mask are free. 7221 unsigned VF = 7222 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue(); 7223 if (isEmptyOrIdentity(Mask, VF)) 7224 return TTI::TCC_Free; 7225 return TTI.getShuffleCost(TTI::SK_PermuteTwoSrc, 7226 cast<VectorType>(V1->getType()), Mask); 7227 } 7228 InstructionCost createShuffleVector(Value *V1, ArrayRef<int> Mask) const { 7229 // Empty mask or identity mask are free. 7230 unsigned VF = 7231 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue(); 7232 if (isEmptyOrIdentity(Mask, VF)) 7233 return TTI::TCC_Free; 7234 return TTI.getShuffleCost(TTI::SK_PermuteSingleSrc, 7235 cast<VectorType>(V1->getType()), Mask); 7236 } 7237 InstructionCost createIdentity(Value *) const { return TTI::TCC_Free; } 7238 InstructionCost createPoison(Type *Ty, unsigned VF) const { 7239 return TTI::TCC_Free; 7240 } 7241 void resizeToMatch(Value *&, Value *&) const {} 7242 }; 7243 7244 /// Smart shuffle instruction emission, walks through shuffles trees and 7245 /// tries to find the best matching vector for the actual shuffle 7246 /// instruction. 7247 InstructionCost 7248 createShuffle(const PointerUnion<Value *, const TreeEntry *> &P1, 7249 const PointerUnion<Value *, const TreeEntry *> &P2, 7250 ArrayRef<int> Mask) { 7251 ShuffleCostBuilder Builder(TTI); 7252 SmallVector<int> CommonMask(Mask.begin(), Mask.end()); 7253 Value *V1 = P1.dyn_cast<Value *>(), *V2 = P2.dyn_cast<Value *>(); 7254 unsigned CommonVF = Mask.size(); 7255 if (!V1 && !V2 && !P2.isNull()) { 7256 // Shuffle 2 entry nodes. 
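// Since only the cost is needed here, the two entries are modeled with
// placeholder constant vectors of the common width: a null vector for the
// first input and an all-ones vector for the second, so that the two inputs
// stay distinguishable when the combined mask is handed to
// BaseShuffleAnalysis::createShuffle with the ShuffleCostBuilder below.
// E.g. (illustrative) for E1 with VF 4 and E2 with VF 8, both are modeled as
// 8-element constants and mask indices 8..15 select from the all-ones
// placeholder.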
7257 const TreeEntry *E = P1.get<const TreeEntry *>(); 7258 unsigned VF = E->getVectorFactor(); 7259 const TreeEntry *E2 = P2.get<const TreeEntry *>(); 7260 CommonVF = std::max(VF, E2->getVectorFactor()); 7261 assert(all_of(Mask, 7262 [=](int Idx) { 7263 return Idx < 2 * static_cast<int>(CommonVF); 7264 }) && 7265 "All elements in mask must be less than 2 * CommonVF."); 7266 if (E->Scalars.size() == E2->Scalars.size()) { 7267 SmallVector<int> EMask = E->getCommonMask(); 7268 SmallVector<int> E2Mask = E2->getCommonMask(); 7269 if (!EMask.empty() || !E2Mask.empty()) { 7270 for (int &Idx : CommonMask) { 7271 if (Idx == PoisonMaskElem) 7272 continue; 7273 if (Idx < static_cast<int>(CommonVF) && !EMask.empty()) 7274 Idx = EMask[Idx]; 7275 else if (Idx >= static_cast<int>(CommonVF)) 7276 Idx = (E2Mask.empty() ? Idx - CommonVF : E2Mask[Idx - CommonVF]) + 7277 E->Scalars.size(); 7278 } 7279 } 7280 CommonVF = E->Scalars.size(); 7281 } 7282 V1 = Constant::getNullValue( 7283 FixedVectorType::get(E->Scalars.front()->getType(), CommonVF)); 7284 V2 = getAllOnesValue( 7285 *R.DL, FixedVectorType::get(E->Scalars.front()->getType(), CommonVF)); 7286 } else if (!V1 && P2.isNull()) { 7287 // Shuffle single entry node. 7288 const TreeEntry *E = P1.get<const TreeEntry *>(); 7289 unsigned VF = E->getVectorFactor(); 7290 CommonVF = VF; 7291 assert( 7292 all_of(Mask, 7293 [=](int Idx) { return Idx < static_cast<int>(CommonVF); }) && 7294 "All elements in mask must be less than CommonVF."); 7295 if (E->Scalars.size() == Mask.size() && VF != Mask.size()) { 7296 SmallVector<int> EMask = E->getCommonMask(); 7297 assert(!EMask.empty() && "Expected non-empty common mask."); 7298 for (int &Idx : CommonMask) { 7299 if (Idx != PoisonMaskElem) 7300 Idx = EMask[Idx]; 7301 } 7302 CommonVF = E->Scalars.size(); 7303 } 7304 V1 = Constant::getNullValue( 7305 FixedVectorType::get(E->Scalars.front()->getType(), CommonVF)); 7306 } else if (V1 && P2.isNull()) { 7307 // Shuffle single vector. 7308 CommonVF = cast<FixedVectorType>(V1->getType())->getNumElements(); 7309 assert( 7310 all_of(Mask, 7311 [=](int Idx) { return Idx < static_cast<int>(CommonVF); }) && 7312 "All elements in mask must be less than CommonVF."); 7313 } else if (V1 && !V2) { 7314 // Shuffle vector and tree node. 7315 unsigned VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 7316 const TreeEntry *E2 = P2.get<const TreeEntry *>(); 7317 CommonVF = std::max(VF, E2->getVectorFactor()); 7318 assert(all_of(Mask, 7319 [=](int Idx) { 7320 return Idx < 2 * static_cast<int>(CommonVF); 7321 }) && 7322 "All elements in mask must be less than 2 * CommonVF."); 7323 if (E2->Scalars.size() == VF && VF != CommonVF) { 7324 SmallVector<int> E2Mask = E2->getCommonMask(); 7325 assert(!E2Mask.empty() && "Expected non-empty common mask."); 7326 for (int &Idx : CommonMask) { 7327 if (Idx == PoisonMaskElem) 7328 continue; 7329 if (Idx >= static_cast<int>(CommonVF)) 7330 Idx = E2Mask[Idx - CommonVF] + VF; 7331 } 7332 CommonVF = VF; 7333 } 7334 V1 = Constant::getNullValue( 7335 FixedVectorType::get(E2->Scalars.front()->getType(), CommonVF)); 7336 V2 = getAllOnesValue( 7337 *R.DL, 7338 FixedVectorType::get(E2->Scalars.front()->getType(), CommonVF)); 7339 } else if (!V1 && V2) { 7340 // Shuffle vector and tree node. 
7341 unsigned VF = cast<FixedVectorType>(V2->getType())->getNumElements(); 7342 const TreeEntry *E1 = P1.get<const TreeEntry *>(); 7343 CommonVF = std::max(VF, E1->getVectorFactor()); 7344 assert(all_of(Mask, 7345 [=](int Idx) { 7346 return Idx < 2 * static_cast<int>(CommonVF); 7347 }) && 7348 "All elements in mask must be less than 2 * CommonVF."); 7349 if (E1->Scalars.size() == VF && VF != CommonVF) { 7350 SmallVector<int> E1Mask = E1->getCommonMask(); 7351 assert(!E1Mask.empty() && "Expected non-empty common mask."); 7352 for (int &Idx : CommonMask) { 7353 if (Idx == PoisonMaskElem) 7354 continue; 7355 if (Idx >= static_cast<int>(CommonVF)) 7356 Idx = E1Mask[Idx - CommonVF] + VF; 7357 } 7358 CommonVF = VF; 7359 } 7360 V1 = Constant::getNullValue( 7361 FixedVectorType::get(E1->Scalars.front()->getType(), CommonVF)); 7362 V2 = getAllOnesValue( 7363 *R.DL, 7364 FixedVectorType::get(E1->Scalars.front()->getType(), CommonVF)); 7365 } else { 7366 assert(V1 && V2 && "Expected both vectors."); 7367 unsigned VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 7368 CommonVF = 7369 std::max(VF, cast<FixedVectorType>(V2->getType())->getNumElements()); 7370 assert(all_of(Mask, 7371 [=](int Idx) { 7372 return Idx < 2 * static_cast<int>(CommonVF); 7373 }) && 7374 "All elements in mask must be less than 2 * CommonVF."); 7375 if (V1->getType() != V2->getType()) { 7376 V1 = Constant::getNullValue(FixedVectorType::get( 7377 cast<FixedVectorType>(V1->getType())->getElementType(), CommonVF)); 7378 V2 = getAllOnesValue( 7379 *R.DL, FixedVectorType::get( 7380 cast<FixedVectorType>(V1->getType())->getElementType(), 7381 CommonVF)); 7382 } 7383 } 7384 InVectors.front() = Constant::getNullValue(FixedVectorType::get( 7385 cast<FixedVectorType>(V1->getType())->getElementType(), 7386 CommonMask.size())); 7387 if (InVectors.size() == 2) 7388 InVectors.pop_back(); 7389 return BaseShuffleAnalysis::createShuffle<InstructionCost>( 7390 V1, V2, CommonMask, Builder); 7391 } 7392 7393 public: 7394 ShuffleCostEstimator(TargetTransformInfo &TTI, 7395 ArrayRef<Value *> VectorizedVals, BoUpSLP &R, 7396 SmallPtrSetImpl<Value *> &CheckedExtracts) 7397 : TTI(TTI), VectorizedVals(VectorizedVals.begin(), VectorizedVals.end()), 7398 R(R), CheckedExtracts(CheckedExtracts) {} 7399 Value *adjustExtracts(const TreeEntry *E, MutableArrayRef<int> Mask, 7400 ArrayRef<std::optional<TTI::ShuffleKind>> ShuffleKinds, 7401 unsigned NumParts, bool &UseVecBaseAsInput) { 7402 UseVecBaseAsInput = false; 7403 if (Mask.empty()) 7404 return nullptr; 7405 Value *VecBase = nullptr; 7406 ArrayRef<Value *> VL = E->Scalars; 7407 // If the resulting type is scalarized, do not adjust the cost. 7408 if (NumParts == VL.size()) 7409 return nullptr; 7410 // Check if it can be considered reused if same extractelements were 7411 // vectorized already. 
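// Illustrative example: when gathering
// \code
//   %e0 = extractelement <4 x float> %vec, i32 0
//   %e1 = extractelement <4 x float> %vec, i32 1
// \endcode
// and all users of %e0/%e1 are themselves vectorized, the extracts become
// dead after vectorization, so the loop below subtracts their scalar extract
// cost (using getExtractWithExtendCost for extract+s/zext pairs that only
// feed GEPs) from the cost of this gather node.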
7412 bool PrevNodeFound = any_of( 7413 ArrayRef(R.VectorizableTree).take_front(E->Idx), 7414 [&](const std::unique_ptr<TreeEntry> &TE) { 7415 return ((!TE->isAltShuffle() && 7416 TE->getOpcode() == Instruction::ExtractElement) || 7417 TE->State == TreeEntry::NeedToGather) && 7418 all_of(enumerate(TE->Scalars), [&](auto &&Data) { 7419 return VL.size() > Data.index() && 7420 (Mask[Data.index()] == PoisonMaskElem || 7421 isa<UndefValue>(VL[Data.index()]) || 7422 Data.value() == VL[Data.index()]); 7423 }); 7424 }); 7425 SmallPtrSet<Value *, 4> UniqueBases; 7426 unsigned SliceSize = VL.size() / NumParts; 7427 for (unsigned Part = 0; Part < NumParts; ++Part) { 7428 ArrayRef<int> SubMask = Mask.slice(Part * SliceSize, SliceSize); 7429 for (auto [I, V] : enumerate(VL.slice(Part * SliceSize, SliceSize))) { 7430 // Ignore non-extractelement scalars. 7431 if (isa<UndefValue>(V) || 7432 (!SubMask.empty() && SubMask[I] == PoisonMaskElem)) 7433 continue; 7434 // If all users of instruction are going to be vectorized and this 7435 // instruction itself is not going to be vectorized, consider this 7436 // instruction as dead and remove its cost from the final cost of the 7437 // vectorized tree. 7438 // Also, avoid adjusting the cost for extractelements with multiple uses 7439 // in different graph entries. 7440 auto *EE = cast<ExtractElementInst>(V); 7441 VecBase = EE->getVectorOperand(); 7442 UniqueBases.insert(VecBase); 7443 const TreeEntry *VE = R.getTreeEntry(V); 7444 if (!CheckedExtracts.insert(V).second || 7445 !R.areAllUsersVectorized(cast<Instruction>(V), &VectorizedVals) || 7446 (VE && VE != E)) 7447 continue; 7448 std::optional<unsigned> EEIdx = getExtractIndex(EE); 7449 if (!EEIdx) 7450 continue; 7451 unsigned Idx = *EEIdx; 7452 // Take credit for instruction that will become dead. 7453 if (EE->hasOneUse() || !PrevNodeFound) { 7454 Instruction *Ext = EE->user_back(); 7455 if (isa<SExtInst, ZExtInst>(Ext) && all_of(Ext->users(), [](User *U) { 7456 return isa<GetElementPtrInst>(U); 7457 })) { 7458 // Use getExtractWithExtendCost() to calculate the cost of 7459 // extractelement/ext pair. 7460 Cost -= 7461 TTI.getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(), 7462 EE->getVectorOperandType(), Idx); 7463 // Add back the cost of s|zext which is subtracted separately. 7464 Cost += TTI.getCastInstrCost( 7465 Ext->getOpcode(), Ext->getType(), EE->getType(), 7466 TTI::getCastContextHint(Ext), CostKind, Ext); 7467 continue; 7468 } 7469 } 7470 Cost -= TTI.getVectorInstrCost(*EE, EE->getVectorOperandType(), 7471 CostKind, Idx); 7472 } 7473 } 7474 // Check that gather of extractelements can be represented as just a 7475 // shuffle of a single/two vectors the scalars are extracted from. 7476 // Found the bunch of extractelement instructions that must be gathered 7477 // into a vector and can be represented as a permutation elements in a 7478 // single input vector or of 2 input vectors. 7479 // Done for reused if same extractelements were vectorized already. 
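// Rough illustration (hypothetical IR): a gather of
//   %x0 = extractelement <4 x i32> %a, i32 0
//   %x1 = extractelement <4 x i32> %b, i32 2
// can be modeled as a single two-source shuffle of %a and %b with mask
// <0, 6> instead of two scalar extracts plus an insertion sequence; this is
// roughly what computeExtractCost below estimates.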
7480 if (!PrevNodeFound) 7481 Cost += computeExtractCost(VL, Mask, ShuffleKinds, NumParts); 7482 InVectors.assign(1, E); 7483 CommonMask.assign(Mask.begin(), Mask.end()); 7484 transformMaskAfterShuffle(CommonMask, CommonMask); 7485 SameNodesEstimated = false; 7486 if (NumParts != 1 && UniqueBases.size() != 1) { 7487 UseVecBaseAsInput = true; 7488 VecBase = Constant::getNullValue( 7489 FixedVectorType::get(VL.front()->getType(), CommonMask.size())); 7490 } 7491 return VecBase; 7492 } 7493 /// Checks if the specified entry \p E needs to be delayed because of its 7494 /// dependency nodes. 7495 std::optional<InstructionCost> 7496 needToDelay(const TreeEntry *, 7497 ArrayRef<SmallVector<const TreeEntry *>>) const { 7498 // No need to delay the cost estimation during analysis. 7499 return std::nullopt; 7500 } 7501 void add(const TreeEntry &E1, const TreeEntry &E2, ArrayRef<int> Mask) { 7502 if (&E1 == &E2) { 7503 assert(all_of(Mask, 7504 [&](int Idx) { 7505 return Idx < static_cast<int>(E1.getVectorFactor()); 7506 }) && 7507 "Expected single vector shuffle mask."); 7508 add(E1, Mask); 7509 return; 7510 } 7511 if (InVectors.empty()) { 7512 CommonMask.assign(Mask.begin(), Mask.end()); 7513 InVectors.assign({&E1, &E2}); 7514 return; 7515 } 7516 assert(!CommonMask.empty() && "Expected non-empty common mask."); 7517 auto *MaskVecTy = 7518 FixedVectorType::get(E1.Scalars.front()->getType(), Mask.size()); 7519 unsigned NumParts = TTI.getNumberOfParts(MaskVecTy); 7520 if (NumParts == 0 || NumParts >= Mask.size()) 7521 NumParts = 1; 7522 unsigned SliceSize = Mask.size() / NumParts; 7523 const auto *It = 7524 find_if(Mask, [](int Idx) { return Idx != PoisonMaskElem; }); 7525 unsigned Part = std::distance(Mask.begin(), It) / SliceSize; 7526 estimateNodesPermuteCost(E1, &E2, Mask, Part, SliceSize); 7527 } 7528 void add(const TreeEntry &E1, ArrayRef<int> Mask) { 7529 if (InVectors.empty()) { 7530 CommonMask.assign(Mask.begin(), Mask.end()); 7531 InVectors.assign(1, &E1); 7532 return; 7533 } 7534 assert(!CommonMask.empty() && "Expected non-empty common mask."); 7535 auto *MaskVecTy = 7536 FixedVectorType::get(E1.Scalars.front()->getType(), Mask.size()); 7537 unsigned NumParts = TTI.getNumberOfParts(MaskVecTy); 7538 if (NumParts == 0 || NumParts >= Mask.size()) 7539 NumParts = 1; 7540 unsigned SliceSize = Mask.size() / NumParts; 7541 const auto *It = 7542 find_if(Mask, [](int Idx) { return Idx != PoisonMaskElem; }); 7543 unsigned Part = std::distance(Mask.begin(), It) / SliceSize; 7544 estimateNodesPermuteCost(E1, nullptr, Mask, Part, SliceSize); 7545 if (!SameNodesEstimated && InVectors.size() == 1) 7546 InVectors.emplace_back(&E1); 7547 } 7548 /// Adds 2 input vectors and the mask for their shuffling. 7549 void add(Value *V1, Value *V2, ArrayRef<int> Mask) { 7550 // May come only for shuffling of 2 vectors with extractelements, already 7551 // handled in adjustExtracts. 7552 assert(InVectors.size() == 1 && 7553 all_of(enumerate(CommonMask), 7554 [&](auto P) { 7555 if (P.value() == PoisonMaskElem) 7556 return Mask[P.index()] == PoisonMaskElem; 7557 auto *EI = 7558 cast<ExtractElementInst>(InVectors.front() 7559 .get<const TreeEntry *>() 7560 ->Scalars[P.index()]); 7561 return EI->getVectorOperand() == V1 || 7562 EI->getVectorOperand() == V2; 7563 }) && 7564 "Expected extractelement vectors."); 7565 } 7566 /// Adds another one input vector and the mask for the shuffling. 
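/// If \p ForExtracts is true, the vector is expected to have been accounted
/// for already by adjustExtracts(), so no extra cost is added here; the
/// assertions below only verify that bookkeeping.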
7567 void add(Value *V1, ArrayRef<int> Mask, bool ForExtracts = false) { 7568 if (InVectors.empty()) { 7569 assert(CommonMask.empty() && !ForExtracts && 7570 "Expected empty input mask/vectors."); 7571 CommonMask.assign(Mask.begin(), Mask.end()); 7572 InVectors.assign(1, V1); 7573 return; 7574 } 7575 if (ForExtracts) { 7576 // No need to add vectors here, already handled them in adjustExtracts. 7577 assert(InVectors.size() == 1 && 7578 InVectors.front().is<const TreeEntry *>() && !CommonMask.empty() && 7579 all_of(enumerate(CommonMask), 7580 [&](auto P) { 7581 Value *Scalar = InVectors.front() 7582 .get<const TreeEntry *>() 7583 ->Scalars[P.index()]; 7584 if (P.value() == PoisonMaskElem) 7585 return P.value() == Mask[P.index()] || 7586 isa<UndefValue>(Scalar); 7587 if (isa<Constant>(V1)) 7588 return true; 7589 auto *EI = cast<ExtractElementInst>(Scalar); 7590 return EI->getVectorOperand() == V1; 7591 }) && 7592 "Expected only tree entry for extractelement vectors."); 7593 return; 7594 } 7595 assert(!InVectors.empty() && !CommonMask.empty() && 7596 "Expected only tree entries from extracts/reused buildvectors."); 7597 unsigned VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 7598 if (InVectors.size() == 2) { 7599 Cost += createShuffle(InVectors.front(), InVectors.back(), CommonMask); 7600 transformMaskAfterShuffle(CommonMask, CommonMask); 7601 VF = std::max<unsigned>(VF, CommonMask.size()); 7602 } else if (const auto *InTE = 7603 InVectors.front().dyn_cast<const TreeEntry *>()) { 7604 VF = std::max(VF, InTE->getVectorFactor()); 7605 } else { 7606 VF = std::max( 7607 VF, cast<FixedVectorType>(InVectors.front().get<Value *>()->getType()) 7608 ->getNumElements()); 7609 } 7610 InVectors.push_back(V1); 7611 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 7612 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem) 7613 CommonMask[Idx] = Mask[Idx] + VF; 7614 } 7615 Value *gather(ArrayRef<Value *> VL, unsigned MaskVF = 0, 7616 Value *Root = nullptr) { 7617 Cost += getBuildVectorCost(VL, Root); 7618 if (!Root) { 7619 // FIXME: Need to find a way to avoid use of getNullValue here. 7620 SmallVector<Constant *> Vals; 7621 unsigned VF = VL.size(); 7622 if (MaskVF != 0) 7623 VF = std::min(VF, MaskVF); 7624 for (Value *V : VL.take_front(VF)) { 7625 if (isa<UndefValue>(V)) { 7626 Vals.push_back(cast<Constant>(V)); 7627 continue; 7628 } 7629 Vals.push_back(Constant::getNullValue(V->getType())); 7630 } 7631 return ConstantVector::get(Vals); 7632 } 7633 return ConstantVector::getSplat( 7634 ElementCount::getFixed( 7635 cast<FixedVectorType>(Root->getType())->getNumElements()), 7636 getAllOnesValue(*R.DL, VL.front()->getType())); 7637 } 7638 InstructionCost createFreeze(InstructionCost Cost) { return Cost; } 7639 /// Finalize emission of the shuffles. 
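/// Returns the accumulated cost plus the cost of the final shuffle over the
/// combined mask, if that mask is not empty. The optional \p Action callback
/// lets the caller post-process the cost-model placeholder vector before the
/// external \p ExtMask is applied.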
7640 InstructionCost 7641 finalize(ArrayRef<int> ExtMask, unsigned VF = 0, 7642 function_ref<void(Value *&, SmallVectorImpl<int> &)> Action = {}) { 7643 IsFinalized = true; 7644 if (Action) { 7645 const PointerUnion<Value *, const TreeEntry *> &Vec = InVectors.front(); 7646 if (InVectors.size() == 2) 7647 Cost += createShuffle(Vec, InVectors.back(), CommonMask); 7648 else 7649 Cost += createShuffle(Vec, nullptr, CommonMask); 7650 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 7651 if (CommonMask[Idx] != PoisonMaskElem) 7652 CommonMask[Idx] = Idx; 7653 assert(VF > 0 && 7654 "Expected vector length for the final value before action."); 7655 Value *V = Vec.get<Value *>(); 7656 Action(V, CommonMask); 7657 InVectors.front() = V; 7658 } 7659 ::addMask(CommonMask, ExtMask, /*ExtendingManyInputs=*/true); 7660 if (CommonMask.empty()) { 7661 assert(InVectors.size() == 1 && "Expected only one vector with no mask"); 7662 return Cost; 7663 } 7664 return Cost + 7665 createShuffle(InVectors.front(), 7666 InVectors.size() == 2 ? InVectors.back() : nullptr, 7667 CommonMask); 7668 } 7669 7670 ~ShuffleCostEstimator() { 7671 assert((IsFinalized || CommonMask.empty()) && 7672 "Shuffle construction must be finalized."); 7673 } 7674 }; 7675 7676 const BoUpSLP::TreeEntry *BoUpSLP::getOperandEntry(const TreeEntry *E, 7677 unsigned Idx) const { 7678 Value *Op = E->getOperand(Idx).front(); 7679 if (const TreeEntry *TE = getTreeEntry(Op)) { 7680 if (find_if(E->UserTreeIndices, [&](const EdgeInfo &EI) { 7681 return EI.EdgeIdx == Idx && EI.UserTE == E; 7682 }) != TE->UserTreeIndices.end()) 7683 return TE; 7684 auto MIt = MultiNodeScalars.find(Op); 7685 if (MIt != MultiNodeScalars.end()) { 7686 for (const TreeEntry *TE : MIt->second) { 7687 if (find_if(TE->UserTreeIndices, [&](const EdgeInfo &EI) { 7688 return EI.EdgeIdx == Idx && EI.UserTE == E; 7689 }) != TE->UserTreeIndices.end()) 7690 return TE; 7691 } 7692 } 7693 } 7694 const auto *It = 7695 find_if(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) { 7696 return TE->State == TreeEntry::NeedToGather && 7697 find_if(TE->UserTreeIndices, [&](const EdgeInfo &EI) { 7698 return EI.EdgeIdx == Idx && EI.UserTE == E; 7699 }) != TE->UserTreeIndices.end(); 7700 }); 7701 assert(It != VectorizableTree.end() && "Expected vectorizable entry."); 7702 return It->get(); 7703 } 7704 7705 InstructionCost 7706 BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals, 7707 SmallPtrSetImpl<Value *> &CheckedExtracts) { 7708 ArrayRef<Value *> VL = E->Scalars; 7709 7710 Type *ScalarTy = VL[0]->getType(); 7711 if (E->State != TreeEntry::NeedToGather) { 7712 if (auto *SI = dyn_cast<StoreInst>(VL[0])) 7713 ScalarTy = SI->getValueOperand()->getType(); 7714 else if (auto *CI = dyn_cast<CmpInst>(VL[0])) 7715 ScalarTy = CI->getOperand(0)->getType(); 7716 else if (auto *IE = dyn_cast<InsertElementInst>(VL[0])) 7717 ScalarTy = IE->getOperand(1)->getType(); 7718 } 7719 if (!FixedVectorType::isValidElementType(ScalarTy)) 7720 return InstructionCost::getInvalid(); 7721 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 7722 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7723 7724 // If we have computed a smaller type for the expression, update VecTy so 7725 // that the costs will be accurate. 
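// For example (hypothetical widths): if MinBWs records that this entry only
// needs 8 bits of an i32 value, the node is costed as <VL.size() x i8>
// rather than <VL.size() x i32>, which is usually cheaper on most targets.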
7726 auto It = MinBWs.find(E); 7727 if (It != MinBWs.end()) { 7728 ScalarTy = IntegerType::get(F->getContext(), It->second.first); 7729 VecTy = FixedVectorType::get(ScalarTy, VL.size()); 7730 } 7731 unsigned EntryVF = E->getVectorFactor(); 7732 auto *FinalVecTy = FixedVectorType::get(ScalarTy, EntryVF); 7733 7734 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); 7735 if (E->State == TreeEntry::NeedToGather) { 7736 if (allConstant(VL)) 7737 return 0; 7738 if (isa<InsertElementInst>(VL[0])) 7739 return InstructionCost::getInvalid(); 7740 return processBuildVector<ShuffleCostEstimator, InstructionCost>( 7741 E, *TTI, VectorizedVals, *this, CheckedExtracts); 7742 } 7743 InstructionCost CommonCost = 0; 7744 SmallVector<int> Mask; 7745 if (!E->ReorderIndices.empty() && 7746 E->State != TreeEntry::PossibleStridedVectorize) { 7747 SmallVector<int> NewMask; 7748 if (E->getOpcode() == Instruction::Store) { 7749 // For stores the order is actually a mask. 7750 NewMask.resize(E->ReorderIndices.size()); 7751 copy(E->ReorderIndices, NewMask.begin()); 7752 } else { 7753 inversePermutation(E->ReorderIndices, NewMask); 7754 } 7755 ::addMask(Mask, NewMask); 7756 } 7757 if (NeedToShuffleReuses) 7758 ::addMask(Mask, E->ReuseShuffleIndices); 7759 if (!Mask.empty() && !ShuffleVectorInst::isIdentityMask(Mask, Mask.size())) 7760 CommonCost = 7761 TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FinalVecTy, Mask); 7762 assert((E->State == TreeEntry::Vectorize || 7763 E->State == TreeEntry::ScatterVectorize || 7764 E->State == TreeEntry::PossibleStridedVectorize) && 7765 "Unhandled state"); 7766 assert(E->getOpcode() && 7767 ((allSameType(VL) && allSameBlock(VL)) || 7768 (E->getOpcode() == Instruction::GetElementPtr && 7769 E->getMainOp()->getType()->isPointerTy())) && 7770 "Invalid VL"); 7771 Instruction *VL0 = E->getMainOp(); 7772 unsigned ShuffleOrOp = 7773 E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode(); 7774 SetVector<Value *> UniqueValues(VL.begin(), VL.end()); 7775 const unsigned Sz = UniqueValues.size(); 7776 SmallBitVector UsedScalars(Sz, false); 7777 for (unsigned I = 0; I < Sz; ++I) { 7778 if (getTreeEntry(UniqueValues[I]) == E) 7779 continue; 7780 UsedScalars.set(I); 7781 } 7782 auto GetCastContextHint = [&](Value *V) { 7783 if (const TreeEntry *OpTE = getTreeEntry(V)) { 7784 if (OpTE->State == TreeEntry::ScatterVectorize) 7785 return TTI::CastContextHint::GatherScatter; 7786 if (OpTE->State == TreeEntry::Vectorize && 7787 OpTE->getOpcode() == Instruction::Load && !OpTE->isAltShuffle()) { 7788 if (OpTE->ReorderIndices.empty()) 7789 return TTI::CastContextHint::Normal; 7790 SmallVector<int> Mask; 7791 inversePermutation(OpTE->ReorderIndices, Mask); 7792 if (ShuffleVectorInst::isReverseMask(Mask, Mask.size())) 7793 return TTI::CastContextHint::Reversed; 7794 } 7795 } else { 7796 InstructionsState SrcState = getSameOpcode(E->getOperand(0), *TLI); 7797 if (SrcState.getOpcode() == Instruction::Load && !SrcState.isAltShuffle()) 7798 return TTI::CastContextHint::GatherScatter; 7799 } 7800 return TTI::CastContextHint::None; 7801 }; 7802 auto GetCostDiff = 7803 [=](function_ref<InstructionCost(unsigned)> ScalarEltCost, 7804 function_ref<InstructionCost(InstructionCost)> VectorCost) { 7805 // Calculate the cost of this instruction. 
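// GetCostDiff returns VecCost - ScalarCost for this node, so a negative
// result means that vectorizing the node is expected to be profitable.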
7806 InstructionCost ScalarCost = 0;
7807 if (isa<CastInst, CmpInst, SelectInst, CallInst>(VL0)) {
7808 // For some of the instructions there is no need to calculate the cost for
7809 // each particular instruction; we can use the cost of a single instruction
7810 // multiplied by the total number of scalar instructions.
7811 ScalarCost = (Sz - UsedScalars.count()) * ScalarEltCost(0);
7812 } else {
7813 for (unsigned I = 0; I < Sz; ++I) {
7814 if (UsedScalars.test(I))
7815 continue;
7816 ScalarCost += ScalarEltCost(I);
7817 }
7818 }
7819 
7820 InstructionCost VecCost = VectorCost(CommonCost);
7821 // Check if the current node must be resized, if the parent node is not
7822 // resized.
7823 if (!UnaryInstruction::isCast(E->getOpcode()) && E->Idx != 0) {
7824 const EdgeInfo &EI = E->UserTreeIndices.front();
7825 if ((EI.UserTE->getOpcode() != Instruction::Select ||
7826 EI.EdgeIdx != 0) &&
7827 It != MinBWs.end()) {
7828 auto UserBWIt = MinBWs.find(EI.UserTE);
7829 Type *UserScalarTy =
7830 EI.UserTE->getOperand(EI.EdgeIdx).front()->getType();
7831 if (UserBWIt != MinBWs.end())
7832 UserScalarTy = IntegerType::get(ScalarTy->getContext(),
7833 UserBWIt->second.first);
7834 if (ScalarTy != UserScalarTy) {
7835 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy);
7836 unsigned SrcBWSz = DL->getTypeSizeInBits(UserScalarTy);
7837 unsigned VecOpcode;
7838 auto *SrcVecTy =
7839 FixedVectorType::get(UserScalarTy, E->getVectorFactor());
7840 if (BWSz > SrcBWSz)
7841 VecOpcode = Instruction::Trunc;
7842 else
7843 VecOpcode =
7844 It->second.second ? Instruction::SExt : Instruction::ZExt;
7845 TTI::CastContextHint CCH = GetCastContextHint(VL0);
7846 VecCost += TTI->getCastInstrCost(VecOpcode, VecTy, SrcVecTy, CCH,
7847 CostKind);
7848 ScalarCost +=
7849 Sz * TTI->getCastInstrCost(VecOpcode, ScalarTy, UserScalarTy,
7850 CCH, CostKind);
7851 }
7852 }
7853 }
7854 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost - CommonCost,
7855 ScalarCost, "Calculated costs for Tree"));
7856 return VecCost - ScalarCost;
7857 };
7858 // Calculate the cost difference from vectorizing a set of GEPs.
7859 // A negative value means vectorizing is profitable.
7860 auto GetGEPCostDiff = [=](ArrayRef<Value *> Ptrs, Value *BasePtr) {
7861 InstructionCost ScalarCost = 0;
7862 InstructionCost VecCost = 0;
7863 // Here we differentiate two cases: (1) when Ptrs represent a regular
7864 // vectorization tree node (as they are pointer arguments of scattered
7865 // loads) or (2) when Ptrs are the arguments of loads or stores being
7866 // vectorized as a plain wide unit-stride load/store, since all the
7867 // loads/stores are known to be from/to adjacent locations.
7868 assert(E->State == TreeEntry::Vectorize &&
7869 "Entry state expected to be Vectorize here.");
7870 if (isa<LoadInst, StoreInst>(VL0)) {
7871 // Case 2: estimate the pointer-related costs when vectorizing to
7872 // a wide load/store.
7873 // Scalar cost is estimated as a set of pointers with known relationship
7874 // between them.
7875 // For vector code we will use BasePtr as argument for the wide load/store
7876 // but we also need to account for all the instructions which are going to
7877 // stay in vectorized code due to uses outside of these scalar
7878 // loads/stores.
7879 ScalarCost = TTI->getPointersChainCost(
7880 Ptrs, BasePtr, TTI::PointersChainInfo::getUnitStride(), ScalarTy,
7881 CostKind);
7882 
7883 SmallVector<const Value *> PtrsRetainedInVecCode;
7884 for (Value *V : Ptrs) {
7885 if (V == BasePtr) {
7886 PtrsRetainedInVecCode.push_back(V);
7887 continue;
7888 }
7889 auto *Ptr = dyn_cast<GetElementPtrInst>(V);
7890 // For simplicity, assume Ptr stays in vectorized code if it's not a
7891 // GEP instruction. We don't care, since its cost is considered free.
7892 // TODO: We should check for any uses outside of the vectorizable tree
7893 // rather than just a single use.
7894 if (!Ptr || !Ptr->hasOneUse())
7895 PtrsRetainedInVecCode.push_back(V);
7896 }
7897 
7898 if (PtrsRetainedInVecCode.size() == Ptrs.size()) {
7899 // If all pointers stay in vectorized code then we don't have
7900 // any savings on that.
7901 LLVM_DEBUG(dumpTreeCosts(E, 0, ScalarCost, ScalarCost,
7902 "Calculated GEPs cost for Tree"));
7903 return InstructionCost{TTI::TCC_Free};
7904 }
7905 VecCost = TTI->getPointersChainCost(
7906 PtrsRetainedInVecCode, BasePtr,
7907 TTI::PointersChainInfo::getKnownStride(), VecTy, CostKind);
7908 } else {
7909 // Case 1: Ptrs are the arguments of loads that we are going to transform
7910 // into a masked gather load intrinsic.
7911 // All the scalar GEPs will be removed as a result of vectorization.
7912 // For any external uses of some lanes, extractelement instructions will
7913 // be generated (whose cost is estimated separately).
7914 TTI::PointersChainInfo PtrsInfo =
7915 all_of(Ptrs,
7916 [](const Value *V) {
7917 auto *Ptr = dyn_cast<GetElementPtrInst>(V);
7918 return Ptr && !Ptr->hasAllConstantIndices();
7919 })
7920 ? TTI::PointersChainInfo::getUnknownStride()
7921 : TTI::PointersChainInfo::getKnownStride();
7922 
7923 ScalarCost = TTI->getPointersChainCost(Ptrs, BasePtr, PtrsInfo, ScalarTy,
7924 CostKind);
7925 if (auto *BaseGEP = dyn_cast<GEPOperator>(BasePtr)) {
7926 SmallVector<const Value *> Indices(BaseGEP->indices());
7927 VecCost = TTI->getGEPCost(BaseGEP->getSourceElementType(),
7928 BaseGEP->getPointerOperand(), Indices, VecTy,
7929 CostKind);
7930 }
7931 }
7932 
7933 LLVM_DEBUG(dumpTreeCosts(E, 0, VecCost, ScalarCost,
7934 "Calculated GEPs cost for Tree"));
7935 
7936 return VecCost - ScalarCost;
7937 };
7938 
7939 switch (ShuffleOrOp) {
7940 case Instruction::PHI: {
7941 // Count reused scalars.
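// Vectorized PHI nodes themselves are modeled as free. The loop below only
// accounts for operand entries that are reused through a non-empty
// ReuseShuffleIndices: each extra reuse adds TCC_Basic to the scalar cost,
// and the node's cost is returned as CommonCost minus that scalar cost.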
7942 InstructionCost ScalarCost = 0; 7943 SmallPtrSet<const TreeEntry *, 4> CountedOps; 7944 for (Value *V : UniqueValues) { 7945 auto *PHI = dyn_cast<PHINode>(V); 7946 if (!PHI) 7947 continue; 7948 7949 ValueList Operands(PHI->getNumIncomingValues(), nullptr); 7950 for (unsigned I = 0, N = PHI->getNumIncomingValues(); I < N; ++I) { 7951 Value *Op = PHI->getIncomingValue(I); 7952 Operands[I] = Op; 7953 } 7954 if (const TreeEntry *OpTE = getTreeEntry(Operands.front())) 7955 if (OpTE->isSame(Operands) && CountedOps.insert(OpTE).second) 7956 if (!OpTE->ReuseShuffleIndices.empty()) 7957 ScalarCost += TTI::TCC_Basic * (OpTE->ReuseShuffleIndices.size() - 7958 OpTE->Scalars.size()); 7959 } 7960 7961 return CommonCost - ScalarCost; 7962 } 7963 case Instruction::ExtractValue: 7964 case Instruction::ExtractElement: { 7965 auto GetScalarCost = [&](unsigned Idx) { 7966 auto *I = cast<Instruction>(UniqueValues[Idx]); 7967 VectorType *SrcVecTy; 7968 if (ShuffleOrOp == Instruction::ExtractElement) { 7969 auto *EE = cast<ExtractElementInst>(I); 7970 SrcVecTy = EE->getVectorOperandType(); 7971 } else { 7972 auto *EV = cast<ExtractValueInst>(I); 7973 Type *AggregateTy = EV->getAggregateOperand()->getType(); 7974 unsigned NumElts; 7975 if (auto *ATy = dyn_cast<ArrayType>(AggregateTy)) 7976 NumElts = ATy->getNumElements(); 7977 else 7978 NumElts = AggregateTy->getStructNumElements(); 7979 SrcVecTy = FixedVectorType::get(ScalarTy, NumElts); 7980 } 7981 if (I->hasOneUse()) { 7982 Instruction *Ext = I->user_back(); 7983 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 7984 all_of(Ext->users(), 7985 [](User *U) { return isa<GetElementPtrInst>(U); })) { 7986 // Use getExtractWithExtendCost() to calculate the cost of 7987 // extractelement/ext pair. 7988 InstructionCost Cost = TTI->getExtractWithExtendCost( 7989 Ext->getOpcode(), Ext->getType(), SrcVecTy, *getExtractIndex(I)); 7990 // Subtract the cost of s|zext which is subtracted separately. 
7991 Cost -= TTI->getCastInstrCost( 7992 Ext->getOpcode(), Ext->getType(), I->getType(), 7993 TTI::getCastContextHint(Ext), CostKind, Ext); 7994 return Cost; 7995 } 7996 } 7997 return TTI->getVectorInstrCost(Instruction::ExtractElement, SrcVecTy, 7998 CostKind, *getExtractIndex(I)); 7999 }; 8000 auto GetVectorCost = [](InstructionCost CommonCost) { return CommonCost; }; 8001 return GetCostDiff(GetScalarCost, GetVectorCost); 8002 } 8003 case Instruction::InsertElement: { 8004 assert(E->ReuseShuffleIndices.empty() && 8005 "Unique insertelements only are expected."); 8006 auto *SrcVecTy = cast<FixedVectorType>(VL0->getType()); 8007 unsigned const NumElts = SrcVecTy->getNumElements(); 8008 unsigned const NumScalars = VL.size(); 8009 8010 unsigned NumOfParts = TTI->getNumberOfParts(SrcVecTy); 8011 8012 SmallVector<int> InsertMask(NumElts, PoisonMaskElem); 8013 unsigned OffsetBeg = *getInsertIndex(VL.front()); 8014 unsigned OffsetEnd = OffsetBeg; 8015 InsertMask[OffsetBeg] = 0; 8016 for (auto [I, V] : enumerate(VL.drop_front())) { 8017 unsigned Idx = *getInsertIndex(V); 8018 if (OffsetBeg > Idx) 8019 OffsetBeg = Idx; 8020 else if (OffsetEnd < Idx) 8021 OffsetEnd = Idx; 8022 InsertMask[Idx] = I + 1; 8023 } 8024 unsigned VecScalarsSz = PowerOf2Ceil(NumElts); 8025 if (NumOfParts > 0) 8026 VecScalarsSz = PowerOf2Ceil((NumElts + NumOfParts - 1) / NumOfParts); 8027 unsigned VecSz = (1 + OffsetEnd / VecScalarsSz - OffsetBeg / VecScalarsSz) * 8028 VecScalarsSz; 8029 unsigned Offset = VecScalarsSz * (OffsetBeg / VecScalarsSz); 8030 unsigned InsertVecSz = std::min<unsigned>( 8031 PowerOf2Ceil(OffsetEnd - OffsetBeg + 1), 8032 ((OffsetEnd - OffsetBeg + VecScalarsSz) / VecScalarsSz) * VecScalarsSz); 8033 bool IsWholeSubvector = 8034 OffsetBeg == Offset && ((OffsetEnd + 1) % VecScalarsSz == 0); 8035 // Check if we can safely insert a subvector. If it is not possible, just 8036 // generate a whole-sized vector and shuffle the source vector and the new 8037 // subvector. 8038 if (OffsetBeg + InsertVecSz > VecSz) { 8039 // Align OffsetBeg to generate correct mask. 8040 OffsetBeg = alignDown(OffsetBeg, VecSz, Offset); 8041 InsertVecSz = VecSz; 8042 } 8043 8044 APInt DemandedElts = APInt::getZero(NumElts); 8045 // TODO: Add support for Instruction::InsertValue. 8046 SmallVector<int> Mask; 8047 if (!E->ReorderIndices.empty()) { 8048 inversePermutation(E->ReorderIndices, Mask); 8049 Mask.append(InsertVecSz - Mask.size(), PoisonMaskElem); 8050 } else { 8051 Mask.assign(VecSz, PoisonMaskElem); 8052 std::iota(Mask.begin(), std::next(Mask.begin(), InsertVecSz), 0); 8053 } 8054 bool IsIdentity = true; 8055 SmallVector<int> PrevMask(InsertVecSz, PoisonMaskElem); 8056 Mask.swap(PrevMask); 8057 for (unsigned I = 0; I < NumScalars; ++I) { 8058 unsigned InsertIdx = *getInsertIndex(VL[PrevMask[I]]); 8059 DemandedElts.setBit(InsertIdx); 8060 IsIdentity &= InsertIdx - OffsetBeg == I; 8061 Mask[InsertIdx - OffsetBeg] = I; 8062 } 8063 assert(Offset < NumElts && "Failed to find vector index offset"); 8064 8065 InstructionCost Cost = 0; 8066 Cost -= TTI->getScalarizationOverhead(SrcVecTy, DemandedElts, 8067 /*Insert*/ true, /*Extract*/ false, 8068 CostKind); 8069 8070 // First cost - resize to actual vector size if not identity shuffle or 8071 // need to shift the vector. 8072 // Do not calculate the cost if the actual size is the register size and 8073 // we can merge this shuffle with the following SK_Select. 
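// Rough worked example (hypothetical node): inserting 4 scalars, in order,
// at positions 2..5 of an <8 x float> destination with a single register
// part gives VecScalarsSz == 8, Offset == 0, VecSz == 8 and InsertVecSz == 4.
// The in-order insertion makes the first mask an identity (no permute cost),
// and, if the destination base vector is not undef, the code below charges
// an SK_InsertSubvector of a <4 x float> at index 2.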
8074 auto *InsertVecTy = FixedVectorType::get(ScalarTy, InsertVecSz); 8075 if (!IsIdentity) 8076 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, 8077 InsertVecTy, Mask); 8078 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) { 8079 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0)); 8080 })); 8081 // Second cost - permutation with subvector, if some elements are from the 8082 // initial vector or inserting a subvector. 8083 // TODO: Implement the analysis of the FirstInsert->getOperand(0) 8084 // subvector of ActualVecTy. 8085 SmallBitVector InMask = 8086 isUndefVector(FirstInsert->getOperand(0), 8087 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask)); 8088 if (!InMask.all() && NumScalars != NumElts && !IsWholeSubvector) { 8089 if (InsertVecSz != VecSz) { 8090 auto *ActualVecTy = FixedVectorType::get(ScalarTy, VecSz); 8091 Cost += TTI->getShuffleCost(TTI::SK_InsertSubvector, ActualVecTy, 8092 std::nullopt, CostKind, OffsetBeg - Offset, 8093 InsertVecTy); 8094 } else { 8095 for (unsigned I = 0, End = OffsetBeg - Offset; I < End; ++I) 8096 Mask[I] = InMask.test(I) ? PoisonMaskElem : I; 8097 for (unsigned I = OffsetBeg - Offset, End = OffsetEnd - Offset; 8098 I <= End; ++I) 8099 if (Mask[I] != PoisonMaskElem) 8100 Mask[I] = I + VecSz; 8101 for (unsigned I = OffsetEnd + 1 - Offset; I < VecSz; ++I) 8102 Mask[I] = 8103 ((I >= InMask.size()) || InMask.test(I)) ? PoisonMaskElem : I; 8104 Cost += TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, InsertVecTy, Mask); 8105 } 8106 } 8107 return Cost; 8108 } 8109 case Instruction::ZExt: 8110 case Instruction::SExt: 8111 case Instruction::FPToUI: 8112 case Instruction::FPToSI: 8113 case Instruction::FPExt: 8114 case Instruction::PtrToInt: 8115 case Instruction::IntToPtr: 8116 case Instruction::SIToFP: 8117 case Instruction::UIToFP: 8118 case Instruction::Trunc: 8119 case Instruction::FPTrunc: 8120 case Instruction::BitCast: { 8121 auto SrcIt = MinBWs.find(getOperandEntry(E, 0)); 8122 Type *SrcScalarTy = VL0->getOperand(0)->getType(); 8123 auto *SrcVecTy = FixedVectorType::get(SrcScalarTy, VL.size()); 8124 unsigned Opcode = ShuffleOrOp; 8125 unsigned VecOpcode = Opcode; 8126 if (!ScalarTy->isFloatingPointTy() && !SrcScalarTy->isFloatingPointTy() && 8127 (SrcIt != MinBWs.end() || It != MinBWs.end())) { 8128 // Check if the values are candidates to demote. 8129 unsigned SrcBWSz = DL->getTypeSizeInBits(SrcScalarTy); 8130 if (SrcIt != MinBWs.end()) { 8131 SrcBWSz = SrcIt->second.first; 8132 SrcScalarTy = IntegerType::get(F->getContext(), SrcBWSz); 8133 SrcVecTy = FixedVectorType::get(SrcScalarTy, VL.size()); 8134 } 8135 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy); 8136 if (BWSz == SrcBWSz) { 8137 VecOpcode = Instruction::BitCast; 8138 } else if (BWSz < SrcBWSz) { 8139 VecOpcode = Instruction::Trunc; 8140 } else if (It != MinBWs.end()) { 8141 assert(BWSz > SrcBWSz && "Invalid cast!"); 8142 VecOpcode = It->second.second ? Instruction::SExt : Instruction::ZExt; 8143 } 8144 } 8145 auto GetScalarCost = [&](unsigned Idx) -> InstructionCost { 8146 // Do not count cost here if minimum bitwidth is in effect and it is just 8147 // a bitcast (here it is just a noop). 8148 if (VecOpcode != Opcode && VecOpcode == Instruction::BitCast) 8149 return TTI::TCC_Free; 8150 auto *VI = VL0->getOpcode() == Opcode 8151 ? 
cast<Instruction>(UniqueValues[Idx]) 8152 : nullptr; 8153 return TTI->getCastInstrCost(Opcode, VL0->getType(), 8154 VL0->getOperand(0)->getType(), 8155 TTI::getCastContextHint(VI), CostKind, VI); 8156 }; 8157 auto GetVectorCost = [=](InstructionCost CommonCost) { 8158 // Do not count cost here if minimum bitwidth is in effect and it is just 8159 // a bitcast (here it is just a noop). 8160 if (VecOpcode != Opcode && VecOpcode == Instruction::BitCast) 8161 return CommonCost; 8162 auto *VI = VL0->getOpcode() == Opcode ? VL0 : nullptr; 8163 TTI::CastContextHint CCH = GetCastContextHint(VL0->getOperand(0)); 8164 return CommonCost + 8165 TTI->getCastInstrCost(VecOpcode, VecTy, SrcVecTy, CCH, CostKind, 8166 VecOpcode == Opcode ? VI : nullptr); 8167 }; 8168 return GetCostDiff(GetScalarCost, GetVectorCost); 8169 } 8170 case Instruction::FCmp: 8171 case Instruction::ICmp: 8172 case Instruction::Select: { 8173 CmpInst::Predicate VecPred, SwappedVecPred; 8174 auto MatchCmp = m_Cmp(VecPred, m_Value(), m_Value()); 8175 if (match(VL0, m_Select(MatchCmp, m_Value(), m_Value())) || 8176 match(VL0, MatchCmp)) 8177 SwappedVecPred = CmpInst::getSwappedPredicate(VecPred); 8178 else 8179 SwappedVecPred = VecPred = ScalarTy->isFloatingPointTy() 8180 ? CmpInst::BAD_FCMP_PREDICATE 8181 : CmpInst::BAD_ICMP_PREDICATE; 8182 auto GetScalarCost = [&](unsigned Idx) { 8183 auto *VI = cast<Instruction>(UniqueValues[Idx]); 8184 CmpInst::Predicate CurrentPred = ScalarTy->isFloatingPointTy() 8185 ? CmpInst::BAD_FCMP_PREDICATE 8186 : CmpInst::BAD_ICMP_PREDICATE; 8187 auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value()); 8188 if ((!match(VI, m_Select(MatchCmp, m_Value(), m_Value())) && 8189 !match(VI, MatchCmp)) || 8190 (CurrentPred != VecPred && CurrentPred != SwappedVecPred)) 8191 VecPred = SwappedVecPred = ScalarTy->isFloatingPointTy() 8192 ? CmpInst::BAD_FCMP_PREDICATE 8193 : CmpInst::BAD_ICMP_PREDICATE; 8194 8195 return TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy, 8196 Builder.getInt1Ty(), CurrentPred, CostKind, 8197 VI); 8198 }; 8199 auto GetVectorCost = [&](InstructionCost CommonCost) { 8200 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size()); 8201 8202 InstructionCost VecCost = TTI->getCmpSelInstrCost( 8203 E->getOpcode(), VecTy, MaskTy, VecPred, CostKind, VL0); 8204 // Check if it is possible and profitable to use min/max for selects 8205 // in VL. 8206 // 8207 auto IntrinsicAndUse = canConvertToMinOrMaxIntrinsic(VL); 8208 if (IntrinsicAndUse.first != Intrinsic::not_intrinsic) { 8209 IntrinsicCostAttributes CostAttrs(IntrinsicAndUse.first, VecTy, 8210 {VecTy, VecTy}); 8211 InstructionCost IntrinsicCost = 8212 TTI->getIntrinsicInstrCost(CostAttrs, CostKind); 8213 // If the selects are the only uses of the compares, they will be 8214 // dead and we can adjust the cost by removing their cost. 
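// For example, a bundle of selects that matches a signed-min pattern is
// costed both as (vector icmp + vector select) and as a single llvm.smin
// intrinsic call, and the cheaper of the two estimates is used.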
8215 if (IntrinsicAndUse.second) 8216 IntrinsicCost -= TTI->getCmpSelInstrCost(Instruction::ICmp, VecTy, 8217 MaskTy, VecPred, CostKind); 8218 VecCost = std::min(VecCost, IntrinsicCost); 8219 } 8220 return VecCost + CommonCost; 8221 }; 8222 return GetCostDiff(GetScalarCost, GetVectorCost); 8223 } 8224 case Instruction::FNeg: 8225 case Instruction::Add: 8226 case Instruction::FAdd: 8227 case Instruction::Sub: 8228 case Instruction::FSub: 8229 case Instruction::Mul: 8230 case Instruction::FMul: 8231 case Instruction::UDiv: 8232 case Instruction::SDiv: 8233 case Instruction::FDiv: 8234 case Instruction::URem: 8235 case Instruction::SRem: 8236 case Instruction::FRem: 8237 case Instruction::Shl: 8238 case Instruction::LShr: 8239 case Instruction::AShr: 8240 case Instruction::And: 8241 case Instruction::Or: 8242 case Instruction::Xor: { 8243 auto GetScalarCost = [&](unsigned Idx) { 8244 auto *VI = cast<Instruction>(UniqueValues[Idx]); 8245 unsigned OpIdx = isa<UnaryOperator>(VI) ? 0 : 1; 8246 TTI::OperandValueInfo Op1Info = TTI::getOperandInfo(VI->getOperand(0)); 8247 TTI::OperandValueInfo Op2Info = 8248 TTI::getOperandInfo(VI->getOperand(OpIdx)); 8249 SmallVector<const Value *> Operands(VI->operand_values()); 8250 return TTI->getArithmeticInstrCost(ShuffleOrOp, ScalarTy, CostKind, 8251 Op1Info, Op2Info, Operands, VI); 8252 }; 8253 auto GetVectorCost = [=](InstructionCost CommonCost) { 8254 unsigned OpIdx = isa<UnaryOperator>(VL0) ? 0 : 1; 8255 TTI::OperandValueInfo Op1Info = getOperandInfo(E->getOperand(0)); 8256 TTI::OperandValueInfo Op2Info = getOperandInfo(E->getOperand(OpIdx)); 8257 return TTI->getArithmeticInstrCost(ShuffleOrOp, VecTy, CostKind, Op1Info, 8258 Op2Info) + 8259 CommonCost; 8260 }; 8261 return GetCostDiff(GetScalarCost, GetVectorCost); 8262 } 8263 case Instruction::GetElementPtr: { 8264 return CommonCost + GetGEPCostDiff(VL, VL0); 8265 } 8266 case Instruction::Load: { 8267 auto GetScalarCost = [&](unsigned Idx) { 8268 auto *VI = cast<LoadInst>(UniqueValues[Idx]); 8269 return TTI->getMemoryOpCost(Instruction::Load, ScalarTy, VI->getAlign(), 8270 VI->getPointerAddressSpace(), CostKind, 8271 TTI::OperandValueInfo(), VI); 8272 }; 8273 auto *LI0 = cast<LoadInst>(VL0); 8274 auto GetVectorCost = [&](InstructionCost CommonCost) { 8275 InstructionCost VecLdCost; 8276 if (E->State == TreeEntry::Vectorize) { 8277 VecLdCost = TTI->getMemoryOpCost( 8278 Instruction::Load, VecTy, LI0->getAlign(), 8279 LI0->getPointerAddressSpace(), CostKind, TTI::OperandValueInfo()); 8280 } else { 8281 assert((E->State == TreeEntry::ScatterVectorize || 8282 E->State == TreeEntry::PossibleStridedVectorize) && 8283 "Unknown EntryState"); 8284 Align CommonAlignment = LI0->getAlign(); 8285 for (Value *V : UniqueValues) 8286 CommonAlignment = 8287 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign()); 8288 VecLdCost = TTI->getGatherScatterOpCost( 8289 Instruction::Load, VecTy, LI0->getPointerOperand(), 8290 /*VariableMask=*/false, CommonAlignment, CostKind); 8291 } 8292 return VecLdCost + CommonCost; 8293 }; 8294 8295 InstructionCost Cost = GetCostDiff(GetScalarCost, GetVectorCost); 8296 // If this node generates masked gather load then it is not a terminal node. 8297 // Hence address operand cost is estimated separately. 8298 if (E->State == TreeEntry::ScatterVectorize || 8299 E->State == TreeEntry::PossibleStridedVectorize) 8300 return Cost; 8301 8302 // Estimate cost of GEPs since this tree node is a terminator. 
8303 SmallVector<Value *> PointerOps(VL.size()); 8304 for (auto [I, V] : enumerate(VL)) 8305 PointerOps[I] = cast<LoadInst>(V)->getPointerOperand(); 8306 return Cost + GetGEPCostDiff(PointerOps, LI0->getPointerOperand()); 8307 } 8308 case Instruction::Store: { 8309 bool IsReorder = !E->ReorderIndices.empty(); 8310 auto GetScalarCost = [=](unsigned Idx) { 8311 auto *VI = cast<StoreInst>(VL[Idx]); 8312 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(VI->getValueOperand()); 8313 return TTI->getMemoryOpCost(Instruction::Store, ScalarTy, VI->getAlign(), 8314 VI->getPointerAddressSpace(), CostKind, 8315 OpInfo, VI); 8316 }; 8317 auto *BaseSI = 8318 cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0); 8319 auto GetVectorCost = [=](InstructionCost CommonCost) { 8320 // We know that we can merge the stores. Calculate the cost. 8321 TTI::OperandValueInfo OpInfo = getOperandInfo(E->getOperand(0)); 8322 return TTI->getMemoryOpCost(Instruction::Store, VecTy, BaseSI->getAlign(), 8323 BaseSI->getPointerAddressSpace(), CostKind, 8324 OpInfo) + 8325 CommonCost; 8326 }; 8327 SmallVector<Value *> PointerOps(VL.size()); 8328 for (auto [I, V] : enumerate(VL)) { 8329 unsigned Idx = IsReorder ? E->ReorderIndices[I] : I; 8330 PointerOps[Idx] = cast<StoreInst>(V)->getPointerOperand(); 8331 } 8332 8333 return GetCostDiff(GetScalarCost, GetVectorCost) + 8334 GetGEPCostDiff(PointerOps, BaseSI->getPointerOperand()); 8335 } 8336 case Instruction::Call: { 8337 auto GetScalarCost = [&](unsigned Idx) { 8338 auto *CI = cast<CallInst>(UniqueValues[Idx]); 8339 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8340 if (ID != Intrinsic::not_intrinsic) { 8341 IntrinsicCostAttributes CostAttrs(ID, *CI, 1); 8342 return TTI->getIntrinsicInstrCost(CostAttrs, CostKind); 8343 } 8344 return TTI->getCallInstrCost(CI->getCalledFunction(), 8345 CI->getFunctionType()->getReturnType(), 8346 CI->getFunctionType()->params(), CostKind); 8347 }; 8348 auto GetVectorCost = [=](InstructionCost CommonCost) { 8349 auto *CI = cast<CallInst>(VL0); 8350 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); 8351 return std::min(VecCallCosts.first, VecCallCosts.second) + CommonCost; 8352 }; 8353 return GetCostDiff(GetScalarCost, GetVectorCost); 8354 } 8355 case Instruction::ShuffleVector: { 8356 assert(E->isAltShuffle() && 8357 ((Instruction::isBinaryOp(E->getOpcode()) && 8358 Instruction::isBinaryOp(E->getAltOpcode())) || 8359 (Instruction::isCast(E->getOpcode()) && 8360 Instruction::isCast(E->getAltOpcode())) || 8361 (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) && 8362 "Invalid Shuffle Vector Operand"); 8363 // Try to find the previous shuffle node with the same operands and same 8364 // main/alternate ops. 8365 auto TryFindNodeWithEqualOperands = [=]() { 8366 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) { 8367 if (TE.get() == E) 8368 break; 8369 if (TE->isAltShuffle() && 8370 ((TE->getOpcode() == E->getOpcode() && 8371 TE->getAltOpcode() == E->getAltOpcode()) || 8372 (TE->getOpcode() == E->getAltOpcode() && 8373 TE->getAltOpcode() == E->getOpcode())) && 8374 TE->hasEqualOperands(*E)) 8375 return true; 8376 } 8377 return false; 8378 }; 8379 auto GetScalarCost = [&](unsigned Idx) { 8380 auto *VI = cast<Instruction>(UniqueValues[Idx]); 8381 assert(E->isOpcodeOrAlt(VI) && "Unexpected main/alternate opcode"); 8382 (void)E; 8383 return TTI->getInstructionCost(VI, CostKind); 8384 }; 8385 // FIXME: Workaround for syntax error reported by MSVC buildbots. 
8386 TargetTransformInfo &TTIRef = *TTI; 8387 // Need to clear CommonCost since the final shuffle cost is included into 8388 // vector cost. 8389 auto GetVectorCost = [&](InstructionCost) { 8390 // VecCost is equal to sum of the cost of creating 2 vectors 8391 // and the cost of creating shuffle. 8392 InstructionCost VecCost = 0; 8393 if (TryFindNodeWithEqualOperands()) { 8394 LLVM_DEBUG({ 8395 dbgs() << "SLP: diamond match for alternate node found.\n"; 8396 E->dump(); 8397 }); 8398 // No need to add new vector costs here since we're going to reuse 8399 // same main/alternate vector ops, just do different shuffling. 8400 } else if (Instruction::isBinaryOp(E->getOpcode())) { 8401 VecCost = 8402 TTIRef.getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind); 8403 VecCost += 8404 TTIRef.getArithmeticInstrCost(E->getAltOpcode(), VecTy, CostKind); 8405 } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) { 8406 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size()); 8407 VecCost = TTIRef.getCmpSelInstrCost(E->getOpcode(), VecTy, MaskTy, 8408 CI0->getPredicate(), CostKind, VL0); 8409 VecCost += TTIRef.getCmpSelInstrCost( 8410 E->getOpcode(), VecTy, MaskTy, 8411 cast<CmpInst>(E->getAltOp())->getPredicate(), CostKind, 8412 E->getAltOp()); 8413 } else { 8414 Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType(); 8415 Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType(); 8416 auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size()); 8417 auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size()); 8418 VecCost = TTIRef.getCastInstrCost(E->getOpcode(), VecTy, Src0Ty, 8419 TTI::CastContextHint::None, CostKind); 8420 VecCost += 8421 TTIRef.getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty, 8422 TTI::CastContextHint::None, CostKind); 8423 } 8424 SmallVector<int> Mask; 8425 E->buildAltOpShuffleMask( 8426 [E](Instruction *I) { 8427 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 8428 return I->getOpcode() == E->getAltOpcode(); 8429 }, 8430 Mask); 8431 VecCost += TTIRef.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, 8432 FinalVecTy, Mask); 8433 // Patterns like [fadd,fsub] can be combined into a single instruction 8434 // in x86. Reordering them into [fsub,fadd] blocks this pattern. So we 8435 // need to take into account their order when looking for the most used 8436 // order. 8437 unsigned Opcode0 = E->getOpcode(); 8438 unsigned Opcode1 = E->getAltOpcode(); 8439 // The opcode mask selects between the two opcodes. 8440 SmallBitVector OpcodeMask(E->Scalars.size(), false); 8441 for (unsigned Lane : seq<unsigned>(0, E->Scalars.size())) 8442 if (cast<Instruction>(E->Scalars[Lane])->getOpcode() == Opcode1) 8443 OpcodeMask.set(Lane); 8444 // If this pattern is supported by the target then we consider the 8445 // order. 8446 if (TTIRef.isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask)) { 8447 InstructionCost AltVecCost = TTIRef.getAltInstrCost( 8448 VecTy, Opcode0, Opcode1, OpcodeMask, CostKind); 8449 return AltVecCost < VecCost ? AltVecCost : VecCost; 8450 } 8451 // TODO: Check the reverse order too. 
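// Illustrative example (hypothetical bundle): an alternate node such as
// {fadd, fsub, fadd, fsub} is costed as a whole-vector fadd plus a
// whole-vector fsub plus one SK_PermuteTwoSrc blend, unless the target
// reports the pattern as a legal "alt" instruction (e.g. x86 addsub), in
// which case the cheaper getAltInstrCost estimate is returned instead.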
8452 return VecCost; 8453 }; 8454 return GetCostDiff(GetScalarCost, GetVectorCost); 8455 } 8456 default: 8457 llvm_unreachable("Unknown instruction"); 8458 } 8459 } 8460 8461 bool BoUpSLP::isFullyVectorizableTinyTree(bool ForReduction) const { 8462 LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height " 8463 << VectorizableTree.size() << " is fully vectorizable .\n"); 8464 8465 auto &&AreVectorizableGathers = [this](const TreeEntry *TE, unsigned Limit) { 8466 SmallVector<int> Mask; 8467 return TE->State == TreeEntry::NeedToGather && 8468 !any_of(TE->Scalars, 8469 [this](Value *V) { return EphValues.contains(V); }) && 8470 (allConstant(TE->Scalars) || isSplat(TE->Scalars) || 8471 TE->Scalars.size() < Limit || 8472 ((TE->getOpcode() == Instruction::ExtractElement || 8473 all_of(TE->Scalars, 8474 [](Value *V) { 8475 return isa<ExtractElementInst, UndefValue>(V); 8476 })) && 8477 isFixedVectorShuffle(TE->Scalars, Mask)) || 8478 (TE->State == TreeEntry::NeedToGather && 8479 TE->getOpcode() == Instruction::Load && !TE->isAltShuffle())); 8480 }; 8481 8482 // We only handle trees of heights 1 and 2. 8483 if (VectorizableTree.size() == 1 && 8484 (VectorizableTree[0]->State == TreeEntry::Vectorize || 8485 (ForReduction && 8486 AreVectorizableGathers(VectorizableTree[0].get(), 8487 VectorizableTree[0]->Scalars.size()) && 8488 VectorizableTree[0]->getVectorFactor() > 2))) 8489 return true; 8490 8491 if (VectorizableTree.size() != 2) 8492 return false; 8493 8494 // Handle splat and all-constants stores. Also try to vectorize tiny trees 8495 // with the second gather nodes if they have less scalar operands rather than 8496 // the initial tree element (may be profitable to shuffle the second gather) 8497 // or they are extractelements, which form shuffle. 8498 SmallVector<int> Mask; 8499 if (VectorizableTree[0]->State == TreeEntry::Vectorize && 8500 AreVectorizableGathers(VectorizableTree[1].get(), 8501 VectorizableTree[0]->Scalars.size())) 8502 return true; 8503 8504 // Gathering cost would be too much for tiny trees. 8505 if (VectorizableTree[0]->State == TreeEntry::NeedToGather || 8506 (VectorizableTree[1]->State == TreeEntry::NeedToGather && 8507 VectorizableTree[0]->State != TreeEntry::ScatterVectorize && 8508 VectorizableTree[0]->State != TreeEntry::PossibleStridedVectorize)) 8509 return false; 8510 8511 return true; 8512 } 8513 8514 static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts, 8515 TargetTransformInfo *TTI, 8516 bool MustMatchOrInst) { 8517 // Look past the root to find a source value. Arbitrarily follow the 8518 // path through operand 0 of any 'or'. Also, peek through optional 8519 // shift-left-by-multiple-of-8-bits. 8520 Value *ZextLoad = Root; 8521 const APInt *ShAmtC; 8522 bool FoundOr = false; 8523 while (!isa<ConstantExpr>(ZextLoad) && 8524 (match(ZextLoad, m_Or(m_Value(), m_Value())) || 8525 (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) && 8526 ShAmtC->urem(8) == 0))) { 8527 auto *BinOp = cast<BinaryOperator>(ZextLoad); 8528 ZextLoad = BinOp->getOperand(0); 8529 if (BinOp->getOpcode() == Instruction::Or) 8530 FoundOr = true; 8531 } 8532 // Check if the input is an extended load of the required or/shift expression. 8533 Value *Load; 8534 if ((MustMatchOrInst && !FoundOr) || ZextLoad == Root || 8535 !match(ZextLoad, m_ZExt(m_Value(Load))) || !isa<LoadInst>(Load)) 8536 return false; 8537 8538 // Require that the total load bit width is a legal integer type. 8539 // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target. 
8540 // But <16 x i8> --> i128 is not, so the backend probably can't reduce it.
8541 Type *SrcTy = Load->getType();
8542 unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts;
8543 if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth)))
8544 return false;
8545 
8546 // Everything matched - assume that we can fold the whole sequence using
8547 // load combining.
8548 LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at "
8549 << *(cast<Instruction>(Root)) << "\n");
8550 
8551 return true;
8552 }
8553 
8554 bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const {
8555 if (RdxKind != RecurKind::Or)
8556 return false;
8557 
8558 unsigned NumElts = VectorizableTree[0]->Scalars.size();
8559 Value *FirstReduced = VectorizableTree[0]->Scalars[0];
8560 return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI,
8561 /* MatchOr */ false);
8562 }
8563 
8564 bool BoUpSLP::isLoadCombineCandidate() const {
8565 // Peek through a final sequence of stores and check if all operations are
8566 // likely to be load-combined.
8567 unsigned NumElts = VectorizableTree[0]->Scalars.size();
8568 for (Value *Scalar : VectorizableTree[0]->Scalars) {
8569 Value *X;
8570 if (!match(Scalar, m_Store(m_Value(X), m_Value())) ||
8571 !isLoadCombineCandidateImpl(X, NumElts, TTI, /* MatchOr */ true))
8572 return false;
8573 }
8574 return true;
8575 }
8576 
8577 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable(bool ForReduction) const {
8578 // No need to vectorize inserts of gathered values.
8579 if (VectorizableTree.size() == 2 &&
8580 isa<InsertElementInst>(VectorizableTree[0]->Scalars[0]) &&
8581 VectorizableTree[1]->State == TreeEntry::NeedToGather &&
8582 (VectorizableTree[1]->getVectorFactor() <= 2 ||
8583 !(isSplat(VectorizableTree[1]->Scalars) ||
8584 allConstant(VectorizableTree[1]->Scalars))))
8585 return true;
8586 
8587 // If the graph includes only PHI nodes and gathers, it is definitely not
8588 // profitable for vectorization; we can skip it if the cost threshold is the
8589 // default. The cost of vectorized PHI nodes is almost always 0, plus the
8590 // cost of gathers/buildvectors.
8591 constexpr int Limit = 4;
8592 if (!ForReduction && !SLPCostThreshold.getNumOccurrences() &&
8593 !VectorizableTree.empty() &&
8594 all_of(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) {
8595 return (TE->State == TreeEntry::NeedToGather &&
8596 TE->getOpcode() != Instruction::ExtractElement &&
8597 count_if(TE->Scalars,
8598 [](Value *V) { return isa<ExtractElementInst>(V); }) <=
8599 Limit) ||
8600 TE->getOpcode() == Instruction::PHI;
8601 }))
8602 return true;
8603 
8604 // We can vectorize the tree if its size is greater than or equal to the
8605 // minimum size specified by the MinTreeSize command line option.
8606 if (VectorizableTree.size() >= MinTreeSize)
8607 return false;
8608 
8609 // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
8610 // can vectorize it if we can prove it fully vectorizable.
8611 if (isFullyVectorizableTinyTree(ForReduction))
8612 return false;
8613 
8614 assert(VectorizableTree.empty()
8615 ? ExternalUses.empty()
8616 : true && "We shouldn't have any external users");
8617 
8618 // Otherwise, we can't vectorize the tree. It is both tiny and not fully
8619 // vectorizable.
8620 return true;
8621 }
8622 
8623 InstructionCost BoUpSLP::getSpillCost() const {
8624 // Walk from the bottom of the tree to the top, tracking which values are
8625 // live.
When we see a call instruction that is not part of our tree, 8626 // query TTI to see if there is a cost to keeping values live over it 8627 // (for example, if spills and fills are required). 8628 unsigned BundleWidth = VectorizableTree.front()->Scalars.size(); 8629 InstructionCost Cost = 0; 8630 8631 SmallPtrSet<Instruction *, 4> LiveValues; 8632 Instruction *PrevInst = nullptr; 8633 8634 // The entries in VectorizableTree are not necessarily ordered by their 8635 // position in basic blocks. Collect them and order them by dominance so later 8636 // instructions are guaranteed to be visited first. For instructions in 8637 // different basic blocks, we only scan to the beginning of the block, so 8638 // their order does not matter, as long as all instructions in a basic block 8639 // are grouped together. Using dominance ensures a deterministic order. 8640 SmallVector<Instruction *, 16> OrderedScalars; 8641 for (const auto &TEPtr : VectorizableTree) { 8642 if (TEPtr->State != TreeEntry::Vectorize) 8643 continue; 8644 Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]); 8645 if (!Inst) 8646 continue; 8647 OrderedScalars.push_back(Inst); 8648 } 8649 llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) { 8650 auto *NodeA = DT->getNode(A->getParent()); 8651 auto *NodeB = DT->getNode(B->getParent()); 8652 assert(NodeA && "Should only process reachable instructions"); 8653 assert(NodeB && "Should only process reachable instructions"); 8654 assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) && 8655 "Different nodes should have different DFS numbers"); 8656 if (NodeA != NodeB) 8657 return NodeA->getDFSNumIn() > NodeB->getDFSNumIn(); 8658 return B->comesBefore(A); 8659 }); 8660 8661 for (Instruction *Inst : OrderedScalars) { 8662 if (!PrevInst) { 8663 PrevInst = Inst; 8664 continue; 8665 } 8666 8667 // Update LiveValues. 8668 LiveValues.erase(PrevInst); 8669 for (auto &J : PrevInst->operands()) { 8670 if (isa<Instruction>(&*J) && getTreeEntry(&*J)) 8671 LiveValues.insert(cast<Instruction>(&*J)); 8672 } 8673 8674 LLVM_DEBUG({ 8675 dbgs() << "SLP: #LV: " << LiveValues.size(); 8676 for (auto *X : LiveValues) 8677 dbgs() << " " << X->getName(); 8678 dbgs() << ", Looking at "; 8679 Inst->dump(); 8680 }); 8681 8682 // Now find the sequence of instructions between PrevInst and Inst. 8683 unsigned NumCalls = 0; 8684 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(), 8685 PrevInstIt = 8686 PrevInst->getIterator().getReverse(); 8687 while (InstIt != PrevInstIt) { 8688 if (PrevInstIt == PrevInst->getParent()->rend()) { 8689 PrevInstIt = Inst->getParent()->rbegin(); 8690 continue; 8691 } 8692 8693 auto NoCallIntrinsic = [this](Instruction *I) { 8694 if (auto *II = dyn_cast<IntrinsicInst>(I)) { 8695 if (II->isAssumeLikeIntrinsic()) 8696 return true; 8697 FastMathFlags FMF; 8698 SmallVector<Type *, 4> Tys; 8699 for (auto &ArgOp : II->args()) 8700 Tys.push_back(ArgOp->getType()); 8701 if (auto *FPMO = dyn_cast<FPMathOperator>(II)) 8702 FMF = FPMO->getFastMathFlags(); 8703 IntrinsicCostAttributes ICA(II->getIntrinsicID(), II->getType(), Tys, 8704 FMF); 8705 InstructionCost IntrCost = 8706 TTI->getIntrinsicInstrCost(ICA, TTI::TCK_RecipThroughput); 8707 InstructionCost CallCost = TTI->getCallInstrCost( 8708 nullptr, II->getType(), Tys, TTI::TCK_RecipThroughput); 8709 if (IntrCost < CallCost) 8710 return true; 8711 } 8712 return false; 8713 }; 8714 8715 // Debug information does not impact spill cost. 
8716 if (isa<CallBase>(&*PrevInstIt) && !NoCallIntrinsic(&*PrevInstIt) &&
8717 &*PrevInstIt != PrevInst)
8718 NumCalls++;
8719 
8720 ++PrevInstIt;
8721 }
8722 
8723 if (NumCalls) {
8724 SmallVector<Type *, 4> V;
8725 for (auto *II : LiveValues) {
8726 auto *ScalarTy = II->getType();
8727 if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy))
8728 ScalarTy = VectorTy->getElementType();
8729 V.push_back(FixedVectorType::get(ScalarTy, BundleWidth));
8730 }
8731 Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V);
8732 }
8733 
8734 PrevInst = Inst;
8735 }
8736 
8737 return Cost;
8738 }
8739 
8740 /// Checks if the \p IE1 instruction is followed by the \p IE2 instruction in
8741 /// the buildvector sequence.
8742 static bool isFirstInsertElement(const InsertElementInst *IE1,
8743 const InsertElementInst *IE2) {
8744 if (IE1 == IE2)
8745 return false;
8746 const auto *I1 = IE1;
8747 const auto *I2 = IE2;
8748 const InsertElementInst *PrevI1;
8749 const InsertElementInst *PrevI2;
8750 unsigned Idx1 = *getInsertIndex(IE1);
8751 unsigned Idx2 = *getInsertIndex(IE2);
8752 do {
8753 if (I2 == IE1)
8754 return true;
8755 if (I1 == IE2)
8756 return false;
8757 PrevI1 = I1;
8758 PrevI2 = I2;
8759 if (I1 && (I1 == IE1 || I1->hasOneUse()) &&
8760 getInsertIndex(I1).value_or(Idx2) != Idx2)
8761 I1 = dyn_cast<InsertElementInst>(I1->getOperand(0));
8762 if (I2 && ((I2 == IE2 || I2->hasOneUse())) &&
8763 getInsertIndex(I2).value_or(Idx1) != Idx1)
8764 I2 = dyn_cast<InsertElementInst>(I2->getOperand(0));
8765 } while ((I1 && PrevI1 != I1) || (I2 && PrevI2 != I2));
8766 llvm_unreachable("Two different buildvectors not expected.");
8767 }
8768 
8769 namespace {
8770 /// Returns the incoming Value *, if the requested type is Value * too, or a
8771 /// default value otherwise.
8772 struct ValueSelect {
8773 template <typename U>
8774 static std::enable_if_t<std::is_same_v<Value *, U>, Value *> get(Value *V) {
8775 return V;
8776 }
8777 template <typename U>
8778 static std::enable_if_t<!std::is_same_v<Value *, U>, U> get(Value *) {
8779 return U();
8780 }
8781 };
8782 } // namespace
8783 
8784 /// Does the analysis of the provided shuffle masks and performs the requested
8785 /// actions on the vectors with the given shuffle masks. It tries to do this in
8786 /// several steps.
8787 /// 1. If the Base vector is not an undef vector, resize the very first mask to
8788 /// have a common VF and perform the action for 2 input vectors (including the
8789 /// non-undef Base). Other shuffle masks are combined with the result of the
8790 /// first stage and processed as a shuffle of 2 vectors.
8791 /// 2. If the Base is an undef vector and there is only 1 shuffle mask, perform
8792 /// the action only for 1 vector with the given mask, if it is not the identity
8793 /// mask.
8794 /// 3. If > 2 masks are used, perform the remaining shuffle actions for 2
8795 /// vectors, combining the masks properly between the steps.
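/// A small example for step 3 (hypothetical masks): with an undef Base and
/// two inputs of VF 4 with masks {0, P, 1, P} and {P, 2, P, 3} (P being
/// poison), the inputs have equal VFs, so the masks are merged into the
/// two-source mask {0, 6, 1, 7} and a single Action call shuffles both
/// vectors at once.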
8796 template <typename T> 8797 static T *performExtractsShuffleAction( 8798 MutableArrayRef<std::pair<T *, SmallVector<int>>> ShuffleMask, Value *Base, 8799 function_ref<unsigned(T *)> GetVF, 8800 function_ref<std::pair<T *, bool>(T *, ArrayRef<int>, bool)> ResizeAction, 8801 function_ref<T *(ArrayRef<int>, ArrayRef<T *>)> Action) { 8802 assert(!ShuffleMask.empty() && "Empty list of shuffles for inserts."); 8803 SmallVector<int> Mask(ShuffleMask.begin()->second); 8804 auto VMIt = std::next(ShuffleMask.begin()); 8805 T *Prev = nullptr; 8806 SmallBitVector UseMask = 8807 buildUseMask(Mask.size(), Mask, UseMask::UndefsAsMask); 8808 SmallBitVector IsBaseUndef = isUndefVector(Base, UseMask); 8809 if (!IsBaseUndef.all()) { 8810 // Base is not undef, need to combine it with the next subvectors. 8811 std::pair<T *, bool> Res = 8812 ResizeAction(ShuffleMask.begin()->first, Mask, /*ForSingleMask=*/false); 8813 SmallBitVector IsBasePoison = isUndefVector<true>(Base, UseMask); 8814 for (unsigned Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) { 8815 if (Mask[Idx] == PoisonMaskElem) 8816 Mask[Idx] = IsBasePoison.test(Idx) ? PoisonMaskElem : Idx; 8817 else 8818 Mask[Idx] = (Res.second ? Idx : Mask[Idx]) + VF; 8819 } 8820 auto *V = ValueSelect::get<T *>(Base); 8821 (void)V; 8822 assert((!V || GetVF(V) == Mask.size()) && 8823 "Expected base vector of VF number of elements."); 8824 Prev = Action(Mask, {nullptr, Res.first}); 8825 } else if (ShuffleMask.size() == 1) { 8826 // Base is undef and only 1 vector is shuffled - perform the action only for 8827 // single vector, if the mask is not the identity mask. 8828 std::pair<T *, bool> Res = ResizeAction(ShuffleMask.begin()->first, Mask, 8829 /*ForSingleMask=*/true); 8830 if (Res.second) 8831 // Identity mask is found. 8832 Prev = Res.first; 8833 else 8834 Prev = Action(Mask, {ShuffleMask.begin()->first}); 8835 } else { 8836 // Base is undef and at least 2 input vectors shuffled - perform 2 vectors 8837 // shuffles step by step, combining shuffle between the steps. 8838 unsigned Vec1VF = GetVF(ShuffleMask.begin()->first); 8839 unsigned Vec2VF = GetVF(VMIt->first); 8840 if (Vec1VF == Vec2VF) { 8841 // No need to resize the input vectors since they are of the same size, we 8842 // can shuffle them directly. 8843 ArrayRef<int> SecMask = VMIt->second; 8844 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) { 8845 if (SecMask[I] != PoisonMaskElem) { 8846 assert(Mask[I] == PoisonMaskElem && "Multiple uses of scalars."); 8847 Mask[I] = SecMask[I] + Vec1VF; 8848 } 8849 } 8850 Prev = Action(Mask, {ShuffleMask.begin()->first, VMIt->first}); 8851 } else { 8852 // Vectors of different sizes - resize and reshuffle. 8853 std::pair<T *, bool> Res1 = ResizeAction(ShuffleMask.begin()->first, Mask, 8854 /*ForSingleMask=*/false); 8855 std::pair<T *, bool> Res2 = 8856 ResizeAction(VMIt->first, VMIt->second, /*ForSingleMask=*/false); 8857 ArrayRef<int> SecMask = VMIt->second; 8858 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) { 8859 if (Mask[I] != PoisonMaskElem) { 8860 assert(SecMask[I] == PoisonMaskElem && "Multiple uses of scalars."); 8861 if (Res1.second) 8862 Mask[I] = I; 8863 } else if (SecMask[I] != PoisonMaskElem) { 8864 assert(Mask[I] == PoisonMaskElem && "Multiple uses of scalars."); 8865 Mask[I] = (Res2.second ? 
I : SecMask[I]) + VF; 8866 } 8867 } 8868 Prev = Action(Mask, {Res1.first, Res2.first}); 8869 } 8870 VMIt = std::next(VMIt); 8871 } 8872 bool IsBaseNotUndef = !IsBaseUndef.all(); 8873 (void)IsBaseNotUndef; 8874 // Perform requested actions for the remaining masks/vectors. 8875 for (auto E = ShuffleMask.end(); VMIt != E; ++VMIt) { 8876 // Shuffle other input vectors, if any. 8877 std::pair<T *, bool> Res = 8878 ResizeAction(VMIt->first, VMIt->second, /*ForSingleMask=*/false); 8879 ArrayRef<int> SecMask = VMIt->second; 8880 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) { 8881 if (SecMask[I] != PoisonMaskElem) { 8882 assert((Mask[I] == PoisonMaskElem || IsBaseNotUndef) && 8883 "Multiple uses of scalars."); 8884 Mask[I] = (Res.second ? I : SecMask[I]) + VF; 8885 } else if (Mask[I] != PoisonMaskElem) { 8886 Mask[I] = I; 8887 } 8888 } 8889 Prev = Action(Mask, {Prev, Res.first}); 8890 } 8891 return Prev; 8892 } 8893 8894 InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) { 8895 InstructionCost Cost = 0; 8896 LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size " 8897 << VectorizableTree.size() << ".\n"); 8898 8899 unsigned BundleWidth = VectorizableTree[0]->Scalars.size(); 8900 8901 SmallPtrSet<Value *, 4> CheckedExtracts; 8902 for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) { 8903 TreeEntry &TE = *VectorizableTree[I]; 8904 if (TE.State == TreeEntry::NeedToGather) { 8905 if (const TreeEntry *E = getTreeEntry(TE.getMainOp()); 8906 E && E->getVectorFactor() == TE.getVectorFactor() && 8907 E->isSame(TE.Scalars)) { 8908 // Some gather nodes might be absolutely the same as some vectorizable 8909 // nodes after reordering, need to handle it. 8910 LLVM_DEBUG(dbgs() << "SLP: Adding cost 0 for bundle " 8911 << shortBundleName(TE.Scalars) << ".\n" 8912 << "SLP: Current total cost = " << Cost << "\n"); 8913 continue; 8914 } 8915 } 8916 8917 InstructionCost C = getEntryCost(&TE, VectorizedVals, CheckedExtracts); 8918 Cost += C; 8919 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle " 8920 << shortBundleName(TE.Scalars) << ".\n" 8921 << "SLP: Current total cost = " << Cost << "\n"); 8922 } 8923 8924 SmallPtrSet<Value *, 16> ExtractCostCalculated; 8925 InstructionCost ExtractCost = 0; 8926 SmallVector<MapVector<const TreeEntry *, SmallVector<int>>> ShuffleMasks; 8927 SmallVector<std::pair<Value *, const TreeEntry *>> FirstUsers; 8928 SmallVector<APInt> DemandedElts; 8929 SmallDenseSet<Value *, 4> UsedInserts; 8930 DenseSet<Value *> VectorCasts; 8931 for (ExternalUser &EU : ExternalUses) { 8932 // We only add extract cost once for the same scalar. 8933 if (!isa_and_nonnull<InsertElementInst>(EU.User) && 8934 !ExtractCostCalculated.insert(EU.Scalar).second) 8935 continue; 8936 8937 // Uses by ephemeral values are free (because the ephemeral value will be 8938 // removed prior to code generation, and so the extraction will be 8939 // removed as well). 8940 if (EphValues.count(EU.User)) 8941 continue; 8942 8943 // No extract cost for vector "scalar" 8944 if (isa<FixedVectorType>(EU.Scalar->getType())) 8945 continue; 8946 8947 // If found user is an insertelement, do not calculate extract cost but try 8948 // to detect it as a final shuffled/identity match. 
8949 if (auto *VU = dyn_cast_or_null<InsertElementInst>(EU.User)) { 8950 if (auto *FTy = dyn_cast<FixedVectorType>(VU->getType())) { 8951 if (!UsedInserts.insert(VU).second) 8952 continue; 8953 std::optional<unsigned> InsertIdx = getInsertIndex(VU); 8954 if (InsertIdx) { 8955 const TreeEntry *ScalarTE = getTreeEntry(EU.Scalar); 8956 auto *It = find_if( 8957 FirstUsers, 8958 [this, VU](const std::pair<Value *, const TreeEntry *> &Pair) { 8959 return areTwoInsertFromSameBuildVector( 8960 VU, cast<InsertElementInst>(Pair.first), 8961 [this](InsertElementInst *II) -> Value * { 8962 Value *Op0 = II->getOperand(0); 8963 if (getTreeEntry(II) && !getTreeEntry(Op0)) 8964 return nullptr; 8965 return Op0; 8966 }); 8967 }); 8968 int VecId = -1; 8969 if (It == FirstUsers.end()) { 8970 (void)ShuffleMasks.emplace_back(); 8971 SmallVectorImpl<int> &Mask = ShuffleMasks.back()[ScalarTE]; 8972 if (Mask.empty()) 8973 Mask.assign(FTy->getNumElements(), PoisonMaskElem); 8974 // Find the insertvector, vectorized in tree, if any. 8975 Value *Base = VU; 8976 while (auto *IEBase = dyn_cast<InsertElementInst>(Base)) { 8977 if (IEBase != EU.User && 8978 (!IEBase->hasOneUse() || 8979 getInsertIndex(IEBase).value_or(*InsertIdx) == *InsertIdx)) 8980 break; 8981 // Build the mask for the vectorized insertelement instructions. 8982 if (const TreeEntry *E = getTreeEntry(IEBase)) { 8983 VU = IEBase; 8984 do { 8985 IEBase = cast<InsertElementInst>(Base); 8986 int Idx = *getInsertIndex(IEBase); 8987 assert(Mask[Idx] == PoisonMaskElem && 8988 "InsertElementInstruction used already."); 8989 Mask[Idx] = Idx; 8990 Base = IEBase->getOperand(0); 8991 } while (E == getTreeEntry(Base)); 8992 break; 8993 } 8994 Base = cast<InsertElementInst>(Base)->getOperand(0); 8995 } 8996 FirstUsers.emplace_back(VU, ScalarTE); 8997 DemandedElts.push_back(APInt::getZero(FTy->getNumElements())); 8998 VecId = FirstUsers.size() - 1; 8999 auto It = MinBWs.find(ScalarTE); 9000 if (It != MinBWs.end() && VectorCasts.insert(EU.Scalar).second) { 9001 unsigned BWSz = It->second.second; 9002 unsigned SrcBWSz = DL->getTypeSizeInBits(FTy->getElementType()); 9003 unsigned VecOpcode; 9004 if (BWSz < SrcBWSz) 9005 VecOpcode = Instruction::Trunc; 9006 else 9007 VecOpcode = 9008 It->second.second ? Instruction::SExt : Instruction::ZExt; 9009 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 9010 InstructionCost C = TTI->getCastInstrCost( 9011 VecOpcode, FTy, 9012 FixedVectorType::get( 9013 IntegerType::get(FTy->getContext(), It->second.first), 9014 FTy->getNumElements()), 9015 TTI::CastContextHint::None, CostKind); 9016 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 9017 << " for extending externally used vector with " 9018 "non-equal minimum bitwidth.\n"); 9019 Cost += C; 9020 } 9021 } else { 9022 if (isFirstInsertElement(VU, cast<InsertElementInst>(It->first))) 9023 It->first = VU; 9024 VecId = std::distance(FirstUsers.begin(), It); 9025 } 9026 int InIdx = *InsertIdx; 9027 SmallVectorImpl<int> &Mask = ShuffleMasks[VecId][ScalarTE]; 9028 if (Mask.empty()) 9029 Mask.assign(FTy->getNumElements(), PoisonMaskElem); 9030 Mask[InIdx] = EU.Lane; 9031 DemandedElts[VecId].setBit(InIdx); 9032 continue; 9033 } 9034 } 9035 } 9036 9037 // If we plan to rewrite the tree in a smaller type, we will need to sign 9038 // extend the extracted value back to the original type. Here, we account 9039 // for the extract and the added cost of the sign extend if needed. 
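  // For example (illustrative): if an i32 scalar was demoted to i16 inside the
  // tree, its external use is charged an extract from the <BundleWidth x i16>
  // vector plus a sext/zext back to i32, modeled below via
  // getExtractWithExtendCost.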
9040 auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth); 9041 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 9042 auto It = MinBWs.find(getTreeEntry(EU.Scalar)); 9043 if (It != MinBWs.end()) { 9044 auto *MinTy = IntegerType::get(F->getContext(), It->second.first); 9045 unsigned Extend = 9046 It->second.second ? Instruction::SExt : Instruction::ZExt; 9047 VecTy = FixedVectorType::get(MinTy, BundleWidth); 9048 ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(), 9049 VecTy, EU.Lane); 9050 } else { 9051 ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, 9052 CostKind, EU.Lane); 9053 } 9054 } 9055 // Add reduced value cost, if resized. 9056 if (!VectorizedVals.empty()) { 9057 auto BWIt = MinBWs.find(VectorizableTree.front().get()); 9058 if (BWIt != MinBWs.end()) { 9059 Type *DstTy = VectorizableTree.front()->Scalars.front()->getType(); 9060 unsigned OriginalSz = DL->getTypeSizeInBits(DstTy); 9061 unsigned Opcode = Instruction::Trunc; 9062 if (OriginalSz < BWIt->second.first) 9063 Opcode = BWIt->second.second ? Instruction::SExt : Instruction::ZExt; 9064 Type *SrcTy = IntegerType::get(DstTy->getContext(), BWIt->second.first); 9065 Cost += TTI->getCastInstrCost(Opcode, DstTy, SrcTy, 9066 TTI::CastContextHint::None, 9067 TTI::TCK_RecipThroughput); 9068 } 9069 } 9070 9071 InstructionCost SpillCost = getSpillCost(); 9072 Cost += SpillCost + ExtractCost; 9073 auto &&ResizeToVF = [this, &Cost](const TreeEntry *TE, ArrayRef<int> Mask, 9074 bool) { 9075 InstructionCost C = 0; 9076 unsigned VF = Mask.size(); 9077 unsigned VecVF = TE->getVectorFactor(); 9078 if (VF != VecVF && 9079 (any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); }) || 9080 !ShuffleVectorInst::isIdentityMask(Mask, VF))) { 9081 SmallVector<int> OrigMask(VecVF, PoisonMaskElem); 9082 std::copy(Mask.begin(), std::next(Mask.begin(), std::min(VF, VecVF)), 9083 OrigMask.begin()); 9084 C = TTI->getShuffleCost( 9085 TTI::SK_PermuteSingleSrc, 9086 FixedVectorType::get(TE->getMainOp()->getType(), VecVF), OrigMask); 9087 LLVM_DEBUG( 9088 dbgs() << "SLP: Adding cost " << C 9089 << " for final shuffle of insertelement external users.\n"; 9090 TE->dump(); dbgs() << "SLP: Current total cost = " << Cost << "\n"); 9091 Cost += C; 9092 return std::make_pair(TE, true); 9093 } 9094 return std::make_pair(TE, false); 9095 }; 9096 // Calculate the cost of the reshuffled vectors, if any. 
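  // For each buildvector fed by vectorized scalars, the loop below estimates
  // the shuffles needed to form the final vector (via
  // performExtractsShuffleAction) and then subtracts the cost of the scalar
  // insertelements that become redundant.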
9097 for (int I = 0, E = FirstUsers.size(); I < E; ++I) { 9098 Value *Base = cast<Instruction>(FirstUsers[I].first)->getOperand(0); 9099 auto Vector = ShuffleMasks[I].takeVector(); 9100 unsigned VF = 0; 9101 auto EstimateShufflesCost = [&](ArrayRef<int> Mask, 9102 ArrayRef<const TreeEntry *> TEs) { 9103 assert((TEs.size() == 1 || TEs.size() == 2) && 9104 "Expected exactly 1 or 2 tree entries."); 9105 if (TEs.size() == 1) { 9106 if (VF == 0) 9107 VF = TEs.front()->getVectorFactor(); 9108 auto *FTy = 9109 FixedVectorType::get(TEs.back()->Scalars.front()->getType(), VF); 9110 if (!ShuffleVectorInst::isIdentityMask(Mask, VF) && 9111 !all_of(enumerate(Mask), [=](const auto &Data) { 9112 return Data.value() == PoisonMaskElem || 9113 (Data.index() < VF && 9114 static_cast<int>(Data.index()) == Data.value()); 9115 })) { 9116 InstructionCost C = 9117 TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FTy, Mask); 9118 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 9119 << " for final shuffle of insertelement " 9120 "external users.\n"; 9121 TEs.front()->dump(); 9122 dbgs() << "SLP: Current total cost = " << Cost << "\n"); 9123 Cost += C; 9124 } 9125 } else { 9126 if (VF == 0) { 9127 if (TEs.front() && 9128 TEs.front()->getVectorFactor() == TEs.back()->getVectorFactor()) 9129 VF = TEs.front()->getVectorFactor(); 9130 else 9131 VF = Mask.size(); 9132 } 9133 auto *FTy = 9134 FixedVectorType::get(TEs.back()->Scalars.front()->getType(), VF); 9135 InstructionCost C = 9136 TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, FTy, Mask); 9137 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 9138 << " for final shuffle of vector node and external " 9139 "insertelement users.\n"; 9140 if (TEs.front()) { TEs.front()->dump(); } TEs.back()->dump(); 9141 dbgs() << "SLP: Current total cost = " << Cost << "\n"); 9142 Cost += C; 9143 } 9144 VF = Mask.size(); 9145 return TEs.back(); 9146 }; 9147 (void)performExtractsShuffleAction<const TreeEntry>( 9148 MutableArrayRef(Vector.data(), Vector.size()), Base, 9149 [](const TreeEntry *E) { return E->getVectorFactor(); }, ResizeToVF, 9150 EstimateShufflesCost); 9151 InstructionCost InsertCost = TTI->getScalarizationOverhead( 9152 cast<FixedVectorType>(FirstUsers[I].first->getType()), DemandedElts[I], 9153 /*Insert*/ true, /*Extract*/ false, TTI::TCK_RecipThroughput); 9154 Cost -= InsertCost; 9155 } 9156 9157 #ifndef NDEBUG 9158 SmallString<256> Str; 9159 { 9160 raw_svector_ostream OS(Str); 9161 OS << "SLP: Spill Cost = " << SpillCost << ".\n" 9162 << "SLP: Extract Cost = " << ExtractCost << ".\n" 9163 << "SLP: Total Cost = " << Cost << ".\n"; 9164 } 9165 LLVM_DEBUG(dbgs() << Str); 9166 if (ViewSLPTree) 9167 ViewGraph(this, "SLP" + F->getName(), false, Str); 9168 #endif 9169 9170 return Cost; 9171 } 9172 9173 /// Tries to find extractelement instructions with constant indices from fixed 9174 /// vector type and gather such instructions into a bunch, which highly likely 9175 /// might be detected as a shuffle of 1 or 2 input vectors. If this attempt was 9176 /// successful, the matched scalars are replaced by poison values in \p VL for 9177 /// future analysis. 9178 std::optional<TTI::ShuffleKind> 9179 BoUpSLP::tryToGatherSingleRegisterExtractElements( 9180 MutableArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) const { 9181 // Scan list of gathered scalars for extractelements that can be represented 9182 // as shuffles. 
9183 MapVector<Value *, SmallVector<int>> VectorOpToIdx; 9184 SmallVector<int> UndefVectorExtracts; 9185 for (int I = 0, E = VL.size(); I < E; ++I) { 9186 auto *EI = dyn_cast<ExtractElementInst>(VL[I]); 9187 if (!EI) { 9188 if (isa<UndefValue>(VL[I])) 9189 UndefVectorExtracts.push_back(I); 9190 continue; 9191 } 9192 auto *VecTy = dyn_cast<FixedVectorType>(EI->getVectorOperandType()); 9193 if (!VecTy || !isa<ConstantInt, UndefValue>(EI->getIndexOperand())) 9194 continue; 9195 std::optional<unsigned> Idx = getExtractIndex(EI); 9196 // Undefined index. 9197 if (!Idx) { 9198 UndefVectorExtracts.push_back(I); 9199 continue; 9200 } 9201 SmallBitVector ExtractMask(VecTy->getNumElements(), true); 9202 ExtractMask.reset(*Idx); 9203 if (isUndefVector(EI->getVectorOperand(), ExtractMask).all()) { 9204 UndefVectorExtracts.push_back(I); 9205 continue; 9206 } 9207 VectorOpToIdx[EI->getVectorOperand()].push_back(I); 9208 } 9209 // Sort the vector operands by the maximum number of uses in extractelements. 9210 MapVector<unsigned, SmallVector<Value *>> VFToVector; 9211 for (const auto &Data : VectorOpToIdx) 9212 VFToVector[cast<FixedVectorType>(Data.first->getType())->getNumElements()] 9213 .push_back(Data.first); 9214 for (auto &Data : VFToVector) { 9215 stable_sort(Data.second, [&VectorOpToIdx](Value *V1, Value *V2) { 9216 return VectorOpToIdx.find(V1)->second.size() > 9217 VectorOpToIdx.find(V2)->second.size(); 9218 }); 9219 } 9220 // Find the best pair of the vectors with the same number of elements or a 9221 // single vector. 9222 const int UndefSz = UndefVectorExtracts.size(); 9223 unsigned SingleMax = 0; 9224 Value *SingleVec = nullptr; 9225 unsigned PairMax = 0; 9226 std::pair<Value *, Value *> PairVec(nullptr, nullptr); 9227 for (auto &Data : VFToVector) { 9228 Value *V1 = Data.second.front(); 9229 if (SingleMax < VectorOpToIdx[V1].size() + UndefSz) { 9230 SingleMax = VectorOpToIdx[V1].size() + UndefSz; 9231 SingleVec = V1; 9232 } 9233 Value *V2 = nullptr; 9234 if (Data.second.size() > 1) 9235 V2 = *std::next(Data.second.begin()); 9236 if (V2 && PairMax < VectorOpToIdx[V1].size() + VectorOpToIdx[V2].size() + 9237 UndefSz) { 9238 PairMax = VectorOpToIdx[V1].size() + VectorOpToIdx[V2].size() + UndefSz; 9239 PairVec = std::make_pair(V1, V2); 9240 } 9241 } 9242 if (SingleMax == 0 && PairMax == 0 && UndefSz == 0) 9243 return std::nullopt; 9244 // Check if better to perform a shuffle of 2 vectors or just of a single 9245 // vector. 9246 SmallVector<Value *> SavedVL(VL.begin(), VL.end()); 9247 SmallVector<Value *> GatheredExtracts( 9248 VL.size(), PoisonValue::get(VL.front()->getType())); 9249 if (SingleMax >= PairMax && SingleMax) { 9250 for (int Idx : VectorOpToIdx[SingleVec]) 9251 std::swap(GatheredExtracts[Idx], VL[Idx]); 9252 } else { 9253 for (Value *V : {PairVec.first, PairVec.second}) 9254 for (int Idx : VectorOpToIdx[V]) 9255 std::swap(GatheredExtracts[Idx], VL[Idx]); 9256 } 9257 // Add extracts from undefs too. 9258 for (int Idx : UndefVectorExtracts) 9259 std::swap(GatheredExtracts[Idx], VL[Idx]); 9260 // Check that gather of extractelements can be represented as just a 9261 // shuffle of a single/two vectors the scalars are extracted from. 9262 std::optional<TTI::ShuffleKind> Res = 9263 isFixedVectorShuffle(GatheredExtracts, Mask); 9264 if (!Res) { 9265 // TODO: try to check other subsets if possible. 9266 // Restore the original VL if attempt was not successful. 
9267 copy(SavedVL, VL.begin()); 9268 return std::nullopt; 9269 } 9270 // Restore unused scalars from mask, if some of the extractelements were not 9271 // selected for shuffle. 9272 for (int I = 0, E = GatheredExtracts.size(); I < E; ++I) { 9273 if (Mask[I] == PoisonMaskElem && !isa<PoisonValue>(GatheredExtracts[I]) && 9274 isa<UndefValue>(GatheredExtracts[I])) { 9275 std::swap(VL[I], GatheredExtracts[I]); 9276 continue; 9277 } 9278 auto *EI = dyn_cast<ExtractElementInst>(VL[I]); 9279 if (!EI || !isa<FixedVectorType>(EI->getVectorOperandType()) || 9280 !isa<ConstantInt, UndefValue>(EI->getIndexOperand()) || 9281 is_contained(UndefVectorExtracts, I)) 9282 continue; 9283 } 9284 return Res; 9285 } 9286 9287 /// Tries to find extractelement instructions with constant indices from fixed 9288 /// vector type and gather such instructions into a bunch, which highly likely 9289 /// might be detected as a shuffle of 1 or 2 input vectors. If this attempt was 9290 /// successful, the matched scalars are replaced by poison values in \p VL for 9291 /// future analysis. 9292 SmallVector<std::optional<TTI::ShuffleKind>> 9293 BoUpSLP::tryToGatherExtractElements(SmallVectorImpl<Value *> &VL, 9294 SmallVectorImpl<int> &Mask, 9295 unsigned NumParts) const { 9296 assert(NumParts > 0 && "NumParts expected be greater than or equal to 1."); 9297 SmallVector<std::optional<TTI::ShuffleKind>> ShufflesRes(NumParts); 9298 Mask.assign(VL.size(), PoisonMaskElem); 9299 unsigned SliceSize = VL.size() / NumParts; 9300 for (unsigned Part = 0; Part < NumParts; ++Part) { 9301 // Scan list of gathered scalars for extractelements that can be represented 9302 // as shuffles. 9303 MutableArrayRef<Value *> SubVL = 9304 MutableArrayRef(VL).slice(Part * SliceSize, SliceSize); 9305 SmallVector<int> SubMask; 9306 std::optional<TTI::ShuffleKind> Res = 9307 tryToGatherSingleRegisterExtractElements(SubVL, SubMask); 9308 ShufflesRes[Part] = Res; 9309 copy(SubMask, std::next(Mask.begin(), Part * SliceSize)); 9310 } 9311 if (none_of(ShufflesRes, [](const std::optional<TTI::ShuffleKind> &Res) { 9312 return Res.has_value(); 9313 })) 9314 ShufflesRes.clear(); 9315 return ShufflesRes; 9316 } 9317 9318 std::optional<TargetTransformInfo::ShuffleKind> 9319 BoUpSLP::isGatherShuffledSingleRegisterEntry( 9320 const TreeEntry *TE, ArrayRef<Value *> VL, MutableArrayRef<int> Mask, 9321 SmallVectorImpl<const TreeEntry *> &Entries, unsigned Part) { 9322 Entries.clear(); 9323 // TODO: currently checking only for Scalars in the tree entry, need to count 9324 // reused elements too for better cost estimation. 9325 const EdgeInfo &TEUseEI = TE->UserTreeIndices.front(); 9326 const Instruction *TEInsertPt = &getLastInstructionInBundle(TEUseEI.UserTE); 9327 const BasicBlock *TEInsertBlock = nullptr; 9328 // Main node of PHI entries keeps the correct order of operands/incoming 9329 // blocks. 9330 if (auto *PHI = dyn_cast<PHINode>(TEUseEI.UserTE->getMainOp())) { 9331 TEInsertBlock = PHI->getIncomingBlock(TEUseEI.EdgeIdx); 9332 TEInsertPt = TEInsertBlock->getTerminator(); 9333 } else { 9334 TEInsertBlock = TEInsertPt->getParent(); 9335 } 9336 auto *NodeUI = DT->getNode(TEInsertBlock); 9337 assert(NodeUI && "Should only process reachable instructions"); 9338 SmallPtrSet<Value *, 4> GatheredScalars(VL.begin(), VL.end()); 9339 auto CheckOrdering = [&](const Instruction *InsertPt) { 9340 // Argument InsertPt is an instruction where vector code for some other 9341 // tree entry (one that shares one or more scalars with TE) is going to be 9342 // generated. 
    // This lambda returns true if the insertion point of the vector code for
    // the TE dominates that point (otherwise the dependency is the other way
    // around). The other node is not limited to be of a gather kind. Gather
    // nodes are not scheduled and their vector code is inserted before their
    // first user. If the user is a PHI, that point is supposed to be at the
    // end of a predecessor block. Otherwise it is the last instruction among
    // the scalars of the user node. So, instead of checking the dependency
    // between the instructions themselves, we check the dependency between
    // their insertion points for vector code (since each scalar instruction
    // ends up as a lane of a vector instruction).
    const BasicBlock *InsertBlock = InsertPt->getParent();
    auto *NodeEUI = DT->getNode(InsertBlock);
    if (!NodeEUI)
      return false;
    assert((NodeUI == NodeEUI) ==
               (NodeUI->getDFSNumIn() == NodeEUI->getDFSNumIn()) &&
           "Different nodes should have different DFS numbers");
    // Check the order of the gather nodes users.
    if (TEInsertPt->getParent() != InsertBlock &&
        (DT->dominates(NodeUI, NodeEUI) || !DT->dominates(NodeEUI, NodeUI)))
      return false;
    if (TEInsertPt->getParent() == InsertBlock &&
        TEInsertPt->comesBefore(InsertPt))
      return false;
    return true;
  };
  // Find all tree entries used by the gathered values. If no common entries
  // are found - not a shuffle.
  // Here we build a set of tree nodes for each gathered value and try to find
  // the intersection between these sets. If we have at least one common tree
  // node for each gathered value - we have just a permutation of a single
  // vector. If we have 2 different sets, we're in a situation where we have a
  // permutation of 2 input vectors.
  SmallVector<SmallPtrSet<const TreeEntry *, 4>> UsedTEs;
  DenseMap<Value *, int> UsedValuesEntry;
  for (Value *V : VL) {
    if (isConstant(V))
      continue;
    // Build a list of tree entries where V is used.
    SmallPtrSet<const TreeEntry *, 4> VToTEs;
    for (const TreeEntry *TEPtr : ValueToGatherNodes.find(V)->second) {
      if (TEPtr == TE)
        continue;
      assert(any_of(TEPtr->Scalars,
                    [&](Value *V) { return GatheredScalars.contains(V); }) &&
             "Must contain at least single gathered value.");
      assert(TEPtr->UserTreeIndices.size() == 1 &&
             "Expected only single user of a gather node.");
      const EdgeInfo &UseEI = TEPtr->UserTreeIndices.front();

      PHINode *UserPHI = dyn_cast<PHINode>(UseEI.UserTE->getMainOp());
      const Instruction *InsertPt =
          UserPHI ? UserPHI->getIncomingBlock(UseEI.EdgeIdx)->getTerminator()
                  : &getLastInstructionInBundle(UseEI.UserTE);
      if (TEInsertPt == InsertPt) {
        // If 2 gathers are operands of the same entry (regardless of whether
        // the user is a PHI or not), compare the operand indices and use the
        // earlier one as the base.
        if (TEUseEI.UserTE == UseEI.UserTE && TEUseEI.EdgeIdx < UseEI.EdgeIdx)
          continue;
        // If the user instruction is used for some reason in different
        // vectorized nodes - make the decision depend on the node index.
        if (TEUseEI.UserTE != UseEI.UserTE &&
            TEUseEI.UserTE->Idx < UseEI.UserTE->Idx)
          continue;
      }

      // Check if the user node of TE comes after the user node of TEPtr;
      // otherwise TEPtr depends on TE.
9411 if ((TEInsertBlock != InsertPt->getParent() || 9412 TEUseEI.EdgeIdx < UseEI.EdgeIdx || TEUseEI.UserTE != UseEI.UserTE) && 9413 !CheckOrdering(InsertPt)) 9414 continue; 9415 VToTEs.insert(TEPtr); 9416 } 9417 if (const TreeEntry *VTE = getTreeEntry(V)) { 9418 Instruction &LastBundleInst = getLastInstructionInBundle(VTE); 9419 if (&LastBundleInst == TEInsertPt || !CheckOrdering(&LastBundleInst)) 9420 continue; 9421 auto It = MinBWs.find(VTE); 9422 // If vectorize node is demoted - do not match. 9423 if (It != MinBWs.end() && 9424 It->second.first != DL->getTypeSizeInBits(V->getType())) 9425 continue; 9426 VToTEs.insert(VTE); 9427 } 9428 if (VToTEs.empty()) 9429 continue; 9430 if (UsedTEs.empty()) { 9431 // The first iteration, just insert the list of nodes to vector. 9432 UsedTEs.push_back(VToTEs); 9433 UsedValuesEntry.try_emplace(V, 0); 9434 } else { 9435 // Need to check if there are any previously used tree nodes which use V. 9436 // If there are no such nodes, consider that we have another one input 9437 // vector. 9438 SmallPtrSet<const TreeEntry *, 4> SavedVToTEs(VToTEs); 9439 unsigned Idx = 0; 9440 for (SmallPtrSet<const TreeEntry *, 4> &Set : UsedTEs) { 9441 // Do we have a non-empty intersection of previously listed tree entries 9442 // and tree entries using current V? 9443 set_intersect(VToTEs, Set); 9444 if (!VToTEs.empty()) { 9445 // Yes, write the new subset and continue analysis for the next 9446 // scalar. 9447 Set.swap(VToTEs); 9448 break; 9449 } 9450 VToTEs = SavedVToTEs; 9451 ++Idx; 9452 } 9453 // No non-empty intersection found - need to add a second set of possible 9454 // source vectors. 9455 if (Idx == UsedTEs.size()) { 9456 // If the number of input vectors is greater than 2 - not a permutation, 9457 // fallback to the regular gather. 9458 // TODO: support multiple reshuffled nodes. 9459 if (UsedTEs.size() == 2) 9460 continue; 9461 UsedTEs.push_back(SavedVToTEs); 9462 Idx = UsedTEs.size() - 1; 9463 } 9464 UsedValuesEntry.try_emplace(V, Idx); 9465 } 9466 } 9467 9468 if (UsedTEs.empty()) { 9469 Entries.clear(); 9470 return std::nullopt; 9471 } 9472 9473 unsigned VF = 0; 9474 if (UsedTEs.size() == 1) { 9475 // Keep the order to avoid non-determinism. 9476 SmallVector<const TreeEntry *> FirstEntries(UsedTEs.front().begin(), 9477 UsedTEs.front().end()); 9478 sort(FirstEntries, [](const TreeEntry *TE1, const TreeEntry *TE2) { 9479 return TE1->Idx < TE2->Idx; 9480 }); 9481 // Try to find the perfect match in another gather node at first. 9482 auto *It = find_if(FirstEntries, [=](const TreeEntry *EntryPtr) { 9483 return EntryPtr->isSame(VL) || EntryPtr->isSame(TE->Scalars); 9484 }); 9485 if (It != FirstEntries.end() && 9486 ((*It)->getVectorFactor() == VL.size() || 9487 ((*It)->getVectorFactor() == TE->Scalars.size() && 9488 TE->ReuseShuffleIndices.size() == VL.size() && 9489 (*It)->isSame(TE->Scalars)))) { 9490 Entries.push_back(*It); 9491 if ((*It)->getVectorFactor() == VL.size()) { 9492 std::iota(std::next(Mask.begin(), Part * VL.size()), 9493 std::next(Mask.begin(), (Part + 1) * VL.size()), 0); 9494 } else { 9495 SmallVector<int> CommonMask = TE->getCommonMask(); 9496 copy(CommonMask, Mask.begin()); 9497 } 9498 // Clear undef scalars. 9499 for (int I = 0, Sz = VL.size(); I < Sz; ++I) 9500 if (isa<PoisonValue>(VL[I])) 9501 Mask[I] = PoisonMaskElem; 9502 return TargetTransformInfo::SK_PermuteSingleSrc; 9503 } 9504 // No perfect match, just shuffle, so choose the first tree node from the 9505 // tree. 
    Entries.push_back(FirstEntries.front());
  } else {
    // Try to find nodes with the same vector factor.
    assert(UsedTEs.size() == 2 && "Expected at max 2 permuted entries.");
    // Keep the order of tree nodes to avoid non-determinism.
    DenseMap<int, const TreeEntry *> VFToTE;
    for (const TreeEntry *TE : UsedTEs.front()) {
      unsigned VF = TE->getVectorFactor();
      auto It = VFToTE.find(VF);
      if (It != VFToTE.end()) {
        if (It->second->Idx > TE->Idx)
          It->getSecond() = TE;
        continue;
      }
      VFToTE.try_emplace(VF, TE);
    }
    // Same, keep the order to avoid non-determinism.
    SmallVector<const TreeEntry *> SecondEntries(UsedTEs.back().begin(),
                                                 UsedTEs.back().end());
    sort(SecondEntries, [](const TreeEntry *TE1, const TreeEntry *TE2) {
      return TE1->Idx < TE2->Idx;
    });
    for (const TreeEntry *TE : SecondEntries) {
      auto It = VFToTE.find(TE->getVectorFactor());
      if (It != VFToTE.end()) {
        VF = It->first;
        Entries.push_back(It->second);
        Entries.push_back(TE);
        break;
      }
    }
    // No 2 source vectors with the same vector factor - just choose the 2 with
    // the maximum index.
    if (Entries.empty()) {
      Entries.push_back(
          *std::max_element(UsedTEs.front().begin(), UsedTEs.front().end(),
                            [](const TreeEntry *TE1, const TreeEntry *TE2) {
                              return TE1->Idx < TE2->Idx;
                            }));
      Entries.push_back(SecondEntries.front());
      VF = std::max(Entries.front()->getVectorFactor(),
                    Entries.back()->getVectorFactor());
    }
  }

  bool IsSplatOrUndefs = isSplat(VL) || all_of(VL, UndefValue::classof);
  // Checks if the 2 PHIs are compatible, i.e. have a high chance of being
  // vectorized together.
  auto AreCompatiblePHIs = [&](Value *V, Value *V1) {
    auto *PHI = cast<PHINode>(V);
    auto *PHI1 = cast<PHINode>(V1);
    // Check that all incoming values are compatible/from the same parent (if
    // they are instructions).
    // The incoming values are compatible if they all are constants, or
    // instructions with the same/alternate opcodes from the same basic block.
    for (int I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
      Value *In = PHI->getIncomingValue(I);
      Value *In1 = PHI1->getIncomingValue(I);
      if (isConstant(In) && isConstant(In1))
        continue;
      if (!getSameOpcode({In, In1}, *TLI).getOpcode())
        return false;
      if (cast<Instruction>(In)->getParent() !=
          cast<Instruction>(In1)->getParent())
        return false;
    }
    return true;
  };
  // Check if the value can be ignored during analysis for shuffled gathers.
  // We assume it is better to ignore instructions that do not form splats, are
  // not vectorized and are not extractelements (those are handled by the
  // extractelement processing), or that may form a vector node in the future.
  auto MightBeIgnored = [=](Value *V) {
    auto *I = dyn_cast<Instruction>(V);
    return I && !IsSplatOrUndefs && !ScalarToTreeEntry.count(I) &&
           !isVectorLikeInstWithConstOps(I) &&
           !areAllUsersVectorized(I, UserIgnoreList) && isSimple(I);
  };
  // Check whether the neighboring instruction may form a full vector node with
  // the current instruction V. That is possible if they have the same or an
  // alternate opcode and the same parent basic block.
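  // For example (illustrative): in VL = {%add1, %add2, %ext0, %ext1}, where
  // the two adds are not part of the tree and have the same opcode and parent
  // block, the adds are left out of the shuffle because they may form their
  // own vector node later, while the extractelements are handled by the
  // extractelement matching logic.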
9587 auto NeighborMightBeIgnored = [&](Value *V, int Idx) { 9588 Value *V1 = VL[Idx]; 9589 bool UsedInSameVTE = false; 9590 auto It = UsedValuesEntry.find(V1); 9591 if (It != UsedValuesEntry.end()) 9592 UsedInSameVTE = It->second == UsedValuesEntry.find(V)->second; 9593 return V != V1 && MightBeIgnored(V1) && !UsedInSameVTE && 9594 getSameOpcode({V, V1}, *TLI).getOpcode() && 9595 cast<Instruction>(V)->getParent() == 9596 cast<Instruction>(V1)->getParent() && 9597 (!isa<PHINode>(V1) || AreCompatiblePHIs(V, V1)); 9598 }; 9599 // Build a shuffle mask for better cost estimation and vector emission. 9600 SmallBitVector UsedIdxs(Entries.size()); 9601 SmallVector<std::pair<unsigned, int>> EntryLanes; 9602 for (int I = 0, E = VL.size(); I < E; ++I) { 9603 Value *V = VL[I]; 9604 auto It = UsedValuesEntry.find(V); 9605 if (It == UsedValuesEntry.end()) 9606 continue; 9607 // Do not try to shuffle scalars, if they are constants, or instructions 9608 // that can be vectorized as a result of the following vector build 9609 // vectorization. 9610 if (isConstant(V) || (MightBeIgnored(V) && 9611 ((I > 0 && NeighborMightBeIgnored(V, I - 1)) || 9612 (I != E - 1 && NeighborMightBeIgnored(V, I + 1))))) 9613 continue; 9614 unsigned Idx = It->second; 9615 EntryLanes.emplace_back(Idx, I); 9616 UsedIdxs.set(Idx); 9617 } 9618 // Iterate through all shuffled scalars and select entries, which can be used 9619 // for final shuffle. 9620 SmallVector<const TreeEntry *> TempEntries; 9621 for (unsigned I = 0, Sz = Entries.size(); I < Sz; ++I) { 9622 if (!UsedIdxs.test(I)) 9623 continue; 9624 // Fix the entry number for the given scalar. If it is the first entry, set 9625 // Pair.first to 0, otherwise to 1 (currently select at max 2 nodes). 9626 // These indices are used when calculating final shuffle mask as the vector 9627 // offset. 9628 for (std::pair<unsigned, int> &Pair : EntryLanes) 9629 if (Pair.first == I) 9630 Pair.first = TempEntries.size(); 9631 TempEntries.push_back(Entries[I]); 9632 } 9633 Entries.swap(TempEntries); 9634 if (EntryLanes.size() == Entries.size() && 9635 !VL.equals(ArrayRef(TE->Scalars) 9636 .slice(Part * VL.size(), 9637 std::min<int>(VL.size(), TE->Scalars.size())))) { 9638 // We may have here 1 or 2 entries only. If the number of scalars is equal 9639 // to the number of entries, no need to do the analysis, it is not very 9640 // profitable. Since VL is not the same as TE->Scalars, it means we already 9641 // have some shuffles before. Cut off not profitable case. 9642 Entries.clear(); 9643 return std::nullopt; 9644 } 9645 // Build the final mask, check for the identity shuffle, if possible. 9646 bool IsIdentity = Entries.size() == 1; 9647 // Pair.first is the offset to the vector, while Pair.second is the index of 9648 // scalar in the list. 9649 for (const std::pair<unsigned, int> &Pair : EntryLanes) { 9650 unsigned Idx = Part * VL.size() + Pair.second; 9651 Mask[Idx] = Pair.first * VF + 9652 Entries[Pair.first]->findLaneForValue(VL[Pair.second]); 9653 IsIdentity &= Mask[Idx] == Pair.second; 9654 } 9655 switch (Entries.size()) { 9656 case 1: 9657 if (IsIdentity || EntryLanes.size() > 1 || VL.size() <= 2) 9658 return TargetTransformInfo::SK_PermuteSingleSrc; 9659 break; 9660 case 2: 9661 if (EntryLanes.size() > 2 || VL.size() <= 2) 9662 return TargetTransformInfo::SK_PermuteTwoSrc; 9663 break; 9664 default: 9665 break; 9666 } 9667 Entries.clear(); 9668 // Clear the corresponding mask elements. 
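  // Note that only this Part's slice of the mask is reset; lanes already
  // computed for other parts are kept intact.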
9669 std::fill(std::next(Mask.begin(), Part * VL.size()), 9670 std::next(Mask.begin(), (Part + 1) * VL.size()), PoisonMaskElem); 9671 return std::nullopt; 9672 } 9673 9674 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> 9675 BoUpSLP::isGatherShuffledEntry( 9676 const TreeEntry *TE, ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask, 9677 SmallVectorImpl<SmallVector<const TreeEntry *>> &Entries, 9678 unsigned NumParts) { 9679 assert(NumParts > 0 && NumParts < VL.size() && 9680 "Expected positive number of registers."); 9681 Entries.clear(); 9682 // No need to check for the topmost gather node. 9683 if (TE == VectorizableTree.front().get()) 9684 return {}; 9685 Mask.assign(VL.size(), PoisonMaskElem); 9686 assert(TE->UserTreeIndices.size() == 1 && 9687 "Expected only single user of the gather node."); 9688 assert(VL.size() % NumParts == 0 && 9689 "Number of scalars must be divisible by NumParts."); 9690 unsigned SliceSize = VL.size() / NumParts; 9691 SmallVector<std::optional<TTI::ShuffleKind>> Res; 9692 for (unsigned Part = 0; Part < NumParts; ++Part) { 9693 ArrayRef<Value *> SubVL = VL.slice(Part * SliceSize, SliceSize); 9694 SmallVectorImpl<const TreeEntry *> &SubEntries = Entries.emplace_back(); 9695 std::optional<TTI::ShuffleKind> SubRes = 9696 isGatherShuffledSingleRegisterEntry(TE, SubVL, Mask, SubEntries, Part); 9697 if (!SubRes) 9698 SubEntries.clear(); 9699 Res.push_back(SubRes); 9700 if (SubEntries.size() == 1 && *SubRes == TTI::SK_PermuteSingleSrc && 9701 SubEntries.front()->getVectorFactor() == VL.size() && 9702 (SubEntries.front()->isSame(TE->Scalars) || 9703 SubEntries.front()->isSame(VL))) { 9704 SmallVector<const TreeEntry *> LocalSubEntries; 9705 LocalSubEntries.swap(SubEntries); 9706 Entries.clear(); 9707 Res.clear(); 9708 std::iota(Mask.begin(), Mask.end(), 0); 9709 // Clear undef scalars. 9710 for (int I = 0, Sz = VL.size(); I < Sz; ++I) 9711 if (isa<PoisonValue>(VL[I])) 9712 Mask[I] = PoisonMaskElem; 9713 Entries.emplace_back(1, LocalSubEntries.front()); 9714 Res.push_back(TargetTransformInfo::SK_PermuteSingleSrc); 9715 return Res; 9716 } 9717 } 9718 if (all_of(Res, 9719 [](const std::optional<TTI::ShuffleKind> &SK) { return !SK; })) { 9720 Entries.clear(); 9721 return {}; 9722 } 9723 return Res; 9724 } 9725 9726 InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL, 9727 bool ForPoisonSrc) const { 9728 // Find the type of the operands in VL. 9729 Type *ScalarTy = VL[0]->getType(); 9730 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 9731 ScalarTy = SI->getValueOperand()->getType(); 9732 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 9733 bool DuplicateNonConst = false; 9734 // Find the cost of inserting/extracting values from the vector. 9735 // Check if the same elements are inserted several times and count them as 9736 // shuffle candidates. 9737 APInt ShuffledElements = APInt::getZero(VL.size()); 9738 DenseSet<Value *> UniqueElements; 9739 constexpr TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 9740 InstructionCost Cost; 9741 auto EstimateInsertCost = [&](unsigned I, Value *V) { 9742 if (!ForPoisonSrc) 9743 Cost += 9744 TTI->getVectorInstrCost(Instruction::InsertElement, VecTy, CostKind, 9745 I, Constant::getNullValue(VecTy), V); 9746 }; 9747 for (unsigned I = 0, E = VL.size(); I < E; ++I) { 9748 Value *V = VL[I]; 9749 // No need to shuffle duplicates for constants. 
9750 if ((ForPoisonSrc && isConstant(V)) || isa<UndefValue>(V)) { 9751 ShuffledElements.setBit(I); 9752 continue; 9753 } 9754 if (!UniqueElements.insert(V).second) { 9755 DuplicateNonConst = true; 9756 ShuffledElements.setBit(I); 9757 continue; 9758 } 9759 EstimateInsertCost(I, V); 9760 } 9761 if (ForPoisonSrc) 9762 Cost = 9763 TTI->getScalarizationOverhead(VecTy, ~ShuffledElements, /*Insert*/ true, 9764 /*Extract*/ false, CostKind); 9765 if (DuplicateNonConst) 9766 Cost += 9767 TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy); 9768 return Cost; 9769 } 9770 9771 // Perform operand reordering on the instructions in VL and return the reordered 9772 // operands in Left and Right. 9773 void BoUpSLP::reorderInputsAccordingToOpcode( 9774 ArrayRef<Value *> VL, SmallVectorImpl<Value *> &Left, 9775 SmallVectorImpl<Value *> &Right, const TargetLibraryInfo &TLI, 9776 const DataLayout &DL, ScalarEvolution &SE, const BoUpSLP &R) { 9777 if (VL.empty()) 9778 return; 9779 VLOperands Ops(VL, TLI, DL, SE, R); 9780 // Reorder the operands in place. 9781 Ops.reorder(); 9782 Left = Ops.getVL(0); 9783 Right = Ops.getVL(1); 9784 } 9785 9786 Instruction &BoUpSLP::getLastInstructionInBundle(const TreeEntry *E) { 9787 auto &Res = EntryToLastInstruction.FindAndConstruct(E); 9788 if (Res.second) 9789 return *Res.second; 9790 // Get the basic block this bundle is in. All instructions in the bundle 9791 // should be in this block (except for extractelement-like instructions with 9792 // constant indeces). 9793 auto *Front = E->getMainOp(); 9794 auto *BB = Front->getParent(); 9795 assert(llvm::all_of(E->Scalars, [=](Value *V) -> bool { 9796 if (E->getOpcode() == Instruction::GetElementPtr && 9797 !isa<GetElementPtrInst>(V)) 9798 return true; 9799 auto *I = cast<Instruction>(V); 9800 return !E->isOpcodeOrAlt(I) || I->getParent() == BB || 9801 isVectorLikeInstWithConstOps(I); 9802 })); 9803 9804 auto FindLastInst = [&]() { 9805 Instruction *LastInst = Front; 9806 for (Value *V : E->Scalars) { 9807 auto *I = dyn_cast<Instruction>(V); 9808 if (!I) 9809 continue; 9810 if (LastInst->getParent() == I->getParent()) { 9811 if (LastInst->comesBefore(I)) 9812 LastInst = I; 9813 continue; 9814 } 9815 assert(((E->getOpcode() == Instruction::GetElementPtr && 9816 !isa<GetElementPtrInst>(I)) || 9817 (isVectorLikeInstWithConstOps(LastInst) && 9818 isVectorLikeInstWithConstOps(I))) && 9819 "Expected vector-like or non-GEP in GEP node insts only."); 9820 if (!DT->isReachableFromEntry(LastInst->getParent())) { 9821 LastInst = I; 9822 continue; 9823 } 9824 if (!DT->isReachableFromEntry(I->getParent())) 9825 continue; 9826 auto *NodeA = DT->getNode(LastInst->getParent()); 9827 auto *NodeB = DT->getNode(I->getParent()); 9828 assert(NodeA && "Should only process reachable instructions"); 9829 assert(NodeB && "Should only process reachable instructions"); 9830 assert((NodeA == NodeB) == 9831 (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) && 9832 "Different nodes should have different DFS numbers"); 9833 if (NodeA->getDFSNumIn() < NodeB->getDFSNumIn()) 9834 LastInst = I; 9835 } 9836 BB = LastInst->getParent(); 9837 return LastInst; 9838 }; 9839 9840 auto FindFirstInst = [&]() { 9841 Instruction *FirstInst = Front; 9842 for (Value *V : E->Scalars) { 9843 auto *I = dyn_cast<Instruction>(V); 9844 if (!I) 9845 continue; 9846 if (FirstInst->getParent() == I->getParent()) { 9847 if (I->comesBefore(FirstInst)) 9848 FirstInst = I; 9849 continue; 9850 } 9851 assert(((E->getOpcode() == Instruction::GetElementPtr && 9852 
!isa<GetElementPtrInst>(I)) || 9853 (isVectorLikeInstWithConstOps(FirstInst) && 9854 isVectorLikeInstWithConstOps(I))) && 9855 "Expected vector-like or non-GEP in GEP node insts only."); 9856 if (!DT->isReachableFromEntry(FirstInst->getParent())) { 9857 FirstInst = I; 9858 continue; 9859 } 9860 if (!DT->isReachableFromEntry(I->getParent())) 9861 continue; 9862 auto *NodeA = DT->getNode(FirstInst->getParent()); 9863 auto *NodeB = DT->getNode(I->getParent()); 9864 assert(NodeA && "Should only process reachable instructions"); 9865 assert(NodeB && "Should only process reachable instructions"); 9866 assert((NodeA == NodeB) == 9867 (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) && 9868 "Different nodes should have different DFS numbers"); 9869 if (NodeA->getDFSNumIn() > NodeB->getDFSNumIn()) 9870 FirstInst = I; 9871 } 9872 return FirstInst; 9873 }; 9874 9875 // Set the insert point to the beginning of the basic block if the entry 9876 // should not be scheduled. 9877 if (doesNotNeedToSchedule(E->Scalars) || 9878 (E->State != TreeEntry::NeedToGather && 9879 all_of(E->Scalars, isVectorLikeInstWithConstOps))) { 9880 if ((E->getOpcode() == Instruction::GetElementPtr && 9881 any_of(E->Scalars, 9882 [](Value *V) { 9883 return !isa<GetElementPtrInst>(V) && isa<Instruction>(V); 9884 })) || 9885 all_of(E->Scalars, [](Value *V) { 9886 return !isVectorLikeInstWithConstOps(V) && isUsedOutsideBlock(V); 9887 })) 9888 Res.second = FindLastInst(); 9889 else 9890 Res.second = FindFirstInst(); 9891 return *Res.second; 9892 } 9893 9894 // Find the last instruction. The common case should be that BB has been 9895 // scheduled, and the last instruction is VL.back(). So we start with 9896 // VL.back() and iterate over schedule data until we reach the end of the 9897 // bundle. The end of the bundle is marked by null ScheduleData. 9898 if (BlocksSchedules.count(BB)) { 9899 Value *V = E->isOneOf(E->Scalars.back()); 9900 if (doesNotNeedToBeScheduled(V)) 9901 V = *find_if_not(E->Scalars, doesNotNeedToBeScheduled); 9902 auto *Bundle = BlocksSchedules[BB]->getScheduleData(V); 9903 if (Bundle && Bundle->isPartOfBundle()) 9904 for (; Bundle; Bundle = Bundle->NextInBundle) 9905 if (Bundle->OpValue == Bundle->Inst) 9906 Res.second = Bundle->Inst; 9907 } 9908 9909 // LastInst can still be null at this point if there's either not an entry 9910 // for BB in BlocksSchedules or there's no ScheduleData available for 9911 // VL.back(). This can be the case if buildTree_rec aborts for various 9912 // reasons (e.g., the maximum recursion depth is reached, the maximum region 9913 // size is reached, etc.). ScheduleData is initialized in the scheduling 9914 // "dry-run". 9915 // 9916 // If this happens, we can still find the last instruction by brute force. We 9917 // iterate forwards from Front (inclusive) until we either see all 9918 // instructions in the bundle or reach the end of the block. If Front is the 9919 // last instruction in program order, LastInst will be set to Front, and we 9920 // will visit all the remaining instructions in the block. 9921 // 9922 // One of the reasons we exit early from buildTree_rec is to place an upper 9923 // bound on compile-time. Thus, taking an additional compile-time hit here is 9924 // not ideal. However, this should be exceedingly rare since it requires that 9925 // we both exit early from buildTree_rec and that the bundle be out-of-order 9926 // (causing us to iterate all the way to the end of the block). 
9927 if (!Res.second) 9928 Res.second = FindLastInst(); 9929 assert(Res.second && "Failed to find last instruction in bundle"); 9930 return *Res.second; 9931 } 9932 9933 void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) { 9934 auto *Front = E->getMainOp(); 9935 Instruction *LastInst = &getLastInstructionInBundle(E); 9936 assert(LastInst && "Failed to find last instruction in bundle"); 9937 BasicBlock::iterator LastInstIt = LastInst->getIterator(); 9938 // If the instruction is PHI, set the insert point after all the PHIs. 9939 bool IsPHI = isa<PHINode>(LastInst); 9940 if (IsPHI) 9941 LastInstIt = LastInst->getParent()->getFirstNonPHIIt(); 9942 if (IsPHI || (E->State != TreeEntry::NeedToGather && 9943 doesNotNeedToSchedule(E->Scalars))) { 9944 Builder.SetInsertPoint(LastInst->getParent(), LastInstIt); 9945 } else { 9946 // Set the insertion point after the last instruction in the bundle. Set the 9947 // debug location to Front. 9948 Builder.SetInsertPoint( 9949 LastInst->getParent(), 9950 LastInst->getNextNonDebugInstruction()->getIterator()); 9951 } 9952 Builder.SetCurrentDebugLocation(Front->getDebugLoc()); 9953 } 9954 9955 Value *BoUpSLP::gather(ArrayRef<Value *> VL, Value *Root) { 9956 // List of instructions/lanes from current block and/or the blocks which are 9957 // part of the current loop. These instructions will be inserted at the end to 9958 // make it possible to optimize loops and hoist invariant instructions out of 9959 // the loops body with better chances for success. 9960 SmallVector<std::pair<Value *, unsigned>, 4> PostponedInsts; 9961 SmallSet<int, 4> PostponedIndices; 9962 Loop *L = LI->getLoopFor(Builder.GetInsertBlock()); 9963 auto &&CheckPredecessor = [](BasicBlock *InstBB, BasicBlock *InsertBB) { 9964 SmallPtrSet<BasicBlock *, 4> Visited; 9965 while (InsertBB && InsertBB != InstBB && Visited.insert(InsertBB).second) 9966 InsertBB = InsertBB->getSinglePredecessor(); 9967 return InsertBB && InsertBB == InstBB; 9968 }; 9969 for (int I = 0, E = VL.size(); I < E; ++I) { 9970 if (auto *Inst = dyn_cast<Instruction>(VL[I])) 9971 if ((CheckPredecessor(Inst->getParent(), Builder.GetInsertBlock()) || 9972 getTreeEntry(Inst) || 9973 (L && (!Root || L->isLoopInvariant(Root)) && L->contains(Inst))) && 9974 PostponedIndices.insert(I).second) 9975 PostponedInsts.emplace_back(Inst, I); 9976 } 9977 9978 auto &&CreateInsertElement = [this](Value *Vec, Value *V, unsigned Pos) { 9979 Vec = Builder.CreateInsertElement(Vec, V, Builder.getInt32(Pos)); 9980 auto *InsElt = dyn_cast<InsertElementInst>(Vec); 9981 if (!InsElt) 9982 return Vec; 9983 GatherShuffleExtractSeq.insert(InsElt); 9984 CSEBlocks.insert(InsElt->getParent()); 9985 // Add to our 'need-to-extract' list. 9986 if (isa<Instruction>(V)) { 9987 if (TreeEntry *Entry = getTreeEntry(V)) { 9988 // Find which lane we need to extract. 9989 unsigned FoundLane = Entry->findLaneForValue(V); 9990 ExternalUses.emplace_back(V, InsElt, FoundLane); 9991 } 9992 } 9993 return Vec; 9994 }; 9995 Value *Val0 = 9996 isa<StoreInst>(VL[0]) ? cast<StoreInst>(VL[0])->getValueOperand() : VL[0]; 9997 FixedVectorType *VecTy = FixedVectorType::get(Val0->getType(), VL.size()); 9998 Value *Vec = Root ? Root : PoisonValue::get(VecTy); 9999 SmallVector<int> NonConsts; 10000 // Insert constant values at first. 
  for (int I = 0, E = VL.size(); I < E; ++I) {
    if (PostponedIndices.contains(I))
      continue;
    if (!isConstant(VL[I])) {
      NonConsts.push_back(I);
      continue;
    }
    if (Root) {
      if (!isa<UndefValue>(VL[I])) {
        NonConsts.push_back(I);
        continue;
      }
      if (isa<PoisonValue>(VL[I]))
        continue;
      if (auto *SV = dyn_cast<ShuffleVectorInst>(Root)) {
        if (SV->getMaskValue(I) == PoisonMaskElem)
          continue;
      }
    }
    Vec = CreateInsertElement(Vec, VL[I], I);
  }
  // Insert non-constant values.
  for (int I : NonConsts)
    Vec = CreateInsertElement(Vec, VL[I], I);
  // Append the instructions which are/may be part of the loop at the end to
  // make it possible to hoist the non-loop-based instructions.
  for (const std::pair<Value *, unsigned> &Pair : PostponedInsts)
    Vec = CreateInsertElement(Vec, Pair.first, Pair.second);

  return Vec;
}

/// Merges shuffle masks and emits the final shuffle instruction, if required.
/// It supports shuffling of 2 input vectors. It implements lazy shuffle
/// emission: the actual shuffle instruction is generated only when it is
/// actually required. Otherwise, the shuffle instruction emission is delayed
/// until the end of the process, to reduce the number of emitted instructions
/// and enable further analysis/transformations.
/// The class will also look through the previously emitted shuffle
/// instructions and properly mark indices in the mask as undef.
/// For example, given the code
/// \code
/// %s1 = shufflevector <2 x ty> %0, poison, <1, 0>
/// %s2 = shufflevector <2 x ty> %1, poison, <1, 0>
/// \endcode
/// and if we need to emit a shuffle of %s1 and %s2 with mask <1, 0, 3, 2>, it
/// will look through %s1 and %s2 and emit
/// \code
/// %res = shufflevector <2 x ty> %0, %1, <0, 1, 2, 3>
/// \endcode
/// instead.
/// If the 2 operands are of different sizes, the smaller one will be resized
/// and the mask recalculated properly.
/// For example, given the code
/// \code
/// %s1 = shufflevector <2 x ty> %0, poison, <1, 0, 1, 0>
/// %s2 = shufflevector <2 x ty> %1, poison, <1, 0, 1, 0>
/// \endcode
/// and if we need to emit a shuffle of %s1 and %s2 with mask <1, 0, 5, 4>, it
/// will look through %s1 and %s2 and emit
/// \code
/// %res = shufflevector <2 x ty> %0, %1, <0, 1, 2, 3>
/// \endcode
/// instead.
class BoUpSLP::ShuffleInstructionBuilder final : public BaseShuffleAnalysis {
  bool IsFinalized = false;
  /// Combined mask for all applied operands and masks. It is built during
  /// analysis and the actual emission of shuffle vector instructions.
  SmallVector<int> CommonMask;
  /// List of operands for the shuffle vector instruction. It holds at most 2
  /// operands; if a 3rd one is going to be added, the first 2 are combined
  /// into a shuffle with the \p CommonMask mask, the first operand is set to
  /// the resulting shuffle and the second operand is set to the newly added
  /// operand. The \p CommonMask is transformed in the proper way after that.
  SmallVector<Value *, 2> InVectors;
  IRBuilderBase &Builder;
  BoUpSLP &R;

  class ShuffleIRBuilder {
    IRBuilderBase &Builder;
    /// Holds all of the instructions that we gathered.
    SetVector<Instruction *> &GatherShuffleExtractSeq;
    /// A list of blocks that we are going to CSE.
    DenseSet<BasicBlock *> &CSEBlocks;

  public:
    ShuffleIRBuilder(IRBuilderBase &Builder,
                     SetVector<Instruction *> &GatherShuffleExtractSeq,
                     DenseSet<BasicBlock *> &CSEBlocks)
        : Builder(Builder), GatherShuffleExtractSeq(GatherShuffleExtractSeq),
          CSEBlocks(CSEBlocks) {}
    ~ShuffleIRBuilder() = default;
    /// Creates a shufflevector for the 2 operands with the given mask.
    Value *createShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask) {
      Value *Vec = Builder.CreateShuffleVector(V1, V2, Mask);
      if (auto *I = dyn_cast<Instruction>(Vec)) {
        GatherShuffleExtractSeq.insert(I);
        CSEBlocks.insert(I->getParent());
      }
      return Vec;
    }
    /// Creates a permutation of the single vector operand with the given mask,
    /// if it is not the identity mask.
    Value *createShuffleVector(Value *V1, ArrayRef<int> Mask) {
      if (Mask.empty())
        return V1;
      unsigned VF = Mask.size();
      unsigned LocalVF = cast<FixedVectorType>(V1->getType())->getNumElements();
      if (VF == LocalVF && ShuffleVectorInst::isIdentityMask(Mask, VF))
        return V1;
      Value *Vec = Builder.CreateShuffleVector(V1, Mask);
      if (auto *I = dyn_cast<Instruction>(Vec)) {
        GatherShuffleExtractSeq.insert(I);
        CSEBlocks.insert(I->getParent());
      }
      return Vec;
    }
    Value *createIdentity(Value *V) { return V; }
    Value *createPoison(Type *Ty, unsigned VF) {
      return PoisonValue::get(FixedVectorType::get(Ty, VF));
    }
    /// Resizes the 2 input vectors to match their sizes, if they are not equal
    /// yet. The smaller vector is resized to the size of the larger one.
    void resizeToMatch(Value *&V1, Value *&V2) {
      if (V1->getType() == V2->getType())
        return;
      int V1VF = cast<FixedVectorType>(V1->getType())->getNumElements();
      int V2VF = cast<FixedVectorType>(V2->getType())->getNumElements();
      int VF = std::max(V1VF, V2VF);
      int MinVF = std::min(V1VF, V2VF);
      SmallVector<int> IdentityMask(VF, PoisonMaskElem);
      std::iota(IdentityMask.begin(), std::next(IdentityMask.begin(), MinVF),
                0);
      Value *&Op = MinVF == V1VF ? V1 : V2;
      Op = Builder.CreateShuffleVector(Op, IdentityMask);
      if (auto *I = dyn_cast<Instruction>(Op)) {
        GatherShuffleExtractSeq.insert(I);
        CSEBlocks.insert(I->getParent());
      }
      if (MinVF == V1VF)
        V1 = Op;
      else
        V2 = Op;
    }
  };

  /// Smart shuffle instruction emission; walks through shuffle trees and tries
  /// to find the best matching vector for the actual shuffle instruction.
  Value *createShuffle(Value *V1, Value *V2, ArrayRef<int> Mask) {
    assert(V1 && "Expected at least one vector value.");
    ShuffleIRBuilder ShuffleBuilder(Builder, R.GatherShuffleExtractSeq,
                                    R.CSEBlocks);
    return BaseShuffleAnalysis::createShuffle<Value *>(V1, V2, Mask,
                                                       ShuffleBuilder);
  }

  /// Transforms the mask \p CommonMask per the given \p Mask to make a proper
  /// mask after the shuffle emission.
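  /// For example (illustrative): if \p Mask is <2, poison, 0, poison>, lanes 0
  /// and 2 were produced by the just-emitted shuffle, so \p CommonMask is
  /// rewritten to reference them as an identity: CommonMask[0] = 0 and
  /// CommonMask[2] = 2, while the other lanes are left untouched.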
  static void transformMaskAfterShuffle(MutableArrayRef<int> CommonMask,
                                        ArrayRef<int> Mask) {
    for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
      if (Mask[Idx] != PoisonMaskElem)
        CommonMask[Idx] = Idx;
  }

public:
  ShuffleInstructionBuilder(IRBuilderBase &Builder, BoUpSLP &R)
      : Builder(Builder), R(R) {}

  /// Adjusts extractelements after reusing them.
  Value *adjustExtracts(const TreeEntry *E, MutableArrayRef<int> Mask,
                        ArrayRef<std::optional<TTI::ShuffleKind>> ShuffleKinds,
                        unsigned NumParts, bool &UseVecBaseAsInput) {
    UseVecBaseAsInput = false;
    SmallPtrSet<Value *, 4> UniqueBases;
    Value *VecBase = nullptr;
    for (int I = 0, Sz = Mask.size(); I < Sz; ++I) {
      int Idx = Mask[I];
      if (Idx == PoisonMaskElem)
        continue;
      auto *EI = cast<ExtractElementInst>(E->Scalars[I]);
      VecBase = EI->getVectorOperand();
      if (const TreeEntry *TE = R.getTreeEntry(VecBase))
        VecBase = TE->VectorizedValue;
      assert(VecBase && "Expected vectorized value.");
      UniqueBases.insert(VecBase);
      // If the extractelement has only one use and that use is vectorized, the
      // extractelement itself can be deleted.
      if (!EI->hasOneUse() || any_of(EI->users(), [&](User *U) {
            return !R.ScalarToTreeEntry.count(U);
          }))
        continue;
      R.eraseInstruction(EI);
    }
    if (NumParts == 1 || UniqueBases.size() == 1)
      return VecBase;
    UseVecBaseAsInput = true;
    auto TransformToIdentity = [](MutableArrayRef<int> Mask) {
      for (auto [I, Idx] : enumerate(Mask))
        if (Idx != PoisonMaskElem)
          Idx = I;
    };
    // Perform a multi-register vector shuffle, joining the parts into a single
    // virtual long vector.
    // Each part needs to be shuffled independently, and then all these parts
    // are inserted into a long virtual vector register, forming the original
    // vector.
    Value *Vec = nullptr;
    SmallVector<int> VecMask(Mask.size(), PoisonMaskElem);
    unsigned SliceSize = E->Scalars.size() / NumParts;
    for (unsigned Part = 0; Part < NumParts; ++Part) {
      ArrayRef<Value *> VL =
          ArrayRef(E->Scalars).slice(Part * SliceSize, SliceSize);
      MutableArrayRef<int> SubMask = Mask.slice(Part * SliceSize, SliceSize);
      constexpr int MaxBases = 2;
      SmallVector<Value *, MaxBases> Bases(MaxBases);
#ifndef NDEBUG
      int PrevSize = 0;
#endif // NDEBUG
      for (const auto [I, V] : enumerate(VL)) {
        if (SubMask[I] == PoisonMaskElem)
          continue;
        Value *VecOp = cast<ExtractElementInst>(V)->getVectorOperand();
        if (const TreeEntry *TE = R.getTreeEntry(VecOp))
          VecOp = TE->VectorizedValue;
        assert(VecOp && "Expected vectorized value.");
        const int Size =
            cast<FixedVectorType>(VecOp->getType())->getNumElements();
#ifndef NDEBUG
        assert((PrevSize == Size || PrevSize == 0) &&
               "Expected vectors of the same size.");
        PrevSize = Size;
#endif // NDEBUG
        Bases[SubMask[I] < Size ?
0 : 1] = VecOp; 10235 } 10236 if (!Bases.front()) 10237 continue; 10238 Value *SubVec; 10239 if (Bases.back()) { 10240 SubVec = createShuffle(Bases.front(), Bases.back(), SubMask); 10241 TransformToIdentity(SubMask); 10242 } else { 10243 SubVec = Bases.front(); 10244 } 10245 if (!Vec) { 10246 Vec = SubVec; 10247 assert((Part == 0 || all_of(seq<unsigned>(0, Part), 10248 [&](unsigned P) { 10249 ArrayRef<int> SubMask = 10250 Mask.slice(P * SliceSize, SliceSize); 10251 return all_of(SubMask, [](int Idx) { 10252 return Idx == PoisonMaskElem; 10253 }); 10254 })) && 10255 "Expected first part or all previous parts masked."); 10256 copy(SubMask, std::next(VecMask.begin(), Part * SliceSize)); 10257 } else { 10258 unsigned VF = cast<FixedVectorType>(Vec->getType())->getNumElements(); 10259 if (Vec->getType() != SubVec->getType()) { 10260 unsigned SubVecVF = 10261 cast<FixedVectorType>(SubVec->getType())->getNumElements(); 10262 VF = std::max(VF, SubVecVF); 10263 } 10264 // Adjust SubMask. 10265 for (auto [I, Idx] : enumerate(SubMask)) 10266 if (Idx != PoisonMaskElem) 10267 Idx += VF; 10268 copy(SubMask, std::next(VecMask.begin(), Part * SliceSize)); 10269 Vec = createShuffle(Vec, SubVec, VecMask); 10270 TransformToIdentity(VecMask); 10271 } 10272 } 10273 copy(VecMask, Mask.begin()); 10274 return Vec; 10275 } 10276 /// Checks if the specified entry \p E needs to be delayed because of its 10277 /// dependency nodes. 10278 std::optional<Value *> 10279 needToDelay(const TreeEntry *E, 10280 ArrayRef<SmallVector<const TreeEntry *>> Deps) const { 10281 // No need to delay emission if all deps are ready. 10282 if (all_of(Deps, [](ArrayRef<const TreeEntry *> TEs) { 10283 return all_of( 10284 TEs, [](const TreeEntry *TE) { return TE->VectorizedValue; }); 10285 })) 10286 return std::nullopt; 10287 // Postpone gather emission, will be emitted after the end of the 10288 // process to keep correct order. 10289 auto *VecTy = FixedVectorType::get(E->Scalars.front()->getType(), 10290 E->getVectorFactor()); 10291 return Builder.CreateAlignedLoad( 10292 VecTy, PoisonValue::get(PointerType::getUnqual(VecTy->getContext())), 10293 MaybeAlign()); 10294 } 10295 /// Adds 2 input vectors (in form of tree entries) and the mask for their 10296 /// shuffling. 10297 void add(const TreeEntry &E1, const TreeEntry &E2, ArrayRef<int> Mask) { 10298 add(E1.VectorizedValue, E2.VectorizedValue, Mask); 10299 } 10300 /// Adds single input vector (in form of tree entry) and the mask for its 10301 /// shuffling. 10302 void add(const TreeEntry &E1, ArrayRef<int> Mask) { 10303 add(E1.VectorizedValue, Mask); 10304 } 10305 /// Adds 2 input vectors and the mask for their shuffling. 
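  /// If 2 vectors are already queued, they are first folded into a single
  /// shuffle using the accumulated \p CommonMask, the new pair is pre-shuffled
  /// with \p Mask, and the result becomes the second queued operand.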
10306 void add(Value *V1, Value *V2, ArrayRef<int> Mask) { 10307 assert(V1 && V2 && !Mask.empty() && "Expected non-empty input vectors."); 10308 if (InVectors.empty()) { 10309 InVectors.push_back(V1); 10310 InVectors.push_back(V2); 10311 CommonMask.assign(Mask.begin(), Mask.end()); 10312 return; 10313 } 10314 Value *Vec = InVectors.front(); 10315 if (InVectors.size() == 2) { 10316 Vec = createShuffle(Vec, InVectors.back(), CommonMask); 10317 transformMaskAfterShuffle(CommonMask, CommonMask); 10318 } else if (cast<FixedVectorType>(Vec->getType())->getNumElements() != 10319 Mask.size()) { 10320 Vec = createShuffle(Vec, nullptr, CommonMask); 10321 transformMaskAfterShuffle(CommonMask, CommonMask); 10322 } 10323 V1 = createShuffle(V1, V2, Mask); 10324 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10325 if (Mask[Idx] != PoisonMaskElem) 10326 CommonMask[Idx] = Idx + Sz; 10327 InVectors.front() = Vec; 10328 if (InVectors.size() == 2) 10329 InVectors.back() = V1; 10330 else 10331 InVectors.push_back(V1); 10332 } 10333 /// Adds another one input vector and the mask for the shuffling. 10334 void add(Value *V1, ArrayRef<int> Mask, bool = false) { 10335 if (InVectors.empty()) { 10336 if (!isa<FixedVectorType>(V1->getType())) { 10337 V1 = createShuffle(V1, nullptr, CommonMask); 10338 CommonMask.assign(Mask.size(), PoisonMaskElem); 10339 transformMaskAfterShuffle(CommonMask, Mask); 10340 } 10341 InVectors.push_back(V1); 10342 CommonMask.assign(Mask.begin(), Mask.end()); 10343 return; 10344 } 10345 const auto *It = find(InVectors, V1); 10346 if (It == InVectors.end()) { 10347 if (InVectors.size() == 2 || 10348 InVectors.front()->getType() != V1->getType() || 10349 !isa<FixedVectorType>(V1->getType())) { 10350 Value *V = InVectors.front(); 10351 if (InVectors.size() == 2) { 10352 V = createShuffle(InVectors.front(), InVectors.back(), CommonMask); 10353 transformMaskAfterShuffle(CommonMask, CommonMask); 10354 } else if (cast<FixedVectorType>(V->getType())->getNumElements() != 10355 CommonMask.size()) { 10356 V = createShuffle(InVectors.front(), nullptr, CommonMask); 10357 transformMaskAfterShuffle(CommonMask, CommonMask); 10358 } 10359 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10360 if (CommonMask[Idx] == PoisonMaskElem && Mask[Idx] != PoisonMaskElem) 10361 CommonMask[Idx] = 10362 V->getType() != V1->getType() 10363 ? Idx + Sz 10364 : Mask[Idx] + cast<FixedVectorType>(V1->getType()) 10365 ->getNumElements(); 10366 if (V->getType() != V1->getType()) 10367 V1 = createShuffle(V1, nullptr, Mask); 10368 InVectors.front() = V; 10369 if (InVectors.size() == 2) 10370 InVectors.back() = V1; 10371 else 10372 InVectors.push_back(V1); 10373 return; 10374 } 10375 // Check if second vector is required if the used elements are already 10376 // used from the first one. 10377 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10378 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem) { 10379 InVectors.push_back(V1); 10380 break; 10381 } 10382 } 10383 int VF = CommonMask.size(); 10384 if (auto *FTy = dyn_cast<FixedVectorType>(V1->getType())) 10385 VF = FTy->getNumElements(); 10386 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10387 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem) 10388 CommonMask[Idx] = Mask[Idx] + (It == InVectors.begin() ? 0 : VF); 10389 } 10390 /// Adds another one input vector and the mask for the shuffling. 
10391 void addOrdered(Value *V1, ArrayRef<unsigned> Order) { 10392 SmallVector<int> NewMask; 10393 inversePermutation(Order, NewMask); 10394 add(V1, NewMask); 10395 } 10396 Value *gather(ArrayRef<Value *> VL, unsigned MaskVF = 0, 10397 Value *Root = nullptr) { 10398 return R.gather(VL, Root); 10399 } 10400 Value *createFreeze(Value *V) { return Builder.CreateFreeze(V); } 10401 /// Finalize emission of the shuffles. 10402 /// \param Action the action (if any) to be performed before final applying of 10403 /// the \p ExtMask mask. 10404 Value * 10405 finalize(ArrayRef<int> ExtMask, unsigned VF = 0, 10406 function_ref<void(Value *&, SmallVectorImpl<int> &)> Action = {}) { 10407 IsFinalized = true; 10408 if (Action) { 10409 Value *Vec = InVectors.front(); 10410 if (InVectors.size() == 2) { 10411 Vec = createShuffle(Vec, InVectors.back(), CommonMask); 10412 InVectors.pop_back(); 10413 } else { 10414 Vec = createShuffle(Vec, nullptr, CommonMask); 10415 } 10416 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10417 if (CommonMask[Idx] != PoisonMaskElem) 10418 CommonMask[Idx] = Idx; 10419 assert(VF > 0 && 10420 "Expected vector length for the final value before action."); 10421 unsigned VecVF = cast<FixedVectorType>(Vec->getType())->getNumElements(); 10422 if (VecVF < VF) { 10423 SmallVector<int> ResizeMask(VF, PoisonMaskElem); 10424 std::iota(ResizeMask.begin(), std::next(ResizeMask.begin(), VecVF), 0); 10425 Vec = createShuffle(Vec, nullptr, ResizeMask); 10426 } 10427 Action(Vec, CommonMask); 10428 InVectors.front() = Vec; 10429 } 10430 if (!ExtMask.empty()) { 10431 if (CommonMask.empty()) { 10432 CommonMask.assign(ExtMask.begin(), ExtMask.end()); 10433 } else { 10434 SmallVector<int> NewMask(ExtMask.size(), PoisonMaskElem); 10435 for (int I = 0, Sz = ExtMask.size(); I < Sz; ++I) { 10436 if (ExtMask[I] == PoisonMaskElem) 10437 continue; 10438 NewMask[I] = CommonMask[ExtMask[I]]; 10439 } 10440 CommonMask.swap(NewMask); 10441 } 10442 } 10443 if (CommonMask.empty()) { 10444 assert(InVectors.size() == 1 && "Expected only one vector with no mask"); 10445 return InVectors.front(); 10446 } 10447 if (InVectors.size() == 2) 10448 return createShuffle(InVectors.front(), InVectors.back(), CommonMask); 10449 return createShuffle(InVectors.front(), nullptr, CommonMask); 10450 } 10451 10452 ~ShuffleInstructionBuilder() { 10453 assert((IsFinalized || CommonMask.empty()) && 10454 "Shuffle construction must be finalized."); 10455 } 10456 }; 10457 10458 Value *BoUpSLP::vectorizeOperand(TreeEntry *E, unsigned NodeIdx, 10459 bool PostponedPHIs) { 10460 ValueList &VL = E->getOperand(NodeIdx); 10461 if (E->State == TreeEntry::PossibleStridedVectorize && 10462 !E->ReorderIndices.empty()) { 10463 SmallVector<int> Mask(E->ReorderIndices.begin(), E->ReorderIndices.end()); 10464 reorderScalars(VL, Mask); 10465 } 10466 const unsigned VF = VL.size(); 10467 InstructionsState S = getSameOpcode(VL, *TLI); 10468 // Special processing for GEPs bundle, which may include non-gep values. 
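// Illustrative note (an assumption, not from the original source): such an
// operand bundle can mix getelementptr instructions with other pointer-typed
// values (e.g. a plain pointer argument or a constant null); the opcode for
// the bundle is then taken from the first GEP found below, letting the
// remaining pointer values ride along as part of the same vectorized operand.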
10469 if (!S.getOpcode() && VL.front()->getType()->isPointerTy()) { 10470 const auto *It = 10471 find_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); }); 10472 if (It != VL.end()) 10473 S = getSameOpcode(*It, *TLI); 10474 } 10475 if (S.getOpcode()) { 10476 auto CheckSameVE = [&](const TreeEntry *VE) { 10477 return VE->isSame(VL) && 10478 (any_of(VE->UserTreeIndices, 10479 [E, NodeIdx](const EdgeInfo &EI) { 10480 return EI.UserTE == E && EI.EdgeIdx == NodeIdx; 10481 }) || 10482 any_of(VectorizableTree, 10483 [E, NodeIdx, VE](const std::unique_ptr<TreeEntry> &TE) { 10484 return TE->isOperandGatherNode({E, NodeIdx}) && 10485 VE->isSame(TE->Scalars); 10486 })); 10487 }; 10488 TreeEntry *VE = getTreeEntry(S.OpValue); 10489 bool IsSameVE = VE && CheckSameVE(VE); 10490 if (!IsSameVE) { 10491 auto It = MultiNodeScalars.find(S.OpValue); 10492 if (It != MultiNodeScalars.end()) { 10493 auto *I = find_if(It->getSecond(), [&](const TreeEntry *TE) { 10494 return TE != VE && CheckSameVE(TE); 10495 }); 10496 if (I != It->getSecond().end()) { 10497 VE = *I; 10498 IsSameVE = true; 10499 } 10500 } 10501 } 10502 if (IsSameVE) { 10503 auto FinalShuffle = [&](Value *V, ArrayRef<int> Mask) { 10504 ShuffleInstructionBuilder ShuffleBuilder(Builder, *this); 10505 ShuffleBuilder.add(V, Mask); 10506 return ShuffleBuilder.finalize(std::nullopt); 10507 }; 10508 Value *V = vectorizeTree(VE, PostponedPHIs); 10509 if (VF != cast<FixedVectorType>(V->getType())->getNumElements()) { 10510 if (!VE->ReuseShuffleIndices.empty()) { 10511 // Reshuffle to get only unique values. 10512 // If some of the scalars are duplicated in the vectorization 10513 // tree entry, we do not vectorize them but instead generate a 10514 // mask for the reuses. But if there are several users of the 10515 // same entry, they may have different vectorization factors. 10516 // This is especially important for PHI nodes. In this case, we 10517 // need to adapt the resulting instruction for the user 10518 // vectorization factor and have to reshuffle it again to take 10519 // only unique elements of the vector. Without this code the 10520 // function incorrectly returns reduced vector instruction with 10521 // the same elements, not with the unique ones. 10522 10523 // block: 10524 // %phi = phi <2 x > { .., %entry} {%shuffle, %block} 10525 // %2 = shuffle <2 x > %phi, poison, <4 x > <1, 1, 0, 0> 10526 // ... (use %2) 10527 // %shuffle = shuffle <2 x> %2, poison, <2 x> {2, 0} 10528 // br %block 10529 SmallVector<int> UniqueIdxs(VF, PoisonMaskElem); 10530 SmallSet<int, 4> UsedIdxs; 10531 int Pos = 0; 10532 for (int Idx : VE->ReuseShuffleIndices) { 10533 if (Idx != static_cast<int>(VF) && Idx != PoisonMaskElem && 10534 UsedIdxs.insert(Idx).second) 10535 UniqueIdxs[Idx] = Pos; 10536 ++Pos; 10537 } 10538 assert(VF >= UsedIdxs.size() && "Expected vectorization factor " 10539 "less than original vector size."); 10540 UniqueIdxs.append(VF - UsedIdxs.size(), PoisonMaskElem); 10541 V = FinalShuffle(V, UniqueIdxs); 10542 } else { 10543 assert(VF < cast<FixedVectorType>(V->getType())->getNumElements() && 10544 "Expected vectorization factor less " 10545 "than original vector size."); 10546 SmallVector<int> UniformMask(VF, 0); 10547 std::iota(UniformMask.begin(), UniformMask.end(), 0); 10548 V = FinalShuffle(V, UniformMask); 10549 } 10550 } 10551 // Need to update the operand gather node, if actually the operand is not a 10552 // vectorized node, but the buildvector/gather node, which matches one of 10553 // the vectorized nodes. 
10554 if (find_if(VE->UserTreeIndices, [&](const EdgeInfo &EI) { 10555 return EI.UserTE == E && EI.EdgeIdx == NodeIdx; 10556 }) == VE->UserTreeIndices.end()) { 10557 auto *It = find_if( 10558 VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) { 10559 return TE->State == TreeEntry::NeedToGather && 10560 TE->UserTreeIndices.front().UserTE == E && 10561 TE->UserTreeIndices.front().EdgeIdx == NodeIdx; 10562 }); 10563 assert(It != VectorizableTree.end() && "Expected gather node operand."); 10564 (*It)->VectorizedValue = V; 10565 } 10566 return V; 10567 } 10568 } 10569 10570 // Find the corresponding gather entry and vectorize it. 10571 // Allows to be more accurate with tree/graph transformations, checks for the 10572 // correctness of the transformations in many cases. 10573 auto *I = find_if(VectorizableTree, 10574 [E, NodeIdx](const std::unique_ptr<TreeEntry> &TE) { 10575 return TE->isOperandGatherNode({E, NodeIdx}); 10576 }); 10577 assert(I != VectorizableTree.end() && "Gather node is not in the graph."); 10578 assert(I->get()->UserTreeIndices.size() == 1 && 10579 "Expected only single user for the gather node."); 10580 assert(I->get()->isSame(VL) && "Expected same list of scalars."); 10581 return vectorizeTree(I->get(), PostponedPHIs); 10582 } 10583 10584 template <typename BVTy, typename ResTy, typename... Args> 10585 ResTy BoUpSLP::processBuildVector(const TreeEntry *E, Args &...Params) { 10586 assert(E->State == TreeEntry::NeedToGather && "Expected gather node."); 10587 unsigned VF = E->getVectorFactor(); 10588 10589 bool NeedFreeze = false; 10590 SmallVector<int> ReuseShuffleIndicies(E->ReuseShuffleIndices.begin(), 10591 E->ReuseShuffleIndices.end()); 10592 SmallVector<Value *> GatheredScalars(E->Scalars.begin(), E->Scalars.end()); 10593 // Build a mask out of the reorder indices and reorder scalars per this 10594 // mask. 
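// Illustrative note (an assumption about the helper's semantics, not from the
// original source): assuming inversePermutation() builds the mask as
// Mask[ReorderIndices[I]] = I, a node with ReorderIndices = {2, 0, 1} yields
// ReorderMask = {1, 2, 0}; reorderScalars() then applies this mask so the
// gathered scalars line up with the lane order the vectorized users expect.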
10595 SmallVector<int> ReorderMask; 10596 inversePermutation(E->ReorderIndices, ReorderMask); 10597 if (!ReorderMask.empty()) 10598 reorderScalars(GatheredScalars, ReorderMask); 10599 auto FindReusedSplat = [&](MutableArrayRef<int> Mask, unsigned InputVF) { 10600 if (!isSplat(E->Scalars) || none_of(E->Scalars, [](Value *V) { 10601 return isa<UndefValue>(V) && !isa<PoisonValue>(V); 10602 })) 10603 return false; 10604 TreeEntry *UserTE = E->UserTreeIndices.back().UserTE; 10605 unsigned EdgeIdx = E->UserTreeIndices.back().EdgeIdx; 10606 if (UserTE->getNumOperands() != 2) 10607 return false; 10608 auto *It = 10609 find_if(VectorizableTree, [=](const std::unique_ptr<TreeEntry> &TE) { 10610 return find_if(TE->UserTreeIndices, [=](const EdgeInfo &EI) { 10611 return EI.UserTE == UserTE && EI.EdgeIdx != EdgeIdx; 10612 }) != TE->UserTreeIndices.end(); 10613 }); 10614 if (It == VectorizableTree.end()) 10615 return false; 10616 int Idx; 10617 if ((Mask.size() < InputVF && 10618 ShuffleVectorInst::isExtractSubvectorMask(Mask, InputVF, Idx) && 10619 Idx == 0) || 10620 (Mask.size() == InputVF && 10621 ShuffleVectorInst::isIdentityMask(Mask, Mask.size()))) { 10622 std::iota(Mask.begin(), Mask.end(), 0); 10623 } else { 10624 unsigned I = 10625 *find_if_not(Mask, [](int Idx) { return Idx == PoisonMaskElem; }); 10626 std::fill(Mask.begin(), Mask.end(), I); 10627 } 10628 return true; 10629 }; 10630 BVTy ShuffleBuilder(Params...); 10631 ResTy Res = ResTy(); 10632 SmallVector<int> Mask; 10633 SmallVector<int> ExtractMask(GatheredScalars.size(), PoisonMaskElem); 10634 SmallVector<std::optional<TTI::ShuffleKind>> ExtractShuffles; 10635 Value *ExtractVecBase = nullptr; 10636 bool UseVecBaseAsInput = false; 10637 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> GatherShuffles; 10638 SmallVector<SmallVector<const TreeEntry *>> Entries; 10639 Type *ScalarTy = GatheredScalars.front()->getType(); 10640 auto *VecTy = FixedVectorType::get(ScalarTy, GatheredScalars.size()); 10641 unsigned NumParts = TTI->getNumberOfParts(VecTy); 10642 if (NumParts == 0 || NumParts >= GatheredScalars.size()) 10643 NumParts = 1; 10644 if (!all_of(GatheredScalars, UndefValue::classof)) { 10645 // Check for gathered extracts. 10646 bool Resized = false; 10647 ExtractShuffles = 10648 tryToGatherExtractElements(GatheredScalars, ExtractMask, NumParts); 10649 if (!ExtractShuffles.empty()) { 10650 SmallVector<const TreeEntry *> ExtractEntries; 10651 for (auto [Idx, I] : enumerate(ExtractMask)) { 10652 if (I == PoisonMaskElem) 10653 continue; 10654 if (const auto *TE = getTreeEntry( 10655 cast<ExtractElementInst>(E->Scalars[Idx])->getVectorOperand())) 10656 ExtractEntries.push_back(TE); 10657 } 10658 if (std::optional<ResTy> Delayed = 10659 ShuffleBuilder.needToDelay(E, ExtractEntries)) { 10660 // Delay emission of gathers which are not ready yet. 10661 PostponedGathers.insert(E); 10662 // Postpone gather emission, will be emitted after the end of the 10663 // process to keep correct order. 
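// Illustrative note (not from the original source): the delayed value
// returned by needToDelay() is a placeholder of the right vector type,
// roughly
//   %stub = load <VF x ScalarTy>, ptr poison
// Once all dependencies have been vectorized, the postponed gather is
// emitted and the stub is replaced and erased in the PostponedGathers loop
// of vectorizeTree().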
10664 return *Delayed; 10665 } 10666 if (Value *VecBase = ShuffleBuilder.adjustExtracts( 10667 E, ExtractMask, ExtractShuffles, NumParts, UseVecBaseAsInput)) { 10668 ExtractVecBase = VecBase; 10669 if (auto *VecBaseTy = dyn_cast<FixedVectorType>(VecBase->getType())) 10670 if (VF == VecBaseTy->getNumElements() && 10671 GatheredScalars.size() != VF) { 10672 Resized = true; 10673 GatheredScalars.append(VF - GatheredScalars.size(), 10674 PoisonValue::get(ScalarTy)); 10675 } 10676 } 10677 } 10678 // Gather extracts after we check for full matched gathers only. 10679 if (!ExtractShuffles.empty() || E->getOpcode() != Instruction::Load || 10680 E->isAltShuffle() || 10681 all_of(E->Scalars, [this](Value *V) { return getTreeEntry(V); }) || 10682 isSplat(E->Scalars) || 10683 (E->Scalars != GatheredScalars && GatheredScalars.size() <= 2)) { 10684 GatherShuffles = 10685 isGatherShuffledEntry(E, GatheredScalars, Mask, Entries, NumParts); 10686 } 10687 if (!GatherShuffles.empty()) { 10688 if (std::optional<ResTy> Delayed = 10689 ShuffleBuilder.needToDelay(E, Entries)) { 10690 // Delay emission of gathers which are not ready yet. 10691 PostponedGathers.insert(E); 10692 // Postpone gather emission, will be emitted after the end of the 10693 // process to keep correct order. 10694 return *Delayed; 10695 } 10696 if (GatherShuffles.size() == 1 && 10697 *GatherShuffles.front() == TTI::SK_PermuteSingleSrc && 10698 Entries.front().front()->isSame(E->Scalars)) { 10699 // Perfect match in the graph, will reuse the previously vectorized 10700 // node. Cost is 0. 10701 LLVM_DEBUG( 10702 dbgs() 10703 << "SLP: perfect diamond match for gather bundle " 10704 << shortBundleName(E->Scalars) << ".\n"); 10705 // Restore the mask for previous partially matched values. 10706 Mask.resize(E->Scalars.size()); 10707 const TreeEntry *FrontTE = Entries.front().front(); 10708 if (FrontTE->ReorderIndices.empty() && 10709 ((FrontTE->ReuseShuffleIndices.empty() && 10710 E->Scalars.size() == FrontTE->Scalars.size()) || 10711 (E->Scalars.size() == FrontTE->ReuseShuffleIndices.size()))) { 10712 std::iota(Mask.begin(), Mask.end(), 0); 10713 } else { 10714 for (auto [I, V] : enumerate(E->Scalars)) { 10715 if (isa<PoisonValue>(V)) { 10716 Mask[I] = PoisonMaskElem; 10717 continue; 10718 } 10719 Mask[I] = FrontTE->findLaneForValue(V); 10720 } 10721 } 10722 ShuffleBuilder.add(*FrontTE, Mask); 10723 Res = ShuffleBuilder.finalize(E->getCommonMask()); 10724 return Res; 10725 } 10726 if (!Resized) { 10727 if (GatheredScalars.size() != VF && 10728 any_of(Entries, [&](ArrayRef<const TreeEntry *> TEs) { 10729 return any_of(TEs, [&](const TreeEntry *TE) { 10730 return TE->getVectorFactor() == VF; 10731 }); 10732 })) 10733 GatheredScalars.append(VF - GatheredScalars.size(), 10734 PoisonValue::get(ScalarTy)); 10735 } 10736 // Remove shuffled elements from list of gathers. 10737 for (int I = 0, Sz = Mask.size(); I < Sz; ++I) { 10738 if (Mask[I] != PoisonMaskElem) 10739 GatheredScalars[I] = PoisonValue::get(ScalarTy); 10740 } 10741 } 10742 } 10743 auto TryPackScalars = [&](SmallVectorImpl<Value *> &Scalars, 10744 SmallVectorImpl<int> &ReuseMask, 10745 bool IsRootPoison) { 10746 // For splats with can emit broadcasts instead of gathers, so try to find 10747 // such sequences. 
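// Illustrative example (not from the original source): for the scalars
// {%x, %x, %x, %x} a broadcast is just
//   %v  = insertelement <4 x i32> poison, i32 %x, i64 0
//   %bc = shufflevector <4 x i32> %v, <4 x i32> poison, <4 x i32> zeroinitializer
// instead of four separate insertelement instructions, which is what the
// IsSplat handling below tries to arrange.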
10748 bool IsSplat = IsRootPoison && isSplat(Scalars) && 10749 (Scalars.size() > 2 || Scalars.front() == Scalars.back()); 10750 Scalars.append(VF - Scalars.size(), PoisonValue::get(ScalarTy)); 10751 SmallVector<int> UndefPos; 10752 DenseMap<Value *, unsigned> UniquePositions; 10753 // Gather unique non-const values and all constant values. 10754 // For repeated values, just shuffle them. 10755 int NumNonConsts = 0; 10756 int SinglePos = 0; 10757 for (auto [I, V] : enumerate(Scalars)) { 10758 if (isa<UndefValue>(V)) { 10759 if (!isa<PoisonValue>(V)) { 10760 ReuseMask[I] = I; 10761 UndefPos.push_back(I); 10762 } 10763 continue; 10764 } 10765 if (isConstant(V)) { 10766 ReuseMask[I] = I; 10767 continue; 10768 } 10769 ++NumNonConsts; 10770 SinglePos = I; 10771 Value *OrigV = V; 10772 Scalars[I] = PoisonValue::get(ScalarTy); 10773 if (IsSplat) { 10774 Scalars.front() = OrigV; 10775 ReuseMask[I] = 0; 10776 } else { 10777 const auto Res = UniquePositions.try_emplace(OrigV, I); 10778 Scalars[Res.first->second] = OrigV; 10779 ReuseMask[I] = Res.first->second; 10780 } 10781 } 10782 if (NumNonConsts == 1) { 10783 // Restore single insert element. 10784 if (IsSplat) { 10785 ReuseMask.assign(VF, PoisonMaskElem); 10786 std::swap(Scalars.front(), Scalars[SinglePos]); 10787 if (!UndefPos.empty() && UndefPos.front() == 0) 10788 Scalars.front() = UndefValue::get(ScalarTy); 10789 } 10790 ReuseMask[SinglePos] = SinglePos; 10791 } else if (!UndefPos.empty() && IsSplat) { 10792 // For undef values, try to replace them with the simple broadcast. 10793 // We can do it if the broadcasted value is guaranteed to be 10794 // non-poisonous, or by freezing the incoming scalar value first. 10795 auto *It = find_if(Scalars, [this, E](Value *V) { 10796 return !isa<UndefValue>(V) && 10797 (getTreeEntry(V) || isGuaranteedNotToBePoison(V) || 10798 (E->UserTreeIndices.size() == 1 && 10799 any_of(V->uses(), [E](const Use &U) { 10800 // Check if the value already used in the same operation in 10801 // one of the nodes already. 10802 return E->UserTreeIndices.front().EdgeIdx != 10803 U.getOperandNo() && 10804 is_contained( 10805 E->UserTreeIndices.front().UserTE->Scalars, 10806 U.getUser()); 10807 }))); 10808 }); 10809 if (It != Scalars.end()) { 10810 // Replace undefs by the non-poisoned scalars and emit broadcast. 10811 int Pos = std::distance(Scalars.begin(), It); 10812 for (int I : UndefPos) { 10813 // Set the undef position to the non-poisoned scalar. 10814 ReuseMask[I] = Pos; 10815 // Replace the undef by the poison, in the mask it is replaced by 10816 // non-poisoned scalar already. 10817 if (I != Pos) 10818 Scalars[I] = PoisonValue::get(ScalarTy); 10819 } 10820 } else { 10821 // Replace undefs by the poisons, emit broadcast and then emit 10822 // freeze. 10823 for (int I : UndefPos) { 10824 ReuseMask[I] = PoisonMaskElem; 10825 if (isa<UndefValue>(Scalars[I])) 10826 Scalars[I] = PoisonValue::get(ScalarTy); 10827 } 10828 NeedFreeze = true; 10829 } 10830 } 10831 }; 10832 if (!ExtractShuffles.empty() || !GatherShuffles.empty()) { 10833 bool IsNonPoisoned = true; 10834 bool IsUsedInExpr = true; 10835 Value *Vec1 = nullptr; 10836 if (!ExtractShuffles.empty()) { 10837 // Gather of extractelements can be represented as just a shuffle of 10838 // a single/two vectors the scalars are extracted from. 10839 // Find input vectors. 
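// Illustrative example (not from the original source): a gather of
//   %a = extractelement <4 x i32> %src, i32 0
//   %b = extractelement <4 x i32> %src, i32 2
// can be emitted as the single shuffle
//   %g = shufflevector <4 x i32> %src, <4 x i32> poison, <2 x i32> <i32 0, i32 2>
// and with two distinct source vectors the second one becomes the second
// shuffle operand, its lanes offset by the width of the first vector.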
10840 Value *Vec2 = nullptr; 10841 for (unsigned I = 0, Sz = ExtractMask.size(); I < Sz; ++I) { 10842 if (!Mask.empty() && Mask[I] != PoisonMaskElem) 10843 ExtractMask[I] = PoisonMaskElem; 10844 } 10845 if (UseVecBaseAsInput) { 10846 Vec1 = ExtractVecBase; 10847 } else { 10848 for (unsigned I = 0, Sz = ExtractMask.size(); I < Sz; ++I) { 10849 if (ExtractMask[I] == PoisonMaskElem) 10850 continue; 10851 if (isa<UndefValue>(E->Scalars[I])) 10852 continue; 10853 auto *EI = cast<ExtractElementInst>(E->Scalars[I]); 10854 Value *VecOp = EI->getVectorOperand(); 10855 if (const auto *TE = getTreeEntry(VecOp)) 10856 if (TE->VectorizedValue) 10857 VecOp = TE->VectorizedValue; 10858 if (!Vec1) { 10859 Vec1 = VecOp; 10860 } else if (Vec1 != EI->getVectorOperand()) { 10861 assert((!Vec2 || Vec2 == EI->getVectorOperand()) && 10862 "Expected only 1 or 2 vectors shuffle."); 10863 Vec2 = VecOp; 10864 } 10865 } 10866 } 10867 if (Vec2) { 10868 IsUsedInExpr = false; 10869 IsNonPoisoned &= 10870 isGuaranteedNotToBePoison(Vec1) && isGuaranteedNotToBePoison(Vec2); 10871 ShuffleBuilder.add(Vec1, Vec2, ExtractMask); 10872 } else if (Vec1) { 10873 IsUsedInExpr &= FindReusedSplat( 10874 ExtractMask, 10875 cast<FixedVectorType>(Vec1->getType())->getNumElements()); 10876 ShuffleBuilder.add(Vec1, ExtractMask, /*ForExtracts=*/true); 10877 IsNonPoisoned &= isGuaranteedNotToBePoison(Vec1); 10878 } else { 10879 IsUsedInExpr = false; 10880 ShuffleBuilder.add(PoisonValue::get(FixedVectorType::get( 10881 ScalarTy, GatheredScalars.size())), 10882 ExtractMask, /*ForExtracts=*/true); 10883 } 10884 } 10885 if (!GatherShuffles.empty()) { 10886 unsigned SliceSize = E->Scalars.size() / NumParts; 10887 SmallVector<int> VecMask(Mask.size(), PoisonMaskElem); 10888 for (const auto [I, TEs] : enumerate(Entries)) { 10889 if (TEs.empty()) { 10890 assert(!GatherShuffles[I] && 10891 "No shuffles with empty entries list expected."); 10892 continue; 10893 } 10894 assert((TEs.size() == 1 || TEs.size() == 2) && 10895 "Expected shuffle of 1 or 2 entries."); 10896 auto SubMask = ArrayRef(Mask).slice(I * SliceSize, SliceSize); 10897 VecMask.assign(VecMask.size(), PoisonMaskElem); 10898 copy(SubMask, std::next(VecMask.begin(), I * SliceSize)); 10899 if (TEs.size() == 1) { 10900 IsUsedInExpr &= 10901 FindReusedSplat(VecMask, TEs.front()->getVectorFactor()); 10902 ShuffleBuilder.add(*TEs.front(), VecMask); 10903 if (TEs.front()->VectorizedValue) 10904 IsNonPoisoned &= 10905 isGuaranteedNotToBePoison(TEs.front()->VectorizedValue); 10906 } else { 10907 IsUsedInExpr = false; 10908 ShuffleBuilder.add(*TEs.front(), *TEs.back(), VecMask); 10909 if (TEs.front()->VectorizedValue && TEs.back()->VectorizedValue) 10910 IsNonPoisoned &= 10911 isGuaranteedNotToBePoison(TEs.front()->VectorizedValue) && 10912 isGuaranteedNotToBePoison(TEs.back()->VectorizedValue); 10913 } 10914 } 10915 } 10916 // Try to figure out best way to combine values: build a shuffle and insert 10917 // elements or just build several shuffles. 10918 // Insert non-constant scalars. 10919 SmallVector<Value *> NonConstants(GatheredScalars); 10920 int EMSz = ExtractMask.size(); 10921 int MSz = Mask.size(); 10922 // Try to build constant vector and shuffle with it only if currently we 10923 // have a single permutation and more than 1 scalar constants. 
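// Illustrative note (not from the original source): e.g. for the scalars
// {%x, 3, %y, 7} the constant lanes can be materialized as one constant
// vector <poison, 3, poison, 7> and blended with the shuffled non-constant
// lanes, which is usually cheaper than inserting each constant separately;
// EnoughConstsForShuffle below gates this on having a single permutation and
// more than one meaningful constant.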
10924 bool IsSingleShuffle = ExtractShuffles.empty() || GatherShuffles.empty(); 10925 bool IsIdentityShuffle = 10926 ((UseVecBaseAsInput || 10927 all_of(ExtractShuffles, 10928 [](const std::optional<TTI::ShuffleKind> &SK) { 10929 return SK.value_or(TTI::SK_PermuteTwoSrc) == 10930 TTI::SK_PermuteSingleSrc; 10931 })) && 10932 none_of(ExtractMask, [&](int I) { return I >= EMSz; }) && 10933 ShuffleVectorInst::isIdentityMask(ExtractMask, EMSz)) || 10934 (!GatherShuffles.empty() && 10935 all_of(GatherShuffles, 10936 [](const std::optional<TTI::ShuffleKind> &SK) { 10937 return SK.value_or(TTI::SK_PermuteTwoSrc) == 10938 TTI::SK_PermuteSingleSrc; 10939 }) && 10940 none_of(Mask, [&](int I) { return I >= MSz; }) && 10941 ShuffleVectorInst::isIdentityMask(Mask, MSz)); 10942 bool EnoughConstsForShuffle = 10943 IsSingleShuffle && 10944 (none_of(GatheredScalars, 10945 [](Value *V) { 10946 return isa<UndefValue>(V) && !isa<PoisonValue>(V); 10947 }) || 10948 any_of(GatheredScalars, 10949 [](Value *V) { 10950 return isa<Constant>(V) && !isa<UndefValue>(V); 10951 })) && 10952 (!IsIdentityShuffle || 10953 (GatheredScalars.size() == 2 && 10954 any_of(GatheredScalars, 10955 [](Value *V) { return !isa<UndefValue>(V); })) || 10956 count_if(GatheredScalars, [](Value *V) { 10957 return isa<Constant>(V) && !isa<PoisonValue>(V); 10958 }) > 1); 10959 // NonConstants array contains just non-constant values, GatheredScalars 10960 // contains only constant to build final vector and then shuffle. 10961 for (int I = 0, Sz = GatheredScalars.size(); I < Sz; ++I) { 10962 if (EnoughConstsForShuffle && isa<Constant>(GatheredScalars[I])) 10963 NonConstants[I] = PoisonValue::get(ScalarTy); 10964 else 10965 GatheredScalars[I] = PoisonValue::get(ScalarTy); 10966 } 10967 // Generate constants for final shuffle and build a mask for them. 10968 if (!all_of(GatheredScalars, PoisonValue::classof)) { 10969 SmallVector<int> BVMask(GatheredScalars.size(), PoisonMaskElem); 10970 TryPackScalars(GatheredScalars, BVMask, /*IsRootPoison=*/true); 10971 Value *BV = ShuffleBuilder.gather(GatheredScalars, BVMask.size()); 10972 ShuffleBuilder.add(BV, BVMask); 10973 } 10974 if (all_of(NonConstants, [=](Value *V) { 10975 return isa<PoisonValue>(V) || 10976 (IsSingleShuffle && ((IsIdentityShuffle && 10977 IsNonPoisoned) || IsUsedInExpr) && isa<UndefValue>(V)); 10978 })) 10979 Res = ShuffleBuilder.finalize(E->ReuseShuffleIndices); 10980 else 10981 Res = ShuffleBuilder.finalize( 10982 E->ReuseShuffleIndices, E->Scalars.size(), 10983 [&](Value *&Vec, SmallVectorImpl<int> &Mask) { 10984 TryPackScalars(NonConstants, Mask, /*IsRootPoison=*/false); 10985 Vec = ShuffleBuilder.gather(NonConstants, Mask.size(), Vec); 10986 }); 10987 } else if (!allConstant(GatheredScalars)) { 10988 // Gather unique scalars and all constants. 10989 SmallVector<int> ReuseMask(GatheredScalars.size(), PoisonMaskElem); 10990 TryPackScalars(GatheredScalars, ReuseMask, /*IsRootPoison=*/true); 10991 Value *BV = ShuffleBuilder.gather(GatheredScalars, ReuseMask.size()); 10992 ShuffleBuilder.add(BV, ReuseMask); 10993 Res = ShuffleBuilder.finalize(E->ReuseShuffleIndices); 10994 } else { 10995 // Gather all constants. 
10996 SmallVector<int> Mask(E->Scalars.size(), PoisonMaskElem); 10997 for (auto [I, V] : enumerate(E->Scalars)) { 10998 if (!isa<PoisonValue>(V)) 10999 Mask[I] = I; 11000 } 11001 Value *BV = ShuffleBuilder.gather(E->Scalars); 11002 ShuffleBuilder.add(BV, Mask); 11003 Res = ShuffleBuilder.finalize(E->ReuseShuffleIndices); 11004 } 11005 11006 if (NeedFreeze) 11007 Res = ShuffleBuilder.createFreeze(Res); 11008 return Res; 11009 } 11010 11011 Value *BoUpSLP::createBuildVector(const TreeEntry *E) { 11012 return processBuildVector<ShuffleInstructionBuilder, Value *>(E, Builder, 11013 *this); 11014 } 11015 11016 Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) { 11017 IRBuilder<>::InsertPointGuard Guard(Builder); 11018 11019 if (E->VectorizedValue && 11020 (E->State != TreeEntry::Vectorize || E->getOpcode() != Instruction::PHI || 11021 E->isAltShuffle())) { 11022 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n"); 11023 return E->VectorizedValue; 11024 } 11025 11026 if (E->State == TreeEntry::NeedToGather) { 11027 // Set insert point for non-reduction initial nodes. 11028 if (E->getMainOp() && E->Idx == 0 && !UserIgnoreList) 11029 setInsertPointAfterBundle(E); 11030 Value *Vec = createBuildVector(E); 11031 E->VectorizedValue = Vec; 11032 return Vec; 11033 } 11034 11035 auto FinalShuffle = [&](Value *V, const TreeEntry *E, VectorType *VecTy, 11036 bool IsSigned) { 11037 if (V->getType() != VecTy) 11038 V = Builder.CreateIntCast(V, VecTy, IsSigned); 11039 ShuffleInstructionBuilder ShuffleBuilder(Builder, *this); 11040 if (E->getOpcode() == Instruction::Store) { 11041 ArrayRef<int> Mask = 11042 ArrayRef(reinterpret_cast<const int *>(E->ReorderIndices.begin()), 11043 E->ReorderIndices.size()); 11044 ShuffleBuilder.add(V, Mask); 11045 } else if (E->State == TreeEntry::PossibleStridedVectorize) { 11046 ShuffleBuilder.addOrdered(V, std::nullopt); 11047 } else { 11048 ShuffleBuilder.addOrdered(V, E->ReorderIndices); 11049 } 11050 return ShuffleBuilder.finalize(E->ReuseShuffleIndices); 11051 }; 11052 11053 assert((E->State == TreeEntry::Vectorize || 11054 E->State == TreeEntry::ScatterVectorize || 11055 E->State == TreeEntry::PossibleStridedVectorize) && 11056 "Unhandled state"); 11057 unsigned ShuffleOrOp = 11058 E->isAltShuffle() ? 
(unsigned)Instruction::ShuffleVector : E->getOpcode(); 11059 Instruction *VL0 = E->getMainOp(); 11060 Type *ScalarTy = VL0->getType(); 11061 if (auto *Store = dyn_cast<StoreInst>(VL0)) 11062 ScalarTy = Store->getValueOperand()->getType(); 11063 else if (auto *IE = dyn_cast<InsertElementInst>(VL0)) 11064 ScalarTy = IE->getOperand(1)->getType(); 11065 bool IsSigned = false; 11066 auto It = MinBWs.find(E); 11067 if (It != MinBWs.end()) { 11068 ScalarTy = IntegerType::get(F->getContext(), It->second.first); 11069 IsSigned = It->second.second; 11070 } 11071 auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size()); 11072 switch (ShuffleOrOp) { 11073 case Instruction::PHI: { 11074 assert((E->ReorderIndices.empty() || 11075 E != VectorizableTree.front().get() || 11076 !E->UserTreeIndices.empty()) && 11077 "PHI reordering is free."); 11078 if (PostponedPHIs && E->VectorizedValue) 11079 return E->VectorizedValue; 11080 auto *PH = cast<PHINode>(VL0); 11081 Builder.SetInsertPoint(PH->getParent(), 11082 PH->getParent()->getFirstNonPHIIt()); 11083 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 11084 if (PostponedPHIs || !E->VectorizedValue) { 11085 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues()); 11086 E->PHI = NewPhi; 11087 Value *V = NewPhi; 11088 11089 // Adjust insertion point once all PHI's have been generated. 11090 Builder.SetInsertPoint(PH->getParent(), 11091 PH->getParent()->getFirstInsertionPt()); 11092 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 11093 11094 V = FinalShuffle(V, E, VecTy, IsSigned); 11095 11096 E->VectorizedValue = V; 11097 if (PostponedPHIs) 11098 return V; 11099 } 11100 PHINode *NewPhi = cast<PHINode>(E->PHI); 11101 // If phi node is fully emitted - exit. 11102 if (NewPhi->getNumIncomingValues() != 0) 11103 return NewPhi; 11104 11105 // PHINodes may have multiple entries from the same block. We want to 11106 // visit every block once. 11107 SmallPtrSet<BasicBlock *, 4> VisitedBBs; 11108 11109 for (unsigned I : seq<unsigned>(0, PH->getNumIncomingValues())) { 11110 ValueList Operands; 11111 BasicBlock *IBB = PH->getIncomingBlock(I); 11112 11113 // Stop emission if all incoming values are generated. 
11114 if (NewPhi->getNumIncomingValues() == PH->getNumIncomingValues()) { 11115 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11116 return NewPhi; 11117 } 11118 11119 if (!VisitedBBs.insert(IBB).second) { 11120 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB); 11121 continue; 11122 } 11123 11124 Builder.SetInsertPoint(IBB->getTerminator()); 11125 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 11126 Value *Vec = vectorizeOperand(E, I, /*PostponedPHIs=*/true); 11127 if (VecTy != Vec->getType()) { 11128 assert(MinBWs.contains(getOperandEntry(E, I)) && 11129 "Expected item in MinBWs."); 11130 Vec = Builder.CreateIntCast(Vec, VecTy, It->second.second); 11131 } 11132 NewPhi->addIncoming(Vec, IBB); 11133 } 11134 11135 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() && 11136 "Invalid number of incoming values"); 11137 return NewPhi; 11138 } 11139 11140 case Instruction::ExtractElement: { 11141 Value *V = E->getSingleOperand(0); 11142 setInsertPointAfterBundle(E); 11143 V = FinalShuffle(V, E, VecTy, IsSigned); 11144 E->VectorizedValue = V; 11145 return V; 11146 } 11147 case Instruction::ExtractValue: { 11148 auto *LI = cast<LoadInst>(E->getSingleOperand(0)); 11149 Builder.SetInsertPoint(LI); 11150 Value *Ptr = LI->getPointerOperand(); 11151 LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign()); 11152 Value *NewV = propagateMetadata(V, E->Scalars); 11153 NewV = FinalShuffle(NewV, E, VecTy, IsSigned); 11154 E->VectorizedValue = NewV; 11155 return NewV; 11156 } 11157 case Instruction::InsertElement: { 11158 assert(E->ReuseShuffleIndices.empty() && "All inserts should be unique"); 11159 Builder.SetInsertPoint(cast<Instruction>(E->Scalars.back())); 11160 Value *V = vectorizeOperand(E, 1, PostponedPHIs); 11161 ArrayRef<Value *> Op = E->getOperand(1); 11162 Type *ScalarTy = Op.front()->getType(); 11163 if (cast<VectorType>(V->getType())->getElementType() != ScalarTy) { 11164 assert(ScalarTy->isIntegerTy() && "Expected item in MinBWs."); 11165 std::pair<unsigned, bool> Res = MinBWs.lookup(getOperandEntry(E, 1)); 11166 assert(Res.first > 0 && "Expected item in MinBWs."); 11167 V = Builder.CreateIntCast( 11168 V, 11169 FixedVectorType::get( 11170 ScalarTy, 11171 cast<FixedVectorType>(V->getType())->getNumElements()), 11172 Res.second); 11173 } 11174 11175 // Create InsertVector shuffle if necessary 11176 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) { 11177 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0)); 11178 })); 11179 const unsigned NumElts = 11180 cast<FixedVectorType>(FirstInsert->getType())->getNumElements(); 11181 const unsigned NumScalars = E->Scalars.size(); 11182 11183 unsigned Offset = *getInsertIndex(VL0); 11184 assert(Offset < NumElts && "Failed to find vector index offset"); 11185 11186 // Create shuffle to resize vector 11187 SmallVector<int> Mask; 11188 if (!E->ReorderIndices.empty()) { 11189 inversePermutation(E->ReorderIndices, Mask); 11190 Mask.append(NumElts - NumScalars, PoisonMaskElem); 11191 } else { 11192 Mask.assign(NumElts, PoisonMaskElem); 11193 std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0); 11194 } 11195 // Create InsertVector shuffle if necessary 11196 bool IsIdentity = true; 11197 SmallVector<int> PrevMask(NumElts, PoisonMaskElem); 11198 Mask.swap(PrevMask); 11199 for (unsigned I = 0; I < NumScalars; ++I) { 11200 Value *Scalar = E->Scalars[PrevMask[I]]; 11201 unsigned InsertIdx = *getInsertIndex(Scalar); 11202 IsIdentity &= InsertIdx - 
Offset == I; 11203 Mask[InsertIdx - Offset] = I; 11204 } 11205 if (!IsIdentity || NumElts != NumScalars) { 11206 Value *V2 = nullptr; 11207 bool IsVNonPoisonous = isGuaranteedNotToBePoison(V) && !isConstant(V); 11208 SmallVector<int> InsertMask(Mask); 11209 if (NumElts != NumScalars && Offset == 0) { 11210 // Follow all insert element instructions from the current buildvector 11211 // sequence. 11212 InsertElementInst *Ins = cast<InsertElementInst>(VL0); 11213 do { 11214 std::optional<unsigned> InsertIdx = getInsertIndex(Ins); 11215 if (!InsertIdx) 11216 break; 11217 if (InsertMask[*InsertIdx] == PoisonMaskElem) 11218 InsertMask[*InsertIdx] = *InsertIdx; 11219 if (!Ins->hasOneUse()) 11220 break; 11221 Ins = dyn_cast_or_null<InsertElementInst>( 11222 Ins->getUniqueUndroppableUser()); 11223 } while (Ins); 11224 SmallBitVector UseMask = 11225 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask); 11226 SmallBitVector IsFirstPoison = 11227 isUndefVector<true>(FirstInsert->getOperand(0), UseMask); 11228 SmallBitVector IsFirstUndef = 11229 isUndefVector(FirstInsert->getOperand(0), UseMask); 11230 if (!IsFirstPoison.all()) { 11231 unsigned Idx = 0; 11232 for (unsigned I = 0; I < NumElts; I++) { 11233 if (InsertMask[I] == PoisonMaskElem && !IsFirstPoison.test(I) && 11234 IsFirstUndef.test(I)) { 11235 if (IsVNonPoisonous) { 11236 InsertMask[I] = I < NumScalars ? I : 0; 11237 continue; 11238 } 11239 if (!V2) 11240 V2 = UndefValue::get(V->getType()); 11241 if (Idx >= NumScalars) 11242 Idx = NumScalars - 1; 11243 InsertMask[I] = NumScalars + Idx; 11244 ++Idx; 11245 } else if (InsertMask[I] != PoisonMaskElem && 11246 Mask[I] == PoisonMaskElem) { 11247 InsertMask[I] = PoisonMaskElem; 11248 } 11249 } 11250 } else { 11251 InsertMask = Mask; 11252 } 11253 } 11254 if (!V2) 11255 V2 = PoisonValue::get(V->getType()); 11256 V = Builder.CreateShuffleVector(V, V2, InsertMask); 11257 if (auto *I = dyn_cast<Instruction>(V)) { 11258 GatherShuffleExtractSeq.insert(I); 11259 CSEBlocks.insert(I->getParent()); 11260 } 11261 } 11262 11263 SmallVector<int> InsertMask(NumElts, PoisonMaskElem); 11264 for (unsigned I = 0; I < NumElts; I++) { 11265 if (Mask[I] != PoisonMaskElem) 11266 InsertMask[Offset + I] = I; 11267 } 11268 SmallBitVector UseMask = 11269 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask); 11270 SmallBitVector IsFirstUndef = 11271 isUndefVector(FirstInsert->getOperand(0), UseMask); 11272 if ((!IsIdentity || Offset != 0 || !IsFirstUndef.all()) && 11273 NumElts != NumScalars) { 11274 if (IsFirstUndef.all()) { 11275 if (!ShuffleVectorInst::isIdentityMask(InsertMask, NumElts)) { 11276 SmallBitVector IsFirstPoison = 11277 isUndefVector<true>(FirstInsert->getOperand(0), UseMask); 11278 if (!IsFirstPoison.all()) { 11279 for (unsigned I = 0; I < NumElts; I++) { 11280 if (InsertMask[I] == PoisonMaskElem && !IsFirstPoison.test(I)) 11281 InsertMask[I] = I + NumElts; 11282 } 11283 } 11284 V = Builder.CreateShuffleVector( 11285 V, 11286 IsFirstPoison.all() ? PoisonValue::get(V->getType()) 11287 : FirstInsert->getOperand(0), 11288 InsertMask, cast<Instruction>(E->Scalars.back())->getName()); 11289 if (auto *I = dyn_cast<Instruction>(V)) { 11290 GatherShuffleExtractSeq.insert(I); 11291 CSEBlocks.insert(I->getParent()); 11292 } 11293 } 11294 } else { 11295 SmallBitVector IsFirstPoison = 11296 isUndefVector<true>(FirstInsert->getOperand(0), UseMask); 11297 for (unsigned I = 0; I < NumElts; I++) { 11298 if (InsertMask[I] == PoisonMaskElem) 11299 InsertMask[I] = IsFirstPoison.test(I) ? 
PoisonMaskElem : I; 11300 else 11301 InsertMask[I] += NumElts; 11302 } 11303 V = Builder.CreateShuffleVector( 11304 FirstInsert->getOperand(0), V, InsertMask, 11305 cast<Instruction>(E->Scalars.back())->getName()); 11306 if (auto *I = dyn_cast<Instruction>(V)) { 11307 GatherShuffleExtractSeq.insert(I); 11308 CSEBlocks.insert(I->getParent()); 11309 } 11310 } 11311 } 11312 11313 ++NumVectorInstructions; 11314 E->VectorizedValue = V; 11315 return V; 11316 } 11317 case Instruction::ZExt: 11318 case Instruction::SExt: 11319 case Instruction::FPToUI: 11320 case Instruction::FPToSI: 11321 case Instruction::FPExt: 11322 case Instruction::PtrToInt: 11323 case Instruction::IntToPtr: 11324 case Instruction::SIToFP: 11325 case Instruction::UIToFP: 11326 case Instruction::Trunc: 11327 case Instruction::FPTrunc: 11328 case Instruction::BitCast: { 11329 setInsertPointAfterBundle(E); 11330 11331 Value *InVec = vectorizeOperand(E, 0, PostponedPHIs); 11332 if (E->VectorizedValue) { 11333 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11334 return E->VectorizedValue; 11335 } 11336 11337 auto *CI = cast<CastInst>(VL0); 11338 Instruction::CastOps VecOpcode = CI->getOpcode(); 11339 Type *SrcScalarTy = VL0->getOperand(0)->getType(); 11340 auto SrcIt = MinBWs.find(getOperandEntry(E, 0)); 11341 if (!ScalarTy->isFloatingPointTy() && !SrcScalarTy->isFloatingPointTy() && 11342 (SrcIt != MinBWs.end() || It != MinBWs.end())) { 11343 // Check if the values are candidates to demote. 11344 unsigned SrcBWSz = DL->getTypeSizeInBits(SrcScalarTy); 11345 if (SrcIt != MinBWs.end()) 11346 SrcBWSz = SrcIt->second.first; 11347 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy); 11348 if (BWSz == SrcBWSz) { 11349 VecOpcode = Instruction::BitCast; 11350 } else if (BWSz < SrcBWSz) { 11351 VecOpcode = Instruction::Trunc; 11352 } else if (It != MinBWs.end()) { 11353 assert(BWSz > SrcBWSz && "Invalid cast!"); 11354 VecOpcode = It->second.second ? Instruction::SExt : Instruction::ZExt; 11355 } 11356 } 11357 Value *V = (VecOpcode != ShuffleOrOp && VecOpcode == Instruction::BitCast) 11358 ? InVec 11359 : Builder.CreateCast(VecOpcode, InVec, VecTy); 11360 V = FinalShuffle(V, E, VecTy, IsSigned); 11361 11362 E->VectorizedValue = V; 11363 ++NumVectorInstructions; 11364 return V; 11365 } 11366 case Instruction::FCmp: 11367 case Instruction::ICmp: { 11368 setInsertPointAfterBundle(E); 11369 11370 Value *L = vectorizeOperand(E, 0, PostponedPHIs); 11371 if (E->VectorizedValue) { 11372 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11373 return E->VectorizedValue; 11374 } 11375 Value *R = vectorizeOperand(E, 1, PostponedPHIs); 11376 if (E->VectorizedValue) { 11377 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11378 return E->VectorizedValue; 11379 } 11380 if (L->getType() != R->getType()) { 11381 assert((MinBWs.contains(getOperandEntry(E, 0)) || 11382 MinBWs.contains(getOperandEntry(E, 1))) && 11383 "Expected item in MinBWs."); 11384 L = Builder.CreateIntCast(L, VecTy, IsSigned); 11385 R = Builder.CreateIntCast(R, VecTy, IsSigned); 11386 } 11387 11388 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 11389 Value *V = Builder.CreateCmp(P0, L, R); 11390 propagateIRFlags(V, E->Scalars, VL0); 11391 // Do not cast for cmps. 
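// Illustrative note (not from the original source): a vector compare always
// produces <N x i1> regardless of any MinBWs narrowing of its operands
// (e.g. an icmp of <4 x i16> operands yields <4 x i1>), so the result type
// is refreshed from the emitted instruction instead of casting to VecTy.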
11392 VecTy = cast<FixedVectorType>(V->getType()); 11393 V = FinalShuffle(V, E, VecTy, IsSigned); 11394 11395 E->VectorizedValue = V; 11396 ++NumVectorInstructions; 11397 return V; 11398 } 11399 case Instruction::Select: { 11400 setInsertPointAfterBundle(E); 11401 11402 Value *Cond = vectorizeOperand(E, 0, PostponedPHIs); 11403 if (E->VectorizedValue) { 11404 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11405 return E->VectorizedValue; 11406 } 11407 Value *True = vectorizeOperand(E, 1, PostponedPHIs); 11408 if (E->VectorizedValue) { 11409 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11410 return E->VectorizedValue; 11411 } 11412 Value *False = vectorizeOperand(E, 2, PostponedPHIs); 11413 if (E->VectorizedValue) { 11414 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11415 return E->VectorizedValue; 11416 } 11417 if (True->getType() != False->getType()) { 11418 assert((MinBWs.contains(getOperandEntry(E, 1)) || 11419 MinBWs.contains(getOperandEntry(E, 2))) && 11420 "Expected item in MinBWs."); 11421 True = Builder.CreateIntCast(True, VecTy, IsSigned); 11422 False = Builder.CreateIntCast(False, VecTy, IsSigned); 11423 } 11424 11425 Value *V = Builder.CreateSelect(Cond, True, False); 11426 V = FinalShuffle(V, E, VecTy, IsSigned); 11427 11428 E->VectorizedValue = V; 11429 ++NumVectorInstructions; 11430 return V; 11431 } 11432 case Instruction::FNeg: { 11433 setInsertPointAfterBundle(E); 11434 11435 Value *Op = vectorizeOperand(E, 0, PostponedPHIs); 11436 11437 if (E->VectorizedValue) { 11438 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11439 return E->VectorizedValue; 11440 } 11441 11442 Value *V = Builder.CreateUnOp( 11443 static_cast<Instruction::UnaryOps>(E->getOpcode()), Op); 11444 propagateIRFlags(V, E->Scalars, VL0); 11445 if (auto *I = dyn_cast<Instruction>(V)) 11446 V = propagateMetadata(I, E->Scalars); 11447 11448 V = FinalShuffle(V, E, VecTy, IsSigned); 11449 11450 E->VectorizedValue = V; 11451 ++NumVectorInstructions; 11452 11453 return V; 11454 } 11455 case Instruction::Add: 11456 case Instruction::FAdd: 11457 case Instruction::Sub: 11458 case Instruction::FSub: 11459 case Instruction::Mul: 11460 case Instruction::FMul: 11461 case Instruction::UDiv: 11462 case Instruction::SDiv: 11463 case Instruction::FDiv: 11464 case Instruction::URem: 11465 case Instruction::SRem: 11466 case Instruction::FRem: 11467 case Instruction::Shl: 11468 case Instruction::LShr: 11469 case Instruction::AShr: 11470 case Instruction::And: 11471 case Instruction::Or: 11472 case Instruction::Xor: { 11473 setInsertPointAfterBundle(E); 11474 11475 Value *LHS = vectorizeOperand(E, 0, PostponedPHIs); 11476 if (E->VectorizedValue) { 11477 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11478 return E->VectorizedValue; 11479 } 11480 Value *RHS = vectorizeOperand(E, 1, PostponedPHIs); 11481 if (E->VectorizedValue) { 11482 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11483 return E->VectorizedValue; 11484 } 11485 if (LHS->getType() != RHS->getType()) { 11486 assert((MinBWs.contains(getOperandEntry(E, 0)) || 11487 MinBWs.contains(getOperandEntry(E, 1))) && 11488 "Expected item in MinBWs."); 11489 LHS = Builder.CreateIntCast(LHS, VecTy, IsSigned); 11490 RHS = Builder.CreateIntCast(RHS, VecTy, IsSigned); 11491 } 11492 11493 Value *V = Builder.CreateBinOp( 11494 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, 11495 RHS); 11496 propagateIRFlags(V, E->Scalars, VL0, !MinBWs.contains(E)); 
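// Illustrative note (not from the original source): propagateIRFlags()
// intersects the flags of the scalar bundle onto the vector instruction, so
// the vector op is only marked e.g. nsw/nuw if every scalar op had that flag;
// the extra argument above suppresses wrap-flag propagation when this entry
// was narrowed via MinBWs, since those flags may not hold in the smaller type.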
11497 if (auto *I = dyn_cast<Instruction>(V)) 11498 V = propagateMetadata(I, E->Scalars); 11499 11500 V = FinalShuffle(V, E, VecTy, IsSigned); 11501 11502 E->VectorizedValue = V; 11503 ++NumVectorInstructions; 11504 11505 return V; 11506 } 11507 case Instruction::Load: { 11508 // Loads are inserted at the head of the tree because we don't want to 11509 // sink them all the way down past store instructions. 11510 setInsertPointAfterBundle(E); 11511 11512 LoadInst *LI = cast<LoadInst>(VL0); 11513 Instruction *NewLI; 11514 Value *PO = LI->getPointerOperand(); 11515 if (E->State == TreeEntry::Vectorize) { 11516 NewLI = Builder.CreateAlignedLoad(VecTy, PO, LI->getAlign()); 11517 } else { 11518 assert((E->State == TreeEntry::ScatterVectorize || 11519 E->State == TreeEntry::PossibleStridedVectorize) && 11520 "Unhandled state"); 11521 Value *VecPtr = vectorizeOperand(E, 0, PostponedPHIs); 11522 if (E->VectorizedValue) { 11523 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11524 return E->VectorizedValue; 11525 } 11526 // Use the minimum alignment of the gathered loads. 11527 Align CommonAlignment = LI->getAlign(); 11528 for (Value *V : E->Scalars) 11529 CommonAlignment = 11530 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign()); 11531 NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment); 11532 } 11533 Value *V = propagateMetadata(NewLI, E->Scalars); 11534 11535 V = FinalShuffle(V, E, VecTy, IsSigned); 11536 E->VectorizedValue = V; 11537 ++NumVectorInstructions; 11538 return V; 11539 } 11540 case Instruction::Store: { 11541 auto *SI = cast<StoreInst>(VL0); 11542 11543 setInsertPointAfterBundle(E); 11544 11545 Value *VecValue = vectorizeOperand(E, 0, PostponedPHIs); 11546 VecValue = FinalShuffle(VecValue, E, VecTy, IsSigned); 11547 11548 Value *Ptr = SI->getPointerOperand(); 11549 StoreInst *ST = 11550 Builder.CreateAlignedStore(VecValue, Ptr, SI->getAlign()); 11551 11552 Value *V = propagateMetadata(ST, E->Scalars); 11553 11554 E->VectorizedValue = V; 11555 ++NumVectorInstructions; 11556 return V; 11557 } 11558 case Instruction::GetElementPtr: { 11559 auto *GEP0 = cast<GetElementPtrInst>(VL0); 11560 setInsertPointAfterBundle(E); 11561 11562 Value *Op0 = vectorizeOperand(E, 0, PostponedPHIs); 11563 if (E->VectorizedValue) { 11564 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11565 return E->VectorizedValue; 11566 } 11567 11568 SmallVector<Value *> OpVecs; 11569 for (int J = 1, N = GEP0->getNumOperands(); J < N; ++J) { 11570 Value *OpVec = vectorizeOperand(E, J, PostponedPHIs); 11571 if (E->VectorizedValue) { 11572 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11573 return E->VectorizedValue; 11574 } 11575 OpVecs.push_back(OpVec); 11576 } 11577 11578 Value *V = Builder.CreateGEP(GEP0->getSourceElementType(), Op0, OpVecs); 11579 if (Instruction *I = dyn_cast<GetElementPtrInst>(V)) { 11580 SmallVector<Value *> GEPs; 11581 for (Value *V : E->Scalars) { 11582 if (isa<GetElementPtrInst>(V)) 11583 GEPs.push_back(V); 11584 } 11585 V = propagateMetadata(I, GEPs); 11586 } 11587 11588 V = FinalShuffle(V, E, VecTy, IsSigned); 11589 11590 E->VectorizedValue = V; 11591 ++NumVectorInstructions; 11592 11593 return V; 11594 } 11595 case Instruction::Call: { 11596 CallInst *CI = cast<CallInst>(VL0); 11597 setInsertPointAfterBundle(E); 11598 11599 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 11600 11601 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); 11602 bool UseIntrinsic = ID != Intrinsic::not_intrinsic && 
11603 VecCallCosts.first <= VecCallCosts.second; 11604 11605 Value *ScalarArg = nullptr; 11606 SmallVector<Value *> OpVecs; 11607 SmallVector<Type *, 2> TysForDecl; 11608 // Add return type if intrinsic is overloaded on it. 11609 if (UseIntrinsic && isVectorIntrinsicWithOverloadTypeAtArg(ID, -1)) 11610 TysForDecl.push_back( 11611 FixedVectorType::get(CI->getType(), E->Scalars.size())); 11612 for (unsigned I : seq<unsigned>(0, CI->arg_size())) { 11613 ValueList OpVL; 11614 // Some intrinsics have scalar arguments. This argument should not be 11615 // vectorized. 11616 if (UseIntrinsic && isVectorIntrinsicWithScalarOpAtArg(ID, I)) { 11617 CallInst *CEI = cast<CallInst>(VL0); 11618 ScalarArg = CEI->getArgOperand(I); 11619 OpVecs.push_back(CEI->getArgOperand(I)); 11620 if (isVectorIntrinsicWithOverloadTypeAtArg(ID, I)) 11621 TysForDecl.push_back(ScalarArg->getType()); 11622 continue; 11623 } 11624 11625 Value *OpVec = vectorizeOperand(E, I, PostponedPHIs); 11626 if (E->VectorizedValue) { 11627 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11628 return E->VectorizedValue; 11629 } 11630 LLVM_DEBUG(dbgs() << "SLP: OpVec[" << I << "]: " << *OpVec << "\n"); 11631 OpVecs.push_back(OpVec); 11632 if (UseIntrinsic && isVectorIntrinsicWithOverloadTypeAtArg(ID, I)) 11633 TysForDecl.push_back(OpVec->getType()); 11634 } 11635 11636 Function *CF; 11637 if (!UseIntrinsic) { 11638 VFShape Shape = 11639 VFShape::get(CI->getFunctionType(), 11640 ElementCount::getFixed( 11641 static_cast<unsigned>(VecTy->getNumElements())), 11642 false /*HasGlobalPred*/); 11643 CF = VFDatabase(*CI).getVectorizedFunction(Shape); 11644 } else { 11645 CF = Intrinsic::getDeclaration(F->getParent(), ID, TysForDecl); 11646 } 11647 11648 SmallVector<OperandBundleDef, 1> OpBundles; 11649 CI->getOperandBundlesAsDefs(OpBundles); 11650 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles); 11651 11652 propagateIRFlags(V, E->Scalars, VL0); 11653 V = FinalShuffle(V, E, VecTy, IsSigned); 11654 11655 E->VectorizedValue = V; 11656 ++NumVectorInstructions; 11657 return V; 11658 } 11659 case Instruction::ShuffleVector: { 11660 assert(E->isAltShuffle() && 11661 ((Instruction::isBinaryOp(E->getOpcode()) && 11662 Instruction::isBinaryOp(E->getAltOpcode())) || 11663 (Instruction::isCast(E->getOpcode()) && 11664 Instruction::isCast(E->getAltOpcode())) || 11665 (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) && 11666 "Invalid Shuffle Vector Operand"); 11667 11668 Value *LHS = nullptr, *RHS = nullptr; 11669 if (Instruction::isBinaryOp(E->getOpcode()) || isa<CmpInst>(VL0)) { 11670 setInsertPointAfterBundle(E); 11671 LHS = vectorizeOperand(E, 0, PostponedPHIs); 11672 if (E->VectorizedValue) { 11673 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11674 return E->VectorizedValue; 11675 } 11676 RHS = vectorizeOperand(E, 1, PostponedPHIs); 11677 } else { 11678 setInsertPointAfterBundle(E); 11679 LHS = vectorizeOperand(E, 0, PostponedPHIs); 11680 } 11681 if (E->VectorizedValue) { 11682 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11683 return E->VectorizedValue; 11684 } 11685 if (LHS && RHS && LHS->getType() != RHS->getType()) { 11686 assert((MinBWs.contains(getOperandEntry(E, 0)) || 11687 MinBWs.contains(getOperandEntry(E, 1))) && 11688 "Expected item in MinBWs."); 11689 LHS = Builder.CreateIntCast(LHS, VecTy, IsSigned); 11690 RHS = Builder.CreateIntCast(RHS, VecTy, IsSigned); 11691 } 11692 11693 Value *V0, *V1; 11694 if (Instruction::isBinaryOp(E->getOpcode())) { 11695 V0 = Builder.CreateBinOp( 
11696 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS); 11697 V1 = Builder.CreateBinOp( 11698 static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS); 11699 } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) { 11700 V0 = Builder.CreateCmp(CI0->getPredicate(), LHS, RHS); 11701 auto *AltCI = cast<CmpInst>(E->getAltOp()); 11702 CmpInst::Predicate AltPred = AltCI->getPredicate(); 11703 V1 = Builder.CreateCmp(AltPred, LHS, RHS); 11704 } else { 11705 V0 = Builder.CreateCast( 11706 static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy); 11707 V1 = Builder.CreateCast( 11708 static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy); 11709 } 11710 // Add V0 and V1 to later analysis to try to find and remove matching 11711 // instruction, if any. 11712 for (Value *V : {V0, V1}) { 11713 if (auto *I = dyn_cast<Instruction>(V)) { 11714 GatherShuffleExtractSeq.insert(I); 11715 CSEBlocks.insert(I->getParent()); 11716 } 11717 } 11718 11719 // Create shuffle to take alternate operations from the vector. 11720 // Also, gather up main and alt scalar ops to propagate IR flags to 11721 // each vector operation. 11722 ValueList OpScalars, AltScalars; 11723 SmallVector<int> Mask; 11724 E->buildAltOpShuffleMask( 11725 [E, this](Instruction *I) { 11726 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 11727 return isAlternateInstruction(I, E->getMainOp(), E->getAltOp(), 11728 *TLI); 11729 }, 11730 Mask, &OpScalars, &AltScalars); 11731 11732 propagateIRFlags(V0, OpScalars); 11733 propagateIRFlags(V1, AltScalars); 11734 11735 Value *V = Builder.CreateShuffleVector(V0, V1, Mask); 11736 if (auto *I = dyn_cast<Instruction>(V)) { 11737 V = propagateMetadata(I, E->Scalars); 11738 GatherShuffleExtractSeq.insert(I); 11739 CSEBlocks.insert(I->getParent()); 11740 } 11741 11742 if (V->getType() != VecTy && !isa<CmpInst>(VL0)) 11743 V = Builder.CreateIntCast( 11744 V, FixedVectorType::get(ScalarTy, E->getVectorFactor()), IsSigned); 11745 E->VectorizedValue = V; 11746 ++NumVectorInstructions; 11747 11748 return V; 11749 } 11750 default: 11751 llvm_unreachable("unknown inst"); 11752 } 11753 return nullptr; 11754 } 11755 11756 Value *BoUpSLP::vectorizeTree() { 11757 ExtraValueToDebugLocsMap ExternallyUsedValues; 11758 SmallVector<std::pair<Value *, Value *>> ReplacedExternals; 11759 return vectorizeTree(ExternallyUsedValues, ReplacedExternals); 11760 } 11761 11762 namespace { 11763 /// Data type for handling buildvector sequences with the reused scalars from 11764 /// other tree entries. 11765 struct ShuffledInsertData { 11766 /// List of insertelements to be replaced by shuffles. 11767 SmallVector<InsertElementInst *> InsertElements; 11768 /// The parent vectors and shuffle mask for the given list of inserts. 11769 MapVector<Value *, SmallVector<int>> ValueMasks; 11770 }; 11771 } // namespace 11772 11773 Value *BoUpSLP::vectorizeTree( 11774 const ExtraValueToDebugLocsMap &ExternallyUsedValues, 11775 SmallVectorImpl<std::pair<Value *, Value *>> &ReplacedExternals, 11776 Instruction *ReductionRoot) { 11777 // All blocks must be scheduled before any instructions are inserted. 11778 for (auto &BSIter : BlocksSchedules) { 11779 scheduleBlock(BSIter.second.get()); 11780 } 11781 // Clean Entry-to-LastInstruction table. It can be affected after scheduling, 11782 // need to rebuild it. 
11783   EntryToLastInstruction.clear();
11784
11785   if (ReductionRoot)
11786     Builder.SetInsertPoint(ReductionRoot->getParent(),
11787                            ReductionRoot->getIterator());
11788   else
11789     Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin());
11790
11791   // Postpone emission of PHI operands to avoid cyclic dependency issues.
11792   (void)vectorizeTree(VectorizableTree[0].get(), /*PostponedPHIs=*/true);
11793   for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree)
11794     if (TE->State == TreeEntry::Vectorize &&
11795         TE->getOpcode() == Instruction::PHI && !TE->isAltShuffle() &&
11796         TE->VectorizedValue)
11797       (void)vectorizeTree(TE.get(), /*PostponedPHIs=*/false);
11798   // Run through the list of postponed gathers and emit them, replacing the
11799   // temporarily emitted stub instructions with actual vector instructions.
11800   ArrayRef<const TreeEntry *> PostponedNodes = PostponedGathers.getArrayRef();
11801   DenseMap<Value *, SmallVector<TreeEntry *>> PostponedValues;
11802   for (const TreeEntry *E : PostponedNodes) {
11803     auto *TE = const_cast<TreeEntry *>(E);
11804     if (auto *VecTE = getTreeEntry(TE->Scalars.front()))
11805       if (VecTE->isSame(TE->UserTreeIndices.front().UserTE->getOperand(
11806               TE->UserTreeIndices.front().EdgeIdx)))
11807         // Found a gather node which is exactly the same as one of the
11808         // vectorized nodes. It may happen after reordering.
11809         continue;
11810     auto *PrevVec = cast<Instruction>(TE->VectorizedValue);
11811     TE->VectorizedValue = nullptr;
11812     auto *UserI =
11813         cast<Instruction>(TE->UserTreeIndices.front().UserTE->VectorizedValue);
11814     // If the user is a PHI node, its vector code has to be inserted right
11815     // before the block terminator. Since the node was delayed, there were
11816     // some unresolved dependencies when the stub instruction was emitted. If
11817     // any of these dependencies turns out to be an operand of another PHI
11818     // coming from this same block, the position of the stub instruction
11819     // becomes invalid, because the source vector that is supposed to feed
11820     // this gather node was inserted at the end of the block [after the stub
11821     // instruction]. So we need to adjust the insertion point to the block end.
11822     if (isa<PHINode>(UserI)) {
11823       // Insert before all users.
11824       Instruction *InsertPt = PrevVec->getParent()->getTerminator();
11825       for (User *U : PrevVec->users()) {
11826         if (U == UserI)
11827           continue;
11828         auto *UI = dyn_cast<Instruction>(U);
11829         if (!UI || isa<PHINode>(UI) || UI->getParent() != InsertPt->getParent())
11830           continue;
11831         if (UI->comesBefore(InsertPt))
11832           InsertPt = UI;
11833       }
11834       Builder.SetInsertPoint(InsertPt);
11835     } else {
11836       Builder.SetInsertPoint(PrevVec);
11837     }
11838     Builder.SetCurrentDebugLocation(UserI->getDebugLoc());
11839     Value *Vec = vectorizeTree(TE, /*PostponedPHIs=*/false);
11840     PrevVec->replaceAllUsesWith(Vec);
11841     PostponedValues.try_emplace(Vec).first->second.push_back(TE);
11842     // Replace the stub vector node if it was already used for one of the
11843     // buildvector nodes.
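// Illustrative note (not from the original source): PostponedValues records
// which postponed entries currently hold a given emitted value as their
// VectorizedValue, so if the stub being replaced here (PrevVec) was recorded
// that way for an earlier entry, that entry is redirected to the new vector
// below before the stub instruction is erased.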
11844 auto It = PostponedValues.find(PrevVec); 11845 if (It != PostponedValues.end()) { 11846 for (TreeEntry *VTE : It->getSecond()) 11847 VTE->VectorizedValue = Vec; 11848 } 11849 eraseInstruction(PrevVec); 11850 } 11851 11852 LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() 11853 << " values .\n"); 11854 11855 SmallVector<ShuffledInsertData> ShuffledInserts; 11856 // Maps vector instruction to original insertelement instruction 11857 DenseMap<Value *, InsertElementInst *> VectorToInsertElement; 11858 // Maps extract Scalar to the corresponding extractelement instruction in the 11859 // basic block. Only one extractelement per block should be emitted. 11860 DenseMap<Value *, DenseMap<BasicBlock *, Instruction *>> ScalarToEEs; 11861 SmallDenseSet<Value *, 4> UsedInserts; 11862 DenseMap<Value *, Value *> VectorCasts; 11863 SmallDenseSet<Value *, 4> ScalarsWithNullptrUser; 11864 // Extract all of the elements with the external uses. 11865 for (const auto &ExternalUse : ExternalUses) { 11866 Value *Scalar = ExternalUse.Scalar; 11867 llvm::User *User = ExternalUse.User; 11868 11869 // Skip users that we already RAUW. This happens when one instruction 11870 // has multiple uses of the same value. 11871 if (User && !is_contained(Scalar->users(), User)) 11872 continue; 11873 TreeEntry *E = getTreeEntry(Scalar); 11874 assert(E && "Invalid scalar"); 11875 assert(E->State != TreeEntry::NeedToGather && 11876 "Extracting from a gather list"); 11877 // Non-instruction pointers are not deleted, just skip them. 11878 if (E->getOpcode() == Instruction::GetElementPtr && 11879 !isa<GetElementPtrInst>(Scalar)) 11880 continue; 11881 11882 Value *Vec = E->VectorizedValue; 11883 assert(Vec && "Can't find vectorizable value"); 11884 11885 Value *Lane = Builder.getInt32(ExternalUse.Lane); 11886 auto ExtractAndExtendIfNeeded = [&](Value *Vec) { 11887 if (Scalar->getType() != Vec->getType()) { 11888 Value *Ex = nullptr; 11889 auto It = ScalarToEEs.find(Scalar); 11890 if (It != ScalarToEEs.end()) { 11891 // No need to emit many extracts, just move the only one in the 11892 // current block. 11893 auto EEIt = It->second.find(Builder.GetInsertBlock()); 11894 if (EEIt != It->second.end()) { 11895 Instruction *I = EEIt->second; 11896 if (Builder.GetInsertPoint() != Builder.GetInsertBlock()->end() && 11897 Builder.GetInsertPoint()->comesBefore(I)) 11898 I->moveBefore(*Builder.GetInsertPoint()->getParent(), 11899 Builder.GetInsertPoint()); 11900 Ex = I; 11901 } 11902 } 11903 if (!Ex) { 11904 // "Reuse" the existing extract to improve final codegen. 11905 if (auto *ES = dyn_cast<ExtractElementInst>(Scalar)) { 11906 Ex = Builder.CreateExtractElement(ES->getOperand(0), 11907 ES->getOperand(1)); 11908 } else { 11909 Ex = Builder.CreateExtractElement(Vec, Lane); 11910 } 11911 if (auto *I = dyn_cast<Instruction>(Ex)) 11912 ScalarToEEs[Scalar].try_emplace(Builder.GetInsertBlock(), I); 11913 } 11914 // The then branch of the previous if may produce constants, since 0 11915 // operand might be a constant. 11916 if (auto *ExI = dyn_cast<Instruction>(Ex)) { 11917 GatherShuffleExtractSeq.insert(ExI); 11918 CSEBlocks.insert(ExI->getParent()); 11919 } 11920 // If necessary, sign-extend or zero-extend ScalarRoot 11921 // to the larger type. 
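// Illustrative sketch (hypothetical IR, not taken from a test case): if the
// tree entry was narrowed via MinBWs, an externally used i32 scalar may live
// in a <4 x i8> vector, so the extract is widened back to the scalar type:
//   %ex  = extractelement <4 x i8> %vec, i32 2
//   %use = sext i8 %ex to i32   ; or zext, depending on the recorded sign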
11922 if (Scalar->getType() != Ex->getType()) 11923 return Builder.CreateIntCast(Ex, Scalar->getType(), 11924 MinBWs.find(E)->second.second); 11925 return Ex; 11926 } 11927 assert(isa<FixedVectorType>(Scalar->getType()) && 11928 isa<InsertElementInst>(Scalar) && 11929 "In-tree scalar of vector type is not insertelement?"); 11930 auto *IE = cast<InsertElementInst>(Scalar); 11931 VectorToInsertElement.try_emplace(Vec, IE); 11932 return Vec; 11933 }; 11934 // If User == nullptr, the Scalar remains as scalar in vectorized 11935 // instructions or is used as extra arg. Generate ExtractElement instruction 11936 // and update the record for this scalar in ExternallyUsedValues. 11937 if (!User) { 11938 if (!ScalarsWithNullptrUser.insert(Scalar).second) 11939 continue; 11940 assert((ExternallyUsedValues.count(Scalar) || 11941 any_of(Scalar->users(), 11942 [&](llvm::User *U) { 11943 TreeEntry *UseEntry = getTreeEntry(U); 11944 return UseEntry && 11945 UseEntry->State == TreeEntry::Vectorize && 11946 E->State == TreeEntry::Vectorize && 11947 doesInTreeUserNeedToExtract( 11948 Scalar, 11949 cast<Instruction>(UseEntry->Scalars.front()), 11950 TLI); 11951 })) && 11952 "Scalar with nullptr User must be registered in " 11953 "ExternallyUsedValues map or remain as scalar in vectorized " 11954 "instructions"); 11955 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 11956 if (auto *PHI = dyn_cast<PHINode>(VecI)) 11957 Builder.SetInsertPoint(PHI->getParent(), 11958 PHI->getParent()->getFirstNonPHIIt()); 11959 else 11960 Builder.SetInsertPoint(VecI->getParent(), 11961 std::next(VecI->getIterator())); 11962 } else { 11963 Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin()); 11964 } 11965 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 11966 // Required to update internally referenced instructions. 11967 Scalar->replaceAllUsesWith(NewInst); 11968 ReplacedExternals.emplace_back(Scalar, NewInst); 11969 continue; 11970 } 11971 11972 if (auto *VU = dyn_cast<InsertElementInst>(User)) { 11973 // Skip if the scalar is another vector op or Vec is not an instruction. 11974 if (!Scalar->getType()->isVectorTy() && isa<Instruction>(Vec)) { 11975 if (auto *FTy = dyn_cast<FixedVectorType>(User->getType())) { 11976 if (!UsedInserts.insert(VU).second) 11977 continue; 11978 // Need to use original vector, if the root is truncated. 11979 auto BWIt = MinBWs.find(E); 11980 if (BWIt != MinBWs.end() && Vec->getType() != VU->getType()) { 11981 auto VecIt = VectorCasts.find(Scalar); 11982 if (VecIt == VectorCasts.end()) { 11983 IRBuilder<>::InsertPointGuard Guard(Builder); 11984 if (auto *IVec = dyn_cast<Instruction>(Vec)) 11985 Builder.SetInsertPoint(IVec->getNextNonDebugInstruction()); 11986 Vec = Builder.CreateIntCast(Vec, VU->getType(), 11987 BWIt->second.second); 11988 VectorCasts.try_emplace(Scalar, Vec); 11989 } else { 11990 Vec = VecIt->second; 11991 } 11992 } 11993 11994 std::optional<unsigned> InsertIdx = getInsertIndex(VU); 11995 if (InsertIdx) { 11996 auto *It = 11997 find_if(ShuffledInserts, [VU](const ShuffledInsertData &Data) { 11998 // Checks if 2 insertelements are from the same buildvector. 
11999 InsertElementInst *VecInsert = Data.InsertElements.front(); 12000 return areTwoInsertFromSameBuildVector( 12001 VU, VecInsert, 12002 [](InsertElementInst *II) { return II->getOperand(0); }); 12003 }); 12004 unsigned Idx = *InsertIdx; 12005 if (It == ShuffledInserts.end()) { 12006 (void)ShuffledInserts.emplace_back(); 12007 It = std::next(ShuffledInserts.begin(), 12008 ShuffledInserts.size() - 1); 12009 SmallVectorImpl<int> &Mask = It->ValueMasks[Vec]; 12010 if (Mask.empty()) 12011 Mask.assign(FTy->getNumElements(), PoisonMaskElem); 12012 // Find the insertvector, vectorized in tree, if any. 12013 Value *Base = VU; 12014 while (auto *IEBase = dyn_cast<InsertElementInst>(Base)) { 12015 if (IEBase != User && 12016 (!IEBase->hasOneUse() || 12017 getInsertIndex(IEBase).value_or(Idx) == Idx)) 12018 break; 12019 // Build the mask for the vectorized insertelement instructions. 12020 if (const TreeEntry *E = getTreeEntry(IEBase)) { 12021 do { 12022 IEBase = cast<InsertElementInst>(Base); 12023 int IEIdx = *getInsertIndex(IEBase); 12024 assert(Mask[Idx] == PoisonMaskElem && 12025 "InsertElementInstruction used already."); 12026 Mask[IEIdx] = IEIdx; 12027 Base = IEBase->getOperand(0); 12028 } while (E == getTreeEntry(Base)); 12029 break; 12030 } 12031 Base = cast<InsertElementInst>(Base)->getOperand(0); 12032 // After the vectorization the def-use chain has changed, need 12033 // to look through original insertelement instructions, if they 12034 // get replaced by vector instructions. 12035 auto It = VectorToInsertElement.find(Base); 12036 if (It != VectorToInsertElement.end()) 12037 Base = It->second; 12038 } 12039 } 12040 SmallVectorImpl<int> &Mask = It->ValueMasks[Vec]; 12041 if (Mask.empty()) 12042 Mask.assign(FTy->getNumElements(), PoisonMaskElem); 12043 Mask[Idx] = ExternalUse.Lane; 12044 It->InsertElements.push_back(cast<InsertElementInst>(User)); 12045 continue; 12046 } 12047 } 12048 } 12049 } 12050 12051 // Generate extracts for out-of-tree users. 12052 // Find the insertion point for the extractelement lane. 
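// For illustration (hypothetical IR): if the scalar %a was vectorized into
// lane 1 of %va, an out-of-tree user such as `store i32 %a, ptr %p` is
// rewritten to
//   %a.ex = extractelement <4 x i32> %va, i32 1
//   store i32 %a.ex, ptr %p
// For PHI users the extract is emitted at the end of the corresponding
// incoming block rather than next to the PHI itself (see below).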
12053 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 12054 if (PHINode *PH = dyn_cast<PHINode>(User)) { 12055 for (unsigned I : seq<unsigned>(0, PH->getNumIncomingValues())) { 12056 if (PH->getIncomingValue(I) == Scalar) { 12057 Instruction *IncomingTerminator = 12058 PH->getIncomingBlock(I)->getTerminator(); 12059 if (isa<CatchSwitchInst>(IncomingTerminator)) { 12060 Builder.SetInsertPoint(VecI->getParent(), 12061 std::next(VecI->getIterator())); 12062 } else { 12063 Builder.SetInsertPoint(PH->getIncomingBlock(I)->getTerminator()); 12064 } 12065 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 12066 PH->setOperand(I, NewInst); 12067 } 12068 } 12069 } else { 12070 Builder.SetInsertPoint(cast<Instruction>(User)); 12071 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 12072 User->replaceUsesOfWith(Scalar, NewInst); 12073 } 12074 } else { 12075 Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin()); 12076 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 12077 User->replaceUsesOfWith(Scalar, NewInst); 12078 } 12079 12080 LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); 12081 } 12082 12083 auto CreateShuffle = [&](Value *V1, Value *V2, ArrayRef<int> Mask) { 12084 SmallVector<int> CombinedMask1(Mask.size(), PoisonMaskElem); 12085 SmallVector<int> CombinedMask2(Mask.size(), PoisonMaskElem); 12086 int VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 12087 for (int I = 0, E = Mask.size(); I < E; ++I) { 12088 if (Mask[I] < VF) 12089 CombinedMask1[I] = Mask[I]; 12090 else 12091 CombinedMask2[I] = Mask[I] - VF; 12092 } 12093 ShuffleInstructionBuilder ShuffleBuilder(Builder, *this); 12094 ShuffleBuilder.add(V1, CombinedMask1); 12095 if (V2) 12096 ShuffleBuilder.add(V2, CombinedMask2); 12097 return ShuffleBuilder.finalize(std::nullopt); 12098 }; 12099 12100 auto &&ResizeToVF = [&CreateShuffle](Value *Vec, ArrayRef<int> Mask, 12101 bool ForSingleMask) { 12102 unsigned VF = Mask.size(); 12103 unsigned VecVF = cast<FixedVectorType>(Vec->getType())->getNumElements(); 12104 if (VF != VecVF) { 12105 if (any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); })) { 12106 Vec = CreateShuffle(Vec, nullptr, Mask); 12107 return std::make_pair(Vec, true); 12108 } 12109 if (!ForSingleMask) { 12110 SmallVector<int> ResizeMask(VF, PoisonMaskElem); 12111 for (unsigned I = 0; I < VF; ++I) { 12112 if (Mask[I] != PoisonMaskElem) 12113 ResizeMask[Mask[I]] = Mask[I]; 12114 } 12115 Vec = CreateShuffle(Vec, nullptr, ResizeMask); 12116 } 12117 } 12118 12119 return std::make_pair(Vec, false); 12120 }; 12121 // Perform shuffling of the vectorize tree entries for better handling of 12122 // external extracts. 12123 for (int I = 0, E = ShuffledInserts.size(); I < E; ++I) { 12124 // Find the first and the last instruction in the list of insertelements. 
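// Sketch of the transformation (hypothetical IR): a buildvector chain like
//   %i0 = insertelement <4 x float> poison, float %s0, i32 0
//   %i1 = insertelement <4 x float> %i0,    float %s1, i32 1
// whose scalars already live in lanes of a vectorized value %v is replaced by
// a single shuffle of %v (combined with the original base vector, if any),
// and the now-dead insertelements are erased afterwards.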
12125 sort(ShuffledInserts[I].InsertElements, isFirstInsertElement); 12126 InsertElementInst *FirstInsert = ShuffledInserts[I].InsertElements.front(); 12127 InsertElementInst *LastInsert = ShuffledInserts[I].InsertElements.back(); 12128 Builder.SetInsertPoint(LastInsert); 12129 auto Vector = ShuffledInserts[I].ValueMasks.takeVector(); 12130 Value *NewInst = performExtractsShuffleAction<Value>( 12131 MutableArrayRef(Vector.data(), Vector.size()), 12132 FirstInsert->getOperand(0), 12133 [](Value *Vec) { 12134 return cast<VectorType>(Vec->getType()) 12135 ->getElementCount() 12136 .getKnownMinValue(); 12137 }, 12138 ResizeToVF, 12139 [FirstInsert, &CreateShuffle](ArrayRef<int> Mask, 12140 ArrayRef<Value *> Vals) { 12141 assert((Vals.size() == 1 || Vals.size() == 2) && 12142 "Expected exactly 1 or 2 input values."); 12143 if (Vals.size() == 1) { 12144 // Do not create shuffle if the mask is a simple identity 12145 // non-resizing mask. 12146 if (Mask.size() != cast<FixedVectorType>(Vals.front()->getType()) 12147 ->getNumElements() || 12148 !ShuffleVectorInst::isIdentityMask(Mask, Mask.size())) 12149 return CreateShuffle(Vals.front(), nullptr, Mask); 12150 return Vals.front(); 12151 } 12152 return CreateShuffle(Vals.front() ? Vals.front() 12153 : FirstInsert->getOperand(0), 12154 Vals.back(), Mask); 12155 }); 12156 auto It = ShuffledInserts[I].InsertElements.rbegin(); 12157 // Rebuild buildvector chain. 12158 InsertElementInst *II = nullptr; 12159 if (It != ShuffledInserts[I].InsertElements.rend()) 12160 II = *It; 12161 SmallVector<Instruction *> Inserts; 12162 while (It != ShuffledInserts[I].InsertElements.rend()) { 12163 assert(II && "Must be an insertelement instruction."); 12164 if (*It == II) 12165 ++It; 12166 else 12167 Inserts.push_back(cast<Instruction>(II)); 12168 II = dyn_cast<InsertElementInst>(II->getOperand(0)); 12169 } 12170 for (Instruction *II : reverse(Inserts)) { 12171 II->replaceUsesOfWith(II->getOperand(0), NewInst); 12172 if (auto *NewI = dyn_cast<Instruction>(NewInst)) 12173 if (II->getParent() == NewI->getParent() && II->comesBefore(NewI)) 12174 II->moveAfter(NewI); 12175 NewInst = II; 12176 } 12177 LastInsert->replaceAllUsesWith(NewInst); 12178 for (InsertElementInst *IE : reverse(ShuffledInserts[I].InsertElements)) { 12179 IE->replaceUsesOfWith(IE->getOperand(0), 12180 PoisonValue::get(IE->getOperand(0)->getType())); 12181 IE->replaceUsesOfWith(IE->getOperand(1), 12182 PoisonValue::get(IE->getOperand(1)->getType())); 12183 eraseInstruction(IE); 12184 } 12185 CSEBlocks.insert(LastInsert->getParent()); 12186 } 12187 12188 SmallVector<Instruction *> RemovedInsts; 12189 // For each vectorized value: 12190 for (auto &TEPtr : VectorizableTree) { 12191 TreeEntry *Entry = TEPtr.get(); 12192 12193 // No need to handle users of gathered values. 12194 if (Entry->State == TreeEntry::NeedToGather) 12195 continue; 12196 12197 assert(Entry->VectorizedValue && "Can't find vectorizable value"); 12198 12199 // For each lane: 12200 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 12201 Value *Scalar = Entry->Scalars[Lane]; 12202 12203 if (Entry->getOpcode() == Instruction::GetElementPtr && 12204 !isa<GetElementPtrInst>(Scalar)) 12205 continue; 12206 #ifndef NDEBUG 12207 Type *Ty = Scalar->getType(); 12208 if (!Ty->isVoidTy()) { 12209 for (User *U : Scalar->users()) { 12210 LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); 12211 12212 // It is legal to delete users in the ignorelist. 
12213 assert((getTreeEntry(U) || 12214 (UserIgnoreList && UserIgnoreList->contains(U)) || 12215 (isa_and_nonnull<Instruction>(U) && 12216 isDeleted(cast<Instruction>(U)))) && 12217 "Deleting out-of-tree value"); 12218 } 12219 } 12220 #endif 12221 LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); 12222 eraseInstruction(cast<Instruction>(Scalar)); 12223 // Retain to-be-deleted instructions for some debug-info 12224 // bookkeeping. NOTE: eraseInstruction only marks the instruction for 12225 // deletion - instructions are not deleted until later. 12226 RemovedInsts.push_back(cast<Instruction>(Scalar)); 12227 } 12228 } 12229 12230 // Merge the DIAssignIDs from the about-to-be-deleted instructions into the 12231 // new vector instruction. 12232 if (auto *V = dyn_cast<Instruction>(VectorizableTree[0]->VectorizedValue)) 12233 V->mergeDIAssignID(RemovedInsts); 12234 12235 Builder.ClearInsertionPoint(); 12236 InstrElementSize.clear(); 12237 12238 return VectorizableTree[0]->VectorizedValue; 12239 } 12240 12241 void BoUpSLP::optimizeGatherSequence() { 12242 LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherShuffleExtractSeq.size() 12243 << " gather sequences instructions.\n"); 12244 // LICM InsertElementInst sequences. 12245 for (Instruction *I : GatherShuffleExtractSeq) { 12246 if (isDeleted(I)) 12247 continue; 12248 12249 // Check if this block is inside a loop. 12250 Loop *L = LI->getLoopFor(I->getParent()); 12251 if (!L) 12252 continue; 12253 12254 // Check if it has a preheader. 12255 BasicBlock *PreHeader = L->getLoopPreheader(); 12256 if (!PreHeader) 12257 continue; 12258 12259 // If the vector or the element that we insert into it are 12260 // instructions that are defined in this basic block then we can't 12261 // hoist this instruction. 12262 if (any_of(I->operands(), [L](Value *V) { 12263 auto *OpI = dyn_cast<Instruction>(V); 12264 return OpI && L->contains(OpI); 12265 })) 12266 continue; 12267 12268 // We can hoist this instruction. Move it to the pre-header. 12269 I->moveBefore(PreHeader->getTerminator()); 12270 CSEBlocks.insert(PreHeader); 12271 } 12272 12273 // Make a list of all reachable blocks in our CSE queue. 12274 SmallVector<const DomTreeNode *, 8> CSEWorkList; 12275 CSEWorkList.reserve(CSEBlocks.size()); 12276 for (BasicBlock *BB : CSEBlocks) 12277 if (DomTreeNode *N = DT->getNode(BB)) { 12278 assert(DT->isReachableFromEntry(N)); 12279 CSEWorkList.push_back(N); 12280 } 12281 12282 // Sort blocks by domination. This ensures we visit a block after all blocks 12283 // dominating it are visited. 12284 llvm::sort(CSEWorkList, [](const DomTreeNode *A, const DomTreeNode *B) { 12285 assert((A == B) == (A->getDFSNumIn() == B->getDFSNumIn()) && 12286 "Different nodes should have different DFS numbers"); 12287 return A->getDFSNumIn() < B->getDFSNumIn(); 12288 }); 12289 12290 // Less defined shuffles can be replaced by the more defined copies. 12291 // Between two shuffles one is less defined if it has the same vector operands 12292 // and its mask indeces are the same as in the first one or undefs. E.g. 12293 // shuffle %0, poison, <0, 0, 0, undef> is less defined than shuffle %0, 12294 // poison, <0, 0, 0, 0>. 
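// The helper below additionally refuses to treat trailing poison lanes as
// interchangeable when dropping them would change the number of vector
// registers used. Illustrative example (assuming 128-bit registers and i32
// elements): an <8 x i32> shuffle whose last four mask lanes are poison fits
// in one register once those lanes are dropped, while the fully defined
// 8-lane mask needs two, so the two shuffles are not merged.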
12295 auto &&IsIdenticalOrLessDefined = [this](Instruction *I1, Instruction *I2, 12296 SmallVectorImpl<int> &NewMask) { 12297 if (I1->getType() != I2->getType()) 12298 return false; 12299 auto *SI1 = dyn_cast<ShuffleVectorInst>(I1); 12300 auto *SI2 = dyn_cast<ShuffleVectorInst>(I2); 12301 if (!SI1 || !SI2) 12302 return I1->isIdenticalTo(I2); 12303 if (SI1->isIdenticalTo(SI2)) 12304 return true; 12305 for (int I = 0, E = SI1->getNumOperands(); I < E; ++I) 12306 if (SI1->getOperand(I) != SI2->getOperand(I)) 12307 return false; 12308 // Check if the second instruction is more defined than the first one. 12309 NewMask.assign(SI2->getShuffleMask().begin(), SI2->getShuffleMask().end()); 12310 ArrayRef<int> SM1 = SI1->getShuffleMask(); 12311 // Count trailing undefs in the mask to check the final number of used 12312 // registers. 12313 unsigned LastUndefsCnt = 0; 12314 for (int I = 0, E = NewMask.size(); I < E; ++I) { 12315 if (SM1[I] == PoisonMaskElem) 12316 ++LastUndefsCnt; 12317 else 12318 LastUndefsCnt = 0; 12319 if (NewMask[I] != PoisonMaskElem && SM1[I] != PoisonMaskElem && 12320 NewMask[I] != SM1[I]) 12321 return false; 12322 if (NewMask[I] == PoisonMaskElem) 12323 NewMask[I] = SM1[I]; 12324 } 12325 // Check if the last undefs actually change the final number of used vector 12326 // registers. 12327 return SM1.size() - LastUndefsCnt > 1 && 12328 TTI->getNumberOfParts(SI1->getType()) == 12329 TTI->getNumberOfParts( 12330 FixedVectorType::get(SI1->getType()->getElementType(), 12331 SM1.size() - LastUndefsCnt)); 12332 }; 12333 // Perform O(N^2) search over the gather/shuffle sequences and merge identical 12334 // instructions. TODO: We can further optimize this scan if we split the 12335 // instructions into different buckets based on the insert lane. 12336 SmallVector<Instruction *, 16> Visited; 12337 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { 12338 assert(*I && 12339 (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && 12340 "Worklist not sorted properly!"); 12341 BasicBlock *BB = (*I)->getBlock(); 12342 // For all instructions in blocks containing gather sequences: 12343 for (Instruction &In : llvm::make_early_inc_range(*BB)) { 12344 if (isDeleted(&In)) 12345 continue; 12346 if (!isa<InsertElementInst, ExtractElementInst, ShuffleVectorInst>(&In) && 12347 !GatherShuffleExtractSeq.contains(&In)) 12348 continue; 12349 12350 // Check if we can replace this instruction with any of the 12351 // visited instructions. 
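// E.g. (illustrative): two broadcasts `shufflevector %v, poison, <0, 0, 0, 0>`
// emitted in two blocks where the first dominates the second are CSE'd by
// replacing the dominated copy with the dominating one; if the masks differ
// only in poison lanes, the surviving shuffle gets the merged, more defined
// mask.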
12352 bool Replaced = false; 12353 for (Instruction *&V : Visited) { 12354 SmallVector<int> NewMask; 12355 if (IsIdenticalOrLessDefined(&In, V, NewMask) && 12356 DT->dominates(V->getParent(), In.getParent())) { 12357 In.replaceAllUsesWith(V); 12358 eraseInstruction(&In); 12359 if (auto *SI = dyn_cast<ShuffleVectorInst>(V)) 12360 if (!NewMask.empty()) 12361 SI->setShuffleMask(NewMask); 12362 Replaced = true; 12363 break; 12364 } 12365 if (isa<ShuffleVectorInst>(In) && isa<ShuffleVectorInst>(V) && 12366 GatherShuffleExtractSeq.contains(V) && 12367 IsIdenticalOrLessDefined(V, &In, NewMask) && 12368 DT->dominates(In.getParent(), V->getParent())) { 12369 In.moveAfter(V); 12370 V->replaceAllUsesWith(&In); 12371 eraseInstruction(V); 12372 if (auto *SI = dyn_cast<ShuffleVectorInst>(&In)) 12373 if (!NewMask.empty()) 12374 SI->setShuffleMask(NewMask); 12375 V = &In; 12376 Replaced = true; 12377 break; 12378 } 12379 } 12380 if (!Replaced) { 12381 assert(!is_contained(Visited, &In)); 12382 Visited.push_back(&In); 12383 } 12384 } 12385 } 12386 CSEBlocks.clear(); 12387 GatherShuffleExtractSeq.clear(); 12388 } 12389 12390 BoUpSLP::ScheduleData * 12391 BoUpSLP::BlockScheduling::buildBundle(ArrayRef<Value *> VL) { 12392 ScheduleData *Bundle = nullptr; 12393 ScheduleData *PrevInBundle = nullptr; 12394 for (Value *V : VL) { 12395 if (doesNotNeedToBeScheduled(V)) 12396 continue; 12397 ScheduleData *BundleMember = getScheduleData(V); 12398 assert(BundleMember && 12399 "no ScheduleData for bundle member " 12400 "(maybe not in same basic block)"); 12401 assert(BundleMember->isSchedulingEntity() && 12402 "bundle member already part of other bundle"); 12403 if (PrevInBundle) { 12404 PrevInBundle->NextInBundle = BundleMember; 12405 } else { 12406 Bundle = BundleMember; 12407 } 12408 12409 // Group the instructions to a bundle. 12410 BundleMember->FirstInBundle = Bundle; 12411 PrevInBundle = BundleMember; 12412 } 12413 assert(Bundle && "Failed to find schedule bundle"); 12414 return Bundle; 12415 } 12416 12417 // Groups the instructions to a bundle (which is then a single scheduling entity) 12418 // and schedules instructions until the bundle gets ready. 12419 std::optional<BoUpSLP::ScheduleData *> 12420 BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 12421 const InstructionsState &S) { 12422 // No need to schedule PHIs, insertelement, extractelement and extractvalue 12423 // instructions. 12424 if (isa<PHINode>(S.OpValue) || isVectorLikeInstWithConstOps(S.OpValue) || 12425 doesNotNeedToSchedule(VL)) 12426 return nullptr; 12427 12428 // Initialize the instruction bundle. 12429 Instruction *OldScheduleEnd = ScheduleEnd; 12430 LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n"); 12431 12432 auto TryScheduleBundleImpl = [this, OldScheduleEnd, SLP](bool ReSchedule, 12433 ScheduleData *Bundle) { 12434 // The scheduling region got new instructions at the lower end (or it is a 12435 // new region for the first bundle). This makes it necessary to 12436 // recalculate all dependencies. 12437 // It is seldom that this needs to be done a second time after adding the 12438 // initial bundle to the region. 
12439 if (ScheduleEnd != OldScheduleEnd) { 12440 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) 12441 doForAllOpcodes(I, [](ScheduleData *SD) { SD->clearDependencies(); }); 12442 ReSchedule = true; 12443 } 12444 if (Bundle) { 12445 LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle 12446 << " in block " << BB->getName() << "\n"); 12447 calculateDependencies(Bundle, /*InsertInReadyList=*/true, SLP); 12448 } 12449 12450 if (ReSchedule) { 12451 resetSchedule(); 12452 initialFillReadyList(ReadyInsts); 12453 } 12454 12455 // Now try to schedule the new bundle or (if no bundle) just calculate 12456 // dependencies. As soon as the bundle is "ready" it means that there are no 12457 // cyclic dependencies and we can schedule it. Note that's important that we 12458 // don't "schedule" the bundle yet (see cancelScheduling). 12459 while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) && 12460 !ReadyInsts.empty()) { 12461 ScheduleData *Picked = ReadyInsts.pop_back_val(); 12462 assert(Picked->isSchedulingEntity() && Picked->isReady() && 12463 "must be ready to schedule"); 12464 schedule(Picked, ReadyInsts); 12465 } 12466 }; 12467 12468 // Make sure that the scheduling region contains all 12469 // instructions of the bundle. 12470 for (Value *V : VL) { 12471 if (doesNotNeedToBeScheduled(V)) 12472 continue; 12473 if (!extendSchedulingRegion(V, S)) { 12474 // If the scheduling region got new instructions at the lower end (or it 12475 // is a new region for the first bundle). This makes it necessary to 12476 // recalculate all dependencies. 12477 // Otherwise the compiler may crash trying to incorrectly calculate 12478 // dependencies and emit instruction in the wrong order at the actual 12479 // scheduling. 12480 TryScheduleBundleImpl(/*ReSchedule=*/false, nullptr); 12481 return std::nullopt; 12482 } 12483 } 12484 12485 bool ReSchedule = false; 12486 for (Value *V : VL) { 12487 if (doesNotNeedToBeScheduled(V)) 12488 continue; 12489 ScheduleData *BundleMember = getScheduleData(V); 12490 assert(BundleMember && 12491 "no ScheduleData for bundle member (maybe not in same basic block)"); 12492 12493 // Make sure we don't leave the pieces of the bundle in the ready list when 12494 // whole bundle might not be ready. 12495 ReadyInsts.remove(BundleMember); 12496 12497 if (!BundleMember->IsScheduled) 12498 continue; 12499 // A bundle member was scheduled as single instruction before and now 12500 // needs to be scheduled as part of the bundle. We just get rid of the 12501 // existing schedule. 
12502 LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember 12503 << " was already scheduled\n"); 12504 ReSchedule = true; 12505 } 12506 12507 auto *Bundle = buildBundle(VL); 12508 TryScheduleBundleImpl(ReSchedule, Bundle); 12509 if (!Bundle->isReady()) { 12510 cancelScheduling(VL, S.OpValue); 12511 return std::nullopt; 12512 } 12513 return Bundle; 12514 } 12515 12516 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL, 12517 Value *OpValue) { 12518 if (isa<PHINode>(OpValue) || isVectorLikeInstWithConstOps(OpValue) || 12519 doesNotNeedToSchedule(VL)) 12520 return; 12521 12522 if (doesNotNeedToBeScheduled(OpValue)) 12523 OpValue = *find_if_not(VL, doesNotNeedToBeScheduled); 12524 ScheduleData *Bundle = getScheduleData(OpValue); 12525 LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n"); 12526 assert(!Bundle->IsScheduled && 12527 "Can't cancel bundle which is already scheduled"); 12528 assert(Bundle->isSchedulingEntity() && 12529 (Bundle->isPartOfBundle() || needToScheduleSingleInstruction(VL)) && 12530 "tried to unbundle something which is not a bundle"); 12531 12532 // Remove the bundle from the ready list. 12533 if (Bundle->isReady()) 12534 ReadyInsts.remove(Bundle); 12535 12536 // Un-bundle: make single instructions out of the bundle. 12537 ScheduleData *BundleMember = Bundle; 12538 while (BundleMember) { 12539 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links"); 12540 BundleMember->FirstInBundle = BundleMember; 12541 ScheduleData *Next = BundleMember->NextInBundle; 12542 BundleMember->NextInBundle = nullptr; 12543 BundleMember->TE = nullptr; 12544 if (BundleMember->unscheduledDepsInBundle() == 0) { 12545 ReadyInsts.insert(BundleMember); 12546 } 12547 BundleMember = Next; 12548 } 12549 } 12550 12551 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() { 12552 // Allocate a new ScheduleData for the instruction. 12553 if (ChunkPos >= ChunkSize) { 12554 ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize)); 12555 ChunkPos = 0; 12556 } 12557 return &(ScheduleDataChunks.back()[ChunkPos++]); 12558 } 12559 12560 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V, 12561 const InstructionsState &S) { 12562 if (getScheduleData(V, isOneOf(S, V))) 12563 return true; 12564 Instruction *I = dyn_cast<Instruction>(V); 12565 assert(I && "bundle member must be an instruction"); 12566 assert(!isa<PHINode>(I) && !isVectorLikeInstWithConstOps(I) && 12567 !doesNotNeedToBeScheduled(I) && 12568 "phi nodes/insertelements/extractelements/extractvalues don't need to " 12569 "be scheduled"); 12570 auto &&CheckScheduleForI = [this, &S](Instruction *I) -> bool { 12571 ScheduleData *ISD = getScheduleData(I); 12572 if (!ISD) 12573 return false; 12574 assert(isInSchedulingRegion(ISD) && 12575 "ScheduleData not in scheduling region"); 12576 ScheduleData *SD = allocateScheduleDataChunks(); 12577 SD->Inst = I; 12578 SD->init(SchedulingRegionID, S.OpValue); 12579 ExtraScheduleDataMap[I][S.OpValue] = SD; 12580 return true; 12581 }; 12582 if (CheckScheduleForI(I)) 12583 return true; 12584 if (!ScheduleStart) { 12585 // It's the first instruction in the new region. 
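// (Sketch, for orientation: the initial region is the half-open range
// [I, I->getNextNode()); bundles found later grow it upwards by moving
// ScheduleStart or downwards by moving ScheduleEnd in the search below,
// until the slp-schedule-budget limit is hit.)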
12586 initScheduleData(I, I->getNextNode(), nullptr, nullptr); 12587 ScheduleStart = I; 12588 ScheduleEnd = I->getNextNode(); 12589 if (isOneOf(S, I) != I) 12590 CheckScheduleForI(I); 12591 assert(ScheduleEnd && "tried to vectorize a terminator?"); 12592 LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); 12593 return true; 12594 } 12595 // Search up and down at the same time, because we don't know if the new 12596 // instruction is above or below the existing scheduling region. 12597 // Ignore debug info (and other "AssumeLike" intrinsics) so that's not counted 12598 // against the budget. Otherwise debug info could affect codegen. 12599 BasicBlock::reverse_iterator UpIter = 12600 ++ScheduleStart->getIterator().getReverse(); 12601 BasicBlock::reverse_iterator UpperEnd = BB->rend(); 12602 BasicBlock::iterator DownIter = ScheduleEnd->getIterator(); 12603 BasicBlock::iterator LowerEnd = BB->end(); 12604 auto IsAssumeLikeIntr = [](const Instruction &I) { 12605 if (auto *II = dyn_cast<IntrinsicInst>(&I)) 12606 return II->isAssumeLikeIntrinsic(); 12607 return false; 12608 }; 12609 UpIter = std::find_if_not(UpIter, UpperEnd, IsAssumeLikeIntr); 12610 DownIter = std::find_if_not(DownIter, LowerEnd, IsAssumeLikeIntr); 12611 while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I && 12612 &*DownIter != I) { 12613 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) { 12614 LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); 12615 return false; 12616 } 12617 12618 ++UpIter; 12619 ++DownIter; 12620 12621 UpIter = std::find_if_not(UpIter, UpperEnd, IsAssumeLikeIntr); 12622 DownIter = std::find_if_not(DownIter, LowerEnd, IsAssumeLikeIntr); 12623 } 12624 if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) { 12625 assert(I->getParent() == ScheduleStart->getParent() && 12626 "Instruction is in wrong basic block."); 12627 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); 12628 ScheduleStart = I; 12629 if (isOneOf(S, I) != I) 12630 CheckScheduleForI(I); 12631 LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I 12632 << "\n"); 12633 return true; 12634 } 12635 assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) && 12636 "Expected to reach top of the basic block or instruction down the " 12637 "lower end."); 12638 assert(I->getParent() == ScheduleEnd->getParent() && 12639 "Instruction is in wrong basic block."); 12640 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, 12641 nullptr); 12642 ScheduleEnd = I->getNextNode(); 12643 if (isOneOf(S, I) != I) 12644 CheckScheduleForI(I); 12645 assert(ScheduleEnd && "tried to vectorize a terminator?"); 12646 LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n"); 12647 return true; 12648 } 12649 12650 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, 12651 Instruction *ToI, 12652 ScheduleData *PrevLoadStore, 12653 ScheduleData *NextLoadStore) { 12654 ScheduleData *CurrentLoadStore = PrevLoadStore; 12655 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { 12656 // No need to allocate data for non-schedulable instructions. 
12657 if (doesNotNeedToBeScheduled(I)) 12658 continue; 12659 ScheduleData *SD = ScheduleDataMap.lookup(I); 12660 if (!SD) { 12661 SD = allocateScheduleDataChunks(); 12662 ScheduleDataMap[I] = SD; 12663 SD->Inst = I; 12664 } 12665 assert(!isInSchedulingRegion(SD) && 12666 "new ScheduleData already in scheduling region"); 12667 SD->init(SchedulingRegionID, I); 12668 12669 if (I->mayReadOrWriteMemory() && 12670 (!isa<IntrinsicInst>(I) || 12671 (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect && 12672 cast<IntrinsicInst>(I)->getIntrinsicID() != 12673 Intrinsic::pseudoprobe))) { 12674 // Update the linked list of memory accessing instructions. 12675 if (CurrentLoadStore) { 12676 CurrentLoadStore->NextLoadStore = SD; 12677 } else { 12678 FirstLoadStoreInRegion = SD; 12679 } 12680 CurrentLoadStore = SD; 12681 } 12682 12683 if (match(I, m_Intrinsic<Intrinsic::stacksave>()) || 12684 match(I, m_Intrinsic<Intrinsic::stackrestore>())) 12685 RegionHasStackSave = true; 12686 } 12687 if (NextLoadStore) { 12688 if (CurrentLoadStore) 12689 CurrentLoadStore->NextLoadStore = NextLoadStore; 12690 } else { 12691 LastLoadStoreInRegion = CurrentLoadStore; 12692 } 12693 } 12694 12695 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD, 12696 bool InsertInReadyList, 12697 BoUpSLP *SLP) { 12698 assert(SD->isSchedulingEntity()); 12699 12700 SmallVector<ScheduleData *, 10> WorkList; 12701 WorkList.push_back(SD); 12702 12703 while (!WorkList.empty()) { 12704 ScheduleData *SD = WorkList.pop_back_val(); 12705 for (ScheduleData *BundleMember = SD; BundleMember; 12706 BundleMember = BundleMember->NextInBundle) { 12707 assert(isInSchedulingRegion(BundleMember)); 12708 if (BundleMember->hasValidDependencies()) 12709 continue; 12710 12711 LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember 12712 << "\n"); 12713 BundleMember->Dependencies = 0; 12714 BundleMember->resetUnscheduledDeps(); 12715 12716 // Handle def-use chain dependencies. 12717 if (BundleMember->OpValue != BundleMember->Inst) { 12718 if (ScheduleData *UseSD = getScheduleData(BundleMember->Inst)) { 12719 BundleMember->Dependencies++; 12720 ScheduleData *DestBundle = UseSD->FirstInBundle; 12721 if (!DestBundle->IsScheduled) 12722 BundleMember->incrementUnscheduledDeps(1); 12723 if (!DestBundle->hasValidDependencies()) 12724 WorkList.push_back(DestBundle); 12725 } 12726 } else { 12727 for (User *U : BundleMember->Inst->users()) { 12728 if (ScheduleData *UseSD = getScheduleData(cast<Instruction>(U))) { 12729 BundleMember->Dependencies++; 12730 ScheduleData *DestBundle = UseSD->FirstInBundle; 12731 if (!DestBundle->IsScheduled) 12732 BundleMember->incrementUnscheduledDeps(1); 12733 if (!DestBundle->hasValidDependencies()) 12734 WorkList.push_back(DestBundle); 12735 } 12736 } 12737 } 12738 12739 auto MakeControlDependent = [&](Instruction *I) { 12740 auto *DepDest = getScheduleData(I); 12741 assert(DepDest && "must be in schedule window"); 12742 DepDest->ControlDependencies.push_back(BundleMember); 12743 BundleMember->Dependencies++; 12744 ScheduleData *DestBundle = DepDest->FirstInBundle; 12745 if (!DestBundle->IsScheduled) 12746 BundleMember->incrementUnscheduledDeps(1); 12747 if (!DestBundle->hasValidDependencies()) 12748 WorkList.push_back(DestBundle); 12749 }; 12750 12751 // Any instruction which isn't safe to speculate at the beginning of the 12752 // block is control dependend on any early exit or non-willreturn call 12753 // which proceeds it. 
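// For example (hypothetical IR, illustrative only):
//   call void @foo()        ; may throw or not return
//   %q = udiv i32 %a, %b    ; may trap, not safe to speculate
// The udiv must not be reordered above the call, so the scheduler records an
// explicit control dependency here instead of relying on memory dependencies
// alone.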
12754 if (!isGuaranteedToTransferExecutionToSuccessor(BundleMember->Inst)) {
12755 for (Instruction *I = BundleMember->Inst->getNextNode();
12756 I != ScheduleEnd; I = I->getNextNode()) {
12757 if (isSafeToSpeculativelyExecute(I, &*BB->begin(), SLP->AC))
12758 continue;
12759
12760 // Add the dependency
12761 MakeControlDependent(I);
12762
12763 if (!isGuaranteedToTransferExecutionToSuccessor(I))
12764 // Everything past here must be control dependent on I.
12765 break;
12766 }
12767 }
12768
12769 if (RegionHasStackSave) {
12770 // If we have an inalloca alloca instruction, it needs to be scheduled
12771 // after any preceding stacksave. We also need to prevent any alloca
12772 // from reordering above a preceding stackrestore.
12773 if (match(BundleMember->Inst, m_Intrinsic<Intrinsic::stacksave>()) ||
12774 match(BundleMember->Inst, m_Intrinsic<Intrinsic::stackrestore>())) {
12775 for (Instruction *I = BundleMember->Inst->getNextNode();
12776 I != ScheduleEnd; I = I->getNextNode()) {
12777 if (match(I, m_Intrinsic<Intrinsic::stacksave>()) ||
12778 match(I, m_Intrinsic<Intrinsic::stackrestore>()))
12779 // Any allocas past here must be control dependent on I, and I
12780 // must be memory dependent on BundleMember->Inst.
12781 break;
12782
12783 if (!isa<AllocaInst>(I))
12784 continue;
12785
12786 // Add the dependency
12787 MakeControlDependent(I);
12788 }
12789 }
12790
12791 // In addition to the cases handled just above, we need to prevent
12792 // allocas and loads/stores from moving below a stacksave or a
12793 // stackrestore. Keeping allocas above a stackrestore is currently thought
12794 // to be merely conservative; moving loads/stores below a stackrestore
12795 // can lead to incorrect code.
12796 if (isa<AllocaInst>(BundleMember->Inst) ||
12797 BundleMember->Inst->mayReadOrWriteMemory()) {
12798 for (Instruction *I = BundleMember->Inst->getNextNode();
12799 I != ScheduleEnd; I = I->getNextNode()) {
12800 if (!match(I, m_Intrinsic<Intrinsic::stacksave>()) &&
12801 !match(I, m_Intrinsic<Intrinsic::stackrestore>()))
12802 continue;
12803
12804 // Add the dependency
12805 MakeControlDependent(I);
12806 break;
12807 }
12808 }
12809 }
12810
12811 // Handle the memory dependencies (if any).
12812 ScheduleData *DepDest = BundleMember->NextLoadStore;
12813 if (!DepDest)
12814 continue;
12815 Instruction *SrcInst = BundleMember->Inst;
12816 assert(SrcInst->mayReadOrWriteMemory() &&
12817 "NextLoadStore list for a non-memory-affecting bundle?");
12818 MemoryLocation SrcLoc = getLocation(SrcInst);
12819 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
12820 unsigned NumAliased = 0;
12821 unsigned DistToSrc = 1;
12822
12823 for (; DepDest; DepDest = DepDest->NextLoadStore) {
12824 assert(isInSchedulingRegion(DepDest));
12825
12826 // We have two limits to reduce the complexity:
12827 // 1) AliasedCheckLimit: It's a small limit to reduce calls to
12828 // SLP->isAliased (which is the expensive part in this loop).
12829 // 2) MaxMemDepDistance: It's for very large blocks and it aborts
12830 // the whole loop (even if the loop is fast, it's quadratic).
12831 // It's important for the loop break condition (see below) to
12832 // check this limit even between two read-only instructions.
12833 if (DistToSrc >= MaxMemDepDistance || 12834 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) && 12835 (NumAliased >= AliasedCheckLimit || 12836 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) { 12837 12838 // We increment the counter only if the locations are aliased 12839 // (instead of counting all alias checks). This gives a better 12840 // balance between reduced runtime and accurate dependencies. 12841 NumAliased++; 12842 12843 DepDest->MemoryDependencies.push_back(BundleMember); 12844 BundleMember->Dependencies++; 12845 ScheduleData *DestBundle = DepDest->FirstInBundle; 12846 if (!DestBundle->IsScheduled) { 12847 BundleMember->incrementUnscheduledDeps(1); 12848 } 12849 if (!DestBundle->hasValidDependencies()) { 12850 WorkList.push_back(DestBundle); 12851 } 12852 } 12853 12854 // Example, explaining the loop break condition: Let's assume our 12855 // starting instruction is i0 and MaxMemDepDistance = 3. 12856 // 12857 // +--------v--v--v 12858 // i0,i1,i2,i3,i4,i5,i6,i7,i8 12859 // +--------^--^--^ 12860 // 12861 // MaxMemDepDistance let us stop alias-checking at i3 and we add 12862 // dependencies from i0 to i3,i4,.. (even if they are not aliased). 12863 // Previously we already added dependencies from i3 to i6,i7,i8 12864 // (because of MaxMemDepDistance). As we added a dependency from 12865 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8 12866 // and we can abort this loop at i6. 12867 if (DistToSrc >= 2 * MaxMemDepDistance) 12868 break; 12869 DistToSrc++; 12870 } 12871 } 12872 if (InsertInReadyList && SD->isReady()) { 12873 ReadyInsts.insert(SD); 12874 LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst 12875 << "\n"); 12876 } 12877 } 12878 } 12879 12880 void BoUpSLP::BlockScheduling::resetSchedule() { 12881 assert(ScheduleStart && 12882 "tried to reset schedule on block which has not been scheduled"); 12883 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 12884 doForAllOpcodes(I, [&](ScheduleData *SD) { 12885 assert(isInSchedulingRegion(SD) && 12886 "ScheduleData not in scheduling region"); 12887 SD->IsScheduled = false; 12888 SD->resetUnscheduledDeps(); 12889 }); 12890 } 12891 ReadyInsts.clear(); 12892 } 12893 12894 void BoUpSLP::scheduleBlock(BlockScheduling *BS) { 12895 if (!BS->ScheduleStart) 12896 return; 12897 12898 LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n"); 12899 12900 // A key point - if we got here, pre-scheduling was able to find a valid 12901 // scheduling of the sub-graph of the scheduling window which consists 12902 // of all vector bundles and their transitive users. As such, we do not 12903 // need to reschedule anything *outside of* that subgraph. 12904 12905 BS->resetSchedule(); 12906 12907 // For the real scheduling we use a more sophisticated ready-list: it is 12908 // sorted by the original instruction location. This lets the final schedule 12909 // be as close as possible to the original instruction order. 12910 // WARNING: If changing this order causes a correctness issue, that means 12911 // there is some missing dependence edge in the schedule data graph. 12912 struct ScheduleDataCompare { 12913 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const { 12914 return SD2->SchedulingPriority < SD1->SchedulingPriority; 12915 } 12916 }; 12917 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts; 12918 12919 // Ensure that all dependency data is updated (for nodes in the sub-graph) 12920 // and fill the ready-list with initial instructions. 
12921 int Idx = 0; 12922 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; 12923 I = I->getNextNode()) { 12924 BS->doForAllOpcodes(I, [this, &Idx, BS](ScheduleData *SD) { 12925 TreeEntry *SDTE = getTreeEntry(SD->Inst); 12926 (void)SDTE; 12927 assert((isVectorLikeInstWithConstOps(SD->Inst) || 12928 SD->isPartOfBundle() == 12929 (SDTE && !doesNotNeedToSchedule(SDTE->Scalars))) && 12930 "scheduler and vectorizer bundle mismatch"); 12931 SD->FirstInBundle->SchedulingPriority = Idx++; 12932 12933 if (SD->isSchedulingEntity() && SD->isPartOfBundle()) 12934 BS->calculateDependencies(SD, false, this); 12935 }); 12936 } 12937 BS->initialFillReadyList(ReadyInsts); 12938 12939 Instruction *LastScheduledInst = BS->ScheduleEnd; 12940 12941 // Do the "real" scheduling. 12942 while (!ReadyInsts.empty()) { 12943 ScheduleData *Picked = *ReadyInsts.begin(); 12944 ReadyInsts.erase(ReadyInsts.begin()); 12945 12946 // Move the scheduled instruction(s) to their dedicated places, if not 12947 // there yet. 12948 for (ScheduleData *BundleMember = Picked; BundleMember; 12949 BundleMember = BundleMember->NextInBundle) { 12950 Instruction *PickedInst = BundleMember->Inst; 12951 if (PickedInst->getNextNode() != LastScheduledInst) 12952 PickedInst->moveBefore(LastScheduledInst); 12953 LastScheduledInst = PickedInst; 12954 } 12955 12956 BS->schedule(Picked, ReadyInsts); 12957 } 12958 12959 // Check that we didn't break any of our invariants. 12960 #ifdef EXPENSIVE_CHECKS 12961 BS->verify(); 12962 #endif 12963 12964 #if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS) 12965 // Check that all schedulable entities got scheduled 12966 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; I = I->getNextNode()) { 12967 BS->doForAllOpcodes(I, [&](ScheduleData *SD) { 12968 if (SD->isSchedulingEntity() && SD->hasValidDependencies()) { 12969 assert(SD->IsScheduled && "must be scheduled at this point"); 12970 } 12971 }); 12972 } 12973 #endif 12974 12975 // Avoid duplicate scheduling of the block. 12976 BS->ScheduleStart = nullptr; 12977 } 12978 12979 unsigned BoUpSLP::getVectorElementSize(Value *V) { 12980 // If V is a store, just return the width of the stored value (or value 12981 // truncated just before storing) without traversing the expression tree. 12982 // This is the common case. 12983 if (auto *Store = dyn_cast<StoreInst>(V)) 12984 return DL->getTypeSizeInBits(Store->getValueOperand()->getType()); 12985 12986 if (auto *IEI = dyn_cast<InsertElementInst>(V)) 12987 return getVectorElementSize(IEI->getOperand(1)); 12988 12989 auto E = InstrElementSize.find(V); 12990 if (E != InstrElementSize.end()) 12991 return E->second; 12992 12993 // If V is not a store, we can traverse the expression tree to find loads 12994 // that feed it. The type of the loaded value may indicate a more suitable 12995 // width than V's type. We want to base the vector element size on the width 12996 // of memory operations where possible. 12997 SmallVector<std::pair<Instruction *, BasicBlock *>, 16> Worklist; 12998 SmallPtrSet<Instruction *, 16> Visited; 12999 if (auto *I = dyn_cast<Instruction>(V)) { 13000 Worklist.emplace_back(I, I->getParent()); 13001 Visited.insert(I); 13002 } 13003 13004 // Traverse the expression tree in bottom-up order looking for loads. If we 13005 // encounter an instruction we don't yet handle, we give up. 
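// Illustrative example (hypothetical IR): for
//   %l0 = load i8, ptr %p0
//   %z0 = zext i8 %l0 to i32
//   %l1 = load i8, ptr %p1
//   %z1 = zext i8 %l1 to i32
//   %a  = add i32 %z0, %z1
// the walk from %a reaches the two i8 loads, so the element size used for %a
// is 8 bits rather than the 32 bits of its own type.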
13006 auto Width = 0u; 13007 while (!Worklist.empty()) { 13008 Instruction *I; 13009 BasicBlock *Parent; 13010 std::tie(I, Parent) = Worklist.pop_back_val(); 13011 13012 // We should only be looking at scalar instructions here. If the current 13013 // instruction has a vector type, skip. 13014 auto *Ty = I->getType(); 13015 if (isa<VectorType>(Ty)) 13016 continue; 13017 13018 // If the current instruction is a load, update MaxWidth to reflect the 13019 // width of the loaded value. 13020 if (isa<LoadInst, ExtractElementInst, ExtractValueInst>(I)) 13021 Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty)); 13022 13023 // Otherwise, we need to visit the operands of the instruction. We only 13024 // handle the interesting cases from buildTree here. If an operand is an 13025 // instruction we haven't yet visited and from the same basic block as the 13026 // user or the use is a PHI node, we add it to the worklist. 13027 else if (isa<PHINode, CastInst, GetElementPtrInst, CmpInst, SelectInst, 13028 BinaryOperator, UnaryOperator>(I)) { 13029 for (Use &U : I->operands()) 13030 if (auto *J = dyn_cast<Instruction>(U.get())) 13031 if (Visited.insert(J).second && 13032 (isa<PHINode>(I) || J->getParent() == Parent)) 13033 Worklist.emplace_back(J, J->getParent()); 13034 } else { 13035 break; 13036 } 13037 } 13038 13039 // If we didn't encounter a memory access in the expression tree, or if we 13040 // gave up for some reason, just return the width of V. Otherwise, return the 13041 // maximum width we found. 13042 if (!Width) { 13043 if (auto *CI = dyn_cast<CmpInst>(V)) 13044 V = CI->getOperand(0); 13045 Width = DL->getTypeSizeInBits(V->getType()); 13046 } 13047 13048 for (Instruction *I : Visited) 13049 InstrElementSize[I] = Width; 13050 13051 return Width; 13052 } 13053 13054 // Determine if a value V in a vectorizable expression Expr can be demoted to a 13055 // smaller type with a truncation. We collect the values that will be demoted 13056 // in ToDemote and additional roots that require investigating in Roots. 13057 bool BoUpSLP::collectValuesToDemote( 13058 Value *V, SmallVectorImpl<Value *> &ToDemote, 13059 DenseMap<Instruction *, SmallVector<unsigned>> &DemotedConsts, 13060 SmallVectorImpl<Value *> &Roots, DenseSet<Value *> &Visited) const { 13061 // We can always demote constants. 13062 if (isa<Constant>(V)) 13063 return true; 13064 13065 // If the value is not a vectorized instruction in the expression with only 13066 // one use, it cannot be demoted. 13067 auto *I = dyn_cast<Instruction>(V); 13068 if (!I || !I->hasOneUse() || !getTreeEntry(I) || !Visited.insert(I).second) 13069 return false; 13070 13071 unsigned Start = 0; 13072 unsigned End = I->getNumOperands(); 13073 switch (I->getOpcode()) { 13074 13075 // We can always demote truncations and extensions. Since truncations can 13076 // seed additional demotion, we save the truncated value. 13077 case Instruction::Trunc: 13078 Roots.push_back(I->getOperand(0)); 13079 break; 13080 case Instruction::ZExt: 13081 case Instruction::SExt: 13082 if (isa<ExtractElementInst, InsertElementInst>(I->getOperand(0))) 13083 return false; 13084 break; 13085 13086 // We can demote certain binary operations if we can demote both of their 13087 // operands. 
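// For instance (illustrative IR), in
//   %wa = zext i8 %a to i32
//   %wb = zext i8 %b to i32
//   %ws = add i32 %wa, %wb
//   %r  = trunc i32 %ws to i8
// both operands of the add are demotable zexts of i8 values, so the add
// itself can be demoted and later evaluated in 8 bits (see
// computeMinimumValueSizes below).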
13088 case Instruction::Add: 13089 case Instruction::Sub: 13090 case Instruction::Mul: 13091 case Instruction::And: 13092 case Instruction::Or: 13093 case Instruction::Xor: 13094 if (!collectValuesToDemote(I->getOperand(0), ToDemote, DemotedConsts, Roots, 13095 Visited) || 13096 !collectValuesToDemote(I->getOperand(1), ToDemote, DemotedConsts, Roots, 13097 Visited)) 13098 return false; 13099 break; 13100 13101 // We can demote selects if we can demote their true and false values. 13102 case Instruction::Select: { 13103 Start = 1; 13104 SelectInst *SI = cast<SelectInst>(I); 13105 if (!collectValuesToDemote(SI->getTrueValue(), ToDemote, DemotedConsts, 13106 Roots, Visited) || 13107 !collectValuesToDemote(SI->getFalseValue(), ToDemote, DemotedConsts, 13108 Roots, Visited)) 13109 return false; 13110 break; 13111 } 13112 13113 // We can demote phis if we can demote all their incoming operands. Note that 13114 // we don't need to worry about cycles since we ensure single use above. 13115 case Instruction::PHI: { 13116 PHINode *PN = cast<PHINode>(I); 13117 for (Value *IncValue : PN->incoming_values()) 13118 if (!collectValuesToDemote(IncValue, ToDemote, DemotedConsts, Roots, 13119 Visited)) 13120 return false; 13121 break; 13122 } 13123 13124 // Otherwise, conservatively give up. 13125 default: 13126 return false; 13127 } 13128 13129 // Gather demoted constant operands. 13130 for (unsigned Idx : seq<unsigned>(Start, End)) 13131 if (isa<Constant>(I->getOperand(Idx))) 13132 DemotedConsts.try_emplace(I).first->getSecond().push_back(Idx); 13133 // Record the value that we can demote. 13134 ToDemote.push_back(V); 13135 return true; 13136 } 13137 13138 void BoUpSLP::computeMinimumValueSizes() { 13139 // If there are no external uses, the expression tree must be rooted by a 13140 // store. We can't demote in-memory values, so there is nothing to do here. 13141 if (ExternalUses.empty()) 13142 return; 13143 13144 // We only attempt to truncate integer expressions. 13145 auto &TreeRoot = VectorizableTree[0]->Scalars; 13146 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType()); 13147 if (!TreeRootIT) 13148 return; 13149 13150 // Ensure the roots of the vectorizable tree don't form a cycle. 13151 if (!VectorizableTree.front()->UserTreeIndices.empty()) 13152 return; 13153 13154 // Conservatively determine if we can actually truncate the roots of the 13155 // expression. Collect the values that can be demoted in ToDemote and 13156 // additional roots that require investigating in Roots. 13157 SmallVector<Value *, 32> ToDemote; 13158 DenseMap<Instruction *, SmallVector<unsigned>> DemotedConsts; 13159 SmallVector<Value *, 4> Roots; 13160 for (auto *Root : TreeRoot) { 13161 DenseSet<Value *> Visited; 13162 if (!collectValuesToDemote(Root, ToDemote, DemotedConsts, Roots, Visited)) 13163 return; 13164 } 13165 13166 // The maximum bit width required to represent all the values that can be 13167 // demoted without loss of precision. It would be safe to truncate the roots 13168 // of the expression to this width. 13169 auto MaxBitWidth = 1u; 13170 13171 // We first check if all the bits of the roots are demanded. If they're not, 13172 // we can truncate the roots to this narrower type. 13173 for (auto *Root : TreeRoot) { 13174 auto Mask = DB->getDemandedBits(cast<Instruction>(Root)); 13175 MaxBitWidth = std::max<unsigned>(Mask.getBitWidth() - Mask.countl_zero(), 13176 MaxBitWidth); 13177 } 13178 13179 // True if the roots can be zero-extended back to their original type, rather 13180 // than sign-extended. 
We know that if the leading bits are not demanded, we
13181 // can safely zero-extend. So we initialize IsKnownPositive to True.
13182 bool IsKnownPositive = true;
13183
13184 // If all the bits of the roots are demanded, we can try a little harder to
13185 // compute a narrower type. This can happen, for example, if the roots are
13186 // getelementptr indices. InstCombine promotes these indices to the pointer
13187 // width. Thus, all their bits are technically demanded even though the
13188 // address computation might be vectorized in a smaller type.
13189 //
13190 // We start by looking at each entry that can be demoted. We compute the
13191 // maximum bit width required to store the scalar by using ValueTracking to
13192 // compute the number of high-order bits we can truncate.
13193 if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
13194 all_of(TreeRoot, [](Value *V) {
13195 return all_of(V->users(),
13196 [](User *U) { return isa<GetElementPtrInst>(U); });
13197 })) {
13198 MaxBitWidth = 8u;
13199
13200 // Determine if the sign bit of all the roots is known to be zero. If not,
13201 // IsKnownPositive is set to False.
13202 IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
13203 KnownBits Known = computeKnownBits(R, *DL);
13204 return Known.isNonNegative();
13205 });
13206
13207 // Determine the maximum number of bits required to store the scalar
13208 // values.
13209 for (auto *Scalar : ToDemote) {
13210 auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
13211 auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
13212 MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
13213 }
13214
13215 // If we can't prove that the sign bit is zero, we must add one to the
13216 // maximum bit width to account for the unknown sign bit. This preserves
13217 // the existing sign bit so we can safely sign-extend the root back to the
13218 // original type. Otherwise, if we know the sign bit is zero, we will
13219 // zero-extend the root instead.
13220 //
13221 // FIXME: This is somewhat suboptimal, as there will be cases where adding
13222 // one to the maximum bit width will yield a larger-than-necessary
13223 // type. In general, we need to add an extra bit only if we can't
13224 // prove that the upper bit of the original type is equal to the
13225 // upper bit of the proposed smaller type. If these two bits are the
13226 // same (either zero or one) we know that sign-extending from the
13227 // smaller type will result in the same value. Here, since we can't
13228 // yet prove this, we are just making the proposed smaller type
13229 // larger to ensure correctness.
13230 if (!IsKnownPositive)
13231 ++MaxBitWidth;
13232 }
13233
13234 // Round MaxBitWidth up to the next power-of-two.
13235 MaxBitWidth = llvm::bit_ceil(MaxBitWidth);
13236
13237 // If the maximum bit width we compute is less than the width of the roots'
13238 // type, we can proceed with the narrowing. Otherwise, do nothing.
13239 if (MaxBitWidth >= TreeRootIT->getBitWidth())
13240 return;
13241
13242 // If we can truncate the root, we must collect additional values that might
13243 // be demoted as a result. That is, those seeded by truncations we will
13244 // modify.
13245 while (!Roots.empty()) {
13246 DenseSet<Value *> Visited;
13247 collectValuesToDemote(Roots.pop_back_val(), ToDemote, DemotedConsts, Roots,
13248 Visited);
13249 }
13250
13251 // Finally, map the values we can demote to the maximum bit width we computed.
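// To recap the computation above with concrete, purely illustrative numbers:
// if the roots are i32 values used only as getelementptr indices, DemandedBits
// reports all 32 bits demanded and the sign-bit based path starts from 8 bits.
// If every demoted scalar has at least 24 known sign bits it needs no more
// than 8 bits, so MaxBitWidth stays at 8; if the sign bit cannot be proven
// zero, one extra bit is added (9) and the power-of-two rounding yields 16.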
13252 for (auto *Scalar : ToDemote) { 13253 auto *TE = getTreeEntry(Scalar); 13254 assert(TE && "Expected vectorized scalar."); 13255 if (MinBWs.contains(TE)) 13256 continue; 13257 bool IsSigned = any_of(TE->Scalars, [&](Value *R) { 13258 KnownBits Known = computeKnownBits(R, *DL); 13259 return !Known.isNonNegative(); 13260 }); 13261 MinBWs.try_emplace(TE, MaxBitWidth, IsSigned); 13262 const auto *I = cast<Instruction>(Scalar); 13263 auto DCIt = DemotedConsts.find(I); 13264 if (DCIt != DemotedConsts.end()) { 13265 for (unsigned Idx : DCIt->getSecond()) { 13266 // Check that all instructions operands are demoted. 13267 if (all_of(TE->Scalars, [&](Value *V) { 13268 auto SIt = DemotedConsts.find(cast<Instruction>(V)); 13269 return SIt != DemotedConsts.end() && 13270 is_contained(SIt->getSecond(), Idx); 13271 })) { 13272 const TreeEntry *CTE = getOperandEntry(TE, Idx); 13273 MinBWs.try_emplace(CTE, MaxBitWidth, IsSigned); 13274 } 13275 } 13276 } 13277 } 13278 } 13279 13280 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) { 13281 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F); 13282 auto *TTI = &AM.getResult<TargetIRAnalysis>(F); 13283 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F); 13284 auto *AA = &AM.getResult<AAManager>(F); 13285 auto *LI = &AM.getResult<LoopAnalysis>(F); 13286 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); 13287 auto *AC = &AM.getResult<AssumptionAnalysis>(F); 13288 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F); 13289 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 13290 13291 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 13292 if (!Changed) 13293 return PreservedAnalyses::all(); 13294 13295 PreservedAnalyses PA; 13296 PA.preserveSet<CFGAnalyses>(); 13297 return PA; 13298 } 13299 13300 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_, 13301 TargetTransformInfo *TTI_, 13302 TargetLibraryInfo *TLI_, AAResults *AA_, 13303 LoopInfo *LI_, DominatorTree *DT_, 13304 AssumptionCache *AC_, DemandedBits *DB_, 13305 OptimizationRemarkEmitter *ORE_) { 13306 if (!RunSLPVectorization) 13307 return false; 13308 SE = SE_; 13309 TTI = TTI_; 13310 TLI = TLI_; 13311 AA = AA_; 13312 LI = LI_; 13313 DT = DT_; 13314 AC = AC_; 13315 DB = DB_; 13316 DL = &F.getParent()->getDataLayout(); 13317 13318 Stores.clear(); 13319 GEPs.clear(); 13320 bool Changed = false; 13321 13322 // If the target claims to have no vector registers don't attempt 13323 // vectorization. 13324 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true))) { 13325 LLVM_DEBUG( 13326 dbgs() << "SLP: Didn't find any vector registers for target, abort.\n"); 13327 return false; 13328 } 13329 13330 // Don't vectorize when the attribute NoImplicitFloat is used. 13331 if (F.hasFnAttribute(Attribute::NoImplicitFloat)) 13332 return false; 13333 13334 LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); 13335 13336 // Use the bottom up slp vectorizer to construct chains that start with 13337 // store instructions. 13338 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_); 13339 13340 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to 13341 // delete instructions. 13342 13343 // Update DFS numbers now so that we can use them for ordering. 13344 DT->updateDFSNumbers(); 13345 13346 // Scan the blocks in the function in post order. 13347 for (auto *BB : post_order(&F.getEntryBlock())) { 13348 // Start new block - clear the list of reduction roots. 
13349 R.clearReductionData(); 13350 collectSeedInstructions(BB); 13351 13352 // Vectorize trees that end at stores. 13353 if (!Stores.empty()) { 13354 LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size() 13355 << " underlying objects.\n"); 13356 Changed |= vectorizeStoreChains(R); 13357 } 13358 13359 // Vectorize trees that end at reductions. 13360 Changed |= vectorizeChainsInBlock(BB, R); 13361 13362 // Vectorize the index computations of getelementptr instructions. This 13363 // is primarily intended to catch gather-like idioms ending at 13364 // non-consecutive loads. 13365 if (!GEPs.empty()) { 13366 LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size() 13367 << " underlying objects.\n"); 13368 Changed |= vectorizeGEPIndices(BB, R); 13369 } 13370 } 13371 13372 if (Changed) { 13373 R.optimizeGatherSequence(); 13374 LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n"); 13375 } 13376 return Changed; 13377 } 13378 13379 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R, 13380 unsigned Idx, unsigned MinVF) { 13381 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size() 13382 << "\n"); 13383 const unsigned Sz = R.getVectorElementSize(Chain[0]); 13384 unsigned VF = Chain.size(); 13385 13386 if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF) 13387 return false; 13388 13389 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx 13390 << "\n"); 13391 13392 R.buildTree(Chain); 13393 if (R.isTreeTinyAndNotFullyVectorizable()) 13394 return false; 13395 if (R.isLoadCombineCandidate()) 13396 return false; 13397 R.reorderTopToBottom(); 13398 R.reorderBottomToTop(); 13399 R.buildExternalUses(); 13400 13401 R.computeMinimumValueSizes(); 13402 13403 InstructionCost Cost = R.getTreeCost(); 13404 13405 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF=" << VF << "\n"); 13406 if (Cost < -SLPCostThreshold) { 13407 LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n"); 13408 13409 using namespace ore; 13410 13411 R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized", 13412 cast<StoreInst>(Chain[0])) 13413 << "Stores SLP vectorized with cost " << NV("Cost", Cost) 13414 << " and with tree size " 13415 << NV("TreeSize", R.getTreeSize())); 13416 13417 R.vectorizeTree(); 13418 return true; 13419 } 13420 13421 return false; 13422 } 13423 13424 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores, 13425 BoUpSLP &R) { 13426 // We may run into multiple chains that merge into a single chain. We mark the 13427 // stores that we vectorized so that we don't visit the same store twice. 13428 BoUpSLP::ValueSet VectorizedStores; 13429 bool Changed = false; 13430 13431 // Stores the pair of stores (first_store, last_store) in a range, that were 13432 // already tried to be vectorized. Allows to skip the store ranges that were 13433 // already tried to be vectorized but the attempts were unsuccessful. 13434 DenseSet<std::pair<Value *, Value *>> TriedSequences; 13435 struct StoreDistCompare { 13436 bool operator()(const std::pair<unsigned, int> &Op1, 13437 const std::pair<unsigned, int> &Op2) const { 13438 return Op1.second < Op2.second; 13439 } 13440 }; 13441 // A set of pairs (index of store in Stores array ref, Distance of the store 13442 // address relative to base store address in units). 
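// Illustrative example (hypothetical layout): if the base store is followed in Stores by
// stores at element offsets +2 and +1, the set contains {0, 0}, {2, 1}, {1, 2}, i.e. it is
// ordered by distance rather than by position in the Stores array.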
13443 using StoreIndexToDistSet = 13444 std::set<std::pair<unsigned, int>, StoreDistCompare>; 13445 auto TryToVectorize = [&](const StoreIndexToDistSet &Set) { 13446 int PrevDist = -1; 13447 BoUpSLP::ValueList Operands; 13448 // Collect the chain into a list. 13449 for (auto [Idx, Data] : enumerate(Set)) { 13450 if (Operands.empty() || Data.second - PrevDist == 1) { 13451 Operands.push_back(Stores[Data.first]); 13452 PrevDist = Data.second; 13453 if (Idx != Set.size() - 1) 13454 continue; 13455 } 13456 if (Operands.size() <= 1) { 13457 Operands.clear(); 13458 Operands.push_back(Stores[Data.first]); 13459 PrevDist = Data.second; 13460 continue; 13461 } 13462 13463 unsigned MaxVecRegSize = R.getMaxVecRegSize(); 13464 unsigned EltSize = R.getVectorElementSize(Operands[0]); 13465 unsigned MaxElts = llvm::bit_floor(MaxVecRegSize / EltSize); 13466 13467 unsigned MaxVF = 13468 std::min(R.getMaximumVF(EltSize, Instruction::Store), MaxElts); 13469 auto *Store = cast<StoreInst>(Operands[0]); 13470 Type *StoreTy = Store->getValueOperand()->getType(); 13471 Type *ValueTy = StoreTy; 13472 if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand())) 13473 ValueTy = Trunc->getSrcTy(); 13474 unsigned MinVF = TTI->getStoreMinimumVF( 13475 R.getMinVF(DL->getTypeSizeInBits(ValueTy)), StoreTy, ValueTy); 13476 13477 if (MaxVF <= MinVF) { 13478 LLVM_DEBUG(dbgs() << "SLP: Vectorization infeasible as MaxVF (" << MaxVF 13479 << ") <= " 13480 << "MinVF (" << MinVF << ")\n"); 13481 } 13482 13483 // FIXME: Is division-by-2 the correct step? Should we assert that the 13484 // register size is a power-of-2? 13485 unsigned StartIdx = 0; 13486 for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) { 13487 for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) { 13488 ArrayRef<Value *> Slice = ArrayRef(Operands).slice(Cnt, Size); 13489 assert( 13490 all_of( 13491 Slice, 13492 [&](Value *V) { 13493 return cast<StoreInst>(V)->getValueOperand()->getType() == 13494 cast<StoreInst>(Slice.front()) 13495 ->getValueOperand() 13496 ->getType(); 13497 }) && 13498 "Expected all operands of same type."); 13499 if (!VectorizedStores.count(Slice.front()) && 13500 !VectorizedStores.count(Slice.back()) && 13501 TriedSequences.insert(std::make_pair(Slice.front(), Slice.back())) 13502 .second && 13503 vectorizeStoreChain(Slice, R, Cnt, MinVF)) { 13504 // Mark the vectorized stores so that we don't vectorize them again. 13505 VectorizedStores.insert(Slice.begin(), Slice.end()); 13506 Changed = true; 13507 // If we vectorized initial block, no need to try to vectorize it 13508 // again. 13509 if (Cnt == StartIdx) 13510 StartIdx += Size; 13511 Cnt += Size; 13512 continue; 13513 } 13514 ++Cnt; 13515 } 13516 // Check if the whole array was vectorized already - exit. 13517 if (StartIdx >= Operands.size()) 13518 break; 13519 } 13520 Operands.clear(); 13521 Operands.push_back(Stores[Data.first]); 13522 PrevDist = Data.second; 13523 } 13524 }; 13525 13526 // Stores pair (first: index of the store into Stores array ref, address of 13527 // which taken as base, second: sorted set of pairs {index, dist}, which are 13528 // indices of stores in the set and their store location distances relative to 13529 // the base address). 13530 13531 // Need to store the index of the very first store separately, since the set 13532 // may be reordered after the insertion and the first store may be moved. This 13533 // container allows to reduce number of calls of getPointersDiff() function. 
13534 SmallVector<std::pair<unsigned, StoreIndexToDistSet>> SortedStores;
13535 // Inserts the specified store SI with the given index Idx into the set of
13536 // stores. If a store with the same distance is already in the set - stop the
13537 // insertion and try to vectorize the stores found so far. If some stores from
13538 // this sequence were not vectorized - try to vectorize them with the new store
13539 // later. But this logic is applied only to the stores that come before the
13540 // previous store with the same distance.
13541 // Example:
13542 // 1. store x, %p
13543 // 2. store y, %p+1
13544 // 3. store z, %p+2
13545 // 4. store a, %p
13546 // 5. store b, %p+3
13547 // - Scan this from the last to first store. The very first bunch of stores is
13548 // {5, {{4, -3}, {2, -2}, {3, -1}, {5, 0}}} (the element in SortedStores
13549 // vector).
13550 // - The next store in the list - #1 - has the same distance from store #5 as
13551 // the store #4.
13552 // - Try to vectorize sequence of stores 4,2,3,5.
13553 // - If all these stores are vectorized - just drop them.
13554 // - If some of them are not vectorized (say, #3 and #5), do extra analysis.
13555 // - Start new stores sequence.
13556 // The new bunch of stores is {1, {1, 0}}.
13557 // - Add the stores from the previous sequence that were not vectorized.
13558 // Here we consider the stores in reverse order, rather than the order in which
13559 // they appear in the IR (Stores are reversed already, see vectorizeStoreChains()).
13560 // Store #3 can be added -> comes after store #4 with the same distance as
13561 // store #1.
13562 // Store #5 cannot be added - comes before store #4.
13563 // This logic improves compile time: we assume that the stores after a previous
13564 // store with the same distance most likely have memory dependencies, so there
13565 // is no need to waste compile time trying to vectorize them.
13566 // - Try to vectorize the sequence {1, {1, 0}, {3, 2}}.
13567 auto FillStoresSet = [&](unsigned Idx, StoreInst *SI) {
13568 for (std::pair<unsigned, StoreIndexToDistSet> &Set : SortedStores) {
13569 std::optional<int> Diff = getPointersDiff(
13570 Stores[Set.first]->getValueOperand()->getType(),
13571 Stores[Set.first]->getPointerOperand(),
13572 SI->getValueOperand()->getType(), SI->getPointerOperand(), *DL, *SE,
13573 /*StrictCheck=*/true);
13574 if (!Diff)
13575 continue;
13576 auto It = Set.second.find(std::make_pair(Idx, *Diff));
13577 if (It == Set.second.end()) {
13578 Set.second.emplace(Idx, *Diff);
13579 return;
13580 }
13581 // Try to vectorize the first found set to avoid duplicate analysis.
13582 TryToVectorize(Set.second);
13583 StoreIndexToDistSet PrevSet;
13584 PrevSet.swap(Set.second);
13585 Set.first = Idx;
13586 Set.second.emplace(Idx, 0);
13587 // Insert stores that followed the previous match to try to vectorize them
13588 // with this store.
13589 unsigned StartIdx = It->first + 1;
13590 SmallBitVector UsedStores(Idx - StartIdx);
13591 // Distances to the previously found dup store (or this store, since they
13592 // store to the same addresses).
13593 SmallVector<int> Dists(Idx - StartIdx, 0);
13594 for (const std::pair<unsigned, int> &Pair : reverse(PrevSet)) {
13595 // Do not try to vectorize sequences we have already tried.
13596 if (Pair.first <= It->first || 13597 VectorizedStores.contains(Stores[Pair.first])) 13598 break; 13599 unsigned BI = Pair.first - StartIdx; 13600 UsedStores.set(BI); 13601 Dists[BI] = Pair.second - It->second; 13602 } 13603 for (unsigned I = StartIdx; I < Idx; ++I) { 13604 unsigned BI = I - StartIdx; 13605 if (UsedStores.test(BI)) 13606 Set.second.emplace(I, Dists[BI]); 13607 } 13608 return; 13609 } 13610 auto &Res = SortedStores.emplace_back(); 13611 Res.first = Idx; 13612 Res.second.emplace(Idx, 0); 13613 }; 13614 StoreInst *PrevStore = Stores.front(); 13615 for (auto [I, SI] : enumerate(Stores)) { 13616 // Check that we do not try to vectorize stores of different types. 13617 if (PrevStore->getValueOperand()->getType() != 13618 SI->getValueOperand()->getType()) { 13619 for (auto &Set : SortedStores) 13620 TryToVectorize(Set.second); 13621 SortedStores.clear(); 13622 PrevStore = SI; 13623 } 13624 FillStoresSet(I, SI); 13625 } 13626 13627 // Final vectorization attempt. 13628 for (auto &Set : SortedStores) 13629 TryToVectorize(Set.second); 13630 13631 return Changed; 13632 } 13633 13634 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) { 13635 // Initialize the collections. We will make a single pass over the block. 13636 Stores.clear(); 13637 GEPs.clear(); 13638 13639 // Visit the store and getelementptr instructions in BB and organize them in 13640 // Stores and GEPs according to the underlying objects of their pointer 13641 // operands. 13642 for (Instruction &I : *BB) { 13643 // Ignore store instructions that are volatile or have a pointer operand 13644 // that doesn't point to a scalar type. 13645 if (auto *SI = dyn_cast<StoreInst>(&I)) { 13646 if (!SI->isSimple()) 13647 continue; 13648 if (!isValidElementType(SI->getValueOperand()->getType())) 13649 continue; 13650 Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI); 13651 } 13652 13653 // Ignore getelementptr instructions that have more than one index, a 13654 // constant index, or a pointer operand that doesn't point to a scalar 13655 // type. 13656 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { 13657 if (GEP->getNumIndices() != 1) 13658 continue; 13659 Value *Idx = GEP->idx_begin()->get(); 13660 if (isa<Constant>(Idx)) 13661 continue; 13662 if (!isValidElementType(Idx->getType())) 13663 continue; 13664 if (GEP->getType()->isVectorTy()) 13665 continue; 13666 GEPs[GEP->getPointerOperand()].push_back(GEP); 13667 } 13668 } 13669 } 13670 13671 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, 13672 bool MaxVFOnly) { 13673 if (VL.size() < 2) 13674 return false; 13675 13676 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " 13677 << VL.size() << ".\n"); 13678 13679 // Check that all of the parts are instructions of the same type, 13680 // we permit an alternate opcode via InstructionsState. 13681 InstructionsState S = getSameOpcode(VL, *TLI); 13682 if (!S.getOpcode()) 13683 return false; 13684 13685 Instruction *I0 = cast<Instruction>(S.OpValue); 13686 // Make sure invalid types (including vector type) are rejected before 13687 // determining vectorization factor for scalar instructions. 13688 for (Value *V : VL) { 13689 Type *Ty = V->getType(); 13690 if (!isa<InsertElementInst>(V) && !isValidElementType(Ty)) { 13691 // NOTE: the following will give user internal llvm type name, which may 13692 // not be useful. 
13693 R.getORE()->emit([&]() { 13694 std::string TypeStr; 13695 llvm::raw_string_ostream rso(TypeStr); 13696 Ty->print(rso); 13697 return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0) 13698 << "Cannot SLP vectorize list: type " 13699 << rso.str() + " is unsupported by vectorizer"; 13700 }); 13701 return false; 13702 } 13703 } 13704 13705 unsigned Sz = R.getVectorElementSize(I0); 13706 unsigned MinVF = R.getMinVF(Sz); 13707 unsigned MaxVF = std::max<unsigned>(llvm::bit_floor(VL.size()), MinVF); 13708 MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF); 13709 if (MaxVF < 2) { 13710 R.getORE()->emit([&]() { 13711 return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0) 13712 << "Cannot SLP vectorize list: vectorization factor " 13713 << "less than 2 is not supported"; 13714 }); 13715 return false; 13716 } 13717 13718 bool Changed = false; 13719 bool CandidateFound = false; 13720 InstructionCost MinCost = SLPCostThreshold.getValue(); 13721 Type *ScalarTy = VL[0]->getType(); 13722 if (auto *IE = dyn_cast<InsertElementInst>(VL[0])) 13723 ScalarTy = IE->getOperand(1)->getType(); 13724 13725 unsigned NextInst = 0, MaxInst = VL.size(); 13726 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) { 13727 // No actual vectorization should happen, if number of parts is the same as 13728 // provided vectorization factor (i.e. the scalar type is used for vector 13729 // code during codegen). 13730 auto *VecTy = FixedVectorType::get(ScalarTy, VF); 13731 if (TTI->getNumberOfParts(VecTy) == VF) 13732 continue; 13733 for (unsigned I = NextInst; I < MaxInst; ++I) { 13734 unsigned ActualVF = std::min(MaxInst - I, VF); 13735 13736 if (!isPowerOf2_32(ActualVF)) 13737 continue; 13738 13739 if (MaxVFOnly && ActualVF < MaxVF) 13740 break; 13741 if ((VF > MinVF && ActualVF <= VF / 2) || (VF == MinVF && ActualVF < 2)) 13742 break; 13743 13744 ArrayRef<Value *> Ops = VL.slice(I, ActualVF); 13745 // Check that a previous iteration of this loop did not delete the Value. 13746 if (llvm::any_of(Ops, [&R](Value *V) { 13747 auto *I = dyn_cast<Instruction>(V); 13748 return I && R.isDeleted(I); 13749 })) 13750 continue; 13751 13752 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << ActualVF << " operations " 13753 << "\n"); 13754 13755 R.buildTree(Ops); 13756 if (R.isTreeTinyAndNotFullyVectorizable()) 13757 continue; 13758 R.reorderTopToBottom(); 13759 R.reorderBottomToTop( 13760 /*IgnoreReorder=*/!isa<InsertElementInst>(Ops.front()) && 13761 !R.doesRootHaveInTreeUses()); 13762 R.buildExternalUses(); 13763 13764 R.computeMinimumValueSizes(); 13765 InstructionCost Cost = R.getTreeCost(); 13766 CandidateFound = true; 13767 MinCost = std::min(MinCost, Cost); 13768 13769 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost 13770 << " for VF=" << ActualVF << "\n"); 13771 if (Cost < -SLPCostThreshold) { 13772 LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n"); 13773 R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList", 13774 cast<Instruction>(Ops[0])) 13775 << "SLP vectorized with cost " << ore::NV("Cost", Cost) 13776 << " and with tree size " 13777 << ore::NV("TreeSize", R.getTreeSize())); 13778 13779 R.vectorizeTree(); 13780 // Move to the next bundle. 
13781 I += VF - 1; 13782 NextInst = I + 1; 13783 Changed = true; 13784 } 13785 } 13786 } 13787 13788 if (!Changed && CandidateFound) { 13789 R.getORE()->emit([&]() { 13790 return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0) 13791 << "List vectorization was possible but not beneficial with cost " 13792 << ore::NV("Cost", MinCost) << " >= " 13793 << ore::NV("Treshold", -SLPCostThreshold); 13794 }); 13795 } else if (!Changed) { 13796 R.getORE()->emit([&]() { 13797 return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0) 13798 << "Cannot SLP vectorize list: vectorization was impossible" 13799 << " with available vectorization factors"; 13800 }); 13801 } 13802 return Changed; 13803 } 13804 13805 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) { 13806 if (!I) 13807 return false; 13808 13809 if (!isa<BinaryOperator, CmpInst>(I) || isa<VectorType>(I->getType())) 13810 return false; 13811 13812 Value *P = I->getParent(); 13813 13814 // Vectorize in current basic block only. 13815 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0)); 13816 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1)); 13817 if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P) 13818 return false; 13819 13820 // First collect all possible candidates 13821 SmallVector<std::pair<Value *, Value *>, 4> Candidates; 13822 Candidates.emplace_back(Op0, Op1); 13823 13824 auto *A = dyn_cast<BinaryOperator>(Op0); 13825 auto *B = dyn_cast<BinaryOperator>(Op1); 13826 // Try to skip B. 13827 if (A && B && B->hasOneUse()) { 13828 auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0)); 13829 auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1)); 13830 if (B0 && B0->getParent() == P) 13831 Candidates.emplace_back(A, B0); 13832 if (B1 && B1->getParent() == P) 13833 Candidates.emplace_back(A, B1); 13834 } 13835 // Try to skip A. 13836 if (B && A && A->hasOneUse()) { 13837 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); 13838 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); 13839 if (A0 && A0->getParent() == P) 13840 Candidates.emplace_back(A0, B); 13841 if (A1 && A1->getParent() == P) 13842 Candidates.emplace_back(A1, B); 13843 } 13844 13845 if (Candidates.size() == 1) 13846 return tryToVectorizeList({Op0, Op1}, R); 13847 13848 // We have multiple options. Try to pick the single best. 13849 std::optional<int> BestCandidate = R.findBestRootPair(Candidates); 13850 if (!BestCandidate) 13851 return false; 13852 return tryToVectorizeList( 13853 {Candidates[*BestCandidate].first, Candidates[*BestCandidate].second}, R); 13854 } 13855 13856 namespace { 13857 13858 /// Model horizontal reductions. 13859 /// 13860 /// A horizontal reduction is a tree of reduction instructions that has values 13861 /// that can be put into a vector as its leaves. For example: 13862 /// 13863 /// mul mul mul mul 13864 /// \ / \ / 13865 /// + + 13866 /// \ / 13867 /// + 13868 /// This tree has "mul" as its leaf values and "+" as its reduction 13869 /// instructions. A reduction can feed into a store or a binary operation 13870 /// feeding a phi. 13871 /// ... 13872 /// \ / 13873 /// + 13874 /// | 13875 /// phi += 13876 /// 13877 /// Or: 13878 /// ... 13879 /// \ / 13880 /// + 13881 /// | 13882 /// *p = 13883 /// 13884 class HorizontalReduction { 13885 using ReductionOpsType = SmallVector<Value *, 16>; 13886 using ReductionOpsListType = SmallVector<ReductionOpsType, 2>; 13887 ReductionOpsListType ReductionOps; 13888 /// List of possibly reduced values. 
13889 SmallVector<SmallVector<Value *>> ReducedVals; 13890 /// Maps reduced value to the corresponding reduction operation. 13891 DenseMap<Value *, SmallVector<Instruction *>> ReducedValsToOps; 13892 // Use map vector to make stable output. 13893 MapVector<Instruction *, Value *> ExtraArgs; 13894 WeakTrackingVH ReductionRoot; 13895 /// The type of reduction operation. 13896 RecurKind RdxKind; 13897 /// Checks if the optimization of original scalar identity operations on 13898 /// matched horizontal reductions is enabled and allowed. 13899 bool IsSupportedHorRdxIdentityOp = false; 13900 13901 static bool isCmpSelMinMax(Instruction *I) { 13902 return match(I, m_Select(m_Cmp(), m_Value(), m_Value())) && 13903 RecurrenceDescriptor::isMinMaxRecurrenceKind(getRdxKind(I)); 13904 } 13905 13906 // And/or are potentially poison-safe logical patterns like: 13907 // select x, y, false 13908 // select x, true, y 13909 static bool isBoolLogicOp(Instruction *I) { 13910 return isa<SelectInst>(I) && 13911 (match(I, m_LogicalAnd()) || match(I, m_LogicalOr())); 13912 } 13913 13914 /// Checks if instruction is associative and can be vectorized. 13915 static bool isVectorizable(RecurKind Kind, Instruction *I) { 13916 if (Kind == RecurKind::None) 13917 return false; 13918 13919 // Integer ops that map to select instructions or intrinsics are fine. 13920 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind) || 13921 isBoolLogicOp(I)) 13922 return true; 13923 13924 if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) { 13925 // FP min/max are associative except for NaN and -0.0. We do not 13926 // have to rule out -0.0 here because the intrinsic semantics do not 13927 // specify a fixed result for it. 13928 return I->getFastMathFlags().noNaNs(); 13929 } 13930 13931 if (Kind == RecurKind::FMaximum || Kind == RecurKind::FMinimum) 13932 return true; 13933 13934 return I->isAssociative(); 13935 } 13936 13937 static Value *getRdxOperand(Instruction *I, unsigned Index) { 13938 // Poison-safe 'or' takes the form: select X, true, Y 13939 // To make that work with the normal operand processing, we skip the 13940 // true value operand. 13941 // TODO: Change the code and data structures to handle this without a hack. 13942 if (getRdxKind(I) == RecurKind::Or && isa<SelectInst>(I) && Index == 1) 13943 return I->getOperand(2); 13944 return I->getOperand(Index); 13945 } 13946 13947 /// Creates reduction operation with the current opcode. 
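/// For example (illustrative), for RecurKind::SMax with \p UseSelect set the emitted IR is
/// roughly:
///   %cmp = icmp sgt i32 %lhs, %rhs
///   %res = select i1 %cmp, i32 %lhs, i32 %rhs
/// while without \p UseSelect a single call to the llvm.smax intrinsic is created instead.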
13948 static Value *createOp(IRBuilder<> &Builder, RecurKind Kind, Value *LHS, 13949 Value *RHS, const Twine &Name, bool UseSelect) { 13950 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind); 13951 bool IsConstant = isConstant(LHS) && isConstant(RHS); 13952 switch (Kind) { 13953 case RecurKind::Or: 13954 if (UseSelect && 13955 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType())) 13956 return Builder.CreateSelect(LHS, Builder.getTrue(), RHS, Name); 13957 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 13958 Name); 13959 case RecurKind::And: 13960 if (UseSelect && 13961 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType())) 13962 return Builder.CreateSelect(LHS, RHS, Builder.getFalse(), Name); 13963 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 13964 Name); 13965 case RecurKind::Add: 13966 case RecurKind::Mul: 13967 case RecurKind::Xor: 13968 case RecurKind::FAdd: 13969 case RecurKind::FMul: 13970 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 13971 Name); 13972 case RecurKind::FMax: 13973 if (IsConstant) 13974 return ConstantFP::get(LHS->getType(), 13975 maxnum(cast<ConstantFP>(LHS)->getValueAPF(), 13976 cast<ConstantFP>(RHS)->getValueAPF())); 13977 return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS); 13978 case RecurKind::FMin: 13979 if (IsConstant) 13980 return ConstantFP::get(LHS->getType(), 13981 minnum(cast<ConstantFP>(LHS)->getValueAPF(), 13982 cast<ConstantFP>(RHS)->getValueAPF())); 13983 return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS); 13984 case RecurKind::FMaximum: 13985 if (IsConstant) 13986 return ConstantFP::get(LHS->getType(), 13987 maximum(cast<ConstantFP>(LHS)->getValueAPF(), 13988 cast<ConstantFP>(RHS)->getValueAPF())); 13989 return Builder.CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS); 13990 case RecurKind::FMinimum: 13991 if (IsConstant) 13992 return ConstantFP::get(LHS->getType(), 13993 minimum(cast<ConstantFP>(LHS)->getValueAPF(), 13994 cast<ConstantFP>(RHS)->getValueAPF())); 13995 return Builder.CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS); 13996 case RecurKind::SMax: 13997 if (IsConstant || UseSelect) { 13998 Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name); 13999 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 14000 } 14001 return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS); 14002 case RecurKind::SMin: 14003 if (IsConstant || UseSelect) { 14004 Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name); 14005 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 14006 } 14007 return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS); 14008 case RecurKind::UMax: 14009 if (IsConstant || UseSelect) { 14010 Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name); 14011 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 14012 } 14013 return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS); 14014 case RecurKind::UMin: 14015 if (IsConstant || UseSelect) { 14016 Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name); 14017 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 14018 } 14019 return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS); 14020 default: 14021 llvm_unreachable("Unknown reduction operation."); 14022 } 14023 } 14024 14025 /// Creates reduction operation with the current opcode with the IR flags 14026 /// from \p ReductionOps, dropping nuw/nsw flags. 
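/// For an integer min/max reduction expressed as cmp + select pairs (illustrative summary of
/// the code below): the flags of the original compares are propagated to the newly created
/// compare and the flags of the original selects to the new select; wrap flags are dropped.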
14027 static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS, 14028 Value *RHS, const Twine &Name, 14029 const ReductionOpsListType &ReductionOps) { 14030 bool UseSelect = 14031 ReductionOps.size() == 2 || 14032 // Logical or/and. 14033 (ReductionOps.size() == 1 && any_of(ReductionOps.front(), [](Value *V) { 14034 return isa<SelectInst>(V); 14035 })); 14036 assert((!UseSelect || ReductionOps.size() != 2 || 14037 isa<SelectInst>(ReductionOps[1][0])) && 14038 "Expected cmp + select pairs for reduction"); 14039 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect); 14040 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) { 14041 if (auto *Sel = dyn_cast<SelectInst>(Op)) { 14042 propagateIRFlags(Sel->getCondition(), ReductionOps[0], nullptr, 14043 /*IncludeWrapFlags=*/false); 14044 propagateIRFlags(Op, ReductionOps[1], nullptr, 14045 /*IncludeWrapFlags=*/false); 14046 return Op; 14047 } 14048 } 14049 propagateIRFlags(Op, ReductionOps[0], nullptr, /*IncludeWrapFlags=*/false); 14050 return Op; 14051 } 14052 14053 public: 14054 static RecurKind getRdxKind(Value *V) { 14055 auto *I = dyn_cast<Instruction>(V); 14056 if (!I) 14057 return RecurKind::None; 14058 if (match(I, m_Add(m_Value(), m_Value()))) 14059 return RecurKind::Add; 14060 if (match(I, m_Mul(m_Value(), m_Value()))) 14061 return RecurKind::Mul; 14062 if (match(I, m_And(m_Value(), m_Value())) || 14063 match(I, m_LogicalAnd(m_Value(), m_Value()))) 14064 return RecurKind::And; 14065 if (match(I, m_Or(m_Value(), m_Value())) || 14066 match(I, m_LogicalOr(m_Value(), m_Value()))) 14067 return RecurKind::Or; 14068 if (match(I, m_Xor(m_Value(), m_Value()))) 14069 return RecurKind::Xor; 14070 if (match(I, m_FAdd(m_Value(), m_Value()))) 14071 return RecurKind::FAdd; 14072 if (match(I, m_FMul(m_Value(), m_Value()))) 14073 return RecurKind::FMul; 14074 14075 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value()))) 14076 return RecurKind::FMax; 14077 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value()))) 14078 return RecurKind::FMin; 14079 14080 if (match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(), m_Value()))) 14081 return RecurKind::FMaximum; 14082 if (match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(), m_Value()))) 14083 return RecurKind::FMinimum; 14084 // This matches either cmp+select or intrinsics. SLP is expected to handle 14085 // either form. 14086 // TODO: If we are canonicalizing to intrinsics, we can remove several 14087 // special-case paths that deal with selects. 14088 if (match(I, m_SMax(m_Value(), m_Value()))) 14089 return RecurKind::SMax; 14090 if (match(I, m_SMin(m_Value(), m_Value()))) 14091 return RecurKind::SMin; 14092 if (match(I, m_UMax(m_Value(), m_Value()))) 14093 return RecurKind::UMax; 14094 if (match(I, m_UMin(m_Value(), m_Value()))) 14095 return RecurKind::UMin; 14096 14097 if (auto *Select = dyn_cast<SelectInst>(I)) { 14098 // Try harder: look for min/max pattern based on instructions producing 14099 // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2). 
14100 // During the intermediate stages of SLP, it's very common to have 14101 // pattern like this (since optimizeGatherSequence is run only once 14102 // at the end): 14103 // %1 = extractelement <2 x i32> %a, i32 0 14104 // %2 = extractelement <2 x i32> %a, i32 1 14105 // %cond = icmp sgt i32 %1, %2 14106 // %3 = extractelement <2 x i32> %a, i32 0 14107 // %4 = extractelement <2 x i32> %a, i32 1 14108 // %select = select i1 %cond, i32 %3, i32 %4 14109 CmpInst::Predicate Pred; 14110 Instruction *L1; 14111 Instruction *L2; 14112 14113 Value *LHS = Select->getTrueValue(); 14114 Value *RHS = Select->getFalseValue(); 14115 Value *Cond = Select->getCondition(); 14116 14117 // TODO: Support inverse predicates. 14118 if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) { 14119 if (!isa<ExtractElementInst>(RHS) || 14120 !L2->isIdenticalTo(cast<Instruction>(RHS))) 14121 return RecurKind::None; 14122 } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) { 14123 if (!isa<ExtractElementInst>(LHS) || 14124 !L1->isIdenticalTo(cast<Instruction>(LHS))) 14125 return RecurKind::None; 14126 } else { 14127 if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS)) 14128 return RecurKind::None; 14129 if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) || 14130 !L1->isIdenticalTo(cast<Instruction>(LHS)) || 14131 !L2->isIdenticalTo(cast<Instruction>(RHS))) 14132 return RecurKind::None; 14133 } 14134 14135 switch (Pred) { 14136 default: 14137 return RecurKind::None; 14138 case CmpInst::ICMP_SGT: 14139 case CmpInst::ICMP_SGE: 14140 return RecurKind::SMax; 14141 case CmpInst::ICMP_SLT: 14142 case CmpInst::ICMP_SLE: 14143 return RecurKind::SMin; 14144 case CmpInst::ICMP_UGT: 14145 case CmpInst::ICMP_UGE: 14146 return RecurKind::UMax; 14147 case CmpInst::ICMP_ULT: 14148 case CmpInst::ICMP_ULE: 14149 return RecurKind::UMin; 14150 } 14151 } 14152 return RecurKind::None; 14153 } 14154 14155 /// Get the index of the first operand. 14156 static unsigned getFirstOperandIndex(Instruction *I) { 14157 return isCmpSelMinMax(I) ? 1 : 0; 14158 } 14159 14160 private: 14161 /// Total number of operands in the reduction operation. 14162 static unsigned getNumberOfOperands(Instruction *I) { 14163 return isCmpSelMinMax(I) ? 3 : 2; 14164 } 14165 14166 /// Checks if the instruction is in basic block \p BB. 14167 /// For a cmp+sel min/max reduction check that both ops are in \p BB. 14168 static bool hasSameParent(Instruction *I, BasicBlock *BB) { 14169 if (isCmpSelMinMax(I) || isBoolLogicOp(I)) { 14170 auto *Sel = cast<SelectInst>(I); 14171 auto *Cmp = dyn_cast<Instruction>(Sel->getCondition()); 14172 return Sel->getParent() == BB && Cmp && Cmp->getParent() == BB; 14173 } 14174 return I->getParent() == BB; 14175 } 14176 14177 /// Expected number of uses for reduction operations/reduced values. 14178 static bool hasRequiredNumberOfUses(bool IsCmpSelMinMax, Instruction *I) { 14179 if (IsCmpSelMinMax) { 14180 // SelectInst must be used twice while the condition op must have single 14181 // use only. 14182 if (auto *Sel = dyn_cast<SelectInst>(I)) 14183 return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse(); 14184 return I->hasNUses(2); 14185 } 14186 14187 // Arithmetic reduction operation must be used once only. 14188 return I->hasOneUse(); 14189 } 14190 14191 /// Initializes the list of reduction operations. 
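/// For a cmp + select min/max reduction (illustrative) two lists are kept: ReductionOps[0]
/// collects the compare conditions and ReductionOps[1] the selects; every other reduction
/// kind uses a single list.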
14192 void initReductionOps(Instruction *I) { 14193 if (isCmpSelMinMax(I)) 14194 ReductionOps.assign(2, ReductionOpsType()); 14195 else 14196 ReductionOps.assign(1, ReductionOpsType()); 14197 } 14198 14199 /// Add all reduction operations for the reduction instruction \p I. 14200 void addReductionOps(Instruction *I) { 14201 if (isCmpSelMinMax(I)) { 14202 ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition()); 14203 ReductionOps[1].emplace_back(I); 14204 } else { 14205 ReductionOps[0].emplace_back(I); 14206 } 14207 } 14208 14209 static bool isGoodForReduction(ArrayRef<Value *> Data) { 14210 int Sz = Data.size(); 14211 auto *I = dyn_cast<Instruction>(Data.front()); 14212 return Sz > 1 || isConstant(Data.front()) || 14213 (I && !isa<LoadInst>(I) && isValidForAlternation(I->getOpcode())); 14214 } 14215 14216 public: 14217 HorizontalReduction() = default; 14218 14219 /// Try to find a reduction tree. 14220 bool matchAssociativeReduction(BoUpSLP &R, Instruction *Root, 14221 ScalarEvolution &SE, const DataLayout &DL, 14222 const TargetLibraryInfo &TLI) { 14223 RdxKind = HorizontalReduction::getRdxKind(Root); 14224 if (!isVectorizable(RdxKind, Root)) 14225 return false; 14226 14227 // Analyze "regular" integer/FP types for reductions - no target-specific 14228 // types or pointers. 14229 Type *Ty = Root->getType(); 14230 if (!isValidElementType(Ty) || Ty->isPointerTy()) 14231 return false; 14232 14233 // Though the ultimate reduction may have multiple uses, its condition must 14234 // have only single use. 14235 if (auto *Sel = dyn_cast<SelectInst>(Root)) 14236 if (!Sel->getCondition()->hasOneUse()) 14237 return false; 14238 14239 ReductionRoot = Root; 14240 14241 // Iterate through all the operands of the possible reduction tree and 14242 // gather all the reduced values, sorting them by their value id. 14243 BasicBlock *BB = Root->getParent(); 14244 bool IsCmpSelMinMax = isCmpSelMinMax(Root); 14245 SmallVector<Instruction *> Worklist(1, Root); 14246 // Checks if the operands of the \p TreeN instruction are also reduction 14247 // operations or should be treated as reduced values or an extra argument, 14248 // which is not part of the reduction. 14249 auto CheckOperands = [&](Instruction *TreeN, 14250 SmallVectorImpl<Value *> &ExtraArgs, 14251 SmallVectorImpl<Value *> &PossibleReducedVals, 14252 SmallVectorImpl<Instruction *> &ReductionOps) { 14253 for (int I = getFirstOperandIndex(TreeN), 14254 End = getNumberOfOperands(TreeN); 14255 I < End; ++I) { 14256 Value *EdgeVal = getRdxOperand(TreeN, I); 14257 ReducedValsToOps[EdgeVal].push_back(TreeN); 14258 auto *EdgeInst = dyn_cast<Instruction>(EdgeVal); 14259 // Edge has wrong parent - mark as an extra argument. 14260 if (EdgeInst && !isVectorLikeInstWithConstOps(EdgeInst) && 14261 !hasSameParent(EdgeInst, BB)) { 14262 ExtraArgs.push_back(EdgeVal); 14263 continue; 14264 } 14265 // If the edge is not an instruction, or it is different from the main 14266 // reduction opcode or has too many uses - possible reduced value. 14267 // Also, do not try to reduce const values, if the operation is not 14268 // foldable. 
14269 if (!EdgeInst || getRdxKind(EdgeInst) != RdxKind ||
14270 IsCmpSelMinMax != isCmpSelMinMax(EdgeInst) ||
14271 !hasRequiredNumberOfUses(IsCmpSelMinMax, EdgeInst) ||
14272 !isVectorizable(RdxKind, EdgeInst) ||
14273 (R.isAnalyzedReductionRoot(EdgeInst) &&
14274 all_of(EdgeInst->operands(), Constant::classof))) {
14275 PossibleReducedVals.push_back(EdgeVal);
14276 continue;
14277 }
14278 ReductionOps.push_back(EdgeInst);
14279 }
14280 };
14281 // Try to regroup the reduced values so that it becomes more profitable to
14282 // reduce them. Values are grouped by their value ids, instructions - by
14283 // instruction op id and/or alternate op id; extra analysis is also done for
14284 // loads (grouping them by the distance between pointers) and cmp
14285 // instructions (grouping them by the predicate).
14286 MapVector<size_t, MapVector<size_t, MapVector<Value *, unsigned>>>
14287 PossibleReducedVals;
14288 initReductionOps(Root);
14289 DenseMap<Value *, SmallVector<LoadInst *>> LoadsMap;
14290 SmallSet<size_t, 2> LoadKeyUsed;
14291 SmallPtrSet<Value *, 4> DoNotReverseVals;
14292
14293 auto GenerateLoadsSubkey = [&](size_t Key, LoadInst *LI) {
14294 Value *Ptr = getUnderlyingObject(LI->getPointerOperand());
14295 if (LoadKeyUsed.contains(Key)) {
14296 auto LIt = LoadsMap.find(Ptr);
14297 if (LIt != LoadsMap.end()) {
14298 for (LoadInst *RLI : LIt->second) {
14299 if (getPointersDiff(RLI->getType(), RLI->getPointerOperand(),
14300 LI->getType(), LI->getPointerOperand(), DL, SE,
14301 /*StrictCheck=*/true))
14302 return hash_value(RLI->getPointerOperand());
14303 }
14304 for (LoadInst *RLI : LIt->second) {
14305 if (arePointersCompatible(RLI->getPointerOperand(),
14306 LI->getPointerOperand(), TLI)) {
14307 hash_code SubKey = hash_value(RLI->getPointerOperand());
14308 DoNotReverseVals.insert(RLI);
14309 return SubKey;
14310 }
14311 }
14312 if (LIt->second.size() > 2) {
14313 hash_code SubKey =
14314 hash_value(LIt->second.back()->getPointerOperand());
14315 DoNotReverseVals.insert(LIt->second.back());
14316 return SubKey;
14317 }
14318 }
14319 }
14320 LoadKeyUsed.insert(Key);
14321 LoadsMap.try_emplace(Ptr).first->second.push_back(LI);
14322 return hash_value(LI->getPointerOperand());
14323 };
14324
14325 while (!Worklist.empty()) {
14326 Instruction *TreeN = Worklist.pop_back_val();
14327 SmallVector<Value *> Args;
14328 SmallVector<Value *> PossibleRedVals;
14329 SmallVector<Instruction *> PossibleReductionOps;
14330 CheckOperands(TreeN, Args, PossibleRedVals, PossibleReductionOps);
14331 // If there are too many extra args - mark the instruction itself as a
14332 // reduction value, not a reduction operation.
14333 if (Args.size() < 2) {
14334 addReductionOps(TreeN);
14335 // Add extra args.
14336 if (!Args.empty()) {
14337 assert(Args.size() == 1 && "Expected only single argument.");
14338 ExtraArgs[TreeN] = Args.front();
14339 }
14340 // Add reduction values. The values are sorted for better vectorization
14341 // results.
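// For example (hypothetical): two loads from the same underlying object whose pointers
// differ by a constant number of elements get the same subkey and end up in one group, so
// they can later form a single vectorizable sequence, while unrelated values fall into
// separate buckets.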
14342 for (Value *V : PossibleRedVals) { 14343 size_t Key, Idx; 14344 std::tie(Key, Idx) = generateKeySubkey(V, &TLI, GenerateLoadsSubkey, 14345 /*AllowAlternate=*/false); 14346 ++PossibleReducedVals[Key][Idx] 14347 .insert(std::make_pair(V, 0)) 14348 .first->second; 14349 } 14350 Worklist.append(PossibleReductionOps.rbegin(), 14351 PossibleReductionOps.rend()); 14352 } else { 14353 size_t Key, Idx; 14354 std::tie(Key, Idx) = generateKeySubkey(TreeN, &TLI, GenerateLoadsSubkey, 14355 /*AllowAlternate=*/false); 14356 ++PossibleReducedVals[Key][Idx] 14357 .insert(std::make_pair(TreeN, 0)) 14358 .first->second; 14359 } 14360 } 14361 auto PossibleReducedValsVect = PossibleReducedVals.takeVector(); 14362 // Sort values by the total number of values kinds to start the reduction 14363 // from the longest possible reduced values sequences. 14364 for (auto &PossibleReducedVals : PossibleReducedValsVect) { 14365 auto PossibleRedVals = PossibleReducedVals.second.takeVector(); 14366 SmallVector<SmallVector<Value *>> PossibleRedValsVect; 14367 for (auto It = PossibleRedVals.begin(), E = PossibleRedVals.end(); 14368 It != E; ++It) { 14369 PossibleRedValsVect.emplace_back(); 14370 auto RedValsVect = It->second.takeVector(); 14371 stable_sort(RedValsVect, llvm::less_second()); 14372 for (const std::pair<Value *, unsigned> &Data : RedValsVect) 14373 PossibleRedValsVect.back().append(Data.second, Data.first); 14374 } 14375 stable_sort(PossibleRedValsVect, [](const auto &P1, const auto &P2) { 14376 return P1.size() > P2.size(); 14377 }); 14378 int NewIdx = -1; 14379 for (ArrayRef<Value *> Data : PossibleRedValsVect) { 14380 if (isGoodForReduction(Data) || 14381 (isa<LoadInst>(Data.front()) && NewIdx >= 0 && 14382 isa<LoadInst>(ReducedVals[NewIdx].front()) && 14383 getUnderlyingObject( 14384 cast<LoadInst>(Data.front())->getPointerOperand()) == 14385 getUnderlyingObject(cast<LoadInst>(ReducedVals[NewIdx].front()) 14386 ->getPointerOperand()))) { 14387 if (NewIdx < 0) { 14388 NewIdx = ReducedVals.size(); 14389 ReducedVals.emplace_back(); 14390 } 14391 if (DoNotReverseVals.contains(Data.front())) 14392 ReducedVals[NewIdx].append(Data.begin(), Data.end()); 14393 else 14394 ReducedVals[NewIdx].append(Data.rbegin(), Data.rend()); 14395 } else { 14396 ReducedVals.emplace_back().append(Data.rbegin(), Data.rend()); 14397 } 14398 } 14399 } 14400 // Sort the reduced values by number of same/alternate opcode and/or pointer 14401 // operand. 14402 stable_sort(ReducedVals, [](ArrayRef<Value *> P1, ArrayRef<Value *> P2) { 14403 return P1.size() > P2.size(); 14404 }); 14405 return true; 14406 } 14407 14408 /// Attempt to vectorize the tree found by matchAssociativeReduction. 14409 Value *tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI, 14410 const TargetLibraryInfo &TLI) { 14411 constexpr int ReductionLimit = 4; 14412 constexpr unsigned RegMaxNumber = 4; 14413 constexpr unsigned RedValsMaxNumber = 128; 14414 // If there are a sufficient number of reduction values, reduce 14415 // to a nearby power-of-2. We can safely generate oversized 14416 // vectors and rely on the backend to split them to legal sizes. 
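// For example (illustrative): with 25 profitable reduced values the first attempts below
// use ReduxWidth = 16 (the largest power-of-2 not above 25), falling back to 8 and then 4
// if the wider attempts are not profitable or not feasible.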
14417 unsigned NumReducedVals = 14418 std::accumulate(ReducedVals.begin(), ReducedVals.end(), 0, 14419 [](unsigned Num, ArrayRef<Value *> Vals) -> unsigned { 14420 if (!isGoodForReduction(Vals)) 14421 return Num; 14422 return Num + Vals.size(); 14423 }); 14424 if (NumReducedVals < ReductionLimit && 14425 (!AllowHorRdxIdenityOptimization || 14426 all_of(ReducedVals, [](ArrayRef<Value *> RedV) { 14427 return RedV.size() < 2 || !allConstant(RedV) || !isSplat(RedV); 14428 }))) { 14429 for (ReductionOpsType &RdxOps : ReductionOps) 14430 for (Value *RdxOp : RdxOps) 14431 V.analyzedReductionRoot(cast<Instruction>(RdxOp)); 14432 return nullptr; 14433 } 14434 14435 IRBuilder<> Builder(cast<Instruction>(ReductionRoot)); 14436 14437 // Track the reduced values in case if they are replaced by extractelement 14438 // because of the vectorization. 14439 DenseMap<Value *, WeakTrackingVH> TrackedVals( 14440 ReducedVals.size() * ReducedVals.front().size() + ExtraArgs.size()); 14441 BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues; 14442 SmallVector<std::pair<Value *, Value *>> ReplacedExternals; 14443 ExternallyUsedValues.reserve(ExtraArgs.size() + 1); 14444 // The same extra argument may be used several times, so log each attempt 14445 // to use it. 14446 for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) { 14447 assert(Pair.first && "DebugLoc must be set."); 14448 ExternallyUsedValues[Pair.second].push_back(Pair.first); 14449 TrackedVals.try_emplace(Pair.second, Pair.second); 14450 } 14451 14452 // The compare instruction of a min/max is the insertion point for new 14453 // instructions and may be replaced with a new compare instruction. 14454 auto &&GetCmpForMinMaxReduction = [](Instruction *RdxRootInst) { 14455 assert(isa<SelectInst>(RdxRootInst) && 14456 "Expected min/max reduction to have select root instruction"); 14457 Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition(); 14458 assert(isa<Instruction>(ScalarCond) && 14459 "Expected min/max reduction to have compare condition"); 14460 return cast<Instruction>(ScalarCond); 14461 }; 14462 14463 // Return new VectorizedTree, based on previous value. 14464 auto GetNewVectorizedTree = [&](Value *VectorizedTree, Value *Res) { 14465 if (VectorizedTree) { 14466 // Update the final value in the reduction. 14467 Builder.SetCurrentDebugLocation( 14468 cast<Instruction>(ReductionOps.front().front())->getDebugLoc()); 14469 if ((isa<PoisonValue>(VectorizedTree) && !isa<PoisonValue>(Res)) || 14470 (isGuaranteedNotToBePoison(Res) && 14471 !isGuaranteedNotToBePoison(VectorizedTree))) { 14472 auto It = ReducedValsToOps.find(Res); 14473 if (It != ReducedValsToOps.end() && 14474 any_of(It->getSecond(), 14475 [](Instruction *I) { return isBoolLogicOp(I); })) 14476 std::swap(VectorizedTree, Res); 14477 } 14478 14479 return createOp(Builder, RdxKind, VectorizedTree, Res, "op.rdx", 14480 ReductionOps); 14481 } 14482 // Initialize the final value in the reduction. 14483 return Res; 14484 }; 14485 bool AnyBoolLogicOp = 14486 any_of(ReductionOps.back(), [](Value *V) { 14487 return isBoolLogicOp(cast<Instruction>(V)); 14488 }); 14489 // The reduction root is used as the insertion point for new instructions, 14490 // so set it as externally used to prevent it from being deleted. 
14491 ExternallyUsedValues[ReductionRoot];
14492 SmallDenseSet<Value *> IgnoreList(ReductionOps.size() *
14493 ReductionOps.front().size());
14494 for (ReductionOpsType &RdxOps : ReductionOps)
14495 for (Value *RdxOp : RdxOps) {
14496 if (!RdxOp)
14497 continue;
14498 IgnoreList.insert(RdxOp);
14499 }
14500 // Intersect the fast-math-flags from all reduction operations.
14501 FastMathFlags RdxFMF;
14502 RdxFMF.set();
14503 for (Value *U : IgnoreList)
14504 if (auto *FPMO = dyn_cast<FPMathOperator>(U))
14505 RdxFMF &= FPMO->getFastMathFlags();
14506 bool IsCmpSelMinMax = isCmpSelMinMax(cast<Instruction>(ReductionRoot));
14507
14508 // Need to track the reduced values; they may be changed during vectorization
14509 // of subvectors.
14510 for (ArrayRef<Value *> Candidates : ReducedVals)
14511 for (Value *V : Candidates)
14512 TrackedVals.try_emplace(V, V);
14513
14514 DenseMap<Value *, unsigned> VectorizedVals(ReducedVals.size());
14515 // List of the values that were reduced in other trees as part of gather
14516 // nodes and thus requiring extract if fully vectorized in other trees.
14517 SmallPtrSet<Value *, 4> RequiredExtract;
14518 Value *VectorizedTree = nullptr;
14519 bool CheckForReusedReductionOps = false;
14520 // Try to vectorize elements based on their type.
14521 for (unsigned I = 0, E = ReducedVals.size(); I < E; ++I) {
14522 ArrayRef<Value *> OrigReducedVals = ReducedVals[I];
14523 InstructionsState S = getSameOpcode(OrigReducedVals, TLI);
14524 SmallVector<Value *> Candidates;
14525 Candidates.reserve(2 * OrigReducedVals.size());
14526 DenseMap<Value *, Value *> TrackedToOrig(2 * OrigReducedVals.size());
14527 for (unsigned Cnt = 0, Sz = OrigReducedVals.size(); Cnt < Sz; ++Cnt) {
14528 Value *RdxVal = TrackedVals.find(OrigReducedVals[Cnt])->second;
14529 // Check if the reduction value was not overridden by the extractelement
14530 // instruction because of the vectorization and exclude it, if it is not
14531 // compatible with other values.
14532 // Also check if the instruction was folded to constant/other value.
14533 auto *Inst = dyn_cast<Instruction>(RdxVal);
14534 if ((Inst && isVectorLikeInstWithConstOps(Inst) &&
14535 (!S.getOpcode() || !S.isOpcodeOrAlt(Inst))) ||
14536 (S.getOpcode() && !Inst))
14537 continue;
14538 Candidates.push_back(RdxVal);
14539 TrackedToOrig.try_emplace(RdxVal, OrigReducedVals[Cnt]);
14540 }
14541 bool ShuffledExtracts = false;
14542 // Try to handle shuffled extractelements.
14543 if (S.getOpcode() == Instruction::ExtractElement && !S.isAltShuffle() &&
14544 I + 1 < E) {
14545 InstructionsState NextS = getSameOpcode(ReducedVals[I + 1], TLI);
14546 if (NextS.getOpcode() == Instruction::ExtractElement &&
14547 !NextS.isAltShuffle()) {
14548 SmallVector<Value *> CommonCandidates(Candidates);
14549 for (Value *RV : ReducedVals[I + 1]) {
14550 Value *RdxVal = TrackedVals.find(RV)->second;
14551 // Check if the reduction value was not overridden by the
14552 // extractelement instruction because of the vectorization and
14553 // exclude it, if it is not compatible with other values.
14554 if (auto *Inst = dyn_cast<Instruction>(RdxVal))
14555 if (!NextS.getOpcode() || !NextS.isOpcodeOrAlt(Inst))
14556 continue;
14557 CommonCandidates.push_back(RdxVal);
14558 TrackedToOrig.try_emplace(RdxVal, RV);
14559 }
14560 SmallVector<int> Mask;
14561 if (isFixedVectorShuffle(CommonCandidates, Mask)) {
14562 ++I;
14563 Candidates.swap(CommonCandidates);
14564 ShuffledExtracts = true;
14565 }
14566 }
14567 }
14568
14569 // Emit code for constant values.
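// For example (illustrative): an all-constant candidate group <1, 2, 3> of an add reduction
// is combined below as (1 + 2) + 3, which constant-folds to 6, so no vector code is emitted
// for that group.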
14570 if (AllowHorRdxIdenityOptimization && Candidates.size() > 1 && 14571 allConstant(Candidates)) { 14572 Value *Res = Candidates.front(); 14573 ++VectorizedVals.try_emplace(Candidates.front(), 0).first->getSecond(); 14574 for (Value *VC : ArrayRef(Candidates).drop_front()) { 14575 Res = createOp(Builder, RdxKind, Res, VC, "const.rdx", ReductionOps); 14576 ++VectorizedVals.try_emplace(VC, 0).first->getSecond(); 14577 if (auto *ResI = dyn_cast<Instruction>(Res)) 14578 V.analyzedReductionRoot(ResI); 14579 } 14580 VectorizedTree = GetNewVectorizedTree(VectorizedTree, Res); 14581 continue; 14582 } 14583 14584 unsigned NumReducedVals = Candidates.size(); 14585 if (NumReducedVals < ReductionLimit && 14586 (NumReducedVals < 2 || !AllowHorRdxIdenityOptimization || 14587 !isSplat(Candidates))) 14588 continue; 14589 14590 // Check if we support repeated scalar values processing (optimization of 14591 // original scalar identity operations on matched horizontal reductions). 14592 IsSupportedHorRdxIdentityOp = 14593 AllowHorRdxIdenityOptimization && RdxKind != RecurKind::Mul && 14594 RdxKind != RecurKind::FMul && RdxKind != RecurKind::FMulAdd; 14595 // Gather same values. 14596 MapVector<Value *, unsigned> SameValuesCounter; 14597 if (IsSupportedHorRdxIdentityOp) 14598 for (Value *V : Candidates) 14599 ++SameValuesCounter.insert(std::make_pair(V, 0)).first->second; 14600 // Used to check if the reduced values used same number of times. In this 14601 // case the compiler may produce better code. E.g. if reduced values are 14602 // aabbccdd (8 x values), then the first node of the tree will have a node 14603 // for 4 x abcd + shuffle <4 x abcd>, <0, 0, 1, 1, 2, 2, 3, 3>. 14604 // Plus, the final reduction will be performed on <8 x aabbccdd>. 14605 // Instead compiler may build <4 x abcd> tree immediately, + reduction (4 14606 // x abcd) * 2. 14607 // Currently it only handles add/fadd/xor. and/or/min/max do not require 14608 // this analysis, other operations may require an extra estimation of 14609 // the profitability. 14610 bool SameScaleFactor = false; 14611 bool OptReusedScalars = IsSupportedHorRdxIdentityOp && 14612 SameValuesCounter.size() != Candidates.size(); 14613 if (OptReusedScalars) { 14614 SameScaleFactor = 14615 (RdxKind == RecurKind::Add || RdxKind == RecurKind::FAdd || 14616 RdxKind == RecurKind::Xor) && 14617 all_of(drop_begin(SameValuesCounter), 14618 [&SameValuesCounter](const std::pair<Value *, unsigned> &P) { 14619 return P.second == SameValuesCounter.front().second; 14620 }); 14621 Candidates.resize(SameValuesCounter.size()); 14622 transform(SameValuesCounter, Candidates.begin(), 14623 [](const auto &P) { return P.first; }); 14624 NumReducedVals = Candidates.size(); 14625 // Have a reduction of the same element. 
14626 if (NumReducedVals == 1) {
14627 Value *OrigV = TrackedToOrig.find(Candidates.front())->second;
14628 unsigned Cnt = SameValuesCounter.lookup(OrigV);
14629 Value *RedVal =
14630 emitScaleForReusedOps(Candidates.front(), Builder, Cnt);
14631 VectorizedTree = GetNewVectorizedTree(VectorizedTree, RedVal);
14632 VectorizedVals.try_emplace(OrigV, Cnt);
14633 continue;
14634 }
14635 }
14636
14637 unsigned MaxVecRegSize = V.getMaxVecRegSize();
14638 unsigned EltSize = V.getVectorElementSize(Candidates[0]);
14639 unsigned MaxElts =
14640 RegMaxNumber * llvm::bit_floor(MaxVecRegSize / EltSize);
14641
14642 unsigned ReduxWidth = std::min<unsigned>(
14643 llvm::bit_floor(NumReducedVals), std::max(RedValsMaxNumber, MaxElts));
14644 unsigned Start = 0;
14645 unsigned Pos = Start;
14646 // Restarts vectorization attempt with lower vector factor.
14647 unsigned PrevReduxWidth = ReduxWidth;
14648 bool CheckForReusedReductionOpsLocal = false;
14649 auto &&AdjustReducedVals = [&Pos, &Start, &ReduxWidth, NumReducedVals,
14650 &CheckForReusedReductionOpsLocal,
14651 &PrevReduxWidth, &V,
14652 &IgnoreList](bool IgnoreVL = false) {
14653 bool IsAnyRedOpGathered = !IgnoreVL && V.isAnyGathered(IgnoreList);
14654 if (!CheckForReusedReductionOpsLocal && PrevReduxWidth == ReduxWidth) {
14655 // Check if any of the reduction ops are gathered. If so, it is worth
14656 // trying again with a smaller number of reduction ops.
14657 CheckForReusedReductionOpsLocal |= IsAnyRedOpGathered;
14658 }
14659 ++Pos;
14660 if (Pos < NumReducedVals - ReduxWidth + 1)
14661 return IsAnyRedOpGathered;
14662 Pos = Start;
14663 ReduxWidth /= 2;
14664 return IsAnyRedOpGathered;
14665 };
14666 bool AnyVectorized = false;
14667 while (Pos < NumReducedVals - ReduxWidth + 1 &&
14668 ReduxWidth >= ReductionLimit) {
14669 // Dependency in the tree of the reduction ops - drop this attempt, try
14670 // later.
14671 if (CheckForReusedReductionOpsLocal && PrevReduxWidth != ReduxWidth &&
14672 Start == 0) {
14673 CheckForReusedReductionOps = true;
14674 break;
14675 }
14676 PrevReduxWidth = ReduxWidth;
14677 ArrayRef<Value *> VL(std::next(Candidates.begin(), Pos), ReduxWidth);
14678 // Already analyzed - skip.
14679 if (V.areAnalyzedReductionVals(VL)) {
14680 (void)AdjustReducedVals(/*IgnoreVL=*/true);
14681 continue;
14682 }
14683 // Early exit if any of the reduction values were deleted during
14684 // previous vectorization attempts.
14685 if (any_of(VL, [&V](Value *RedVal) {
14686 auto *RedValI = dyn_cast<Instruction>(RedVal);
14687 if (!RedValI)
14688 return false;
14689 return V.isDeleted(RedValI);
14690 }))
14691 break;
14692 V.buildTree(VL, IgnoreList);
14693 if (V.isTreeTinyAndNotFullyVectorizable(/*ForReduction=*/true)) {
14694 if (!AdjustReducedVals())
14695 V.analyzedReductionVals(VL);
14696 continue;
14697 }
14698 if (V.isLoadCombineReductionCandidate(RdxKind)) {
14699 if (!AdjustReducedVals())
14700 V.analyzedReductionVals(VL);
14701 continue;
14702 }
14703 V.reorderTopToBottom();
14704 // No need to reorder the root node at all.
14705 V.reorderBottomToTop(/*IgnoreReorder=*/true);
14706 // Keep other extracted reduction values, if they are used in the
14707 // vectorization trees.
14708 BoUpSLP::ExtraValueToDebugLocsMap LocalExternallyUsedValues( 14709 ExternallyUsedValues); 14710 for (unsigned Cnt = 0, Sz = ReducedVals.size(); Cnt < Sz; ++Cnt) { 14711 if (Cnt == I || (ShuffledExtracts && Cnt == I - 1)) 14712 continue; 14713 for (Value *V : ReducedVals[Cnt]) 14714 if (isa<Instruction>(V)) 14715 LocalExternallyUsedValues[TrackedVals[V]]; 14716 } 14717 if (!IsSupportedHorRdxIdentityOp) { 14718 // Number of uses of the candidates in the vector of values. 14719 assert(SameValuesCounter.empty() && 14720 "Reused values counter map is not empty"); 14721 for (unsigned Cnt = 0; Cnt < NumReducedVals; ++Cnt) { 14722 if (Cnt >= Pos && Cnt < Pos + ReduxWidth) 14723 continue; 14724 Value *V = Candidates[Cnt]; 14725 Value *OrigV = TrackedToOrig.find(V)->second; 14726 ++SameValuesCounter[OrigV]; 14727 } 14728 } 14729 SmallPtrSet<Value *, 4> VLScalars(VL.begin(), VL.end()); 14730 // Gather externally used values. 14731 SmallPtrSet<Value *, 4> Visited; 14732 for (unsigned Cnt = 0; Cnt < NumReducedVals; ++Cnt) { 14733 if (Cnt >= Pos && Cnt < Pos + ReduxWidth) 14734 continue; 14735 Value *RdxVal = Candidates[Cnt]; 14736 if (!Visited.insert(RdxVal).second) 14737 continue; 14738 // Check if the scalar was vectorized as part of the vectorization 14739 // tree but not the top node. 14740 if (!VLScalars.contains(RdxVal) && V.isVectorized(RdxVal)) { 14741 LocalExternallyUsedValues[RdxVal]; 14742 continue; 14743 } 14744 Value *OrigV = TrackedToOrig.find(RdxVal)->second; 14745 unsigned NumOps = 14746 VectorizedVals.lookup(RdxVal) + SameValuesCounter[OrigV]; 14747 if (NumOps != ReducedValsToOps.find(OrigV)->second.size()) 14748 LocalExternallyUsedValues[RdxVal]; 14749 } 14750 // Do not need the list of reused scalars in regular mode anymore. 14751 if (!IsSupportedHorRdxIdentityOp) 14752 SameValuesCounter.clear(); 14753 for (Value *RdxVal : VL) 14754 if (RequiredExtract.contains(RdxVal)) 14755 LocalExternallyUsedValues[RdxVal]; 14756 // Update LocalExternallyUsedValues for the scalar, replaced by 14757 // extractelement instructions. 14758 for (const std::pair<Value *, Value *> &Pair : ReplacedExternals) { 14759 auto *It = ExternallyUsedValues.find(Pair.first); 14760 if (It == ExternallyUsedValues.end()) 14761 continue; 14762 LocalExternallyUsedValues[Pair.second].append(It->second); 14763 } 14764 V.buildExternalUses(LocalExternallyUsedValues); 14765 14766 V.computeMinimumValueSizes(); 14767 14768 // Estimate cost. 14769 InstructionCost TreeCost = V.getTreeCost(VL); 14770 InstructionCost ReductionCost = 14771 getReductionCost(TTI, VL, IsCmpSelMinMax, ReduxWidth, RdxFMF); 14772 InstructionCost Cost = TreeCost + ReductionCost; 14773 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost 14774 << " for reduction\n"); 14775 if (!Cost.isValid()) 14776 return nullptr; 14777 if (Cost >= -SLPCostThreshold) { 14778 V.getORE()->emit([&]() { 14779 return OptimizationRemarkMissed( 14780 SV_NAME, "HorSLPNotBeneficial", 14781 ReducedValsToOps.find(VL[0])->second.front()) 14782 << "Vectorizing horizontal reduction is possible " 14783 << "but not beneficial with cost " << ore::NV("Cost", Cost) 14784 << " and threshold " 14785 << ore::NV("Threshold", -SLPCostThreshold); 14786 }); 14787 if (!AdjustReducedVals()) 14788 V.analyzedReductionVals(VL); 14789 continue; 14790 } 14791 14792 LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" 14793 << Cost << ". 
(HorRdx)\n"); 14794 V.getORE()->emit([&]() { 14795 return OptimizationRemark( 14796 SV_NAME, "VectorizedHorizontalReduction", 14797 ReducedValsToOps.find(VL[0])->second.front()) 14798 << "Vectorized horizontal reduction with cost " 14799 << ore::NV("Cost", Cost) << " and with tree size " 14800 << ore::NV("TreeSize", V.getTreeSize()); 14801 }); 14802 14803 Builder.setFastMathFlags(RdxFMF); 14804 14805 // Emit a reduction. If the root is a select (min/max idiom), the insert 14806 // point is the compare condition of that select. 14807 Instruction *RdxRootInst = cast<Instruction>(ReductionRoot); 14808 Instruction *InsertPt = RdxRootInst; 14809 if (IsCmpSelMinMax) 14810 InsertPt = GetCmpForMinMaxReduction(RdxRootInst); 14811 14812 // Vectorize a tree. 14813 Value *VectorizedRoot = V.vectorizeTree(LocalExternallyUsedValues, 14814 ReplacedExternals, InsertPt); 14815 14816 Builder.SetInsertPoint(InsertPt); 14817 14818 // To prevent poison from leaking across what used to be sequential, 14819 // safe, scalar boolean logic operations, the reduction operand must be 14820 // frozen. 14821 if ((isBoolLogicOp(RdxRootInst) || 14822 (AnyBoolLogicOp && VL.size() != TrackedVals.size())) && 14823 !isGuaranteedNotToBePoison(VectorizedRoot)) 14824 VectorizedRoot = Builder.CreateFreeze(VectorizedRoot); 14825 14826 // Emit code to correctly handle reused reduced values, if required. 14827 if (OptReusedScalars && !SameScaleFactor) { 14828 VectorizedRoot = 14829 emitReusedOps(VectorizedRoot, Builder, V.getRootNodeScalars(), 14830 SameValuesCounter, TrackedToOrig); 14831 } 14832 14833 Value *ReducedSubTree = 14834 emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI); 14835 if (ReducedSubTree->getType() != VL.front()->getType()) { 14836 ReducedSubTree = Builder.CreateIntCast( 14837 ReducedSubTree, VL.front()->getType(), any_of(VL, [&](Value *R) { 14838 KnownBits Known = computeKnownBits( 14839 R, cast<Instruction>(ReductionOps.front().front()) 14840 ->getModule() 14841 ->getDataLayout()); 14842 return !Known.isNonNegative(); 14843 })); 14844 } 14845 14846 // Improved analysis for add/fadd/xor reductions with same scale factor 14847 // for all operands of reductions. We can emit scalar ops for them 14848 // instead. 14849 if (OptReusedScalars && SameScaleFactor) 14850 ReducedSubTree = emitScaleForReusedOps( 14851 ReducedSubTree, Builder, SameValuesCounter.front().second); 14852 14853 VectorizedTree = GetNewVectorizedTree(VectorizedTree, ReducedSubTree); 14854 // Count vectorized reduced values to exclude them from final reduction. 
14855 for (Value *RdxVal : VL) { 14856 Value *OrigV = TrackedToOrig.find(RdxVal)->second; 14857 if (IsSupportedHorRdxIdentityOp) { 14858 VectorizedVals.try_emplace(OrigV, SameValuesCounter[RdxVal]); 14859 continue; 14860 } 14861 ++VectorizedVals.try_emplace(OrigV, 0).first->getSecond(); 14862 if (!V.isVectorized(RdxVal)) 14863 RequiredExtract.insert(RdxVal); 14864 } 14865 Pos += ReduxWidth; 14866 Start = Pos; 14867 ReduxWidth = llvm::bit_floor(NumReducedVals - Pos); 14868 AnyVectorized = true; 14869 } 14870 if (OptReusedScalars && !AnyVectorized) { 14871 for (const std::pair<Value *, unsigned> &P : SameValuesCounter) { 14872 Value *RedVal = emitScaleForReusedOps(P.first, Builder, P.second); 14873 VectorizedTree = GetNewVectorizedTree(VectorizedTree, RedVal); 14874 Value *OrigV = TrackedToOrig.find(P.first)->second; 14875 VectorizedVals.try_emplace(OrigV, P.second); 14876 } 14877 continue; 14878 } 14879 } 14880 if (VectorizedTree) { 14881 // Reorder operands of bool logical op in the natural order to avoid 14882 // possible problem with poison propagation. If not possible to reorder 14883 // (both operands are originally RHS), emit an extra freeze instruction 14884 // for the LHS operand. 14885 // I.e., if we have original code like this: 14886 // RedOp1 = select i1 ?, i1 LHS, i1 false 14887 // RedOp2 = select i1 RHS, i1 ?, i1 false 14888 14889 // Then, we swap LHS/RHS to create a new op that matches the poison 14890 // semantics of the original code. 14891 14892 // If we have original code like this and both values could be poison: 14893 // RedOp1 = select i1 ?, i1 LHS, i1 false 14894 // RedOp2 = select i1 ?, i1 RHS, i1 false 14895 14896 // Then, we must freeze LHS in the new op. 14897 auto FixBoolLogicalOps = [&, VectorizedTree](Value *&LHS, Value *&RHS, 14898 Instruction *RedOp1, 14899 Instruction *RedOp2, 14900 bool InitStep) { 14901 if (!AnyBoolLogicOp) 14902 return; 14903 if (isBoolLogicOp(RedOp1) && 14904 ((!InitStep && LHS == VectorizedTree) || 14905 getRdxOperand(RedOp1, 0) == LHS || isGuaranteedNotToBePoison(LHS))) 14906 return; 14907 if (isBoolLogicOp(RedOp2) && ((!InitStep && RHS == VectorizedTree) || 14908 getRdxOperand(RedOp2, 0) == RHS || 14909 isGuaranteedNotToBePoison(RHS))) { 14910 std::swap(LHS, RHS); 14911 return; 14912 } 14913 if (LHS != VectorizedTree) 14914 LHS = Builder.CreateFreeze(LHS); 14915 }; 14916 // Finish the reduction. 14917 // Need to add extra arguments and not vectorized possible reduction 14918 // values. 14919 // Try to avoid dependencies between the scalar remainders after 14920 // reductions. 14921 auto FinalGen = 14922 [&](ArrayRef<std::pair<Instruction *, Value *>> InstVals, 14923 bool InitStep) { 14924 unsigned Sz = InstVals.size(); 14925 SmallVector<std::pair<Instruction *, Value *>> ExtraReds(Sz / 2 + 14926 Sz % 2); 14927 for (unsigned I = 0, E = (Sz / 2) * 2; I < E; I += 2) { 14928 Instruction *RedOp = InstVals[I + 1].first; 14929 Builder.SetCurrentDebugLocation(RedOp->getDebugLoc()); 14930 Value *RdxVal1 = InstVals[I].second; 14931 Value *StableRdxVal1 = RdxVal1; 14932 auto It1 = TrackedVals.find(RdxVal1); 14933 if (It1 != TrackedVals.end()) 14934 StableRdxVal1 = It1->second; 14935 Value *RdxVal2 = InstVals[I + 1].second; 14936 Value *StableRdxVal2 = RdxVal2; 14937 auto It2 = TrackedVals.find(RdxVal2); 14938 if (It2 != TrackedVals.end()) 14939 StableRdxVal2 = It2->second; 14940 // To prevent poison from leaking across what used to be 14941 // sequential, safe, scalar boolean logic operations, the 14942 // reduction operand must be frozen. 
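            // E.g. (illustrative IR, not from this function): in the scalar
            // form
            //   %r1 = select i1 %a, i1 %b, i1 false
            //   %r2 = select i1 %r1, i1 %c, i1 false
            // a poison %c can never reach the result while %r1 is false, but
            // the rebuilt op.rdx chain may combine the operands in a different
            // order, so the operand that is no longer shielded has to be moved
            // into the guarding (first) position or frozen first.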
14943 FixBoolLogicalOps(StableRdxVal1, StableRdxVal2, InstVals[I].first, 14944 RedOp, InitStep); 14945 Value *ExtraRed = createOp(Builder, RdxKind, StableRdxVal1, 14946 StableRdxVal2, "op.rdx", ReductionOps); 14947 ExtraReds[I / 2] = std::make_pair(InstVals[I].first, ExtraRed); 14948 } 14949 if (Sz % 2 == 1) 14950 ExtraReds[Sz / 2] = InstVals.back(); 14951 return ExtraReds; 14952 }; 14953 SmallVector<std::pair<Instruction *, Value *>> ExtraReductions; 14954 ExtraReductions.emplace_back(cast<Instruction>(ReductionRoot), 14955 VectorizedTree); 14956 SmallPtrSet<Value *, 8> Visited; 14957 for (ArrayRef<Value *> Candidates : ReducedVals) { 14958 for (Value *RdxVal : Candidates) { 14959 if (!Visited.insert(RdxVal).second) 14960 continue; 14961 unsigned NumOps = VectorizedVals.lookup(RdxVal); 14962 for (Instruction *RedOp : 14963 ArrayRef(ReducedValsToOps.find(RdxVal)->second) 14964 .drop_back(NumOps)) 14965 ExtraReductions.emplace_back(RedOp, RdxVal); 14966 } 14967 } 14968 for (auto &Pair : ExternallyUsedValues) { 14969 // Add each externally used value to the final reduction. 14970 for (auto *I : Pair.second) 14971 ExtraReductions.emplace_back(I, Pair.first); 14972 } 14973 // Iterate through all not-vectorized reduction values/extra arguments. 14974 bool InitStep = true; 14975 while (ExtraReductions.size() > 1) { 14976 VectorizedTree = ExtraReductions.front().second; 14977 SmallVector<std::pair<Instruction *, Value *>> NewReds = 14978 FinalGen(ExtraReductions, InitStep); 14979 ExtraReductions.swap(NewReds); 14980 InitStep = false; 14981 } 14982 VectorizedTree = ExtraReductions.front().second; 14983 14984 ReductionRoot->replaceAllUsesWith(VectorizedTree); 14985 14986 // The original scalar reduction is expected to have no remaining 14987 // uses outside the reduction tree itself. Assert that we got this 14988 // correct, replace internal uses with undef, and mark for eventual 14989 // deletion. 14990 #ifndef NDEBUG 14991 SmallSet<Value *, 4> IgnoreSet; 14992 for (ArrayRef<Value *> RdxOps : ReductionOps) 14993 IgnoreSet.insert(RdxOps.begin(), RdxOps.end()); 14994 #endif 14995 for (ArrayRef<Value *> RdxOps : ReductionOps) { 14996 for (Value *Ignore : RdxOps) { 14997 if (!Ignore) 14998 continue; 14999 #ifndef NDEBUG 15000 for (auto *U : Ignore->users()) { 15001 assert(IgnoreSet.count(U) && 15002 "All users must be either in the reduction ops list."); 15003 } 15004 #endif 15005 if (!Ignore->use_empty()) { 15006 Value *Undef = UndefValue::get(Ignore->getType()); 15007 Ignore->replaceAllUsesWith(Undef); 15008 } 15009 V.eraseInstruction(cast<Instruction>(Ignore)); 15010 } 15011 } 15012 } else if (!CheckForReusedReductionOps) { 15013 for (ReductionOpsType &RdxOps : ReductionOps) 15014 for (Value *RdxOp : RdxOps) 15015 V.analyzedReductionRoot(cast<Instruction>(RdxOp)); 15016 } 15017 return VectorizedTree; 15018 } 15019 15020 private: 15021 /// Calculate the cost of a reduction. 15022 InstructionCost getReductionCost(TargetTransformInfo *TTI, 15023 ArrayRef<Value *> ReducedVals, 15024 bool IsCmpSelMinMax, unsigned ReduxWidth, 15025 FastMathFlags FMF) { 15026 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 15027 Type *ScalarTy = ReducedVals.front()->getType(); 15028 FixedVectorType *VectorTy = FixedVectorType::get(ScalarTy, ReduxWidth); 15029 InstructionCost VectorCost = 0, ScalarCost; 15030 // If all of the reduced values are constant, the vector cost is 0, since 15031 // the reduction value can be calculated at the compile time. 
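    // For example (illustrative): an add reduction of the constants
    // <2, 3, 5, 7> folds to 17 at compile time, so VectorCost stays 0 and only
    // the scalar side below contributes to the returned cost difference.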
15032 bool AllConsts = allConstant(ReducedVals); 15033 auto EvaluateScalarCost = [&](function_ref<InstructionCost()> GenCostFn) { 15034 InstructionCost Cost = 0; 15035 // Scalar cost is repeated for N-1 elements. 15036 int Cnt = ReducedVals.size(); 15037 for (Value *RdxVal : ReducedVals) { 15038 if (Cnt == 1) 15039 break; 15040 --Cnt; 15041 if (RdxVal->hasNUsesOrMore(IsCmpSelMinMax ? 3 : 2)) { 15042 Cost += GenCostFn(); 15043 continue; 15044 } 15045 InstructionCost ScalarCost = 0; 15046 for (User *U : RdxVal->users()) { 15047 auto *RdxOp = cast<Instruction>(U); 15048 if (hasRequiredNumberOfUses(IsCmpSelMinMax, RdxOp)) { 15049 ScalarCost += TTI->getInstructionCost(RdxOp, CostKind); 15050 continue; 15051 } 15052 ScalarCost = InstructionCost::getInvalid(); 15053 break; 15054 } 15055 if (ScalarCost.isValid()) 15056 Cost += ScalarCost; 15057 else 15058 Cost += GenCostFn(); 15059 } 15060 return Cost; 15061 }; 15062 switch (RdxKind) { 15063 case RecurKind::Add: 15064 case RecurKind::Mul: 15065 case RecurKind::Or: 15066 case RecurKind::And: 15067 case RecurKind::Xor: 15068 case RecurKind::FAdd: 15069 case RecurKind::FMul: { 15070 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind); 15071 if (!AllConsts) 15072 VectorCost = 15073 TTI->getArithmeticReductionCost(RdxOpcode, VectorTy, FMF, CostKind); 15074 ScalarCost = EvaluateScalarCost([&]() { 15075 return TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy, CostKind); 15076 }); 15077 break; 15078 } 15079 case RecurKind::FMax: 15080 case RecurKind::FMin: 15081 case RecurKind::FMaximum: 15082 case RecurKind::FMinimum: 15083 case RecurKind::SMax: 15084 case RecurKind::SMin: 15085 case RecurKind::UMax: 15086 case RecurKind::UMin: { 15087 Intrinsic::ID Id = getMinMaxReductionIntrinsicOp(RdxKind); 15088 if (!AllConsts) 15089 VectorCost = TTI->getMinMaxReductionCost(Id, VectorTy, FMF, CostKind); 15090 ScalarCost = EvaluateScalarCost([&]() { 15091 IntrinsicCostAttributes ICA(Id, ScalarTy, {ScalarTy, ScalarTy}, FMF); 15092 return TTI->getIntrinsicInstrCost(ICA, CostKind); 15093 }); 15094 break; 15095 } 15096 default: 15097 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 15098 } 15099 15100 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost 15101 << " for reduction of " << shortBundleName(ReducedVals) 15102 << " (It is a splitting reduction)\n"); 15103 return VectorCost - ScalarCost; 15104 } 15105 15106 /// Emit a horizontal reduction of the vectorized value. 15107 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder, 15108 unsigned ReduxWidth, const TargetTransformInfo *TTI) { 15109 assert(VectorizedValue && "Need to have a vectorized tree node"); 15110 assert(isPowerOf2_32(ReduxWidth) && 15111 "We only handle power-of-two reductions for now"); 15112 assert(RdxKind != RecurKind::FMulAdd && 15113 "A call to the llvm.fmuladd intrinsic is not handled yet"); 15114 15115 ++NumVectorInstructions; 15116 return createSimpleTargetReduction(Builder, VectorizedValue, RdxKind); 15117 } 15118 15119 /// Emits optimized code for unique scalar value reused \p Cnt times. 
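  /// For illustration (hypothetical operands): in an add reduction where the
  /// same scalar %x occurs \p Cnt == 4 times, this emits '%res = mul %x, 4'
  /// instead of a chain of adds; for a xor reduction an even \p Cnt folds to 0
  /// and an odd \p Cnt folds back to %x itself.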
15120 Value *emitScaleForReusedOps(Value *VectorizedValue, IRBuilderBase &Builder, 15121 unsigned Cnt) { 15122 assert(IsSupportedHorRdxIdentityOp && 15123 "The optimization of matched scalar identity horizontal reductions " 15124 "must be supported."); 15125 switch (RdxKind) { 15126 case RecurKind::Add: { 15127 // res = mul vv, n 15128 Value *Scale = ConstantInt::get(VectorizedValue->getType(), Cnt); 15129 LLVM_DEBUG(dbgs() << "SLP: Add (to-mul) " << Cnt << "of " 15130 << VectorizedValue << ". (HorRdx)\n"); 15131 return Builder.CreateMul(VectorizedValue, Scale); 15132 } 15133 case RecurKind::Xor: { 15134 // res = n % 2 ? 0 : vv 15135 LLVM_DEBUG(dbgs() << "SLP: Xor " << Cnt << "of " << VectorizedValue 15136 << ". (HorRdx)\n"); 15137 if (Cnt % 2 == 0) 15138 return Constant::getNullValue(VectorizedValue->getType()); 15139 return VectorizedValue; 15140 } 15141 case RecurKind::FAdd: { 15142 // res = fmul v, n 15143 Value *Scale = ConstantFP::get(VectorizedValue->getType(), Cnt); 15144 LLVM_DEBUG(dbgs() << "SLP: FAdd (to-fmul) " << Cnt << "of " 15145 << VectorizedValue << ". (HorRdx)\n"); 15146 return Builder.CreateFMul(VectorizedValue, Scale); 15147 } 15148 case RecurKind::And: 15149 case RecurKind::Or: 15150 case RecurKind::SMax: 15151 case RecurKind::SMin: 15152 case RecurKind::UMax: 15153 case RecurKind::UMin: 15154 case RecurKind::FMax: 15155 case RecurKind::FMin: 15156 case RecurKind::FMaximum: 15157 case RecurKind::FMinimum: 15158 // res = vv 15159 return VectorizedValue; 15160 case RecurKind::Mul: 15161 case RecurKind::FMul: 15162 case RecurKind::FMulAdd: 15163 case RecurKind::IAnyOf: 15164 case RecurKind::FAnyOf: 15165 case RecurKind::None: 15166 llvm_unreachable("Unexpected reduction kind for repeated scalar."); 15167 } 15168 return nullptr; 15169 } 15170 15171 /// Emits actual operation for the scalar identity values, found during 15172 /// horizontal reduction analysis. 15173 Value *emitReusedOps(Value *VectorizedValue, IRBuilderBase &Builder, 15174 ArrayRef<Value *> VL, 15175 const MapVector<Value *, unsigned> &SameValuesCounter, 15176 const DenseMap<Value *, Value *> &TrackedToOrig) { 15177 assert(IsSupportedHorRdxIdentityOp && 15178 "The optimization of matched scalar identity horizontal reductions " 15179 "must be supported."); 15180 switch (RdxKind) { 15181 case RecurKind::Add: { 15182 // root = mul prev_root, <1, 1, n, 1> 15183 SmallVector<Constant *> Vals; 15184 for (Value *V : VL) { 15185 unsigned Cnt = SameValuesCounter.lookup(TrackedToOrig.find(V)->second); 15186 Vals.push_back(ConstantInt::get(V->getType(), Cnt, /*IsSigned=*/false)); 15187 } 15188 auto *Scale = ConstantVector::get(Vals); 15189 LLVM_DEBUG(dbgs() << "SLP: Add (to-mul) " << Scale << "of " 15190 << VectorizedValue << ". (HorRdx)\n"); 15191 return Builder.CreateMul(VectorizedValue, Scale); 15192 } 15193 case RecurKind::And: 15194 case RecurKind::Or: 15195 // No need for multiple or/and(s). 15196 LLVM_DEBUG(dbgs() << "SLP: And/or of same " << VectorizedValue 15197 << ". (HorRdx)\n"); 15198 return VectorizedValue; 15199 case RecurKind::SMax: 15200 case RecurKind::SMin: 15201 case RecurKind::UMax: 15202 case RecurKind::UMin: 15203 case RecurKind::FMax: 15204 case RecurKind::FMin: 15205 case RecurKind::FMaximum: 15206 case RecurKind::FMinimum: 15207 // No need for multiple min/max(s) of the same value. 15208 LLVM_DEBUG(dbgs() << "SLP: Max/min of same " << VectorizedValue 15209 << ". 
(HorRdx)\n"); 15210 return VectorizedValue; 15211 case RecurKind::Xor: { 15212 // Replace values with even number of repeats with 0, since 15213 // x xor x = 0. 15214 // root = shuffle prev_root, zeroinitalizer, <0, 1, 2, vf, 4, vf, 5, 6, 15215 // 7>, if elements 4th and 6th elements have even number of repeats. 15216 SmallVector<int> Mask( 15217 cast<FixedVectorType>(VectorizedValue->getType())->getNumElements(), 15218 PoisonMaskElem); 15219 std::iota(Mask.begin(), Mask.end(), 0); 15220 bool NeedShuffle = false; 15221 for (unsigned I = 0, VF = VL.size(); I < VF; ++I) { 15222 Value *V = VL[I]; 15223 unsigned Cnt = SameValuesCounter.lookup(TrackedToOrig.find(V)->second); 15224 if (Cnt % 2 == 0) { 15225 Mask[I] = VF; 15226 NeedShuffle = true; 15227 } 15228 } 15229 LLVM_DEBUG(dbgs() << "SLP: Xor <"; for (int I 15230 : Mask) dbgs() 15231 << I << " "; 15232 dbgs() << "> of " << VectorizedValue << ". (HorRdx)\n"); 15233 if (NeedShuffle) 15234 VectorizedValue = Builder.CreateShuffleVector( 15235 VectorizedValue, 15236 ConstantVector::getNullValue(VectorizedValue->getType()), Mask); 15237 return VectorizedValue; 15238 } 15239 case RecurKind::FAdd: { 15240 // root = fmul prev_root, <1.0, 1.0, n.0, 1.0> 15241 SmallVector<Constant *> Vals; 15242 for (Value *V : VL) { 15243 unsigned Cnt = SameValuesCounter.lookup(TrackedToOrig.find(V)->second); 15244 Vals.push_back(ConstantFP::get(V->getType(), Cnt)); 15245 } 15246 auto *Scale = ConstantVector::get(Vals); 15247 return Builder.CreateFMul(VectorizedValue, Scale); 15248 } 15249 case RecurKind::Mul: 15250 case RecurKind::FMul: 15251 case RecurKind::FMulAdd: 15252 case RecurKind::IAnyOf: 15253 case RecurKind::FAnyOf: 15254 case RecurKind::None: 15255 llvm_unreachable("Unexpected reduction kind for reused scalars."); 15256 } 15257 return nullptr; 15258 } 15259 }; 15260 } // end anonymous namespace 15261 15262 static std::optional<unsigned> getAggregateSize(Instruction *InsertInst) { 15263 if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) 15264 return cast<FixedVectorType>(IE->getType())->getNumElements(); 15265 15266 unsigned AggregateSize = 1; 15267 auto *IV = cast<InsertValueInst>(InsertInst); 15268 Type *CurrentType = IV->getType(); 15269 do { 15270 if (auto *ST = dyn_cast<StructType>(CurrentType)) { 15271 for (auto *Elt : ST->elements()) 15272 if (Elt != ST->getElementType(0)) // check homogeneity 15273 return std::nullopt; 15274 AggregateSize *= ST->getNumElements(); 15275 CurrentType = ST->getElementType(0); 15276 } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) { 15277 AggregateSize *= AT->getNumElements(); 15278 CurrentType = AT->getElementType(); 15279 } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) { 15280 AggregateSize *= VT->getNumElements(); 15281 return AggregateSize; 15282 } else if (CurrentType->isSingleValueType()) { 15283 return AggregateSize; 15284 } else { 15285 return std::nullopt; 15286 } 15287 } while (true); 15288 } 15289 15290 static void findBuildAggregate_rec(Instruction *LastInsertInst, 15291 TargetTransformInfo *TTI, 15292 SmallVectorImpl<Value *> &BuildVectorOpds, 15293 SmallVectorImpl<Value *> &InsertElts, 15294 unsigned OperandOffset) { 15295 do { 15296 Value *InsertedOperand = LastInsertInst->getOperand(1); 15297 std::optional<unsigned> OperandIndex = 15298 getInsertIndex(LastInsertInst, OperandOffset); 15299 if (!OperandIndex) 15300 return; 15301 if (isa<InsertElementInst, InsertValueInst>(InsertedOperand)) { 15302 findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI, 15303 
BuildVectorOpds, InsertElts, *OperandIndex); 15304 15305 } else { 15306 BuildVectorOpds[*OperandIndex] = InsertedOperand; 15307 InsertElts[*OperandIndex] = LastInsertInst; 15308 } 15309 LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0)); 15310 } while (LastInsertInst != nullptr && 15311 isa<InsertValueInst, InsertElementInst>(LastInsertInst) && 15312 LastInsertInst->hasOneUse()); 15313 } 15314 15315 /// Recognize construction of vectors like 15316 /// %ra = insertelement <4 x float> poison, float %s0, i32 0 15317 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 15318 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 15319 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 15320 /// starting from the last insertelement or insertvalue instruction. 15321 /// 15322 /// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>}, 15323 /// {{float, float}, {float, float}}, [2 x {float, float}] and so on. 15324 /// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples. 15325 /// 15326 /// Assume LastInsertInst is of InsertElementInst or InsertValueInst type. 15327 /// 15328 /// \return true if it matches. 15329 static bool findBuildAggregate(Instruction *LastInsertInst, 15330 TargetTransformInfo *TTI, 15331 SmallVectorImpl<Value *> &BuildVectorOpds, 15332 SmallVectorImpl<Value *> &InsertElts) { 15333 15334 assert((isa<InsertElementInst>(LastInsertInst) || 15335 isa<InsertValueInst>(LastInsertInst)) && 15336 "Expected insertelement or insertvalue instruction!"); 15337 15338 assert((BuildVectorOpds.empty() && InsertElts.empty()) && 15339 "Expected empty result vectors!"); 15340 15341 std::optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst); 15342 if (!AggregateSize) 15343 return false; 15344 BuildVectorOpds.resize(*AggregateSize); 15345 InsertElts.resize(*AggregateSize); 15346 15347 findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts, 0); 15348 llvm::erase(BuildVectorOpds, nullptr); 15349 llvm::erase(InsertElts, nullptr); 15350 if (BuildVectorOpds.size() >= 2) 15351 return true; 15352 15353 return false; 15354 } 15355 15356 /// Try and get a reduction instruction from a phi node. 15357 /// 15358 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions 15359 /// if they come from either \p ParentBB or a containing loop latch. 15360 /// 15361 /// \returns A candidate reduction value if possible, or \code nullptr \endcode 15362 /// if not possible. 15363 static Instruction *getReductionInstr(const DominatorTree *DT, PHINode *P, 15364 BasicBlock *ParentBB, LoopInfo *LI) { 15365 // There are situations where the reduction value is not dominated by the 15366 // reduction phi. Vectorizing such cases has been reported to cause 15367 // miscompiles. See PR25787. 15368 auto DominatedReduxValue = [&](Value *R) { 15369 return isa<Instruction>(R) && 15370 DT->dominates(P->getParent(), cast<Instruction>(R)->getParent()); 15371 }; 15372 15373 Instruction *Rdx = nullptr; 15374 15375 // Return the incoming value if it comes from the same BB as the phi node. 15376 if (P->getIncomingBlock(0) == ParentBB) { 15377 Rdx = dyn_cast<Instruction>(P->getIncomingValue(0)); 15378 } else if (P->getIncomingBlock(1) == ParentBB) { 15379 Rdx = dyn_cast<Instruction>(P->getIncomingValue(1)); 15380 } 15381 15382 if (Rdx && DominatedReduxValue(Rdx)) 15383 return Rdx; 15384 15385 // Otherwise, check whether we have a loop latch to look at. 
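  // This covers, for example, a rotated (do-while style) loop where the
  // reduction phi lives in the header but the reduction operation itself is
  // defined in a separate latch block.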
15386 Loop *BBL = LI->getLoopFor(ParentBB); 15387 if (!BBL) 15388 return nullptr; 15389 BasicBlock *BBLatch = BBL->getLoopLatch(); 15390 if (!BBLatch) 15391 return nullptr; 15392 15393 // There is a loop latch, return the incoming value if it comes from 15394 // that. This reduction pattern occasionally turns up. 15395 if (P->getIncomingBlock(0) == BBLatch) { 15396 Rdx = dyn_cast<Instruction>(P->getIncomingValue(0)); 15397 } else if (P->getIncomingBlock(1) == BBLatch) { 15398 Rdx = dyn_cast<Instruction>(P->getIncomingValue(1)); 15399 } 15400 15401 if (Rdx && DominatedReduxValue(Rdx)) 15402 return Rdx; 15403 15404 return nullptr; 15405 } 15406 15407 static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) { 15408 if (match(I, m_BinOp(m_Value(V0), m_Value(V1)))) 15409 return true; 15410 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1)))) 15411 return true; 15412 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1)))) 15413 return true; 15414 if (match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(V0), m_Value(V1)))) 15415 return true; 15416 if (match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(V0), m_Value(V1)))) 15417 return true; 15418 if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1)))) 15419 return true; 15420 if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1)))) 15421 return true; 15422 if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1)))) 15423 return true; 15424 if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1)))) 15425 return true; 15426 return false; 15427 } 15428 15429 /// We could have an initial reduction that is not an add. 15430 /// r *= v1 + v2 + v3 + v4 15431 /// In such a case start looking for a tree rooted in the first '+'. 15432 /// \Returns the new root if found, which may be nullptr if not an instruction. 15433 static Instruction *tryGetSecondaryReductionRoot(PHINode *Phi, 15434 Instruction *Root) { 15435 assert((isa<BinaryOperator>(Root) || isa<SelectInst>(Root) || 15436 isa<IntrinsicInst>(Root)) && 15437 "Expected binop, select, or intrinsic for reduction matching"); 15438 Value *LHS = 15439 Root->getOperand(HorizontalReduction::getFirstOperandIndex(Root)); 15440 Value *RHS = 15441 Root->getOperand(HorizontalReduction::getFirstOperandIndex(Root) + 1); 15442 if (LHS == Phi) 15443 return dyn_cast<Instruction>(RHS); 15444 if (RHS == Phi) 15445 return dyn_cast<Instruction>(LHS); 15446 return nullptr; 15447 } 15448 15449 /// \p Returns the first operand of \p I that does not match \p Phi. If 15450 /// operand is not an instruction it returns nullptr. 15451 static Instruction *getNonPhiOperand(Instruction *I, PHINode *Phi) { 15452 Value *Op0 = nullptr; 15453 Value *Op1 = nullptr; 15454 if (!matchRdxBop(I, Op0, Op1)) 15455 return nullptr; 15456 return dyn_cast<Instruction>(Op0 == Phi ? Op1 : Op0); 15457 } 15458 15459 /// \Returns true if \p I is a candidate instruction for reduction vectorization. 
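/// For example (illustrative), an integer or floating-point binary operator,
/// a min/max intrinsic, or a min/max written as a cmp+select pair all pass
/// this cheap initial filter; the precise reduction kind is validated later
/// when the reduction is matched.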
static bool isReductionCandidate(Instruction *I) {
  bool IsSelect = match(I, m_Select(m_Value(), m_Value(), m_Value()));
  Value *B0 = nullptr, *B1 = nullptr;
  bool IsBinop = matchRdxBop(I, B0, B1);
  return IsBinop || IsSelect;
}

bool SLPVectorizerPass::vectorizeHorReduction(
    PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
    TargetTransformInfo *TTI,
    SmallVectorImpl<WeakTrackingVH> &PostponedInsts) {
  if (!ShouldVectorizeHor)
    return false;
  bool TryOperandsAsNewSeeds = P && isa<BinaryOperator>(Root);

  if (Root->getParent() != BB || isa<PHINode>(Root))
    return false;

  // If we can find a secondary reduction root, use that instead.
  auto SelectRoot = [&]() {
    if (TryOperandsAsNewSeeds && isReductionCandidate(Root) &&
        HorizontalReduction::getRdxKind(Root) != RecurKind::None)
      if (Instruction *NewRoot = tryGetSecondaryReductionRoot(P, Root))
        return NewRoot;
    return Root;
  };

  // Start analysis from the Root instruction. If a horizontal reduction is
  // found, try to vectorize it. If it is not a horizontal reduction or
  // vectorization is not possible or not effective, and the currently analyzed
  // instruction is a binary operation, try to vectorize the operands, using
  // pre-order DFS traversal order. If the operands were not vectorized, repeat
  // the same procedure considering each operand as a possible root of the
  // horizontal reduction.
  // Interrupt the process if the Root instruction itself was vectorized or all
  // sub-trees no deeper than RecursionMaxDepth were analyzed/vectorized.
  // If a horizontal reduction was not matched or vectorized, we collect
  // instructions for possible later vectorization attempts.
  std::queue<std::pair<Instruction *, unsigned>> Stack;
  Stack.emplace(SelectRoot(), 0);
  SmallPtrSet<Value *, 8> VisitedInstrs;
  bool Res = false;
  auto &&TryToReduce = [this, TTI, &R](Instruction *Inst) -> Value * {
    if (R.isAnalyzedReductionRoot(Inst))
      return nullptr;
    if (!isReductionCandidate(Inst))
      return nullptr;
    HorizontalReduction HorRdx;
    if (!HorRdx.matchAssociativeReduction(R, Inst, *SE, *DL, *TLI))
      return nullptr;
    return HorRdx.tryToReduce(R, TTI, *TLI);
  };
  auto TryAppendToPostponedInsts = [&](Instruction *FutureSeed) {
    if (TryOperandsAsNewSeeds && FutureSeed == Root) {
      FutureSeed = getNonPhiOperand(Root, P);
      if (!FutureSeed)
        return false;
    }
    // Do not collect CmpInst or InsertElementInst/InsertValueInst as their
    // analysis is done separately.
    if (!isa<CmpInst, InsertElementInst, InsertValueInst>(FutureSeed))
      PostponedInsts.push_back(FutureSeed);
    return true;
  };

  while (!Stack.empty()) {
    Instruction *Inst;
    unsigned Level;
    std::tie(Inst, Level) = Stack.front();
    Stack.pop();
    // Do not try to analyze an instruction that has already been vectorized.
    // This may happen when we vectorize instruction operands on a previous
    // iteration while the stack was populated before that happened.
    if (R.isDeleted(Inst))
      continue;
    if (Value *VectorizedV = TryToReduce(Inst)) {
      Res = true;
      if (auto *I = dyn_cast<Instruction>(VectorizedV)) {
        // Try to find another reduction.
15538 Stack.emplace(I, Level); 15539 continue; 15540 } 15541 } else { 15542 // We could not vectorize `Inst` so try to use it as a future seed. 15543 if (!TryAppendToPostponedInsts(Inst)) { 15544 assert(Stack.empty() && "Expected empty stack"); 15545 break; 15546 } 15547 } 15548 15549 // Try to vectorize operands. 15550 // Continue analysis for the instruction from the same basic block only to 15551 // save compile time. 15552 if (++Level < RecursionMaxDepth) 15553 for (auto *Op : Inst->operand_values()) 15554 if (VisitedInstrs.insert(Op).second) 15555 if (auto *I = dyn_cast<Instruction>(Op)) 15556 // Do not try to vectorize CmpInst operands, this is done 15557 // separately. 15558 if (!isa<PHINode, CmpInst, InsertElementInst, InsertValueInst>(I) && 15559 !R.isDeleted(I) && I->getParent() == BB) 15560 Stack.emplace(I, Level); 15561 } 15562 return Res; 15563 } 15564 15565 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Instruction *Root, 15566 BasicBlock *BB, BoUpSLP &R, 15567 TargetTransformInfo *TTI) { 15568 SmallVector<WeakTrackingVH> PostponedInsts; 15569 bool Res = vectorizeHorReduction(P, Root, BB, R, TTI, PostponedInsts); 15570 Res |= tryToVectorize(PostponedInsts, R); 15571 return Res; 15572 } 15573 15574 bool SLPVectorizerPass::tryToVectorize(ArrayRef<WeakTrackingVH> Insts, 15575 BoUpSLP &R) { 15576 bool Res = false; 15577 for (Value *V : Insts) 15578 if (auto *Inst = dyn_cast<Instruction>(V); Inst && !R.isDeleted(Inst)) 15579 Res |= tryToVectorize(Inst, R); 15580 return Res; 15581 } 15582 15583 bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI, 15584 BasicBlock *BB, BoUpSLP &R) { 15585 if (!R.canMapToVector(IVI->getType())) 15586 return false; 15587 15588 SmallVector<Value *, 16> BuildVectorOpds; 15589 SmallVector<Value *, 16> BuildVectorInsts; 15590 if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts)) 15591 return false; 15592 15593 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n"); 15594 // Aggregate value is unlikely to be processed in vector register. 15595 return tryToVectorizeList(BuildVectorOpds, R); 15596 } 15597 15598 bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI, 15599 BasicBlock *BB, BoUpSLP &R) { 15600 SmallVector<Value *, 16> BuildVectorInsts; 15601 SmallVector<Value *, 16> BuildVectorOpds; 15602 SmallVector<int> Mask; 15603 if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) || 15604 (llvm::all_of( 15605 BuildVectorOpds, 15606 [](Value *V) { return isa<ExtractElementInst, UndefValue>(V); }) && 15607 isFixedVectorShuffle(BuildVectorOpds, Mask))) 15608 return false; 15609 15610 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n"); 15611 return tryToVectorizeList(BuildVectorInsts, R); 15612 } 15613 15614 template <typename T> 15615 static bool tryToVectorizeSequence( 15616 SmallVectorImpl<T *> &Incoming, function_ref<bool(T *, T *)> Comparator, 15617 function_ref<bool(T *, T *)> AreCompatible, 15618 function_ref<bool(ArrayRef<T *>, bool)> TryToVectorizeHelper, 15619 bool MaxVFOnly, BoUpSLP &R) { 15620 bool Changed = false; 15621 // Sort by type, parent, operands. 15622 stable_sort(Incoming, Comparator); 15623 15624 // Try to vectorize elements base on their type. 15625 SmallVector<T *> Candidates; 15626 for (auto *IncIt = Incoming.begin(), *E = Incoming.end(); IncIt != E;) { 15627 // Look for the next elements with the same type, parent and operand 15628 // kinds. 
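    // E.g. (illustrative, for the phi case): after sorting, a run of i32 phis
    // whose incoming values use matching opcodes forms one bundle handed to
    // TryToVectorizeHelper, while a following run of float phis starts a new
    // bundle.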
    auto *SameTypeIt = IncIt;
    while (SameTypeIt != E && AreCompatible(*SameTypeIt, *IncIt))
      ++SameTypeIt;

    // Try to vectorize them.
    unsigned NumElts = (SameTypeIt - IncIt);
    LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at nodes ("
                      << NumElts << ")\n");
    // The vectorization is a 3-stage attempt:
    // 1. Try to vectorize instructions with the same/alternate opcodes, using
    // at most the maximal register size, first.
    // 2. Try to vectorize the remaining instructions with the same type, if
    // possible. This may give better results than vectorizing only
    // instructions with the same/alternate opcodes.
    // 3. Finally, try to vectorize all instructions with the same/alternate
    // opcodes only; this may result in some extra final vectorization.
    if (NumElts > 1 &&
        TryToVectorizeHelper(ArrayRef(IncIt, NumElts), MaxVFOnly)) {
      // Success, start over because instructions might have been changed.
      Changed = true;
    } else {
      /// \Returns the minimum number of elements that we will attempt to
      /// vectorize.
      auto GetMinNumElements = [&R](Value *V) {
        unsigned EltSize = R.getVectorElementSize(V);
        return std::max(2U, R.getMaxVecRegSize() / EltSize);
      };
      if (NumElts < GetMinNumElements(*IncIt) &&
          (Candidates.empty() ||
           Candidates.front()->getType() == (*IncIt)->getType())) {
        Candidates.append(IncIt, std::next(IncIt, NumElts));
      }
    }
    // Final attempt to vectorize instructions with the same types.
    if (Candidates.size() > 1 &&
        (SameTypeIt == E || (*SameTypeIt)->getType() != (*IncIt)->getType())) {
      if (TryToVectorizeHelper(Candidates, /*MaxVFOnly=*/false)) {
        // Success, start over because instructions might have been changed.
        Changed = true;
      } else if (MaxVFOnly) {
        // Try to vectorize using small vectors.
        for (auto *It = Candidates.begin(), *End = Candidates.end();
             It != End;) {
          auto *SameTypeIt = It;
          while (SameTypeIt != End && AreCompatible(*SameTypeIt, *It))
            ++SameTypeIt;
          unsigned NumElts = (SameTypeIt - It);
          if (NumElts > 1 && TryToVectorizeHelper(ArrayRef(It, NumElts),
                                                  /*MaxVFOnly=*/false))
            Changed = true;
          It = SameTypeIt;
        }
      }
      Candidates.clear();
    }

    // Start over at the next instruction of a different type (or the end).
    IncIt = SameTypeIt;
  }
  return Changed;
}

/// Compare two cmp instructions. If IsCompatibility is true, the function
/// returns true if the two cmps have the same or swapped predicates and the
/// most compatible corresponding operands. If IsCompatibility is false, the
/// function implements a strict weak ordering relation between two cmp
/// instructions, returning true if the first instruction is "less" than the
/// second, i.e. its predicate is less than the predicate of the second or its
/// operands' IDs are less than those of the second cmp instruction.
15699 template <bool IsCompatibility> 15700 static bool compareCmp(Value *V, Value *V2, TargetLibraryInfo &TLI, 15701 const DominatorTree &DT) { 15702 assert(isValidElementType(V->getType()) && 15703 isValidElementType(V2->getType()) && 15704 "Expected valid element types only."); 15705 if (V == V2) 15706 return IsCompatibility; 15707 auto *CI1 = cast<CmpInst>(V); 15708 auto *CI2 = cast<CmpInst>(V2); 15709 if (CI1->getOperand(0)->getType()->getTypeID() < 15710 CI2->getOperand(0)->getType()->getTypeID()) 15711 return !IsCompatibility; 15712 if (CI1->getOperand(0)->getType()->getTypeID() > 15713 CI2->getOperand(0)->getType()->getTypeID()) 15714 return false; 15715 CmpInst::Predicate Pred1 = CI1->getPredicate(); 15716 CmpInst::Predicate Pred2 = CI2->getPredicate(); 15717 CmpInst::Predicate SwapPred1 = CmpInst::getSwappedPredicate(Pred1); 15718 CmpInst::Predicate SwapPred2 = CmpInst::getSwappedPredicate(Pred2); 15719 CmpInst::Predicate BasePred1 = std::min(Pred1, SwapPred1); 15720 CmpInst::Predicate BasePred2 = std::min(Pred2, SwapPred2); 15721 if (BasePred1 < BasePred2) 15722 return !IsCompatibility; 15723 if (BasePred1 > BasePred2) 15724 return false; 15725 // Compare operands. 15726 bool CI1Preds = Pred1 == BasePred1; 15727 bool CI2Preds = Pred2 == BasePred1; 15728 for (int I = 0, E = CI1->getNumOperands(); I < E; ++I) { 15729 auto *Op1 = CI1->getOperand(CI1Preds ? I : E - I - 1); 15730 auto *Op2 = CI2->getOperand(CI2Preds ? I : E - I - 1); 15731 if (Op1 == Op2) 15732 continue; 15733 if (Op1->getValueID() < Op2->getValueID()) 15734 return !IsCompatibility; 15735 if (Op1->getValueID() > Op2->getValueID()) 15736 return false; 15737 if (auto *I1 = dyn_cast<Instruction>(Op1)) 15738 if (auto *I2 = dyn_cast<Instruction>(Op2)) { 15739 if (IsCompatibility) { 15740 if (I1->getParent() != I2->getParent()) 15741 return false; 15742 } else { 15743 // Try to compare nodes with same parent. 15744 DomTreeNodeBase<BasicBlock> *NodeI1 = DT.getNode(I1->getParent()); 15745 DomTreeNodeBase<BasicBlock> *NodeI2 = DT.getNode(I2->getParent()); 15746 if (!NodeI1) 15747 return NodeI2 != nullptr; 15748 if (!NodeI2) 15749 return false; 15750 assert((NodeI1 == NodeI2) == 15751 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) && 15752 "Different nodes should have different DFS numbers"); 15753 if (NodeI1 != NodeI2) 15754 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn(); 15755 } 15756 InstructionsState S = getSameOpcode({I1, I2}, TLI); 15757 if (S.getOpcode() && (IsCompatibility || !S.isAltShuffle())) 15758 continue; 15759 if (IsCompatibility) 15760 return false; 15761 if (I1->getOpcode() != I2->getOpcode()) 15762 return I1->getOpcode() < I2->getOpcode(); 15763 } 15764 } 15765 return IsCompatibility; 15766 } 15767 15768 template <typename ItT> 15769 bool SLPVectorizerPass::vectorizeCmpInsts(iterator_range<ItT> CmpInsts, 15770 BasicBlock *BB, BoUpSLP &R) { 15771 bool Changed = false; 15772 // Try to find reductions first. 15773 for (CmpInst *I : CmpInsts) { 15774 if (R.isDeleted(I)) 15775 continue; 15776 for (Value *Op : I->operands()) 15777 if (auto *RootOp = dyn_cast<Instruction>(Op)) 15778 Changed |= vectorizeRootInstruction(nullptr, RootOp, BB, R, TTI); 15779 } 15780 // Try to vectorize operands as vector bundles. 15781 for (CmpInst *I : CmpInsts) { 15782 if (R.isDeleted(I)) 15783 continue; 15784 Changed |= tryToVectorize(I, R); 15785 } 15786 // Try to vectorize list of compares. 15787 // Sort by type, compare predicate, etc. 
15788 auto CompareSorter = [&](Value *V, Value *V2) { 15789 if (V == V2) 15790 return false; 15791 return compareCmp<false>(V, V2, *TLI, *DT); 15792 }; 15793 15794 auto AreCompatibleCompares = [&](Value *V1, Value *V2) { 15795 if (V1 == V2) 15796 return true; 15797 return compareCmp<true>(V1, V2, *TLI, *DT); 15798 }; 15799 15800 SmallVector<Value *> Vals; 15801 for (Instruction *V : CmpInsts) 15802 if (!R.isDeleted(V) && isValidElementType(V->getType())) 15803 Vals.push_back(V); 15804 if (Vals.size() <= 1) 15805 return Changed; 15806 Changed |= tryToVectorizeSequence<Value>( 15807 Vals, CompareSorter, AreCompatibleCompares, 15808 [this, &R](ArrayRef<Value *> Candidates, bool MaxVFOnly) { 15809 // Exclude possible reductions from other blocks. 15810 bool ArePossiblyReducedInOtherBlock = any_of(Candidates, [](Value *V) { 15811 return any_of(V->users(), [V](User *U) { 15812 auto *Select = dyn_cast<SelectInst>(U); 15813 return Select && 15814 Select->getParent() != cast<Instruction>(V)->getParent(); 15815 }); 15816 }); 15817 if (ArePossiblyReducedInOtherBlock) 15818 return false; 15819 return tryToVectorizeList(Candidates, R, MaxVFOnly); 15820 }, 15821 /*MaxVFOnly=*/true, R); 15822 return Changed; 15823 } 15824 15825 bool SLPVectorizerPass::vectorizeInserts(InstSetVector &Instructions, 15826 BasicBlock *BB, BoUpSLP &R) { 15827 assert(all_of(Instructions, 15828 [](auto *I) { 15829 return isa<InsertElementInst, InsertValueInst>(I); 15830 }) && 15831 "This function only accepts Insert instructions"); 15832 bool OpsChanged = false; 15833 SmallVector<WeakTrackingVH> PostponedInsts; 15834 // pass1 - try to vectorize reductions only 15835 for (auto *I : reverse(Instructions)) { 15836 if (R.isDeleted(I)) 15837 continue; 15838 OpsChanged |= vectorizeHorReduction(nullptr, I, BB, R, TTI, PostponedInsts); 15839 } 15840 // pass2 - try to match and vectorize a buildvector sequence. 15841 for (auto *I : reverse(Instructions)) { 15842 if (R.isDeleted(I) || isa<CmpInst>(I)) 15843 continue; 15844 if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) { 15845 OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R); 15846 } else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) { 15847 OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R); 15848 } 15849 } 15850 // Now try to vectorize postponed instructions. 15851 OpsChanged |= tryToVectorize(PostponedInsts, R); 15852 15853 Instructions.clear(); 15854 return OpsChanged; 15855 } 15856 15857 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { 15858 bool Changed = false; 15859 SmallVector<Value *, 4> Incoming; 15860 SmallPtrSet<Value *, 16> VisitedInstrs; 15861 // Maps phi nodes to the non-phi nodes found in the use tree for each phi 15862 // node. Allows better to identify the chains that can be vectorized in the 15863 // better way. 15864 DenseMap<Value *, SmallVector<Value *, 4>> PHIToOpcodes; 15865 auto PHICompare = [this, &PHIToOpcodes](Value *V1, Value *V2) { 15866 assert(isValidElementType(V1->getType()) && 15867 isValidElementType(V2->getType()) && 15868 "Expected vectorizable types only."); 15869 // It is fine to compare type IDs here, since we expect only vectorizable 15870 // types, like ints, floats and pointers, we don't care about other type. 
15871 if (V1->getType()->getTypeID() < V2->getType()->getTypeID()) 15872 return true; 15873 if (V1->getType()->getTypeID() > V2->getType()->getTypeID()) 15874 return false; 15875 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1]; 15876 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2]; 15877 if (Opcodes1.size() < Opcodes2.size()) 15878 return true; 15879 if (Opcodes1.size() > Opcodes2.size()) 15880 return false; 15881 for (int I = 0, E = Opcodes1.size(); I < E; ++I) { 15882 // Undefs are compatible with any other value. 15883 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I])) { 15884 if (isa<Instruction>(Opcodes1[I])) 15885 return true; 15886 if (isa<Instruction>(Opcodes2[I])) 15887 return false; 15888 if (isa<Constant>(Opcodes1[I]) && !isa<UndefValue>(Opcodes1[I])) 15889 return true; 15890 if (isa<Constant>(Opcodes2[I]) && !isa<UndefValue>(Opcodes2[I])) 15891 return false; 15892 if (isa<UndefValue>(Opcodes1[I]) && isa<UndefValue>(Opcodes2[I])) 15893 continue; 15894 return isa<UndefValue>(Opcodes2[I]); 15895 } 15896 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I])) 15897 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) { 15898 DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent()); 15899 DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent()); 15900 if (!NodeI1) 15901 return NodeI2 != nullptr; 15902 if (!NodeI2) 15903 return false; 15904 assert((NodeI1 == NodeI2) == 15905 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) && 15906 "Different nodes should have different DFS numbers"); 15907 if (NodeI1 != NodeI2) 15908 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn(); 15909 InstructionsState S = getSameOpcode({I1, I2}, *TLI); 15910 if (S.getOpcode() && !S.isAltShuffle()) 15911 continue; 15912 return I1->getOpcode() < I2->getOpcode(); 15913 } 15914 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I])) 15915 return Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID(); 15916 if (isa<Instruction>(Opcodes1[I])) 15917 return true; 15918 if (isa<Instruction>(Opcodes2[I])) 15919 return false; 15920 if (isa<Constant>(Opcodes1[I])) 15921 return true; 15922 if (isa<Constant>(Opcodes2[I])) 15923 return false; 15924 if (Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID()) 15925 return true; 15926 if (Opcodes1[I]->getValueID() > Opcodes2[I]->getValueID()) 15927 return false; 15928 } 15929 return false; 15930 }; 15931 auto AreCompatiblePHIs = [&PHIToOpcodes, this](Value *V1, Value *V2) { 15932 if (V1 == V2) 15933 return true; 15934 if (V1->getType() != V2->getType()) 15935 return false; 15936 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1]; 15937 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2]; 15938 if (Opcodes1.size() != Opcodes2.size()) 15939 return false; 15940 for (int I = 0, E = Opcodes1.size(); I < E; ++I) { 15941 // Undefs are compatible with any other value. 
15942 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I])) 15943 continue; 15944 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I])) 15945 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) { 15946 if (I1->getParent() != I2->getParent()) 15947 return false; 15948 InstructionsState S = getSameOpcode({I1, I2}, *TLI); 15949 if (S.getOpcode()) 15950 continue; 15951 return false; 15952 } 15953 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I])) 15954 continue; 15955 if (Opcodes1[I]->getValueID() != Opcodes2[I]->getValueID()) 15956 return false; 15957 } 15958 return true; 15959 }; 15960 15961 bool HaveVectorizedPhiNodes = false; 15962 do { 15963 // Collect the incoming values from the PHIs. 15964 Incoming.clear(); 15965 for (Instruction &I : *BB) { 15966 PHINode *P = dyn_cast<PHINode>(&I); 15967 if (!P) 15968 break; 15969 15970 // No need to analyze deleted, vectorized and non-vectorizable 15971 // instructions. 15972 if (!VisitedInstrs.count(P) && !R.isDeleted(P) && 15973 isValidElementType(P->getType())) 15974 Incoming.push_back(P); 15975 } 15976 15977 if (Incoming.size() <= 1) 15978 break; 15979 15980 // Find the corresponding non-phi nodes for better matching when trying to 15981 // build the tree. 15982 for (Value *V : Incoming) { 15983 SmallVectorImpl<Value *> &Opcodes = 15984 PHIToOpcodes.try_emplace(V).first->getSecond(); 15985 if (!Opcodes.empty()) 15986 continue; 15987 SmallVector<Value *, 4> Nodes(1, V); 15988 SmallPtrSet<Value *, 4> Visited; 15989 while (!Nodes.empty()) { 15990 auto *PHI = cast<PHINode>(Nodes.pop_back_val()); 15991 if (!Visited.insert(PHI).second) 15992 continue; 15993 for (Value *V : PHI->incoming_values()) { 15994 if (auto *PHI1 = dyn_cast<PHINode>((V))) { 15995 Nodes.push_back(PHI1); 15996 continue; 15997 } 15998 Opcodes.emplace_back(V); 15999 } 16000 } 16001 } 16002 16003 HaveVectorizedPhiNodes = tryToVectorizeSequence<Value>( 16004 Incoming, PHICompare, AreCompatiblePHIs, 16005 [this, &R](ArrayRef<Value *> Candidates, bool MaxVFOnly) { 16006 return tryToVectorizeList(Candidates, R, MaxVFOnly); 16007 }, 16008 /*MaxVFOnly=*/true, R); 16009 Changed |= HaveVectorizedPhiNodes; 16010 VisitedInstrs.insert(Incoming.begin(), Incoming.end()); 16011 } while (HaveVectorizedPhiNodes); 16012 16013 VisitedInstrs.clear(); 16014 16015 InstSetVector PostProcessInserts; 16016 SmallSetVector<CmpInst *, 8> PostProcessCmps; 16017 // Vectorizes Inserts in `PostProcessInserts` and if `VecctorizeCmps` is true 16018 // also vectorizes `PostProcessCmps`. 16019 auto VectorizeInsertsAndCmps = [&](bool VectorizeCmps) { 16020 bool Changed = vectorizeInserts(PostProcessInserts, BB, R); 16021 if (VectorizeCmps) { 16022 Changed |= vectorizeCmpInsts(reverse(PostProcessCmps), BB, R); 16023 PostProcessCmps.clear(); 16024 } 16025 PostProcessInserts.clear(); 16026 return Changed; 16027 }; 16028 // Returns true if `I` is in `PostProcessInserts` or `PostProcessCmps`. 16029 auto IsInPostProcessInstrs = [&](Instruction *I) { 16030 if (auto *Cmp = dyn_cast<CmpInst>(I)) 16031 return PostProcessCmps.contains(Cmp); 16032 return isa<InsertElementInst, InsertValueInst>(I) && 16033 PostProcessInserts.contains(I); 16034 }; 16035 // Returns true if `I` is an instruction without users, like terminator, or 16036 // function call with ignored return value, store. Ignore unused instructions 16037 // (basing on instruction type, except for CallInst and InvokeInst). 
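  // E.g. (illustrative): 'store i32 %x, ptr %p', 'ret void', 'br label %bb' or
  // a call whose result is never read all count as user-less roots whose
  // operands are worth trying as reduction seeds, while an unused 'add' does
  // not.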
16038 auto HasNoUsers = [](Instruction *I) { 16039 return I->use_empty() && 16040 (I->getType()->isVoidTy() || isa<CallInst, InvokeInst>(I)); 16041 }; 16042 for (BasicBlock::iterator It = BB->begin(), E = BB->end(); It != E; ++It) { 16043 // Skip instructions with scalable type. The num of elements is unknown at 16044 // compile-time for scalable type. 16045 if (isa<ScalableVectorType>(It->getType())) 16046 continue; 16047 16048 // Skip instructions marked for the deletion. 16049 if (R.isDeleted(&*It)) 16050 continue; 16051 // We may go through BB multiple times so skip the one we have checked. 16052 if (!VisitedInstrs.insert(&*It).second) { 16053 if (HasNoUsers(&*It) && 16054 VectorizeInsertsAndCmps(/*VectorizeCmps=*/It->isTerminator())) { 16055 // We would like to start over since some instructions are deleted 16056 // and the iterator may become invalid value. 16057 Changed = true; 16058 It = BB->begin(); 16059 E = BB->end(); 16060 } 16061 continue; 16062 } 16063 16064 if (isa<DbgInfoIntrinsic>(It)) 16065 continue; 16066 16067 // Try to vectorize reductions that use PHINodes. 16068 if (PHINode *P = dyn_cast<PHINode>(It)) { 16069 // Check that the PHI is a reduction PHI. 16070 if (P->getNumIncomingValues() == 2) { 16071 // Try to match and vectorize a horizontal reduction. 16072 Instruction *Root = getReductionInstr(DT, P, BB, LI); 16073 if (Root && vectorizeRootInstruction(P, Root, BB, R, TTI)) { 16074 Changed = true; 16075 It = BB->begin(); 16076 E = BB->end(); 16077 continue; 16078 } 16079 } 16080 // Try to vectorize the incoming values of the PHI, to catch reductions 16081 // that feed into PHIs. 16082 for (unsigned I = 0, E = P->getNumIncomingValues(); I != E; I++) { 16083 // Skip if the incoming block is the current BB for now. Also, bypass 16084 // unreachable IR for efficiency and to avoid crashing. 16085 // TODO: Collect the skipped incoming values and try to vectorize them 16086 // after processing BB. 16087 if (BB == P->getIncomingBlock(I) || 16088 !DT->isReachableFromEntry(P->getIncomingBlock(I))) 16089 continue; 16090 16091 // Postponed instructions should not be vectorized here, delay their 16092 // vectorization. 16093 if (auto *PI = dyn_cast<Instruction>(P->getIncomingValue(I)); 16094 PI && !IsInPostProcessInstrs(PI)) 16095 Changed |= vectorizeRootInstruction(nullptr, PI, 16096 P->getIncomingBlock(I), R, TTI); 16097 } 16098 continue; 16099 } 16100 16101 if (HasNoUsers(&*It)) { 16102 bool OpsChanged = false; 16103 auto *SI = dyn_cast<StoreInst>(It); 16104 bool TryToVectorizeRoot = ShouldStartVectorizeHorAtStore || !SI; 16105 if (SI) { 16106 auto *I = Stores.find(getUnderlyingObject(SI->getPointerOperand())); 16107 // Try to vectorize chain in store, if this is the only store to the 16108 // address in the block. 16109 // TODO: This is just a temporarily solution to save compile time. Need 16110 // to investigate if we can safely turn on slp-vectorize-hor-store 16111 // instead to allow lookup for reduction chains in all non-vectorized 16112 // stores (need to check side effects and compile time). 16113 TryToVectorizeRoot |= (I == Stores.end() || I->second.size() == 1) && 16114 SI->getValueOperand()->hasOneUse(); 16115 } 16116 if (TryToVectorizeRoot) { 16117 for (auto *V : It->operand_values()) { 16118 // Postponed instructions should not be vectorized here, delay their 16119 // vectorization. 16120 if (auto *VI = dyn_cast<Instruction>(V); 16121 VI && !IsInPostProcessInstrs(VI)) 16122 // Try to match and vectorize a horizontal reduction. 
16123 OpsChanged |= vectorizeRootInstruction(nullptr, VI, BB, R, TTI); 16124 } 16125 } 16126 // Start vectorization of post-process list of instructions from the 16127 // top-tree instructions to try to vectorize as many instructions as 16128 // possible. 16129 OpsChanged |= 16130 VectorizeInsertsAndCmps(/*VectorizeCmps=*/It->isTerminator()); 16131 if (OpsChanged) { 16132 // We would like to start over since some instructions are deleted 16133 // and the iterator may become invalid value. 16134 Changed = true; 16135 It = BB->begin(); 16136 E = BB->end(); 16137 continue; 16138 } 16139 } 16140 16141 if (isa<InsertElementInst, InsertValueInst>(It)) 16142 PostProcessInserts.insert(&*It); 16143 else if (isa<CmpInst>(It)) 16144 PostProcessCmps.insert(cast<CmpInst>(&*It)); 16145 } 16146 16147 return Changed; 16148 } 16149 16150 bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) { 16151 auto Changed = false; 16152 for (auto &Entry : GEPs) { 16153 // If the getelementptr list has fewer than two elements, there's nothing 16154 // to do. 16155 if (Entry.second.size() < 2) 16156 continue; 16157 16158 LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length " 16159 << Entry.second.size() << ".\n"); 16160 16161 // Process the GEP list in chunks suitable for the target's supported 16162 // vector size. If a vector register can't hold 1 element, we are done. We 16163 // are trying to vectorize the index computations, so the maximum number of 16164 // elements is based on the size of the index expression, rather than the 16165 // size of the GEP itself (the target's pointer size). 16166 unsigned MaxVecRegSize = R.getMaxVecRegSize(); 16167 unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin()); 16168 if (MaxVecRegSize < EltSize) 16169 continue; 16170 16171 unsigned MaxElts = MaxVecRegSize / EltSize; 16172 for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) { 16173 auto Len = std::min<unsigned>(BE - BI, MaxElts); 16174 ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len); 16175 16176 // Initialize a set a candidate getelementptrs. Note that we use a 16177 // SetVector here to preserve program order. If the index computations 16178 // are vectorizable and begin with loads, we want to minimize the chance 16179 // of having to reorder them later. 16180 SetVector<Value *> Candidates(GEPList.begin(), GEPList.end()); 16181 16182 // Some of the candidates may have already been vectorized after we 16183 // initially collected them. If so, they are marked as deleted, so remove 16184 // them from the set of candidates. 16185 Candidates.remove_if( 16186 [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); }); 16187 16188 // Remove from the set of candidates all pairs of getelementptrs with 16189 // constant differences. Such getelementptrs are likely not good 16190 // candidates for vectorization in a bottom-up phase since one can be 16191 // computed from the other. We also ensure all candidate getelementptr 16192 // indices are unique. 
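      // For example (illustrative): for %g0 = getelementptr i32, ptr %p, i64 %i
      // and %g1 = getelementptr i32, ptr %p, i64 %j, if SCEV proves that
      // %j - %i is a constant, both geps are dropped; if two candidates use the
      // exact same index value, only the first one is kept.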
16193 for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) { 16194 auto *GEPI = GEPList[I]; 16195 if (!Candidates.count(GEPI)) 16196 continue; 16197 auto *SCEVI = SE->getSCEV(GEPList[I]); 16198 for (int J = I + 1; J < E && Candidates.size() > 1; ++J) { 16199 auto *GEPJ = GEPList[J]; 16200 auto *SCEVJ = SE->getSCEV(GEPList[J]); 16201 if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) { 16202 Candidates.remove(GEPI); 16203 Candidates.remove(GEPJ); 16204 } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) { 16205 Candidates.remove(GEPJ); 16206 } 16207 } 16208 } 16209 16210 // We break out of the above computation as soon as we know there are 16211 // fewer than two candidates remaining. 16212 if (Candidates.size() < 2) 16213 continue; 16214 16215 // Add the single, non-constant index of each candidate to the bundle. We 16216 // ensured the indices met these constraints when we originally collected 16217 // the getelementptrs. 16218 SmallVector<Value *, 16> Bundle(Candidates.size()); 16219 auto BundleIndex = 0u; 16220 for (auto *V : Candidates) { 16221 auto *GEP = cast<GetElementPtrInst>(V); 16222 auto *GEPIdx = GEP->idx_begin()->get(); 16223 assert(GEP->getNumIndices() == 1 && !isa<Constant>(GEPIdx)); 16224 Bundle[BundleIndex++] = GEPIdx; 16225 } 16226 16227 // Try and vectorize the indices. We are currently only interested in 16228 // gather-like cases of the form: 16229 // 16230 // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ... 16231 // 16232 // where the loads of "a", the loads of "b", and the subtractions can be 16233 // performed in parallel. It's likely that detecting this pattern in a 16234 // bottom-up phase will be simpler and less costly than building a 16235 // full-blown top-down phase beginning at the consecutive loads. 16236 Changed |= tryToVectorizeList(Bundle, R); 16237 } 16238 } 16239 return Changed; 16240 } 16241 16242 bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) { 16243 bool Changed = false; 16244 // Sort by type, base pointers and values operand. Value operands must be 16245 // compatible (have the same opcode, same parent), otherwise it is 16246 // definitely not profitable to try to vectorize them. 16247 auto &&StoreSorter = [this](StoreInst *V, StoreInst *V2) { 16248 if (V->getValueOperand()->getType()->getTypeID() < 16249 V2->getValueOperand()->getType()->getTypeID()) 16250 return true; 16251 if (V->getValueOperand()->getType()->getTypeID() > 16252 V2->getValueOperand()->getType()->getTypeID()) 16253 return false; 16254 if (V->getPointerOperandType()->getTypeID() < 16255 V2->getPointerOperandType()->getTypeID()) 16256 return true; 16257 if (V->getPointerOperandType()->getTypeID() > 16258 V2->getPointerOperandType()->getTypeID()) 16259 return false; 16260 // UndefValues are compatible with all other values. 
16261 if (isa<UndefValue>(V->getValueOperand()) || 16262 isa<UndefValue>(V2->getValueOperand())) 16263 return false; 16264 if (auto *I1 = dyn_cast<Instruction>(V->getValueOperand())) 16265 if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) { 16266 DomTreeNodeBase<llvm::BasicBlock> *NodeI1 = 16267 DT->getNode(I1->getParent()); 16268 DomTreeNodeBase<llvm::BasicBlock> *NodeI2 = 16269 DT->getNode(I2->getParent()); 16270 assert(NodeI1 && "Should only process reachable instructions"); 16271 assert(NodeI2 && "Should only process reachable instructions"); 16272 assert((NodeI1 == NodeI2) == 16273 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) && 16274 "Different nodes should have different DFS numbers"); 16275 if (NodeI1 != NodeI2) 16276 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn(); 16277 InstructionsState S = getSameOpcode({I1, I2}, *TLI); 16278 if (S.getOpcode()) 16279 return false; 16280 return I1->getOpcode() < I2->getOpcode(); 16281 } 16282 if (isa<Constant>(V->getValueOperand()) && 16283 isa<Constant>(V2->getValueOperand())) 16284 return false; 16285 return V->getValueOperand()->getValueID() < 16286 V2->getValueOperand()->getValueID(); 16287 }; 16288 16289 auto &&AreCompatibleStores = [this](StoreInst *V1, StoreInst *V2) { 16290 if (V1 == V2) 16291 return true; 16292 if (V1->getValueOperand()->getType() != V2->getValueOperand()->getType()) 16293 return false; 16294 if (V1->getPointerOperandType() != V2->getPointerOperandType()) 16295 return false; 16296 // Undefs are compatible with any other value. 16297 if (isa<UndefValue>(V1->getValueOperand()) || 16298 isa<UndefValue>(V2->getValueOperand())) 16299 return true; 16300 if (auto *I1 = dyn_cast<Instruction>(V1->getValueOperand())) 16301 if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) { 16302 if (I1->getParent() != I2->getParent()) 16303 return false; 16304 InstructionsState S = getSameOpcode({I1, I2}, *TLI); 16305 return S.getOpcode() > 0; 16306 } 16307 if (isa<Constant>(V1->getValueOperand()) && 16308 isa<Constant>(V2->getValueOperand())) 16309 return true; 16310 return V1->getValueOperand()->getValueID() == 16311 V2->getValueOperand()->getValueID(); 16312 }; 16313 16314 // Attempt to sort and vectorize each of the store-groups. 16315 for (auto &Pair : Stores) { 16316 if (Pair.second.size() < 2) 16317 continue; 16318 16319 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " 16320 << Pair.second.size() << ".\n"); 16321 16322 if (!isValidElementType(Pair.second.front()->getValueOperand()->getType())) 16323 continue; 16324 16325 // Reverse stores to do bottom-to-top analysis. This is important if the 16326 // values are stores to the same addresses several times, in this case need 16327 // to follow the stores order (reversed to meet the memory dependecies). 16328 SmallVector<StoreInst *> ReversedStores(Pair.second.rbegin(), 16329 Pair.second.rend()); 16330 Changed |= tryToVectorizeSequence<StoreInst>( 16331 ReversedStores, StoreSorter, AreCompatibleStores, 16332 [this, &R](ArrayRef<StoreInst *> Candidates, bool) { 16333 return vectorizeStores(Candidates, R); 16334 }, 16335 /*MaxVFOnly=*/false, R); 16336 } 16337 return Changed; 16338 } 16339