//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#ifdef EXPENSIVE_CHECKS
#include "llvm/IR/Verifier.h"
#endif
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;
using namespace slpvectorizer;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

static cl::opt<bool>
    RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden,
                        cl::desc("Run the SLP vectorization passes"));

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));

static cl::opt<bool>
    ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                       cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

// NOTE: If AllowHorRdxIdenityOptimization is true, the optimization will run
// even if we match a reduction but do not vectorize in the end.
static cl::opt<bool> AllowHorRdxIdenityOptimization(
    "slp-optimize-identity-hor-reduction-ops", cl::init(true), cl::Hidden,
    cl::desc("Allow optimization of original scalar identity operations on "
             "matched horizontal reductions."));

static cl::opt<int> MaxVectorRegSizeOption(
    "slp-max-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned>
    MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden,
                cl::desc("Maximum SLP vectorization factor (0=unlimited)"));

/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int> ScheduleRegionSizeBudget(
    "slp-schedule-budget", cl::init(100000), cl::Hidden,
    cl::desc("Limit the size of the SLP scheduling region per block"));

static cl::opt<int> MinVectorRegSizeOption(
    "slp-min-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned> RecursionMaxDepth(
    "slp-recursion-max-depth", cl::init(12), cl::Hidden,
    cl::desc("Limit the recursion depth when building a vectorizable tree"));

static cl::opt<unsigned> MinTreeSize(
    "slp-min-tree-size", cl::init(3), cl::Hidden,
    cl::desc("Only vectorize small trees if they are fully vectorizable"));

// The maximum depth that the look-ahead score heuristic will explore.
// The higher this value, the higher the compilation time overhead.
static cl::opt<int> LookAheadMaxDepth(
    "slp-max-look-ahead-depth", cl::init(2), cl::Hidden,
    cl::desc("The maximum look-ahead depth for operand reordering scores"));

// The maximum depth that the look-ahead score heuristic will explore
// when probing among candidates for vectorization tree roots.
171 // The higher this value, the higher the compilation time overhead but unlike 172 // similar limit for operands ordering this is less frequently used, hence 173 // impact of higher value is less noticeable. 174 static cl::opt<int> RootLookAheadMaxDepth( 175 "slp-max-root-look-ahead-depth", cl::init(2), cl::Hidden, 176 cl::desc("The maximum look-ahead depth for searching best rooting option")); 177 178 static cl::opt<bool> 179 ViewSLPTree("view-slp-tree", cl::Hidden, 180 cl::desc("Display the SLP trees with Graphviz")); 181 182 // Limit the number of alias checks. The limit is chosen so that 183 // it has no negative effect on the llvm benchmarks. 184 static const unsigned AliasedCheckLimit = 10; 185 186 // Another limit for the alias checks: The maximum distance between load/store 187 // instructions where alias checks are done. 188 // This limit is useful for very large basic blocks. 189 static const unsigned MaxMemDepDistance = 160; 190 191 /// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling 192 /// regions to be handled. 193 static const int MinScheduleRegionSize = 16; 194 195 /// Predicate for the element types that the SLP vectorizer supports. 196 /// 197 /// The most important thing to filter here are types which are invalid in LLVM 198 /// vectors. We also filter target specific types which have absolutely no 199 /// meaningful vectorization path such as x86_fp80 and ppc_f128. This just 200 /// avoids spending time checking the cost model and realizing that they will 201 /// be inevitably scalarized. 202 static bool isValidElementType(Type *Ty) { 203 return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() && 204 !Ty->isPPC_FP128Ty(); 205 } 206 207 /// \returns True if the value is a constant (but not globals/constant 208 /// expressions). 209 static bool isConstant(Value *V) { 210 return isa<Constant>(V) && !isa<ConstantExpr, GlobalValue>(V); 211 } 212 213 /// Checks if \p V is one of vector-like instructions, i.e. undef, 214 /// insertelement/extractelement with constant indices for fixed vector type or 215 /// extractvalue instruction. 216 static bool isVectorLikeInstWithConstOps(Value *V) { 217 if (!isa<InsertElementInst, ExtractElementInst>(V) && 218 !isa<ExtractValueInst, UndefValue>(V)) 219 return false; 220 auto *I = dyn_cast<Instruction>(V); 221 if (!I || isa<ExtractValueInst>(I)) 222 return true; 223 if (!isa<FixedVectorType>(I->getOperand(0)->getType())) 224 return false; 225 if (isa<ExtractElementInst>(I)) 226 return isConstant(I->getOperand(1)); 227 assert(isa<InsertElementInst>(V) && "Expected only insertelement."); 228 return isConstant(I->getOperand(2)); 229 } 230 231 #if !defined(NDEBUG) 232 /// Print a short descriptor of the instruction bundle suitable for debug output. 233 static std::string shortBundleName(ArrayRef<Value *> VL) { 234 std::string Result; 235 raw_string_ostream OS(Result); 236 OS << "n=" << VL.size() << " [" << *VL.front() << ", ..]"; 237 OS.flush(); 238 return Result; 239 } 240 #endif 241 242 /// \returns true if all of the instructions in \p VL are in the same block or 243 /// false otherwise. 
244 static bool allSameBlock(ArrayRef<Value *> VL) { 245 Instruction *I0 = dyn_cast<Instruction>(VL[0]); 246 if (!I0) 247 return false; 248 if (all_of(VL, isVectorLikeInstWithConstOps)) 249 return true; 250 251 BasicBlock *BB = I0->getParent(); 252 for (int I = 1, E = VL.size(); I < E; I++) { 253 auto *II = dyn_cast<Instruction>(VL[I]); 254 if (!II) 255 return false; 256 257 if (BB != II->getParent()) 258 return false; 259 } 260 return true; 261 } 262 263 /// \returns True if all of the values in \p VL are constants (but not 264 /// globals/constant expressions). 265 static bool allConstant(ArrayRef<Value *> VL) { 266 // Constant expressions and globals can't be vectorized like normal integer/FP 267 // constants. 268 return all_of(VL, isConstant); 269 } 270 271 /// \returns True if all of the values in \p VL are identical or some of them 272 /// are UndefValue. 273 static bool isSplat(ArrayRef<Value *> VL) { 274 Value *FirstNonUndef = nullptr; 275 for (Value *V : VL) { 276 if (isa<UndefValue>(V)) 277 continue; 278 if (!FirstNonUndef) { 279 FirstNonUndef = V; 280 continue; 281 } 282 if (V != FirstNonUndef) 283 return false; 284 } 285 return FirstNonUndef != nullptr; 286 } 287 288 /// \returns True if \p I is commutative, handles CmpInst and BinaryOperator. 289 static bool isCommutative(Instruction *I) { 290 if (auto *Cmp = dyn_cast<CmpInst>(I)) 291 return Cmp->isCommutative(); 292 if (auto *BO = dyn_cast<BinaryOperator>(I)) 293 return BO->isCommutative(); 294 // TODO: This should check for generic Instruction::isCommutative(), but 295 // we need to confirm that the caller code correctly handles Intrinsics 296 // for example (does not have 2 operands). 297 return false; 298 } 299 300 /// \returns inserting index of InsertElement or InsertValue instruction, 301 /// using Offset as base offset for index. 302 static std::optional<unsigned> getInsertIndex(const Value *InsertInst, 303 unsigned Offset = 0) { 304 int Index = Offset; 305 if (const auto *IE = dyn_cast<InsertElementInst>(InsertInst)) { 306 const auto *VT = dyn_cast<FixedVectorType>(IE->getType()); 307 if (!VT) 308 return std::nullopt; 309 const auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2)); 310 if (!CI) 311 return std::nullopt; 312 if (CI->getValue().uge(VT->getNumElements())) 313 return std::nullopt; 314 Index *= VT->getNumElements(); 315 Index += CI->getZExtValue(); 316 return Index; 317 } 318 319 const auto *IV = cast<InsertValueInst>(InsertInst); 320 Type *CurrentType = IV->getType(); 321 for (unsigned I : IV->indices()) { 322 if (const auto *ST = dyn_cast<StructType>(CurrentType)) { 323 Index *= ST->getNumElements(); 324 CurrentType = ST->getElementType(I); 325 } else if (const auto *AT = dyn_cast<ArrayType>(CurrentType)) { 326 Index *= AT->getNumElements(); 327 CurrentType = AT->getElementType(); 328 } else { 329 return std::nullopt; 330 } 331 Index += I; 332 } 333 return Index; 334 } 335 336 namespace { 337 /// Specifies the way the mask should be analyzed for undefs/poisonous elements 338 /// in the shuffle mask. 339 enum class UseMask { 340 FirstArg, ///< The mask is expected to be for permutation of 1-2 vectors, 341 ///< check for the mask elements for the first argument (mask 342 ///< indices are in range [0:VF)). 
343 SecondArg, ///< The mask is expected to be for permutation of 2 vectors, check 344 ///< for the mask elements for the second argument (mask indices 345 ///< are in range [VF:2*VF)) 346 UndefsAsMask ///< Consider undef mask elements (-1) as placeholders for 347 ///< future shuffle elements and mark them as ones as being used 348 ///< in future. Non-undef elements are considered as unused since 349 ///< they're already marked as used in the mask. 350 }; 351 } // namespace 352 353 /// Prepares a use bitset for the given mask either for the first argument or 354 /// for the second. 355 static SmallBitVector buildUseMask(int VF, ArrayRef<int> Mask, 356 UseMask MaskArg) { 357 SmallBitVector UseMask(VF, true); 358 for (auto [Idx, Value] : enumerate(Mask)) { 359 if (Value == PoisonMaskElem) { 360 if (MaskArg == UseMask::UndefsAsMask) 361 UseMask.reset(Idx); 362 continue; 363 } 364 if (MaskArg == UseMask::FirstArg && Value < VF) 365 UseMask.reset(Value); 366 else if (MaskArg == UseMask::SecondArg && Value >= VF) 367 UseMask.reset(Value - VF); 368 } 369 return UseMask; 370 } 371 372 /// Checks if the given value is actually an undefined constant vector. 373 /// Also, if the \p UseMask is not empty, tries to check if the non-masked 374 /// elements actually mask the insertelement buildvector, if any. 375 template <bool IsPoisonOnly = false> 376 static SmallBitVector isUndefVector(const Value *V, 377 const SmallBitVector &UseMask = {}) { 378 SmallBitVector Res(UseMask.empty() ? 1 : UseMask.size(), true); 379 using T = std::conditional_t<IsPoisonOnly, PoisonValue, UndefValue>; 380 if (isa<T>(V)) 381 return Res; 382 auto *VecTy = dyn_cast<FixedVectorType>(V->getType()); 383 if (!VecTy) 384 return Res.reset(); 385 auto *C = dyn_cast<Constant>(V); 386 if (!C) { 387 if (!UseMask.empty()) { 388 const Value *Base = V; 389 while (auto *II = dyn_cast<InsertElementInst>(Base)) { 390 Base = II->getOperand(0); 391 if (isa<T>(II->getOperand(1))) 392 continue; 393 std::optional<unsigned> Idx = getInsertIndex(II); 394 if (!Idx) { 395 Res.reset(); 396 return Res; 397 } 398 if (*Idx < UseMask.size() && !UseMask.test(*Idx)) 399 Res.reset(*Idx); 400 } 401 // TODO: Add analysis for shuffles here too. 
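      // Illustrative sketch only (hypothetical values): for a non-constant
      // buildvector such as
      //   %v = insertelement <4 x i32> poison, i32 %x, i32 2
      // with a UseMask in which every lane is marked as used, the walk above
      // clears bit 2 (that lane holds a real value), and the recursion into
      // the poison base below leaves bits 0, 1 and 3 set, i.e. those lanes
      // are reported as undefined.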
402 if (V == Base) { 403 Res.reset(); 404 } else { 405 SmallBitVector SubMask(UseMask.size(), false); 406 Res &= isUndefVector<IsPoisonOnly>(Base, SubMask); 407 } 408 } else { 409 Res.reset(); 410 } 411 return Res; 412 } 413 for (unsigned I = 0, E = VecTy->getNumElements(); I != E; ++I) { 414 if (Constant *Elem = C->getAggregateElement(I)) 415 if (!isa<T>(Elem) && 416 (UseMask.empty() || (I < UseMask.size() && !UseMask.test(I)))) 417 Res.reset(I); 418 } 419 return Res; 420 } 421 422 /// Checks if the vector of instructions can be represented as a shuffle, like: 423 /// %x0 = extractelement <4 x i8> %x, i32 0 424 /// %x3 = extractelement <4 x i8> %x, i32 3 425 /// %y1 = extractelement <4 x i8> %y, i32 1 426 /// %y2 = extractelement <4 x i8> %y, i32 2 427 /// %x0x0 = mul i8 %x0, %x0 428 /// %x3x3 = mul i8 %x3, %x3 429 /// %y1y1 = mul i8 %y1, %y1 430 /// %y2y2 = mul i8 %y2, %y2 431 /// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0 432 /// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1 433 /// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2 434 /// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3 435 /// ret <4 x i8> %ins4 436 /// can be transformed into: 437 /// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5, 438 /// i32 6> 439 /// %2 = mul <4 x i8> %1, %1 440 /// ret <4 x i8> %2 441 /// Mask will return the Shuffle Mask equivalent to the extracted elements. 442 /// TODO: Can we split off and reuse the shuffle mask detection from 443 /// ShuffleVectorInst/getShuffleCost? 444 static std::optional<TargetTransformInfo::ShuffleKind> 445 isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) { 446 const auto *It = 447 find_if(VL, [](Value *V) { return isa<ExtractElementInst>(V); }); 448 if (It == VL.end()) 449 return std::nullopt; 450 auto *EI0 = cast<ExtractElementInst>(*It); 451 if (isa<ScalableVectorType>(EI0->getVectorOperandType())) 452 return std::nullopt; 453 unsigned Size = 454 cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements(); 455 Value *Vec1 = nullptr; 456 Value *Vec2 = nullptr; 457 enum ShuffleMode { Unknown, Select, Permute }; 458 ShuffleMode CommonShuffleMode = Unknown; 459 Mask.assign(VL.size(), PoisonMaskElem); 460 for (unsigned I = 0, E = VL.size(); I < E; ++I) { 461 // Undef can be represented as an undef element in a vector. 462 if (isa<UndefValue>(VL[I])) 463 continue; 464 auto *EI = cast<ExtractElementInst>(VL[I]); 465 if (isa<ScalableVectorType>(EI->getVectorOperandType())) 466 return std::nullopt; 467 auto *Vec = EI->getVectorOperand(); 468 // We can extractelement from undef or poison vector. 469 if (isUndefVector(Vec).all()) 470 continue; 471 // All vector operands must have the same number of vector elements. 472 if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size) 473 return std::nullopt; 474 if (isa<UndefValue>(EI->getIndexOperand())) 475 continue; 476 auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand()); 477 if (!Idx) 478 return std::nullopt; 479 // Undefined behavior if Idx is negative or >= Size. 480 if (Idx->getValue().uge(Size)) 481 continue; 482 unsigned IntIdx = Idx->getValue().getZExtValue(); 483 Mask[I] = IntIdx; 484 // For correct shuffling we have to have at most 2 different vector operands 485 // in all extractelement instructions. 
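    // Illustration (hypothetical values): for VL = {x[0], y[1], x[2], y[3]}
    // extracted from two 4-element vectors %x and %y, the loop records
    // Vec1 = %x, Vec2 = %y and builds Mask = {0, 5, 2, 7}; every index matches
    // its position, so the sequence is classified below as an SK_Select
    // (blend) of the two sources rather than a permutation.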
486 if (!Vec1 || Vec1 == Vec) { 487 Vec1 = Vec; 488 } else if (!Vec2 || Vec2 == Vec) { 489 Vec2 = Vec; 490 Mask[I] += Size; 491 } else { 492 return std::nullopt; 493 } 494 if (CommonShuffleMode == Permute) 495 continue; 496 // If the extract index is not the same as the operation number, it is a 497 // permutation. 498 if (IntIdx != I) { 499 CommonShuffleMode = Permute; 500 continue; 501 } 502 CommonShuffleMode = Select; 503 } 504 // If we're not crossing lanes in different vectors, consider it as blending. 505 if (CommonShuffleMode == Select && Vec2) 506 return TargetTransformInfo::SK_Select; 507 // If Vec2 was never used, we have a permutation of a single vector, otherwise 508 // we have permutation of 2 vectors. 509 return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc 510 : TargetTransformInfo::SK_PermuteSingleSrc; 511 } 512 513 /// \returns True if Extract{Value,Element} instruction extracts element Idx. 514 static std::optional<unsigned> getExtractIndex(Instruction *E) { 515 unsigned Opcode = E->getOpcode(); 516 assert((Opcode == Instruction::ExtractElement || 517 Opcode == Instruction::ExtractValue) && 518 "Expected extractelement or extractvalue instruction."); 519 if (Opcode == Instruction::ExtractElement) { 520 auto *CI = dyn_cast<ConstantInt>(E->getOperand(1)); 521 if (!CI) 522 return std::nullopt; 523 return CI->getZExtValue(); 524 } 525 auto *EI = cast<ExtractValueInst>(E); 526 if (EI->getNumIndices() != 1) 527 return std::nullopt; 528 return *EI->idx_begin(); 529 } 530 531 namespace { 532 533 /// Main data required for vectorization of instructions. 534 struct InstructionsState { 535 /// The very first instruction in the list with the main opcode. 536 Value *OpValue = nullptr; 537 538 /// The main/alternate instruction. 539 Instruction *MainOp = nullptr; 540 Instruction *AltOp = nullptr; 541 542 /// The main/alternate opcodes for the list of instructions. 543 unsigned getOpcode() const { 544 return MainOp ? MainOp->getOpcode() : 0; 545 } 546 547 unsigned getAltOpcode() const { 548 return AltOp ? AltOp->getOpcode() : 0; 549 } 550 551 /// Some of the instructions in the list have alternate opcodes. 552 bool isAltShuffle() const { return AltOp != MainOp; } 553 554 bool isOpcodeOrAlt(Instruction *I) const { 555 unsigned CheckedOpcode = I->getOpcode(); 556 return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode; 557 } 558 559 InstructionsState() = delete; 560 InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp) 561 : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {} 562 }; 563 564 } // end anonymous namespace 565 566 /// Chooses the correct key for scheduling data. If \p Op has the same (or 567 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p 568 /// OpValue. 569 static Value *isOneOf(const InstructionsState &S, Value *Op) { 570 auto *I = dyn_cast<Instruction>(Op); 571 if (I && S.isOpcodeOrAlt(I)) 572 return Op; 573 return S.OpValue; 574 } 575 576 /// \returns true if \p Opcode is allowed as part of the main/alternate 577 /// instruction for SLP vectorization. 578 /// 579 /// Example of unsupported opcode is SDIV that can potentially cause UB if the 580 /// "shuffled out" lane would result in division by zero. 
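///
/// A minimal sketch of the problem (illustrative C++, not code from this
/// pass): the scalar pattern
/// \code
///   Out[0] = A[0] + B[0];
///   Out[1] = A[1] / B[1];
/// \endcode
/// would be vectorized as a vector add plus a vector divide followed by a
/// blend, so the divide would also be executed on lane 0, where B[0] may be
/// zero.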
581 static bool isValidForAlternation(unsigned Opcode) { 582 if (Instruction::isIntDivRem(Opcode)) 583 return false; 584 585 return true; 586 } 587 588 static InstructionsState getSameOpcode(ArrayRef<Value *> VL, 589 const TargetLibraryInfo &TLI, 590 unsigned BaseIndex = 0); 591 592 /// Checks if the provided operands of 2 cmp instructions are compatible, i.e. 593 /// compatible instructions or constants, or just some other regular values. 594 static bool areCompatibleCmpOps(Value *BaseOp0, Value *BaseOp1, Value *Op0, 595 Value *Op1, const TargetLibraryInfo &TLI) { 596 return (isConstant(BaseOp0) && isConstant(Op0)) || 597 (isConstant(BaseOp1) && isConstant(Op1)) || 598 (!isa<Instruction>(BaseOp0) && !isa<Instruction>(Op0) && 599 !isa<Instruction>(BaseOp1) && !isa<Instruction>(Op1)) || 600 BaseOp0 == Op0 || BaseOp1 == Op1 || 601 getSameOpcode({BaseOp0, Op0}, TLI).getOpcode() || 602 getSameOpcode({BaseOp1, Op1}, TLI).getOpcode(); 603 } 604 605 /// \returns true if a compare instruction \p CI has similar "look" and 606 /// same predicate as \p BaseCI, "as is" or with its operands and predicate 607 /// swapped, false otherwise. 608 static bool isCmpSameOrSwapped(const CmpInst *BaseCI, const CmpInst *CI, 609 const TargetLibraryInfo &TLI) { 610 assert(BaseCI->getOperand(0)->getType() == CI->getOperand(0)->getType() && 611 "Assessing comparisons of different types?"); 612 CmpInst::Predicate BasePred = BaseCI->getPredicate(); 613 CmpInst::Predicate Pred = CI->getPredicate(); 614 CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(Pred); 615 616 Value *BaseOp0 = BaseCI->getOperand(0); 617 Value *BaseOp1 = BaseCI->getOperand(1); 618 Value *Op0 = CI->getOperand(0); 619 Value *Op1 = CI->getOperand(1); 620 621 return (BasePred == Pred && 622 areCompatibleCmpOps(BaseOp0, BaseOp1, Op0, Op1, TLI)) || 623 (BasePred == SwappedPred && 624 areCompatibleCmpOps(BaseOp0, BaseOp1, Op1, Op0, TLI)); 625 } 626 627 /// \returns analysis of the Instructions in \p VL described in 628 /// InstructionsState, the Opcode that we suppose the whole list 629 /// could be vectorized even if its structure is diverse. 630 static InstructionsState getSameOpcode(ArrayRef<Value *> VL, 631 const TargetLibraryInfo &TLI, 632 unsigned BaseIndex) { 633 // Make sure these are all Instructions. 634 if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); })) 635 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 636 637 bool IsCastOp = isa<CastInst>(VL[BaseIndex]); 638 bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]); 639 bool IsCmpOp = isa<CmpInst>(VL[BaseIndex]); 640 CmpInst::Predicate BasePred = 641 IsCmpOp ? cast<CmpInst>(VL[BaseIndex])->getPredicate() 642 : CmpInst::BAD_ICMP_PREDICATE; 643 unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode(); 644 unsigned AltOpcode = Opcode; 645 unsigned AltIndex = BaseIndex; 646 647 // Check for one alternate opcode from another BinaryOperator. 648 // TODO - generalize to support all operators (types, calls etc.). 
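  // Illustration (hypothetical scalars): for a list built from the pattern
  //   Out[0] = A[0] + B[0];
  //   Out[1] = A[1] - B[1];
  //   Out[2] = A[2] + B[2];
  //   Out[3] = A[3] - B[3];
  // the loop below ends up with MainOp = add and AltOp = sub (an "alternate
  // shuffle"), which is later lowered as a vector add and a vector sub
  // combined with a shufflevector that selects the even lanes from one result
  // and the odd lanes from the other.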
649 auto *IBase = cast<Instruction>(VL[BaseIndex]); 650 Intrinsic::ID BaseID = 0; 651 SmallVector<VFInfo> BaseMappings; 652 if (auto *CallBase = dyn_cast<CallInst>(IBase)) { 653 BaseID = getVectorIntrinsicIDForCall(CallBase, &TLI); 654 BaseMappings = VFDatabase(*CallBase).getMappings(*CallBase); 655 if (!isTriviallyVectorizable(BaseID) && BaseMappings.empty()) 656 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 657 } 658 for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) { 659 auto *I = cast<Instruction>(VL[Cnt]); 660 unsigned InstOpcode = I->getOpcode(); 661 if (IsBinOp && isa<BinaryOperator>(I)) { 662 if (InstOpcode == Opcode || InstOpcode == AltOpcode) 663 continue; 664 if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) && 665 isValidForAlternation(Opcode)) { 666 AltOpcode = InstOpcode; 667 AltIndex = Cnt; 668 continue; 669 } 670 } else if (IsCastOp && isa<CastInst>(I)) { 671 Value *Op0 = IBase->getOperand(0); 672 Type *Ty0 = Op0->getType(); 673 Value *Op1 = I->getOperand(0); 674 Type *Ty1 = Op1->getType(); 675 if (Ty0 == Ty1) { 676 if (InstOpcode == Opcode || InstOpcode == AltOpcode) 677 continue; 678 if (Opcode == AltOpcode) { 679 assert(isValidForAlternation(Opcode) && 680 isValidForAlternation(InstOpcode) && 681 "Cast isn't safe for alternation, logic needs to be updated!"); 682 AltOpcode = InstOpcode; 683 AltIndex = Cnt; 684 continue; 685 } 686 } 687 } else if (auto *Inst = dyn_cast<CmpInst>(VL[Cnt]); Inst && IsCmpOp) { 688 auto *BaseInst = cast<CmpInst>(VL[BaseIndex]); 689 Type *Ty0 = BaseInst->getOperand(0)->getType(); 690 Type *Ty1 = Inst->getOperand(0)->getType(); 691 if (Ty0 == Ty1) { 692 assert(InstOpcode == Opcode && "Expected same CmpInst opcode."); 693 // Check for compatible operands. If the corresponding operands are not 694 // compatible - need to perform alternate vectorization. 
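        // Illustration (hypothetical values): with a base of
        // 'icmp slt i32 %a, %b', a second compare 'icmp sgt i32 %c, %d' is
        // still treated as the same opcode because sgt is the swapped form of
        // slt, so the two can be brought to a common predicate by swapping
        // operands; a compare with an unrelated predicate such as 'icmp eq'
        // can only be kept as the alternate opcode.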
695 CmpInst::Predicate CurrentPred = Inst->getPredicate(); 696 CmpInst::Predicate SwappedCurrentPred = 697 CmpInst::getSwappedPredicate(CurrentPred); 698 699 if (E == 2 && 700 (BasePred == CurrentPred || BasePred == SwappedCurrentPred)) 701 continue; 702 703 if (isCmpSameOrSwapped(BaseInst, Inst, TLI)) 704 continue; 705 auto *AltInst = cast<CmpInst>(VL[AltIndex]); 706 if (AltIndex != BaseIndex) { 707 if (isCmpSameOrSwapped(AltInst, Inst, TLI)) 708 continue; 709 } else if (BasePred != CurrentPred) { 710 assert( 711 isValidForAlternation(InstOpcode) && 712 "CmpInst isn't safe for alternation, logic needs to be updated!"); 713 AltIndex = Cnt; 714 continue; 715 } 716 CmpInst::Predicate AltPred = AltInst->getPredicate(); 717 if (BasePred == CurrentPred || BasePred == SwappedCurrentPred || 718 AltPred == CurrentPred || AltPred == SwappedCurrentPred) 719 continue; 720 } 721 } else if (InstOpcode == Opcode || InstOpcode == AltOpcode) { 722 if (auto *Gep = dyn_cast<GetElementPtrInst>(I)) { 723 if (Gep->getNumOperands() != 2 || 724 Gep->getOperand(0)->getType() != IBase->getOperand(0)->getType()) 725 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 726 } else if (auto *EI = dyn_cast<ExtractElementInst>(I)) { 727 if (!isVectorLikeInstWithConstOps(EI)) 728 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 729 } else if (auto *LI = dyn_cast<LoadInst>(I)) { 730 auto *BaseLI = cast<LoadInst>(IBase); 731 if (!LI->isSimple() || !BaseLI->isSimple()) 732 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 733 } else if (auto *Call = dyn_cast<CallInst>(I)) { 734 auto *CallBase = cast<CallInst>(IBase); 735 if (Call->getCalledFunction() != CallBase->getCalledFunction()) 736 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 737 if (Call->hasOperandBundles() && 738 !std::equal(Call->op_begin() + Call->getBundleOperandsStartIndex(), 739 Call->op_begin() + Call->getBundleOperandsEndIndex(), 740 CallBase->op_begin() + 741 CallBase->getBundleOperandsStartIndex())) 742 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 743 Intrinsic::ID ID = getVectorIntrinsicIDForCall(Call, &TLI); 744 if (ID != BaseID) 745 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 746 if (!ID) { 747 SmallVector<VFInfo> Mappings = VFDatabase(*Call).getMappings(*Call); 748 if (Mappings.size() != BaseMappings.size() || 749 Mappings.front().ISA != BaseMappings.front().ISA || 750 Mappings.front().ScalarName != BaseMappings.front().ScalarName || 751 Mappings.front().VectorName != BaseMappings.front().VectorName || 752 Mappings.front().Shape.VF != BaseMappings.front().Shape.VF || 753 Mappings.front().Shape.Parameters != 754 BaseMappings.front().Shape.Parameters) 755 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 756 } 757 } 758 continue; 759 } 760 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 761 } 762 763 return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]), 764 cast<Instruction>(VL[AltIndex])); 765 } 766 767 /// \returns true if all of the values in \p VL have the same type or false 768 /// otherwise. 769 static bool allSameType(ArrayRef<Value *> VL) { 770 Type *Ty = VL.front()->getType(); 771 return all_of(VL.drop_front(), [&](Value *V) { return V->getType() == Ty; }); 772 } 773 774 /// \returns True if in-tree use also needs extract. This refers to 775 /// possible scalar operand in vectorized instruction. 
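/// For example, if a store's address is one of the vectorized scalars, the
/// (scalar) store still needs that address, so an extractelement has to be
/// emitted for it; the same applies to call arguments that a vector intrinsic
/// requires to stay scalar (e.g. the exponent operand of powi).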
static bool doesInTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                        TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    return any_of(enumerate(CI->args()), [&](auto &&Arg) {
      return isVectorIntrinsicWithScalarOpAtArg(ID, Arg.index()) &&
             Arg.value().get() == Scalar;
    });
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

/// Shuffles \p Mask in accordance with the given \p SubMask.
/// \param ExtendingManyInputs Supports reshuffling of the mask with not only
/// one but two input vectors.
static void addMask(SmallVectorImpl<int> &Mask, ArrayRef<int> SubMask,
                    bool ExtendingManyInputs = false) {
  if (SubMask.empty())
    return;
  assert(
      (!ExtendingManyInputs || SubMask.size() > Mask.size() ||
       // Check if input scalars were extended to match the size of other node.
       (SubMask.size() == Mask.size() &&
        std::all_of(std::next(Mask.begin(), Mask.size() / 2), Mask.end(),
                    [](int Idx) { return Idx == PoisonMaskElem; }))) &&
      "SubMask with many inputs support must be larger than the mask.");
  if (Mask.empty()) {
    Mask.append(SubMask.begin(), SubMask.end());
    return;
  }
  SmallVector<int> NewMask(SubMask.size(), PoisonMaskElem);
  int TermValue = std::min(Mask.size(), SubMask.size());
  for (int I = 0, E = SubMask.size(); I < E; ++I) {
    if (SubMask[I] == PoisonMaskElem ||
        (!ExtendingManyInputs &&
         (SubMask[I] >= TermValue || Mask[SubMask[I]] >= TermValue)))
      continue;
    NewMask[I] = Mask[SubMask[I]];
  }
  Mask.swap(NewMask);
}

/// \p Order may have elements assigned a special value (the size of the
/// order), which is out of bounds. Such indices only appear at positions that
/// correspond to undef values (see canReuseExtract for details) and are used
/// to keep undef values from affecting the ordering of the operands.
/// The first loop below simply finds all unused indices and then the next loop
/// nest assigns these indices to the undef value positions.
857 /// As an example below Order has two undef positions and they have assigned 858 /// values 3 and 7 respectively: 859 /// before: 6 9 5 4 9 2 1 0 860 /// after: 6 3 5 4 7 2 1 0 861 static void fixupOrderingIndices(SmallVectorImpl<unsigned> &Order) { 862 const unsigned Sz = Order.size(); 863 SmallBitVector UnusedIndices(Sz, /*t=*/true); 864 SmallBitVector MaskedIndices(Sz); 865 for (unsigned I = 0; I < Sz; ++I) { 866 if (Order[I] < Sz) 867 UnusedIndices.reset(Order[I]); 868 else 869 MaskedIndices.set(I); 870 } 871 if (MaskedIndices.none()) 872 return; 873 assert(UnusedIndices.count() == MaskedIndices.count() && 874 "Non-synced masked/available indices."); 875 int Idx = UnusedIndices.find_first(); 876 int MIdx = MaskedIndices.find_first(); 877 while (MIdx >= 0) { 878 assert(Idx >= 0 && "Indices must be synced."); 879 Order[MIdx] = Idx; 880 Idx = UnusedIndices.find_next(Idx); 881 MIdx = MaskedIndices.find_next(MIdx); 882 } 883 } 884 885 namespace llvm { 886 887 static void inversePermutation(ArrayRef<unsigned> Indices, 888 SmallVectorImpl<int> &Mask) { 889 Mask.clear(); 890 const unsigned E = Indices.size(); 891 Mask.resize(E, PoisonMaskElem); 892 for (unsigned I = 0; I < E; ++I) 893 Mask[Indices[I]] = I; 894 } 895 896 /// Reorders the list of scalars in accordance with the given \p Mask. 897 static void reorderScalars(SmallVectorImpl<Value *> &Scalars, 898 ArrayRef<int> Mask) { 899 assert(!Mask.empty() && "Expected non-empty mask."); 900 SmallVector<Value *> Prev(Scalars.size(), 901 UndefValue::get(Scalars.front()->getType())); 902 Prev.swap(Scalars); 903 for (unsigned I = 0, E = Prev.size(); I < E; ++I) 904 if (Mask[I] != PoisonMaskElem) 905 Scalars[Mask[I]] = Prev[I]; 906 } 907 908 /// Checks if the provided value does not require scheduling. It does not 909 /// require scheduling if this is not an instruction or it is an instruction 910 /// that does not read/write memory and all operands are either not instructions 911 /// or phi nodes or instructions from different blocks. 912 static bool areAllOperandsNonInsts(Value *V) { 913 auto *I = dyn_cast<Instruction>(V); 914 if (!I) 915 return true; 916 return !mayHaveNonDefUseDependency(*I) && 917 all_of(I->operands(), [I](Value *V) { 918 auto *IO = dyn_cast<Instruction>(V); 919 if (!IO) 920 return true; 921 return isa<PHINode>(IO) || IO->getParent() != I->getParent(); 922 }); 923 } 924 925 /// Checks if the provided value does not require scheduling. It does not 926 /// require scheduling if this is not an instruction or it is an instruction 927 /// that does not read/write memory and all users are phi nodes or instructions 928 /// from the different blocks. 929 static bool isUsedOutsideBlock(Value *V) { 930 auto *I = dyn_cast<Instruction>(V); 931 if (!I) 932 return true; 933 // Limits the number of uses to save compile time. 934 constexpr int UsesLimit = 8; 935 return !I->mayReadOrWriteMemory() && !I->hasNUsesOrMore(UsesLimit) && 936 all_of(I->users(), [I](User *U) { 937 auto *IU = dyn_cast<Instruction>(U); 938 if (!IU) 939 return true; 940 return IU->getParent() != I->getParent() || isa<PHINode>(IU); 941 }); 942 } 943 944 /// Checks if the specified value does not require scheduling. It does not 945 /// require scheduling if all operands and all users do not need to be scheduled 946 /// in the current basic block. 947 static bool doesNotNeedToBeScheduled(Value *V) { 948 return areAllOperandsNonInsts(V) && isUsedOutsideBlock(V); 949 } 950 951 /// Checks if the specified array of instructions does not require scheduling. 
/// This is the case if either all instructions have operands that do not
/// require scheduling, or all their users do not require scheduling because
/// they are phis or live in other basic blocks.
static bool doesNotNeedToSchedule(ArrayRef<Value *> VL) {
  return !VL.empty() &&
         (all_of(VL, isUsedOutsideBlock) || all_of(VL, areAllOperandsNonInsts));
}

namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
  struct TreeEntry;
  struct ScheduleData;
  class ShuffleCostEstimator;
  class ShuffleInstructionBuilder;

public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;
  using OrdersType = SmallVector<unsigned, 4>;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : BatchAA(*Aa), F(Func), SE(Se), TTI(Tti), TLI(TLi), LI(Li),
        DT(Dt), AC(AC), DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    // data type rather than just register size. For example, x86 AVX has
    // 256-bit registers, but it does not support integer operations
    // at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize =
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedValue();

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractelement instructions.
  /// \param ReplacedExternals contains the list of replaced external values
  /// {scalar, replace} after emitting extractelement for external uses.
  Value *
  vectorizeTree(const ExtraValueToDebugLocsMap &ExternallyUsedValues,
                SmallVectorImpl<std::pair<Value *, Value *>> &ReplacedExternals,
                Instruction *ReductionRoot = nullptr);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  InstructionCost getSpillCost() const;

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  InstructionCost getTreeCost(ArrayRef<Value *> VectorizedVals = std::nullopt);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
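  ///
  /// A typical driver sequence (an illustrative sketch only; the pass itself
  /// performs additional legality checks, reordering and bookkeeping around
  /// these calls) looks like:
  /// \code
  ///   R.buildTree(Roots);
  ///   if (R.isTreeTinyAndNotFullyVectorizable())
  ///     return false;
  ///   R.computeMinimumValueSizes();
  ///   InstructionCost Cost = R.getTreeCost();
  ///   if (Cost < -SLPCostThreshold)
  ///     R.vectorizeTree();
  /// \endcode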
1028 void buildTree(ArrayRef<Value *> Roots, 1029 const SmallDenseSet<Value *> &UserIgnoreLst); 1030 1031 /// Construct a vectorizable tree that starts at \p Roots. 1032 void buildTree(ArrayRef<Value *> Roots); 1033 1034 /// Returns whether the root node has in-tree uses. 1035 bool doesRootHaveInTreeUses() const { 1036 return !VectorizableTree.empty() && 1037 !VectorizableTree.front()->UserTreeIndices.empty(); 1038 } 1039 1040 /// Return the scalars of the root node. 1041 ArrayRef<Value *> getRootNodeScalars() const { 1042 assert(!VectorizableTree.empty() && "No graph to get the first node from"); 1043 return VectorizableTree.front()->Scalars; 1044 } 1045 1046 /// Builds external uses of the vectorized scalars, i.e. the list of 1047 /// vectorized scalars to be extracted, their lanes and their scalar users. \p 1048 /// ExternallyUsedValues contains additional list of external uses to handle 1049 /// vectorization of reductions. 1050 void 1051 buildExternalUses(const ExtraValueToDebugLocsMap &ExternallyUsedValues = {}); 1052 1053 /// Clear the internal data structures that are created by 'buildTree'. 1054 void deleteTree() { 1055 VectorizableTree.clear(); 1056 ScalarToTreeEntry.clear(); 1057 MultiNodeScalars.clear(); 1058 MustGather.clear(); 1059 EntryToLastInstruction.clear(); 1060 ExternalUses.clear(); 1061 for (auto &Iter : BlocksSchedules) { 1062 BlockScheduling *BS = Iter.second.get(); 1063 BS->clear(); 1064 } 1065 MinBWs.clear(); 1066 InstrElementSize.clear(); 1067 UserIgnoreList = nullptr; 1068 PostponedGathers.clear(); 1069 ValueToGatherNodes.clear(); 1070 } 1071 1072 unsigned getTreeSize() const { return VectorizableTree.size(); } 1073 1074 /// Perform LICM and CSE on the newly generated gather sequences. 1075 void optimizeGatherSequence(); 1076 1077 /// Checks if the specified gather tree entry \p TE can be represented as a 1078 /// shuffled vector entry + (possibly) permutation with other gathers. It 1079 /// implements the checks only for possibly ordered scalars (Loads, 1080 /// ExtractElement, ExtractValue), which can be part of the graph. 1081 std::optional<OrdersType> findReusedOrderedScalars(const TreeEntry &TE); 1082 1083 /// Sort loads into increasing pointers offsets to allow greater clustering. 1084 std::optional<OrdersType> findPartiallyOrderedLoads(const TreeEntry &TE); 1085 1086 /// Gets reordering data for the given tree entry. If the entry is vectorized 1087 /// - just return ReorderIndices, otherwise check if the scalars can be 1088 /// reordered and return the most optimal order. 1089 /// \return std::nullopt if ordering is not important, empty order, if 1090 /// identity order is important, or the actual order. 1091 /// \param TopToBottom If true, include the order of vectorized stores and 1092 /// insertelement nodes, otherwise skip them. 1093 std::optional<OrdersType> getReorderingData(const TreeEntry &TE, 1094 bool TopToBottom); 1095 1096 /// Reorders the current graph to the most profitable order starting from the 1097 /// root node to the leaf nodes. The best order is chosen only from the nodes 1098 /// of the same size (vectorization factor). Smaller nodes are considered 1099 /// parts of subgraph with smaller VF and they are reordered independently. We 1100 /// can make it because we still need to extend smaller nodes to the wider VF 1101 /// and we can merge reordering shuffles with the widening shuffles. 1102 void reorderTopToBottom(); 1103 1104 /// Reorders the current graph to the most profitable order starting from 1105 /// leaves to the root. 
  /// It allows rotating small subgraphs and reducing the
  /// number of reshuffles if the leaf nodes use the same order. In this case
  /// we can merge the orders and just shuffle the user node instead of
  /// shuffling its operands. Plus, even if the leaf nodes have different
  /// orders, it allows sinking the reordering in the graph closer to the root
  /// node and merging it later during analysis.
  void reorderBottomToTop(bool IgnoreReorder = false);

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by
  // cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  unsigned getMinVF(unsigned Sz) const {
    return std::max(2U, getMinVecRegSize() / Sz);
  }

  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
    unsigned MaxVF = MaxVFOption.getNumOccurrences()
                         ? MaxVFOption
                         : TTI->getMaximumVF(ElemWidth, Opcode);
    return MaxVF ? MaxVF : UINT_MAX;
  }

  /// Check if homogeneous aggregate is isomorphic to some VectorType.
  /// Accepts homogeneous multidimensional aggregate of scalars/vectors like
  /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> },
  /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable(bool ForReduction = false) const;

  /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  /// may not be necessary.
  bool isLoadCombineReductionCandidate(RecurKind RdxKind) const;

  /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  /// may not be necessary.
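  ///
  /// For example (an illustrative C-level sketch, not code from this pass),
  /// a store of a 32-bit value assembled from four adjacent byte loads
  /// \code
  ///   uint32_t V = P[0] | (uint32_t(P[1]) << 8) | (uint32_t(P[2]) << 16) |
  ///                (uint32_t(P[3]) << 24);
  ///   *Out = V;
  /// \endcode
  /// can be turned into a single 32-bit load (plus possibly a byte swap) by
  /// the backend; vectorizing the four i8 loads here would hide that
  /// opportunity.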
  bool isLoadCombineCandidate() const;

  OptimizationRemarkEmitter *getORE() { return ORE; }

  /// This structure holds any data we need about the edges being traversed
  /// during buildTree_rec(). We keep track of:
  /// (i) the user TreeEntry index, and
  /// (ii) the index of the edge.
  struct EdgeInfo {
    EdgeInfo() = default;
    EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
        : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
    /// The user TreeEntry.
    TreeEntry *UserTE = nullptr;
    /// The operand index of the use.
    unsigned EdgeIdx = UINT_MAX;
#ifndef NDEBUG
    friend inline raw_ostream &operator<<(raw_ostream &OS,
                                          const BoUpSLP::EdgeInfo &EI) {
      EI.dump(OS);
      return OS;
    }
    /// Debug print.
    void dump(raw_ostream &OS) const {
      OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
         << " EdgeIdx:" << EdgeIdx << "}";
    }
    LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
#endif
    bool operator == (const EdgeInfo &Other) const {
      return UserTE == Other.UserTE && EdgeIdx == Other.EdgeIdx;
    }
  };

  /// A helper class used for scoring candidates for two consecutive lanes.
  class LookAheadHeuristics {
    const TargetLibraryInfo &TLI;
    const DataLayout &DL;
    ScalarEvolution &SE;
    const BoUpSLP &R;
    int NumLanes; // Total number of lanes (aka vectorization factor).
    int MaxLevel; // The maximum recursion depth for accumulating score.

  public:
    LookAheadHeuristics(const TargetLibraryInfo &TLI, const DataLayout &DL,
                        ScalarEvolution &SE, const BoUpSLP &R, int NumLanes,
                        int MaxLevel)
        : TLI(TLI), DL(DL), SE(SE), R(R), NumLanes(NumLanes),
          MaxLevel(MaxLevel) {}

    // The hard-coded scores listed here are not very important, though they
    // should be higher for better matches to improve the resulting cost. When
    // computing the scores of matching one sub-tree with another, we are
    // basically counting the number of values that are matching. So even if
    // all scores are set to 1, we would still get a decent matching result.
    // However, sometimes we have to break ties. For example we may have to
    // choose between matching loads vs matching opcodes. This is what these
    // scores are helping us with: they provide the order of preference. Also,
    // this is important if the scalar is externally used or used in another
    // tree entry node in a different lane.

    /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
    static const int ScoreConsecutiveLoads = 4;
    /// The same load multiple times. This should have a better score than
    /// `ScoreSplat` because on x86, for a 2-lane vector, we can represent it
    /// with `movddup (%reg), xmm0`, which has a throughput of 0.5, versus 0.5
    /// for a vector load and 1.0 for a broadcast.
    static const int ScoreSplatLoads = 3;
    /// Loads from reversed memory addresses, e.g. load(A[i+1]), load(A[i]).
    static const int ScoreReversedLoads = 3;
    /// A load candidate for masked gather.
    static const int ScoreMaskedGatherCandidate = 1;
    /// ExtractElementInst from same vector and consecutive indexes.
    static const int ScoreConsecutiveExtracts = 4;
    /// ExtractElementInst from same vector and reversed indices.
    static const int ScoreReversedExtracts = 3;
    /// Constants.
    static const int ScoreConstants = 2;
    /// Instructions with the same opcode.
    static const int ScoreSameOpcode = 2;
    /// Instructions with alt opcodes (e.g, add + sub).
    static const int ScoreAltOpcodes = 1;
    /// Identical instructions (a.k.a. splat or broadcast).
    static const int ScoreSplat = 1;
    /// Matching with an undef is preferable to failing.
    static const int ScoreUndef = 1;
    /// Score for failing to find a decent match.
    static const int ScoreFail = 0;
    /// Score if all users are vectorized.
    static const int ScoreAllUserVectorized = 1;

    /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
    /// \p U1 and \p U2 are the users of \p V1 and \p V2.
    /// Also, checks if \p V1 and \p V2 are compatible with instructions in \p
    /// MainAltOps.
    int getShallowScore(Value *V1, Value *V2, Instruction *U1, Instruction *U2,
                        ArrayRef<Value *> MainAltOps) const {
      if (!isValidElementType(V1->getType()) ||
          !isValidElementType(V2->getType()))
        return LookAheadHeuristics::ScoreFail;

      if (V1 == V2) {
        if (isa<LoadInst>(V1)) {
          // Returns true if the users of V1 and V2 won't need to be extracted.
          auto AllUsersAreInternal = [U1, U2, this](Value *V1, Value *V2) {
            // Bail out if we have too many uses to save compilation time.
            static constexpr unsigned Limit = 8;
            if (V1->hasNUsesOrMore(Limit) || V2->hasNUsesOrMore(Limit))
              return false;

            auto AllUsersVectorized = [U1, U2, this](Value *V) {
              return llvm::all_of(V->users(), [U1, U2, this](Value *U) {
                return U == U1 || U == U2 || R.getTreeEntry(U) != nullptr;
              });
            };
            return AllUsersVectorized(V1) && AllUsersVectorized(V2);
          };
          // A broadcast of a load can be cheaper on some targets.
          if (R.TTI->isLegalBroadcastLoad(V1->getType(),
                                          ElementCount::getFixed(NumLanes)) &&
              ((int)V1->getNumUses() == NumLanes ||
               AllUsersAreInternal(V1, V2)))
            return LookAheadHeuristics::ScoreSplatLoads;
        }
        return LookAheadHeuristics::ScoreSplat;
      }

      auto *LI1 = dyn_cast<LoadInst>(V1);
      auto *LI2 = dyn_cast<LoadInst>(V2);
      if (LI1 && LI2) {
        if (LI1->getParent() != LI2->getParent() || !LI1->isSimple() ||
            !LI2->isSimple())
          return LookAheadHeuristics::ScoreFail;

        std::optional<int> Dist = getPointersDiff(
            LI1->getType(), LI1->getPointerOperand(), LI2->getType(),
            LI2->getPointerOperand(), DL, SE, /*StrictCheck=*/true);
        if (!Dist || *Dist == 0) {
          if (getUnderlyingObject(LI1->getPointerOperand()) ==
                  getUnderlyingObject(LI2->getPointerOperand()) &&
              R.TTI->isLegalMaskedGather(
                  FixedVectorType::get(LI1->getType(), NumLanes),
                  LI1->getAlign()))
            return LookAheadHeuristics::ScoreMaskedGatherCandidate;
          return LookAheadHeuristics::ScoreFail;
        }
        // The distance is too large - still may be profitable to use masked
        // loads/gathers.
        if (std::abs(*Dist) > NumLanes / 2)
          return LookAheadHeuristics::ScoreMaskedGatherCandidate;
        // This still will detect consecutive loads, but we might have "holes"
        // in some cases. It is ok for non-power-2 vectorization and may
        // produce better results. It should not affect current vectorization.
        return (*Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveLoads
                           : LookAheadHeuristics::ScoreReversedLoads;
      }

      auto *C1 = dyn_cast<Constant>(V1);
      auto *C2 = dyn_cast<Constant>(V2);
      if (C1 && C2)
        return LookAheadHeuristics::ScoreConstants;

      // Extracts from consecutive indexes of the same vector get a better
      // score since the extracts could be optimized away.
      Value *EV1;
      ConstantInt *Ex1Idx;
      if (match(V1, m_ExtractElt(m_Value(EV1), m_ConstantInt(Ex1Idx)))) {
        // Undefs are always profitable for extractelements.
        // Compiler can easily combine poison and extractelement <non-poison>
        // or undef and extractelement <poison>. But combining undef +
        // extractelement <non-poison-but-may-produce-poison> requires some
        // extra operations.
        if (isa<UndefValue>(V2))
          return (isa<PoisonValue>(V2) || isUndefVector(EV1).all())
                     ? LookAheadHeuristics::ScoreConsecutiveExtracts
                     : LookAheadHeuristics::ScoreSameOpcode;
        Value *EV2 = nullptr;
        ConstantInt *Ex2Idx = nullptr;
        if (match(V2,
                  m_ExtractElt(m_Value(EV2), m_CombineOr(m_ConstantInt(Ex2Idx),
                                                         m_Undef())))) {
          // Undefs are always profitable for extractelements.
          if (!Ex2Idx)
            return LookAheadHeuristics::ScoreConsecutiveExtracts;
          if (isUndefVector(EV2).all() && EV2->getType() == EV1->getType())
            return LookAheadHeuristics::ScoreConsecutiveExtracts;
          if (EV2 == EV1) {
            int Idx1 = Ex1Idx->getZExtValue();
            int Idx2 = Ex2Idx->getZExtValue();
            int Dist = Idx2 - Idx1;
            // The distance is too large - still may be profitable to use
            // shuffles.
            if (std::abs(Dist) == 0)
              return LookAheadHeuristics::ScoreSplat;
            if (std::abs(Dist) > NumLanes / 2)
              return LookAheadHeuristics::ScoreSameOpcode;
            return (Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveExtracts
                              : LookAheadHeuristics::ScoreReversedExtracts;
          }
          return LookAheadHeuristics::ScoreAltOpcodes;
        }
        return LookAheadHeuristics::ScoreFail;
      }

      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (I1 && I2) {
        if (I1->getParent() != I2->getParent())
          return LookAheadHeuristics::ScoreFail;
        SmallVector<Value *, 4> Ops(MainAltOps.begin(), MainAltOps.end());
        Ops.push_back(I1);
        Ops.push_back(I2);
        InstructionsState S = getSameOpcode(Ops, TLI);
        // Note: Only consider instructions with <= 2 operands to avoid
        // complexity explosion.
        if (S.getOpcode() &&
            (S.MainOp->getNumOperands() <= 2 || !MainAltOps.empty() ||
             !S.isAltShuffle()) &&
            all_of(Ops, [&S](Value *V) {
              return cast<Instruction>(V)->getNumOperands() ==
                     S.MainOp->getNumOperands();
            }))
          return S.isAltShuffle() ? LookAheadHeuristics::ScoreAltOpcodes
                                  : LookAheadHeuristics::ScoreSameOpcode;
      }

      if (isa<UndefValue>(V2))
        return LookAheadHeuristics::ScoreUndef;

      return LookAheadHeuristics::ScoreFail;
    }

    /// Go through the operands of \p LHS and \p RHS recursively until
    /// MaxLevel, and return the cumulative score. \p U1 and \p U2 are
    /// the users of \p LHS and \p RHS (that is \p LHS and \p RHS are operands
    /// of \p U1 and \p U2), except at the beginning of the recursion where
    /// these are set to nullptr.
1409 /// 1410 /// For example: 1411 /// \verbatim 1412 /// A[0] B[0] A[1] B[1] C[0] D[0] B[1] A[1] 1413 /// \ / \ / \ / \ / 1414 /// + + + + 1415 /// G1 G2 G3 G4 1416 /// \endverbatim 1417 /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at 1418 /// each level recursively, accumulating the score. It starts from matching 1419 /// the additions at level 0, then moves on to the loads (level 1). The 1420 /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and 1421 /// {B[0],B[1]} match with LookAheadHeuristics::ScoreConsecutiveLoads, while 1422 /// {A[0],C[0]} has a score of LookAheadHeuristics::ScoreFail. 1423 /// Please note that the order of the operands does not matter, as we 1424 /// evaluate the score of all profitable combinations of operands. In 1425 /// other words the score of G1 and G4 is the same as G1 and G2. This 1426 /// heuristic is based on ideas described in: 1427 /// Look-ahead SLP: Auto-vectorization in the presence of commutative 1428 /// operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha, 1429 /// Luís F. W. Góes 1430 int getScoreAtLevelRec(Value *LHS, Value *RHS, Instruction *U1, 1431 Instruction *U2, int CurrLevel, 1432 ArrayRef<Value *> MainAltOps) const { 1433 1434 // Get the shallow score of V1 and V2. 1435 int ShallowScoreAtThisLevel = 1436 getShallowScore(LHS, RHS, U1, U2, MainAltOps); 1437 1438 // If reached MaxLevel, 1439 // or if V1 and V2 are not instructions, 1440 // or if they are SPLAT, 1441 // or if they are not consecutive, 1442 // or if profitable to vectorize loads or extractelements, early return 1443 // the current cost. 1444 auto *I1 = dyn_cast<Instruction>(LHS); 1445 auto *I2 = dyn_cast<Instruction>(RHS); 1446 if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 || 1447 ShallowScoreAtThisLevel == LookAheadHeuristics::ScoreFail || 1448 (((isa<LoadInst>(I1) && isa<LoadInst>(I2)) || 1449 (I1->getNumOperands() > 2 && I2->getNumOperands() > 2) || 1450 (isa<ExtractElementInst>(I1) && isa<ExtractElementInst>(I2))) && 1451 ShallowScoreAtThisLevel)) 1452 return ShallowScoreAtThisLevel; 1453 assert(I1 && I2 && "Should have early exited."); 1454 1455 // Contains the I2 operand indexes that got matched with I1 operands. 1456 SmallSet<unsigned, 4> Op2Used; 1457 1458 // Recursion towards the operands of I1 and I2. We are trying all possible 1459 // operand pairs, and keeping track of the best score. 1460 for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands(); 1461 OpIdx1 != NumOperands1; ++OpIdx1) { 1462 // Try to pair op1I with the best operand of I2. 1463 int MaxTmpScore = 0; 1464 unsigned MaxOpIdx2 = 0; 1465 bool FoundBest = false; 1466 // If I2 is commutative try all combinations. 1467 unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1; 1468 unsigned ToIdx = isCommutative(I2) 1469 ? I2->getNumOperands() 1470 : std::min(I2->getNumOperands(), OpIdx1 + 1); 1471 assert(FromIdx <= ToIdx && "Bad index"); 1472 for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) { 1473 // Skip operands already paired with OpIdx1. 1474 if (Op2Used.count(OpIdx2)) 1475 continue; 1476 // Recursively calculate the cost at each level 1477 int TmpScore = 1478 getScoreAtLevelRec(I1->getOperand(OpIdx1), I2->getOperand(OpIdx2), 1479 I1, I2, CurrLevel + 1, std::nullopt); 1480 // Look for the best score. 
1481 if (TmpScore > LookAheadHeuristics::ScoreFail && 1482 TmpScore > MaxTmpScore) { 1483 MaxTmpScore = TmpScore; 1484 MaxOpIdx2 = OpIdx2; 1485 FoundBest = true; 1486 } 1487 } 1488 if (FoundBest) { 1489 // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it. 1490 Op2Used.insert(MaxOpIdx2); 1491 ShallowScoreAtThisLevel += MaxTmpScore; 1492 } 1493 } 1494 return ShallowScoreAtThisLevel; 1495 } 1496 }; 1497 /// A helper data structure to hold the operands of a vector of instructions. 1498 /// This supports a fixed vector length for all operand vectors. 1499 class VLOperands { 1500 /// For each operand we need (i) the value, and (ii) the opcode that it 1501 /// would be attached to if the expression was in a left-linearized form. 1502 /// This is required to avoid illegal operand reordering. 1503 /// For example: 1504 /// \verbatim 1505 /// 0 Op1 1506 /// |/ 1507 /// Op1 Op2 Linearized + Op2 1508 /// \ / ----------> |/ 1509 /// - - 1510 /// 1511 /// Op1 - Op2 (0 + Op1) - Op2 1512 /// \endverbatim 1513 /// 1514 /// Value Op1 is attached to a '+' operation, and Op2 to a '-'. 1515 /// 1516 /// Another way to think of this is to track all the operations across the 1517 /// path from the operand all the way to the root of the tree and to 1518 /// calculate the operation that corresponds to this path. For example, the 1519 /// path from Op2 to the root crosses the RHS of the '-', therefore the 1520 /// corresponding operation is a '-' (which matches the one in the 1521 /// linearized tree, as shown above). 1522 /// 1523 /// For lack of a better term, we refer to this operation as Accumulated 1524 /// Path Operation (APO). 1525 struct OperandData { 1526 OperandData() = default; 1527 OperandData(Value *V, bool APO, bool IsUsed) 1528 : V(V), APO(APO), IsUsed(IsUsed) {} 1529 /// The operand value. 1530 Value *V = nullptr; 1531 /// TreeEntries only allow a single opcode, or an alternate sequence of 1532 /// them (e.g, +, -). Therefore, we can safely use a boolean value for the 1533 /// APO. It is set to 'true' if 'V' is attached to an inverse operation 1534 /// in the left-linearized form (e.g., Sub/Div), and 'false' otherwise 1535 /// (e.g., Add/Mul) 1536 bool APO = false; 1537 /// Helper data for the reordering function. 1538 bool IsUsed = false; 1539 }; 1540 1541 /// During operand reordering, we are trying to select the operand at lane 1542 /// that matches best with the operand at the neighboring lane. Our 1543 /// selection is based on the type of value we are looking for. For example, 1544 /// if the neighboring lane has a load, we need to look for a load that is 1545 /// accessing a consecutive address. These strategies are summarized in the 1546 /// 'ReorderingMode' enumerator. 1547 enum class ReorderingMode { 1548 Load, ///< Matching loads to consecutive memory addresses 1549 Opcode, ///< Matching instructions based on opcode (same or alternate) 1550 Constant, ///< Matching constants 1551 Splat, ///< Matching the same instruction multiple times (broadcast) 1552 Failed, ///< We failed to create a vectorizable group 1553 }; 1554 1555 using OperandDataVec = SmallVector<OperandData, 2>; 1556 1557 /// A vector of operand vectors. 1558 SmallVector<OperandDataVec, 4> OpsVec; 1559 1560 const TargetLibraryInfo &TLI; 1561 const DataLayout &DL; 1562 ScalarEvolution &SE; 1563 const BoUpSLP &R; 1564 1565 /// \returns the operand data at \p OpIdx and \p Lane. 
1566 OperandData &getData(unsigned OpIdx, unsigned Lane) {
1567 return OpsVec[OpIdx][Lane];
1568 }
1569
1570 /// \returns the operand data at \p OpIdx and \p Lane. Const version.
1571 const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
1572 return OpsVec[OpIdx][Lane];
1573 }
1574
1575 /// Clears the used flag for all entries.
1576 void clearUsed() {
1577 for (unsigned OpIdx = 0, NumOperands = getNumOperands();
1578 OpIdx != NumOperands; ++OpIdx)
1579 for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
1580 ++Lane)
1581 OpsVec[OpIdx][Lane].IsUsed = false;
1582 }
1583
1584 /// Swap the operand at \p OpIdx1 with the one at \p OpIdx2.
1585 void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
1586 std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
1587 }
1588
1589 /// \param Lane lane of the operands under analysis.
1590 /// \param OpIdx operand index in \p Lane lane we're looking for the best
1591 /// candidate for.
1592 /// \param Idx operand index of the current candidate value.
1593 /// \returns The additional score due to possible broadcasting of the
1594 /// elements in the lane. It is more profitable to have a power-of-2 number
1595 /// of unique elements in the lane, as it will be vectorized with higher
1596 /// probability after removing duplicates. Currently the SLP vectorizer
1597 /// supports only vectorization of a power-of-2 number of unique scalars.
1598 int getSplatScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const {
1599 Value *IdxLaneV = getData(Idx, Lane).V;
1600 if (!isa<Instruction>(IdxLaneV) || IdxLaneV == getData(OpIdx, Lane).V)
1601 return 0;
1602 SmallPtrSet<Value *, 4> Uniques;
1603 for (unsigned Ln = 0, E = getNumLanes(); Ln < E; ++Ln) {
1604 if (Ln == Lane)
1605 continue;
1606 Value *OpIdxLnV = getData(OpIdx, Ln).V;
1607 if (!isa<Instruction>(OpIdxLnV))
1608 return 0;
1609 Uniques.insert(OpIdxLnV);
1610 }
1611 int UniquesCount = Uniques.size();
1612 int UniquesCntWithIdxLaneV =
1613 Uniques.contains(IdxLaneV) ? UniquesCount : UniquesCount + 1;
1614 Value *OpIdxLaneV = getData(OpIdx, Lane).V;
1615 int UniquesCntWithOpIdxLaneV =
1616 Uniques.contains(OpIdxLaneV) ? UniquesCount : UniquesCount + 1;
1617 if (UniquesCntWithIdxLaneV == UniquesCntWithOpIdxLaneV)
1618 return 0;
1619 return (PowerOf2Ceil(UniquesCntWithOpIdxLaneV) -
1620 UniquesCntWithOpIdxLaneV) -
1621 (PowerOf2Ceil(UniquesCntWithIdxLaneV) - UniquesCntWithIdxLaneV);
1622 }
1623
1624 /// \param Lane lane of the operands under analysis.
1625 /// \param OpIdx operand index in \p Lane lane we're looking for the best
1626 /// candidate for.
1627 /// \param Idx operand index of the current candidate value.
1628 /// \returns The additional score for the scalar whose users are all
1629 /// vectorized.
1630 int getExternalUseScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const {
1631 Value *IdxLaneV = getData(Idx, Lane).V;
1632 Value *OpIdxLaneV = getData(OpIdx, Lane).V;
1633 // Do not care about the number of uses for vector-like instructions
1634 // (extractelement/extractvalue with constant indices); they are extracts
1635 // themselves and already externally used. Vectorization of such
1636 // instructions does not add an extra extractelement instruction, it may
1637 // only remove one.
1638 if (isVectorLikeInstWithConstOps(IdxLaneV) &&
1639 isVectorLikeInstWithConstOps(OpIdxLaneV))
1640 return LookAheadHeuristics::ScoreAllUserVectorized;
1641 auto *IdxLaneI = dyn_cast<Instruction>(IdxLaneV);
1642 if (!IdxLaneI || !isa<Instruction>(OpIdxLaneV))
1643 return 0;
1644 return R.areAllUsersVectorized(IdxLaneI)
1645 ? LookAheadHeuristics::ScoreAllUserVectorized
1646 : 0;
1647 }
1648
1649 /// Score scaling factor for fully compatible instructions but with a
1650 /// different number of external uses. Allows better selection of the
1651 /// instructions with fewer external uses.
1652 static const int ScoreScaleFactor = 10;
1653
1654 /// \Returns the look-ahead score, which tells us how much the sub-trees
1655 /// rooted at \p LHS and \p RHS match; the more they match, the higher the
1656 /// score. This helps break ties in an informed way when we cannot decide on
1657 /// the order of the operands by just considering the immediate
1658 /// predecessors.
1659 int getLookAheadScore(Value *LHS, Value *RHS, ArrayRef<Value *> MainAltOps,
1660 int Lane, unsigned OpIdx, unsigned Idx,
1661 bool &IsUsed) {
1662 LookAheadHeuristics LookAhead(TLI, DL, SE, R, getNumLanes(),
1663 LookAheadMaxDepth);
1664 // Keep track of the instruction stack as we recurse into the operands
1665 // during the look-ahead score exploration.
1666 int Score =
1667 LookAhead.getScoreAtLevelRec(LHS, RHS, /*U1=*/nullptr, /*U2=*/nullptr,
1668 /*CurrLevel=*/1, MainAltOps);
1669 if (Score) {
1670 int SplatScore = getSplatScore(Lane, OpIdx, Idx);
1671 if (Score <= -SplatScore) {
1672 // Set the minimum score for splat-like sequence to avoid setting
1673 // failed state.
1674 Score = 1;
1675 } else {
1676 Score += SplatScore;
1677 // Scale the score to distinguish between operands that are otherwise
1678 // similar but differ in whether all of their uses are vectorized.
1679 // It does not affect the actual selection of the best compatible
1680 // operand in general, it just prefers the operand whose uses are all
1681 // vectorized.
1682 Score *= ScoreScaleFactor;
1683 Score += getExternalUseScore(Lane, OpIdx, Idx);
1684 IsUsed = true;
1685 }
1686 }
1687 return Score;
1688 }
1689
1690 /// Best defined scores per lanes between the passes. Used to choose the
1691 /// best operand (with the highest score) between the passes.
1692 /// The key - {Operand Index, Lane}.
1693 /// The value - the best score between the passes for the lane and the
1694 /// operand.
1695 SmallDenseMap<std::pair<unsigned, unsigned>, unsigned, 8>
1696 BestScoresPerLanes;
1697
1698 // Search all operands in Ops[*][Lane] for the one that best matches
1699 // Ops[OpIdx][LastLane] and return its operand index.
1700 // If no good match can be found, return std::nullopt.
1701 std::optional<unsigned>
1702 getBestOperand(unsigned OpIdx, int Lane, int LastLane,
1703 ArrayRef<ReorderingMode> ReorderingModes,
1704 ArrayRef<Value *> MainAltOps) {
1705 unsigned NumOperands = getNumOperands();
1706
1707 // The operand of the previous lane at OpIdx.
1708 Value *OpLastLane = getData(OpIdx, LastLane).V;
1709
1710 // Our strategy mode for OpIdx.
1711 ReorderingMode RMode = ReorderingModes[OpIdx];
1712 if (RMode == ReorderingMode::Failed)
1713 return std::nullopt;
1714
1715 // The linearized opcode of the operand at OpIdx, Lane.
1716 bool OpIdxAPO = getData(OpIdx, Lane).APO;
1717
1718 // The best operand index and its score.
1719 // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
1720 // are using the score to differentiate between the two.
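// Illustrative tie-break (hypothetical candidates, not from a real run): if
// one candidate operand is an undef value (shallow score ScoreUndef) and
// another is a load consecutive to the load picked for the previous lane
// (ScoreConsecutiveLoads, which is higher), the consecutive load wins and
// becomes BestOp below.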
1721 struct BestOpData { 1722 std::optional<unsigned> Idx; 1723 unsigned Score = 0; 1724 } BestOp; 1725 BestOp.Score = 1726 BestScoresPerLanes.try_emplace(std::make_pair(OpIdx, Lane), 0) 1727 .first->second; 1728 1729 // Track if the operand must be marked as used. If the operand is set to 1730 // Score 1 explicitly (because of non power-of-2 unique scalars, we may 1731 // want to reestimate the operands again on the following iterations). 1732 bool IsUsed = 1733 RMode == ReorderingMode::Splat || RMode == ReorderingMode::Constant; 1734 // Iterate through all unused operands and look for the best. 1735 for (unsigned Idx = 0; Idx != NumOperands; ++Idx) { 1736 // Get the operand at Idx and Lane. 1737 OperandData &OpData = getData(Idx, Lane); 1738 Value *Op = OpData.V; 1739 bool OpAPO = OpData.APO; 1740 1741 // Skip already selected operands. 1742 if (OpData.IsUsed) 1743 continue; 1744 1745 // Skip if we are trying to move the operand to a position with a 1746 // different opcode in the linearized tree form. This would break the 1747 // semantics. 1748 if (OpAPO != OpIdxAPO) 1749 continue; 1750 1751 // Look for an operand that matches the current mode. 1752 switch (RMode) { 1753 case ReorderingMode::Load: 1754 case ReorderingMode::Constant: 1755 case ReorderingMode::Opcode: { 1756 bool LeftToRight = Lane > LastLane; 1757 Value *OpLeft = (LeftToRight) ? OpLastLane : Op; 1758 Value *OpRight = (LeftToRight) ? Op : OpLastLane; 1759 int Score = getLookAheadScore(OpLeft, OpRight, MainAltOps, Lane, 1760 OpIdx, Idx, IsUsed); 1761 if (Score > static_cast<int>(BestOp.Score)) { 1762 BestOp.Idx = Idx; 1763 BestOp.Score = Score; 1764 BestScoresPerLanes[std::make_pair(OpIdx, Lane)] = Score; 1765 } 1766 break; 1767 } 1768 case ReorderingMode::Splat: 1769 if (Op == OpLastLane) 1770 BestOp.Idx = Idx; 1771 break; 1772 case ReorderingMode::Failed: 1773 llvm_unreachable("Not expected Failed reordering mode."); 1774 } 1775 } 1776 1777 if (BestOp.Idx) { 1778 getData(*BestOp.Idx, Lane).IsUsed = IsUsed; 1779 return BestOp.Idx; 1780 } 1781 // If we could not find a good match return std::nullopt. 1782 return std::nullopt; 1783 } 1784 1785 /// Helper for reorderOperandVecs. 1786 /// \returns the lane that we should start reordering from. This is the one 1787 /// which has the least number of operands that can freely move about or 1788 /// less profitable because it already has the most optimal set of operands. 1789 unsigned getBestLaneToStartReordering() const { 1790 unsigned Min = UINT_MAX; 1791 unsigned SameOpNumber = 0; 1792 // std::pair<unsigned, unsigned> is used to implement a simple voting 1793 // algorithm and choose the lane with the least number of operands that 1794 // can freely move about or less profitable because it already has the 1795 // most optimal set of operands. The first unsigned is a counter for 1796 // voting, the second unsigned is the counter of lanes with instructions 1797 // with same/alternate opcodes and same parent basic block. 1798 MapVector<unsigned, std::pair<unsigned, unsigned>> HashMap; 1799 // Try to be closer to the original results, if we have multiple lanes 1800 // with same cost. If 2 lanes have the same cost, use the one with the 1801 // lowest index. 1802 for (int I = getNumLanes(); I > 0; --I) { 1803 unsigned Lane = I - 1; 1804 OperandsOrderData NumFreeOpsHash = 1805 getMaxNumOperandsThatCanBeReordered(Lane); 1806 // Compare the number of operands that can move and choose the one with 1807 // the least number. 
1808 if (NumFreeOpsHash.NumOfAPOs < Min) { 1809 Min = NumFreeOpsHash.NumOfAPOs; 1810 SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent; 1811 HashMap.clear(); 1812 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane); 1813 } else if (NumFreeOpsHash.NumOfAPOs == Min && 1814 NumFreeOpsHash.NumOpsWithSameOpcodeParent < SameOpNumber) { 1815 // Select the most optimal lane in terms of number of operands that 1816 // should be moved around. 1817 SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent; 1818 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane); 1819 } else if (NumFreeOpsHash.NumOfAPOs == Min && 1820 NumFreeOpsHash.NumOpsWithSameOpcodeParent == SameOpNumber) { 1821 auto *It = HashMap.find(NumFreeOpsHash.Hash); 1822 if (It == HashMap.end()) 1823 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane); 1824 else 1825 ++It->second.first; 1826 } 1827 } 1828 // Select the lane with the minimum counter. 1829 unsigned BestLane = 0; 1830 unsigned CntMin = UINT_MAX; 1831 for (const auto &Data : reverse(HashMap)) { 1832 if (Data.second.first < CntMin) { 1833 CntMin = Data.second.first; 1834 BestLane = Data.second.second; 1835 } 1836 } 1837 return BestLane; 1838 } 1839 1840 /// Data structure that helps to reorder operands. 1841 struct OperandsOrderData { 1842 /// The best number of operands with the same APOs, which can be 1843 /// reordered. 1844 unsigned NumOfAPOs = UINT_MAX; 1845 /// Number of operands with the same/alternate instruction opcode and 1846 /// parent. 1847 unsigned NumOpsWithSameOpcodeParent = 0; 1848 /// Hash for the actual operands ordering. 1849 /// Used to count operands, actually their position id and opcode 1850 /// value. It is used in the voting mechanism to find the lane with the 1851 /// least number of operands that can freely move about or less profitable 1852 /// because it already has the most optimal set of operands. Can be 1853 /// replaced with SmallVector<unsigned> instead but hash code is faster 1854 /// and requires less memory. 1855 unsigned Hash = 0; 1856 }; 1857 /// \returns the maximum number of operands that are allowed to be reordered 1858 /// for \p Lane and the number of compatible instructions(with the same 1859 /// parent/opcode). This is used as a heuristic for selecting the first lane 1860 /// to start operand reordering. 1861 OperandsOrderData getMaxNumOperandsThatCanBeReordered(unsigned Lane) const { 1862 unsigned CntTrue = 0; 1863 unsigned NumOperands = getNumOperands(); 1864 // Operands with the same APO can be reordered. We therefore need to count 1865 // how many of them we have for each APO, like this: Cnt[APO] = x. 1866 // Since we only have two APOs, namely true and false, we can avoid using 1867 // a map. Instead we can simply count the number of operands that 1868 // correspond to one of them (in this case the 'true' APO), and calculate 1869 // the other by subtracting it from the total number of operands. 1870 // Operands with the same instruction opcode and parent are more 1871 // profitable since we don't need to move them in many cases, with a high 1872 // probability such lane already can be vectorized effectively. 
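// Illustrative sketch (hypothetical binary-op lanes, not from a real run):
// for a lane computing "a - b" only the RHS operand has APO == true, so
// CntTrue == 1 and NumOfAPOs == max(1, 2 - 1) == 1, while for "a + b"
// CntTrue == 0 and NumOfAPOs == max(0, 2 - 0) == 2. The subtraction lane
// therefore reports fewer freely movable operands and is preferred by
// getBestLaneToStartReordering().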
1873 bool AllUndefs = true;
1874 unsigned NumOpsWithSameOpcodeParent = 0;
1875 Instruction *OpcodeI = nullptr;
1876 BasicBlock *Parent = nullptr;
1877 unsigned Hash = 0;
1878 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1879 const OperandData &OpData = getData(OpIdx, Lane);
1880 if (OpData.APO)
1881 ++CntTrue;
1882 // Use Boyer-Moore majority voting for finding the majority opcode and
1883 // the number of times it occurs.
1884 if (auto *I = dyn_cast<Instruction>(OpData.V)) {
1885 if (!OpcodeI || !getSameOpcode({OpcodeI, I}, TLI).getOpcode() ||
1886 I->getParent() != Parent) {
1887 if (NumOpsWithSameOpcodeParent == 0) {
1888 NumOpsWithSameOpcodeParent = 1;
1889 OpcodeI = I;
1890 Parent = I->getParent();
1891 } else {
1892 --NumOpsWithSameOpcodeParent;
1893 }
1894 } else {
1895 ++NumOpsWithSameOpcodeParent;
1896 }
1897 }
1898 Hash = hash_combine(
1899 Hash, hash_value((OpIdx + 1) * (OpData.V->getValueID() + 1)));
1900 AllUndefs = AllUndefs && isa<UndefValue>(OpData.V);
1901 }
1902 if (AllUndefs)
1903 return {};
1904 OperandsOrderData Data;
1905 Data.NumOfAPOs = std::max(CntTrue, NumOperands - CntTrue);
1906 Data.NumOpsWithSameOpcodeParent = NumOpsWithSameOpcodeParent;
1907 Data.Hash = Hash;
1908 return Data;
1909 }
1910
1911 /// Go through the instructions in VL and append their operands.
1912 void appendOperandsOfVL(ArrayRef<Value *> VL) {
1913 assert(!VL.empty() && "Bad VL");
1914 assert((empty() || VL.size() == getNumLanes()) &&
1915 "Expected same number of lanes");
1916 assert(isa<Instruction>(VL[0]) && "Expected instruction");
1917 unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
1918 OpsVec.resize(NumOperands);
1919 unsigned NumLanes = VL.size();
1920 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1921 OpsVec[OpIdx].resize(NumLanes);
1922 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
1923 assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
1924 // Our tree has just 3 nodes: the root and two operands.
1925 // It is therefore trivial to get the APO. We only need to check the
1926 // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
1927 // RHS operand. The LHS operand of both add and sub is never attached
1928 // to an inverse operation in the linearized form, therefore its APO
1929 // is false. The RHS's APO is true only if VL[Lane] is an inverse
1930 // operation.
1931 // Since operand reordering is performed on groups of commutative
1932 // operations or alternating sequences (e.g., +, -), we can safely
1933 // tell the inverse operations by checking commutativity.
1934 bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
1935 bool APO = (OpIdx == 0) ? false : IsInverseOperation;
1936 OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
1937 APO, false};
1938 }
1939 }
1940 }
1941
1942 /// \returns the number of operands.
1943 unsigned getNumOperands() const { return OpsVec.size(); }
1944
1945 /// \returns the number of lanes.
1946 unsigned getNumLanes() const { return OpsVec[0].size(); }
1947
1948 /// \returns the operand value at \p OpIdx and \p Lane.
1949 Value *getValue(unsigned OpIdx, unsigned Lane) const {
1950 return getData(OpIdx, Lane).V;
1951 }
1952
1953 /// \returns true if the data structure is empty.
1954 bool empty() const { return OpsVec.empty(); }
1955
1956 /// Clears the data.
1957 void clear() { OpsVec.clear(); }
1958
1959 /// \Returns true if there are enough operands identical to \p Op to fill
1960 /// the whole vector.
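/// For illustration only (hypothetical lanes, assuming matching APOs): with
/// per-lane operands {X, Y}, {X, Z} and {W, X}, the value X can be matched
/// in every lane, so shouldBroadcast(X, ...) returns true and reorder()
/// selects the Splat strategy for that operand index.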
1961 /// Note: This modifies the 'IsUsed' flag, so a cleanUsed() must follow. 1962 bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) { 1963 bool OpAPO = getData(OpIdx, Lane).APO; 1964 for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) { 1965 if (Ln == Lane) 1966 continue; 1967 // This is set to true if we found a candidate for broadcast at Lane. 1968 bool FoundCandidate = false; 1969 for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) { 1970 OperandData &Data = getData(OpI, Ln); 1971 if (Data.APO != OpAPO || Data.IsUsed) 1972 continue; 1973 if (Data.V == Op) { 1974 FoundCandidate = true; 1975 Data.IsUsed = true; 1976 break; 1977 } 1978 } 1979 if (!FoundCandidate) 1980 return false; 1981 } 1982 return true; 1983 } 1984 1985 public: 1986 /// Initialize with all the operands of the instruction vector \p RootVL. 1987 VLOperands(ArrayRef<Value *> RootVL, const TargetLibraryInfo &TLI, 1988 const DataLayout &DL, ScalarEvolution &SE, const BoUpSLP &R) 1989 : TLI(TLI), DL(DL), SE(SE), R(R) { 1990 // Append all the operands of RootVL. 1991 appendOperandsOfVL(RootVL); 1992 } 1993 1994 /// \Returns a value vector with the operands across all lanes for the 1995 /// opearnd at \p OpIdx. 1996 ValueList getVL(unsigned OpIdx) const { 1997 ValueList OpVL(OpsVec[OpIdx].size()); 1998 assert(OpsVec[OpIdx].size() == getNumLanes() && 1999 "Expected same num of lanes across all operands"); 2000 for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane) 2001 OpVL[Lane] = OpsVec[OpIdx][Lane].V; 2002 return OpVL; 2003 } 2004 2005 // Performs operand reordering for 2 or more operands. 2006 // The original operands are in OrigOps[OpIdx][Lane]. 2007 // The reordered operands are returned in 'SortedOps[OpIdx][Lane]'. 2008 void reorder() { 2009 unsigned NumOperands = getNumOperands(); 2010 unsigned NumLanes = getNumLanes(); 2011 // Each operand has its own mode. We are using this mode to help us select 2012 // the instructions for each lane, so that they match best with the ones 2013 // we have selected so far. 2014 SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands); 2015 2016 // This is a greedy single-pass algorithm. We are going over each lane 2017 // once and deciding on the best order right away with no back-tracking. 2018 // However, in order to increase its effectiveness, we start with the lane 2019 // that has operands that can move the least. For example, given the 2020 // following lanes: 2021 // Lane 0 : A[0] = B[0] + C[0] // Visited 3rd 2022 // Lane 1 : A[1] = C[1] - B[1] // Visited 1st 2023 // Lane 2 : A[2] = B[2] + C[2] // Visited 2nd 2024 // Lane 3 : A[3] = C[3] - B[3] // Visited 4th 2025 // we will start at Lane 1, since the operands of the subtraction cannot 2026 // be reordered. Then we will visit the rest of the lanes in a circular 2027 // fashion. That is, Lanes 2, then Lane 0, and finally Lane 3. 2028 2029 // Find the first lane that we will start our search from. 2030 unsigned FirstLane = getBestLaneToStartReordering(); 2031 2032 // Initialize the modes. 2033 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 2034 Value *OpLane0 = getValue(OpIdx, FirstLane); 2035 // Keep track if we have instructions with all the same opcode on one 2036 // side. 2037 if (isa<LoadInst>(OpLane0)) 2038 ReorderingModes[OpIdx] = ReorderingMode::Load; 2039 else if (isa<Instruction>(OpLane0)) { 2040 // Check if OpLane0 should be broadcast. 
2041 if (shouldBroadcast(OpLane0, OpIdx, FirstLane))
2042 ReorderingModes[OpIdx] = ReorderingMode::Splat;
2043 else
2044 ReorderingModes[OpIdx] = ReorderingMode::Opcode;
2045 }
2046 else if (isa<Constant>(OpLane0))
2047 ReorderingModes[OpIdx] = ReorderingMode::Constant;
2048 else if (isa<Argument>(OpLane0))
2049 // Our best hope is a Splat. It may save some cost in some cases.
2050 ReorderingModes[OpIdx] = ReorderingMode::Splat;
2051 else
2052 // NOTE: This should be unreachable.
2053 ReorderingModes[OpIdx] = ReorderingMode::Failed;
2054 }
2055
2056 // Check that we don't have the same operands. No need to reorder if the
2057 // operands are just a perfect or shuffled diamond match. The only cases
2058 // where we still reorder are possible broadcasts or a non-power-of-2
2059 // number of scalars (just for now).
2060 auto &&SkipReordering = [this]() {
2061 SmallPtrSet<Value *, 4> UniqueValues;
2062 ArrayRef<OperandData> Op0 = OpsVec.front();
2063 for (const OperandData &Data : Op0)
2064 UniqueValues.insert(Data.V);
2065 for (ArrayRef<OperandData> Op : drop_begin(OpsVec, 1)) {
2066 if (any_of(Op, [&UniqueValues](const OperandData &Data) {
2067 return !UniqueValues.contains(Data.V);
2068 }))
2069 return false;
2070 }
2071 // TODO: Check if we can remove a check for non-power-2 number of
2072 // scalars after full support of non-power-2 vectorization.
2073 return UniqueValues.size() != 2 && isPowerOf2_32(UniqueValues.size());
2074 };
2075
2076 // If the initial strategy fails for any of the operand indexes, then we
2077 // perform reordering again in a second pass. This helps avoid assigning
2078 // high priority to the failed strategy, and should improve reordering for
2079 // the non-failed operand indexes.
2080 for (int Pass = 0; Pass != 2; ++Pass) {
2081 // Check if there is no need to reorder the operands since they are a
2082 // perfect or shuffled diamond match.
2083 // Need to do it to avoid extra external use cost counting for
2084 // shuffled matches, which may cause regressions.
2085 if (SkipReordering())
2086 break;
2087 // Skip the second pass if the first pass did not fail.
2088 bool StrategyFailed = false;
2089 // Mark all operand data as free to use.
2090 clearUsed();
2091 // We keep the original operand order for the FirstLane, so reorder the
2092 // rest of the lanes. We are visiting the nodes in a circular fashion,
2093 // using FirstLane as the center point and increasing the radius
2094 // distance.
2095 SmallVector<SmallVector<Value *, 2>> MainAltOps(NumOperands);
2096 for (unsigned I = 0; I < NumOperands; ++I)
2097 MainAltOps[I].push_back(getData(I, FirstLane).V);
2098
2099 for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
2100 // Visit the lane on the right and then the lane on the left.
2101 for (int Direction : {+1, -1}) {
2102 int Lane = FirstLane + Direction * Distance;
2103 if (Lane < 0 || Lane >= (int)NumLanes)
2104 continue;
2105 int LastLane = Lane - Direction;
2106 assert(LastLane >= 0 && LastLane < (int)NumLanes &&
2107 "Out of bounds");
2108 // Look for a good match for each operand.
2109 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
2110 // Search for the operand that best matches the one chosen at (OpIdx, LastLane).
2111 std::optional<unsigned> BestIdx = getBestOperand(
2112 OpIdx, Lane, LastLane, ReorderingModes, MainAltOps[OpIdx]);
2113 // By not selecting a value, we allow the operands that follow to
2114 // select a better matching value. We will get a non-null value in
2115 // the next run of getBestOperand().
2116 if (BestIdx) { 2117 // Swap the current operand with the one returned by 2118 // getBestOperand(). 2119 swap(OpIdx, *BestIdx, Lane); 2120 } else { 2121 // We failed to find a best operand, set mode to 'Failed'. 2122 ReorderingModes[OpIdx] = ReorderingMode::Failed; 2123 // Enable the second pass. 2124 StrategyFailed = true; 2125 } 2126 // Try to get the alternate opcode and follow it during analysis. 2127 if (MainAltOps[OpIdx].size() != 2) { 2128 OperandData &AltOp = getData(OpIdx, Lane); 2129 InstructionsState OpS = 2130 getSameOpcode({MainAltOps[OpIdx].front(), AltOp.V}, TLI); 2131 if (OpS.getOpcode() && OpS.isAltShuffle()) 2132 MainAltOps[OpIdx].push_back(AltOp.V); 2133 } 2134 } 2135 } 2136 } 2137 // Skip second pass if the strategy did not fail. 2138 if (!StrategyFailed) 2139 break; 2140 } 2141 } 2142 2143 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2144 LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) { 2145 switch (RMode) { 2146 case ReorderingMode::Load: 2147 return "Load"; 2148 case ReorderingMode::Opcode: 2149 return "Opcode"; 2150 case ReorderingMode::Constant: 2151 return "Constant"; 2152 case ReorderingMode::Splat: 2153 return "Splat"; 2154 case ReorderingMode::Failed: 2155 return "Failed"; 2156 } 2157 llvm_unreachable("Unimplemented Reordering Type"); 2158 } 2159 2160 LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode, 2161 raw_ostream &OS) { 2162 return OS << getModeStr(RMode); 2163 } 2164 2165 /// Debug print. 2166 LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) { 2167 printMode(RMode, dbgs()); 2168 } 2169 2170 friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) { 2171 return printMode(RMode, OS); 2172 } 2173 2174 LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const { 2175 const unsigned Indent = 2; 2176 unsigned Cnt = 0; 2177 for (const OperandDataVec &OpDataVec : OpsVec) { 2178 OS << "Operand " << Cnt++ << "\n"; 2179 for (const OperandData &OpData : OpDataVec) { 2180 OS.indent(Indent) << "{"; 2181 if (Value *V = OpData.V) 2182 OS << *V; 2183 else 2184 OS << "null"; 2185 OS << ", APO:" << OpData.APO << "}\n"; 2186 } 2187 OS << "\n"; 2188 } 2189 return OS; 2190 } 2191 2192 /// Debug print. 2193 LLVM_DUMP_METHOD void dump() const { print(dbgs()); } 2194 #endif 2195 }; 2196 2197 /// Evaluate each pair in \p Candidates and return index into \p Candidates 2198 /// for a pair which have highest score deemed to have best chance to form 2199 /// root of profitable tree to vectorize. Return std::nullopt if no candidate 2200 /// scored above the LookAheadHeuristics::ScoreFail. \param Limit Lower limit 2201 /// of the cost, considered to be good enough score. 2202 std::optional<int> 2203 findBestRootPair(ArrayRef<std::pair<Value *, Value *>> Candidates, 2204 int Limit = LookAheadHeuristics::ScoreFail) { 2205 LookAheadHeuristics LookAhead(*TLI, *DL, *SE, *this, /*NumLanes=*/2, 2206 RootLookAheadMaxDepth); 2207 int BestScore = Limit; 2208 std::optional<int> Index; 2209 for (int I : seq<int>(0, Candidates.size())) { 2210 int Score = LookAhead.getScoreAtLevelRec(Candidates[I].first, 2211 Candidates[I].second, 2212 /*U1=*/nullptr, /*U2=*/nullptr, 2213 /*Level=*/1, std::nullopt); 2214 if (Score > BestScore) { 2215 BestScore = Score; 2216 Index = I; 2217 } 2218 } 2219 return Index; 2220 } 2221 2222 /// Checks if the instruction is marked for deletion. 2223 bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); } 2224 2225 /// Removes an instruction from its block and eventually deletes it. 
2226 /// It's like Instruction::eraseFromParent() except that the actual deletion
2227 /// is delayed until BoUpSLP is destructed.
2228 void eraseInstruction(Instruction *I) {
2229 DeletedInstructions.insert(I);
2230 }
2231
2232 /// Checks if the instruction was already analyzed for being possible
2233 /// reduction root.
2234 bool isAnalyzedReductionRoot(Instruction *I) const {
2235 return AnalyzedReductionsRoots.count(I);
2236 }
2237 /// Register given instruction as already analyzed for being possible
2238 /// reduction root.
2239 void analyzedReductionRoot(Instruction *I) {
2240 AnalyzedReductionsRoots.insert(I);
2241 }
2242 /// Checks if the provided list of reduced values was checked already for
2243 /// vectorization.
2244 bool areAnalyzedReductionVals(ArrayRef<Value *> VL) const {
2245 return AnalyzedReductionVals.contains(hash_value(VL));
2246 }
2247 /// Adds the list of reduced values to the list of values already checked
2248 /// for vectorization.
2249 void analyzedReductionVals(ArrayRef<Value *> VL) {
2250 AnalyzedReductionVals.insert(hash_value(VL));
2251 }
2252 /// Clear the list of the analyzed reduction root instructions.
2253 void clearReductionData() {
2254 AnalyzedReductionsRoots.clear();
2255 AnalyzedReductionVals.clear();
2256 }
2257 /// Checks if the given value is gathered in one of the nodes.
2258 bool isAnyGathered(const SmallDenseSet<Value *> &Vals) const {
2259 return any_of(MustGather, [&](Value *V) { return Vals.contains(V); });
2260 }
2261
2262 /// Check if the value is vectorized in the tree.
2263 bool isVectorized(Value *V) const { return getTreeEntry(V); }
2264
2265 ~BoUpSLP();
2266
2267 private:
2268 /// Determine if a vectorized value \p V can be demoted to
2269 /// a smaller type with a truncation. We collect the values that will be
2270 /// demoted in ToDemote and additional roots that require investigating in
2271 /// Roots.
2272 /// \param DemotedConsts list of Instruction/OperandIndex pairs that are
2273 /// constant and to be demoted. Required to correctly identify constant nodes
2274 /// to be demoted.
2275 bool collectValuesToDemote(
2276 Value *V, SmallVectorImpl<Value *> &ToDemote,
2277 DenseMap<Instruction *, SmallVector<unsigned>> &DemotedConsts,
2278 SmallVectorImpl<Value *> &Roots, DenseSet<Value *> &Visited) const;
2279
2280 /// Check if the operands on the edges \p Edges of the \p UserTE allow
2281 /// reordering (i.e., the operands can be reordered because they have only
2282 /// one user and are reorderable).
2283 /// \param ReorderableGathers List of all gather nodes that require reordering
2284 /// (e.g., gather of extractelements or partially vectorizable loads).
2285 /// \param GatherOps List of gather operand nodes for \p UserTE that require
2286 /// reordering, subset of \p NonVectorized.
2287 bool
2288 canReorderOperands(TreeEntry *UserTE,
2289 SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges,
2290 ArrayRef<TreeEntry *> ReorderableGathers,
2291 SmallVectorImpl<TreeEntry *> &GatherOps);
2292
2293 /// Checks if the given \p TE is a gather node with clustered reused scalars
2294 /// and reorders it per given \p Mask.
2295 void reorderNodeWithReuses(TreeEntry &TE, ArrayRef<int> Mask) const;
2296
2297 /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
2298 /// if any. If it is not vectorized (gather node), returns nullptr.
2299 TreeEntry *getVectorizedOperand(TreeEntry *UserTE, unsigned OpIdx) {
2300 ArrayRef<Value *> VL = UserTE->getOperand(OpIdx);
2301 TreeEntry *TE = nullptr;
2302 const auto *It = find_if(VL, [&](Value *V) {
2303 TE = getTreeEntry(V);
2304 if (TE && is_contained(TE->UserTreeIndices, EdgeInfo(UserTE, OpIdx)))
2305 return true;
2306 auto It = MultiNodeScalars.find(V);
2307 if (It != MultiNodeScalars.end()) {
2308 for (TreeEntry *E : It->second) {
2309 if (is_contained(E->UserTreeIndices, EdgeInfo(UserTE, OpIdx))) {
2310 TE = E;
2311 return true;
2312 }
2313 }
2314 }
2315 return false;
2316 });
2317 if (It != VL.end()) {
2318 assert(TE->isSame(VL) && "Expected same scalars.");
2319 return TE;
2320 }
2321 return nullptr;
2322 }
2323
2324 /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
2325 /// if any. If it is not vectorized (gather node), returns nullptr.
2326 const TreeEntry *getVectorizedOperand(const TreeEntry *UserTE,
2327 unsigned OpIdx) const {
2328 return const_cast<BoUpSLP *>(this)->getVectorizedOperand(
2329 const_cast<TreeEntry *>(UserTE), OpIdx);
2330 }
2331
2332 /// Checks if all users of \p I are part of the vectorization tree.
2333 bool areAllUsersVectorized(
2334 Instruction *I,
2335 const SmallDenseSet<Value *> *VectorizedVals = nullptr) const;
2336
2337 /// Return information about the vector formed for the specified index
2338 /// of a vector of (the same) instructions.
2339 TargetTransformInfo::OperandValueInfo getOperandInfo(ArrayRef<Value *> Ops);
2340
2341 /// \returns the graph entry for the \p Idx operand of the \p E entry.
2342 const TreeEntry *getOperandEntry(const TreeEntry *E, unsigned Idx) const;
2343
2344 /// \returns the cost of the vectorizable entry.
2345 InstructionCost getEntryCost(const TreeEntry *E,
2346 ArrayRef<Value *> VectorizedVals,
2347 SmallPtrSetImpl<Value *> &CheckedExtracts);
2348
2349 /// This is the recursive part of buildTree.
2350 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth,
2351 const EdgeInfo &EI);
2352
2353 /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can
2354 /// be vectorized to use the original vector (or aggregate "bitcast" to a
2355 /// vector) and sets \p CurrentOrder to the identity permutation; otherwise
2356 /// returns false, setting \p CurrentOrder to either an empty vector or a
2357 /// non-identity permutation that allows reusing the extract instructions.
2358 /// \param ResizeAllowed indicates whether it is allowed to handle subvector
2359 /// extract order.
2360 bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
2361 SmallVectorImpl<unsigned> &CurrentOrder,
2362 bool ResizeAllowed = false) const;
2363
2364 /// Vectorize a single entry in the tree.
2365 /// \param PostponedPHIs true if the emission of phi nodes needs to be
2366 /// postponed to avoid issues with def-use order.
2367 Value *vectorizeTree(TreeEntry *E, bool PostponedPHIs);
2368
2369 /// Vectorize a single entry in the tree, the \p NodeIdx-th operand of the
2370 /// entry \p E.
2371 /// \param PostponedPHIs true if the emission of phi nodes needs to be
2372 /// postponed to avoid issues with def-use order.
2373 Value *vectorizeOperand(TreeEntry *E, unsigned NodeIdx, bool PostponedPHIs);
2374
2375 /// Create a new vector from a list of scalar values. Produces a sequence
2376 /// which exploits values reused across lanes, and arranges the inserts
2377 /// for ease of later optimization.
2378 template <typename BVTy, typename ResTy, typename...
Args> 2379 ResTy processBuildVector(const TreeEntry *E, Args &...Params); 2380 2381 /// Create a new vector from a list of scalar values. Produces a sequence 2382 /// which exploits values reused across lanes, and arranges the inserts 2383 /// for ease of later optimization. 2384 Value *createBuildVector(const TreeEntry *E); 2385 2386 /// Returns the instruction in the bundle, which can be used as a base point 2387 /// for scheduling. Usually it is the last instruction in the bundle, except 2388 /// for the case when all operands are external (in this case, it is the first 2389 /// instruction in the list). 2390 Instruction &getLastInstructionInBundle(const TreeEntry *E); 2391 2392 /// Tries to find extractelement instructions with constant indices from fixed 2393 /// vector type and gather such instructions into a bunch, which highly likely 2394 /// might be detected as a shuffle of 1 or 2 input vectors. If this attempt 2395 /// was successful, the matched scalars are replaced by poison values in \p VL 2396 /// for future analysis. 2397 std::optional<TargetTransformInfo::ShuffleKind> 2398 tryToGatherSingleRegisterExtractElements(MutableArrayRef<Value *> VL, 2399 SmallVectorImpl<int> &Mask) const; 2400 2401 /// Tries to find extractelement instructions with constant indices from fixed 2402 /// vector type and gather such instructions into a bunch, which highly likely 2403 /// might be detected as a shuffle of 1 or 2 input vectors. If this attempt 2404 /// was successful, the matched scalars are replaced by poison values in \p VL 2405 /// for future analysis. 2406 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> 2407 tryToGatherExtractElements(SmallVectorImpl<Value *> &VL, 2408 SmallVectorImpl<int> &Mask, 2409 unsigned NumParts) const; 2410 2411 /// Checks if the gathered \p VL can be represented as a single register 2412 /// shuffle(s) of previous tree entries. 2413 /// \param TE Tree entry checked for permutation. 2414 /// \param VL List of scalars (a subset of the TE scalar), checked for 2415 /// permutations. Must form single-register vector. 2416 /// \returns ShuffleKind, if gathered values can be represented as shuffles of 2417 /// previous tree entries. \p Part of \p Mask is filled with the shuffle mask. 2418 std::optional<TargetTransformInfo::ShuffleKind> 2419 isGatherShuffledSingleRegisterEntry( 2420 const TreeEntry *TE, ArrayRef<Value *> VL, MutableArrayRef<int> Mask, 2421 SmallVectorImpl<const TreeEntry *> &Entries, unsigned Part); 2422 2423 /// Checks if the gathered \p VL can be represented as multi-register 2424 /// shuffle(s) of previous tree entries. 2425 /// \param TE Tree entry checked for permutation. 2426 /// \param VL List of scalars (a subset of the TE scalar), checked for 2427 /// permutations. 2428 /// \returns per-register series of ShuffleKind, if gathered values can be 2429 /// represented as shuffles of previous tree entries. \p Mask is filled with 2430 /// the shuffle mask (also on per-register base). 2431 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> 2432 isGatherShuffledEntry( 2433 const TreeEntry *TE, ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask, 2434 SmallVectorImpl<SmallVector<const TreeEntry *>> &Entries, 2435 unsigned NumParts); 2436 2437 /// \returns the scalarization cost for this list of values. Assuming that 2438 /// this subtree gets vectorized, we may need to extract the values from the 2439 /// roots. This method calculates the cost of extracting the values. 
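/// Hedged illustration (actual numbers are target-dependent and come from
/// TTI): gathering four distinct, non-constant scalars into a 4-wide vector
/// is roughly priced as four element insertions; constants and repeated
/// scalars are typically cheaper.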
2440 /// \param ForPoisonSrc true if initial vector is poison, false otherwise.
2441 InstructionCost getGatherCost(ArrayRef<Value *> VL, bool ForPoisonSrc) const;
2442
2443 /// Set the Builder insert point to one after the last instruction in
2444 /// the bundle.
2445 void setInsertPointAfterBundle(const TreeEntry *E);
2446
2447 /// \returns a vector from a collection of scalars in \p VL. If \p Root is not
2448 /// specified, the starting vector value is poison.
2449 Value *gather(ArrayRef<Value *> VL, Value *Root);
2450
2451 /// \returns whether the VectorizableTree is fully vectorizable and will
2452 /// be beneficial even if the tree height is tiny.
2453 bool isFullyVectorizableTinyTree(bool ForReduction) const;
2454
2455 /// Reorder commutative or alt operands to get a better probability of
2456 /// generating vectorized code.
2457 static void reorderInputsAccordingToOpcode(
2458 ArrayRef<Value *> VL, SmallVectorImpl<Value *> &Left,
2459 SmallVectorImpl<Value *> &Right, const TargetLibraryInfo &TLI,
2460 const DataLayout &DL, ScalarEvolution &SE, const BoUpSLP &R);
2461
2462 /// Helper for `findExternalStoreUsersReorderIndices()`. It iterates over the
2463 /// users of \p TE and collects the stores. It returns the map from the store
2464 /// pointers to the collected stores.
2465 DenseMap<Value *, SmallVector<StoreInst *>>
2466 collectUserStores(const BoUpSLP::TreeEntry *TE) const;
2467
2468 /// Helper for `findExternalStoreUsersReorderIndices()`. It checks if the
2469 /// stores in \p StoresVec can form a vector instruction. If so it returns
2470 /// true and populates \p ReorderIndices with the shuffle indices of the
2471 /// stores when compared to the sorted vector.
2472 bool canFormVector(ArrayRef<StoreInst *> StoresVec,
2473 OrdersType &ReorderIndices) const;
2474
2475 /// Iterates through the users of \p TE, looking for scalar stores that can be
2476 /// potentially vectorized in a future SLP-tree. If found, it keeps track of
2477 /// their order and builds an order index vector for each store bundle. It
2478 /// returns all the order vectors found.
2479 /// We run this after the tree has formed, otherwise we may come across user
2480 /// instructions that are not yet in the tree.
2481 SmallVector<OrdersType, 1>
2482 findExternalStoreUsersReorderIndices(TreeEntry *TE) const;
2483
2484 struct TreeEntry {
2485 using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>;
2486 TreeEntry(VecTreeTy &Container) : Container(Container) {}
2487
2488 /// \returns Common mask for reorder indices and reused scalars.
2489 SmallVector<int> getCommonMask() const {
2490 SmallVector<int> Mask;
2491 inversePermutation(ReorderIndices, Mask);
2492 ::addMask(Mask, ReuseShuffleIndices);
2493 return Mask;
2494 }
2495
2496 /// \returns true if the scalars in VL are equal to this entry.
2497 bool isSame(ArrayRef<Value *> VL) const {
2498 auto &&IsSame = [VL](ArrayRef<Value *> Scalars, ArrayRef<int> Mask) {
2499 if (Mask.size() != VL.size() && VL.size() == Scalars.size())
2500 return std::equal(VL.begin(), VL.end(), Scalars.begin());
2501 return VL.size() == Mask.size() &&
2502 std::equal(VL.begin(), VL.end(), Mask.begin(),
2503 [Scalars](Value *V, int Idx) {
2504 return (isa<UndefValue>(V) &&
2505 Idx == PoisonMaskElem) ||
2506 (Idx != PoisonMaskElem && V == Scalars[Idx]);
2507 });
2508 };
2509 if (!ReorderIndices.empty()) {
2510 // TODO: implement matching if the nodes are just reordered, still can
2511 // treat the vector as the same if the list of scalars matches VL
2512 // directly, without reordering.
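// Illustration only (hypothetical scalars), assuming the usual convention
// of inversePermutation() that Mask[ReorderIndices[I]] == I: with
// Scalars == {a, b, c, d} and ReorderIndices == {2, 0, 1, 3} the mask
// becomes {1, 2, 0, 3}, so a VL of {b, c, a, d} compares equal here.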
2513 SmallVector<int> Mask; 2514 inversePermutation(ReorderIndices, Mask); 2515 if (VL.size() == Scalars.size()) 2516 return IsSame(Scalars, Mask); 2517 if (VL.size() == ReuseShuffleIndices.size()) { 2518 ::addMask(Mask, ReuseShuffleIndices); 2519 return IsSame(Scalars, Mask); 2520 } 2521 return false; 2522 } 2523 return IsSame(Scalars, ReuseShuffleIndices); 2524 } 2525 2526 bool isOperandGatherNode(const EdgeInfo &UserEI) const { 2527 return State == TreeEntry::NeedToGather && 2528 UserTreeIndices.front().EdgeIdx == UserEI.EdgeIdx && 2529 UserTreeIndices.front().UserTE == UserEI.UserTE; 2530 } 2531 2532 /// \returns true if current entry has same operands as \p TE. 2533 bool hasEqualOperands(const TreeEntry &TE) const { 2534 if (TE.getNumOperands() != getNumOperands()) 2535 return false; 2536 SmallBitVector Used(getNumOperands()); 2537 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) { 2538 unsigned PrevCount = Used.count(); 2539 for (unsigned K = 0; K < E; ++K) { 2540 if (Used.test(K)) 2541 continue; 2542 if (getOperand(K) == TE.getOperand(I)) { 2543 Used.set(K); 2544 break; 2545 } 2546 } 2547 // Check if we actually found the matching operand. 2548 if (PrevCount == Used.count()) 2549 return false; 2550 } 2551 return true; 2552 } 2553 2554 /// \return Final vectorization factor for the node. Defined by the total 2555 /// number of vectorized scalars, including those, used several times in the 2556 /// entry and counted in the \a ReuseShuffleIndices, if any. 2557 unsigned getVectorFactor() const { 2558 if (!ReuseShuffleIndices.empty()) 2559 return ReuseShuffleIndices.size(); 2560 return Scalars.size(); 2561 }; 2562 2563 /// A vector of scalars. 2564 ValueList Scalars; 2565 2566 /// The Scalars are vectorized into this value. It is initialized to Null. 2567 WeakTrackingVH VectorizedValue = nullptr; 2568 2569 /// New vector phi instructions emitted for the vectorized phi nodes. 2570 PHINode *PHI = nullptr; 2571 2572 /// Do we need to gather this sequence or vectorize it 2573 /// (either with vector instruction or with scatter/gather 2574 /// intrinsics for store/load)? 2575 enum EntryState { 2576 Vectorize, 2577 ScatterVectorize, 2578 PossibleStridedVectorize, 2579 NeedToGather 2580 }; 2581 EntryState State; 2582 2583 /// Does this sequence require some shuffling? 2584 SmallVector<int, 4> ReuseShuffleIndices; 2585 2586 /// Does this entry require reordering? 2587 SmallVector<unsigned, 4> ReorderIndices; 2588 2589 /// Points back to the VectorizableTree. 2590 /// 2591 /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has 2592 /// to be a pointer and needs to be able to initialize the child iterator. 2593 /// Thus we need a reference back to the container to translate the indices 2594 /// to entries. 2595 VecTreeTy &Container; 2596 2597 /// The TreeEntry index containing the user of this entry. We can actually 2598 /// have multiple users so the data structure is not truly a tree. 2599 SmallVector<EdgeInfo, 1> UserTreeIndices; 2600 2601 /// The index of this treeEntry in VectorizableTree. 2602 int Idx = -1; 2603 2604 private: 2605 /// The operands of each instruction in each lane Operands[op_index][lane]. 2606 /// Note: This helps avoid the replication of the code that performs the 2607 /// reordering of operands during buildTree_rec() and vectorizeTree(). 2608 SmallVector<ValueList, 2> Operands; 2609 2610 /// The main/alternate instruction. 
2611 Instruction *MainOp = nullptr; 2612 Instruction *AltOp = nullptr; 2613 2614 public: 2615 /// Set this bundle's \p OpIdx'th operand to \p OpVL. 2616 void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) { 2617 if (Operands.size() < OpIdx + 1) 2618 Operands.resize(OpIdx + 1); 2619 assert(Operands[OpIdx].empty() && "Already resized?"); 2620 assert(OpVL.size() <= Scalars.size() && 2621 "Number of operands is greater than the number of scalars."); 2622 Operands[OpIdx].resize(OpVL.size()); 2623 copy(OpVL, Operands[OpIdx].begin()); 2624 } 2625 2626 /// Set the operands of this bundle in their original order. 2627 void setOperandsInOrder() { 2628 assert(Operands.empty() && "Already initialized?"); 2629 auto *I0 = cast<Instruction>(Scalars[0]); 2630 Operands.resize(I0->getNumOperands()); 2631 unsigned NumLanes = Scalars.size(); 2632 for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands(); 2633 OpIdx != NumOperands; ++OpIdx) { 2634 Operands[OpIdx].resize(NumLanes); 2635 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) { 2636 auto *I = cast<Instruction>(Scalars[Lane]); 2637 assert(I->getNumOperands() == NumOperands && 2638 "Expected same number of operands"); 2639 Operands[OpIdx][Lane] = I->getOperand(OpIdx); 2640 } 2641 } 2642 } 2643 2644 /// Reorders operands of the node to the given mask \p Mask. 2645 void reorderOperands(ArrayRef<int> Mask) { 2646 for (ValueList &Operand : Operands) 2647 reorderScalars(Operand, Mask); 2648 } 2649 2650 /// \returns the \p OpIdx operand of this TreeEntry. 2651 ValueList &getOperand(unsigned OpIdx) { 2652 assert(OpIdx < Operands.size() && "Off bounds"); 2653 return Operands[OpIdx]; 2654 } 2655 2656 /// \returns the \p OpIdx operand of this TreeEntry. 2657 ArrayRef<Value *> getOperand(unsigned OpIdx) const { 2658 assert(OpIdx < Operands.size() && "Off bounds"); 2659 return Operands[OpIdx]; 2660 } 2661 2662 /// \returns the number of operands. 2663 unsigned getNumOperands() const { return Operands.size(); } 2664 2665 /// \return the single \p OpIdx operand. 2666 Value *getSingleOperand(unsigned OpIdx) const { 2667 assert(OpIdx < Operands.size() && "Off bounds"); 2668 assert(!Operands[OpIdx].empty() && "No operand available"); 2669 return Operands[OpIdx][0]; 2670 } 2671 2672 /// Some of the instructions in the list have alternate opcodes. 2673 bool isAltShuffle() const { return MainOp != AltOp; } 2674 2675 bool isOpcodeOrAlt(Instruction *I) const { 2676 unsigned CheckedOpcode = I->getOpcode(); 2677 return (getOpcode() == CheckedOpcode || 2678 getAltOpcode() == CheckedOpcode); 2679 } 2680 2681 /// Chooses the correct key for scheduling data. If \p Op has the same (or 2682 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is 2683 /// \p OpValue. 2684 Value *isOneOf(Value *Op) const { 2685 auto *I = dyn_cast<Instruction>(Op); 2686 if (I && isOpcodeOrAlt(I)) 2687 return Op; 2688 return MainOp; 2689 } 2690 2691 void setOperations(const InstructionsState &S) { 2692 MainOp = S.MainOp; 2693 AltOp = S.AltOp; 2694 } 2695 2696 Instruction *getMainOp() const { 2697 return MainOp; 2698 } 2699 2700 Instruction *getAltOp() const { 2701 return AltOp; 2702 } 2703 2704 /// The main/alternate opcodes for the list of instructions. 2705 unsigned getOpcode() const { 2706 return MainOp ? MainOp->getOpcode() : 0; 2707 } 2708 2709 unsigned getAltOpcode() const { 2710 return AltOp ? AltOp->getOpcode() : 0; 2711 } 2712 2713 /// When ReuseReorderShuffleIndices is empty it just returns position of \p 2714 /// V within vector of Scalars. 
Otherwise, try to remap on its reuse index. 2715 int findLaneForValue(Value *V) const { 2716 unsigned FoundLane = std::distance(Scalars.begin(), find(Scalars, V)); 2717 assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); 2718 if (!ReorderIndices.empty()) 2719 FoundLane = ReorderIndices[FoundLane]; 2720 assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); 2721 if (!ReuseShuffleIndices.empty()) { 2722 FoundLane = std::distance(ReuseShuffleIndices.begin(), 2723 find(ReuseShuffleIndices, FoundLane)); 2724 } 2725 return FoundLane; 2726 } 2727 2728 /// Build a shuffle mask for graph entry which represents a merge of main 2729 /// and alternate operations. 2730 void 2731 buildAltOpShuffleMask(const function_ref<bool(Instruction *)> IsAltOp, 2732 SmallVectorImpl<int> &Mask, 2733 SmallVectorImpl<Value *> *OpScalars = nullptr, 2734 SmallVectorImpl<Value *> *AltScalars = nullptr) const; 2735 2736 #ifndef NDEBUG 2737 /// Debug printer. 2738 LLVM_DUMP_METHOD void dump() const { 2739 dbgs() << Idx << ".\n"; 2740 for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) { 2741 dbgs() << "Operand " << OpI << ":\n"; 2742 for (const Value *V : Operands[OpI]) 2743 dbgs().indent(2) << *V << "\n"; 2744 } 2745 dbgs() << "Scalars: \n"; 2746 for (Value *V : Scalars) 2747 dbgs().indent(2) << *V << "\n"; 2748 dbgs() << "State: "; 2749 switch (State) { 2750 case Vectorize: 2751 dbgs() << "Vectorize\n"; 2752 break; 2753 case ScatterVectorize: 2754 dbgs() << "ScatterVectorize\n"; 2755 break; 2756 case PossibleStridedVectorize: 2757 dbgs() << "PossibleStridedVectorize\n"; 2758 break; 2759 case NeedToGather: 2760 dbgs() << "NeedToGather\n"; 2761 break; 2762 } 2763 dbgs() << "MainOp: "; 2764 if (MainOp) 2765 dbgs() << *MainOp << "\n"; 2766 else 2767 dbgs() << "NULL\n"; 2768 dbgs() << "AltOp: "; 2769 if (AltOp) 2770 dbgs() << *AltOp << "\n"; 2771 else 2772 dbgs() << "NULL\n"; 2773 dbgs() << "VectorizedValue: "; 2774 if (VectorizedValue) 2775 dbgs() << *VectorizedValue << "\n"; 2776 else 2777 dbgs() << "NULL\n"; 2778 dbgs() << "ReuseShuffleIndices: "; 2779 if (ReuseShuffleIndices.empty()) 2780 dbgs() << "Empty"; 2781 else 2782 for (int ReuseIdx : ReuseShuffleIndices) 2783 dbgs() << ReuseIdx << ", "; 2784 dbgs() << "\n"; 2785 dbgs() << "ReorderIndices: "; 2786 for (unsigned ReorderIdx : ReorderIndices) 2787 dbgs() << ReorderIdx << ", "; 2788 dbgs() << "\n"; 2789 dbgs() << "UserTreeIndices: "; 2790 for (const auto &EInfo : UserTreeIndices) 2791 dbgs() << EInfo << ", "; 2792 dbgs() << "\n"; 2793 } 2794 #endif 2795 }; 2796 2797 #ifndef NDEBUG 2798 void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost, 2799 InstructionCost VecCost, InstructionCost ScalarCost, 2800 StringRef Banner) const { 2801 dbgs() << "SLP: " << Banner << ":\n"; 2802 E->dump(); 2803 dbgs() << "SLP: Costs:\n"; 2804 dbgs() << "SLP: ReuseShuffleCost = " << ReuseShuffleCost << "\n"; 2805 dbgs() << "SLP: VectorCost = " << VecCost << "\n"; 2806 dbgs() << "SLP: ScalarCost = " << ScalarCost << "\n"; 2807 dbgs() << "SLP: ReuseShuffleCost + VecCost - ScalarCost = " 2808 << ReuseShuffleCost + VecCost - ScalarCost << "\n"; 2809 } 2810 #endif 2811 2812 /// Create a new VectorizableTree entry. 2813 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, 2814 std::optional<ScheduleData *> Bundle, 2815 const InstructionsState &S, 2816 const EdgeInfo &UserTreeIdx, 2817 ArrayRef<int> ReuseShuffleIndices = std::nullopt, 2818 ArrayRef<unsigned> ReorderIndices = std::nullopt) { 2819 TreeEntry::EntryState EntryState = 2820 Bundle ? 
TreeEntry::Vectorize : TreeEntry::NeedToGather; 2821 return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx, 2822 ReuseShuffleIndices, ReorderIndices); 2823 } 2824 2825 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, 2826 TreeEntry::EntryState EntryState, 2827 std::optional<ScheduleData *> Bundle, 2828 const InstructionsState &S, 2829 const EdgeInfo &UserTreeIdx, 2830 ArrayRef<int> ReuseShuffleIndices = std::nullopt, 2831 ArrayRef<unsigned> ReorderIndices = std::nullopt) { 2832 assert(((!Bundle && EntryState == TreeEntry::NeedToGather) || 2833 (Bundle && EntryState != TreeEntry::NeedToGather)) && 2834 "Need to vectorize gather entry?"); 2835 VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree)); 2836 TreeEntry *Last = VectorizableTree.back().get(); 2837 Last->Idx = VectorizableTree.size() - 1; 2838 Last->State = EntryState; 2839 Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(), 2840 ReuseShuffleIndices.end()); 2841 if (ReorderIndices.empty()) { 2842 Last->Scalars.assign(VL.begin(), VL.end()); 2843 Last->setOperations(S); 2844 } else { 2845 // Reorder scalars and build final mask. 2846 Last->Scalars.assign(VL.size(), nullptr); 2847 transform(ReorderIndices, Last->Scalars.begin(), 2848 [VL](unsigned Idx) -> Value * { 2849 if (Idx >= VL.size()) 2850 return UndefValue::get(VL.front()->getType()); 2851 return VL[Idx]; 2852 }); 2853 InstructionsState S = getSameOpcode(Last->Scalars, *TLI); 2854 Last->setOperations(S); 2855 Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end()); 2856 } 2857 if (Last->State != TreeEntry::NeedToGather) { 2858 for (Value *V : VL) { 2859 const TreeEntry *TE = getTreeEntry(V); 2860 assert((!TE || TE == Last || doesNotNeedToBeScheduled(V)) && 2861 "Scalar already in tree!"); 2862 if (TE) { 2863 if (TE != Last) 2864 MultiNodeScalars.try_emplace(V).first->getSecond().push_back(Last); 2865 continue; 2866 } 2867 ScalarToTreeEntry[V] = Last; 2868 } 2869 // Update the scheduler bundle to point to this TreeEntry. 2870 ScheduleData *BundleMember = *Bundle; 2871 assert((BundleMember || isa<PHINode>(S.MainOp) || 2872 isVectorLikeInstWithConstOps(S.MainOp) || 2873 doesNotNeedToSchedule(VL)) && 2874 "Bundle and VL out of sync"); 2875 if (BundleMember) { 2876 for (Value *V : VL) { 2877 if (doesNotNeedToBeScheduled(V)) 2878 continue; 2879 if (!BundleMember) 2880 continue; 2881 BundleMember->TE = Last; 2882 BundleMember = BundleMember->NextInBundle; 2883 } 2884 } 2885 assert(!BundleMember && "Bundle and VL out of sync"); 2886 } else { 2887 MustGather.insert(VL.begin(), VL.end()); 2888 // Build a map for gathered scalars to the nodes where they are used. 2889 for (Value *V : VL) 2890 if (!isConstant(V)) 2891 ValueToGatherNodes.try_emplace(V).first->getSecond().insert(Last); 2892 } 2893 2894 if (UserTreeIdx.UserTE) 2895 Last->UserTreeIndices.push_back(UserTreeIdx); 2896 2897 return Last; 2898 } 2899 2900 /// -- Vectorization State -- 2901 /// Holds all of the tree entries. 2902 TreeEntry::VecTreeTy VectorizableTree; 2903 2904 #ifndef NDEBUG 2905 /// Debug printer. 
2906 LLVM_DUMP_METHOD void dumpVectorizableTree() const { 2907 for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) { 2908 VectorizableTree[Id]->dump(); 2909 dbgs() << "\n"; 2910 } 2911 } 2912 #endif 2913 2914 TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); } 2915 2916 const TreeEntry *getTreeEntry(Value *V) const { 2917 return ScalarToTreeEntry.lookup(V); 2918 } 2919 2920 /// Checks if the specified list of the instructions/values can be vectorized 2921 /// and fills required data before actual scheduling of the instructions. 2922 TreeEntry::EntryState getScalarsVectorizationState( 2923 InstructionsState &S, ArrayRef<Value *> VL, bool IsScatterVectorizeUserTE, 2924 OrdersType &CurrentOrder, SmallVectorImpl<Value *> &PointerOps) const; 2925 2926 /// Maps a specific scalar to its tree entry. 2927 SmallDenseMap<Value *, TreeEntry *> ScalarToTreeEntry; 2928 2929 /// List of scalars, used in several vectorize nodes, and the list of the 2930 /// nodes. 2931 SmallDenseMap<Value *, SmallVector<TreeEntry *>> MultiNodeScalars; 2932 2933 /// Maps a value to the proposed vectorizable size. 2934 SmallDenseMap<Value *, unsigned> InstrElementSize; 2935 2936 /// A list of scalars that we found that we need to keep as scalars. 2937 ValueSet MustGather; 2938 2939 /// A map between the vectorized entries and the last instructions in the 2940 /// bundles. The bundles are built in use order, not in the def order of the 2941 /// instructions. So, we cannot rely directly on the last instruction in the 2942 /// bundle being the last instruction in the program order during 2943 /// vectorization process since the basic blocks are affected, need to 2944 /// pre-gather them before. 2945 DenseMap<const TreeEntry *, Instruction *> EntryToLastInstruction; 2946 2947 /// List of gather nodes, depending on other gather/vector nodes, which should 2948 /// be emitted after the vector instruction emission process to correctly 2949 /// handle order of the vector instructions and shuffles. 2950 SetVector<const TreeEntry *> PostponedGathers; 2951 2952 using ValueToGatherNodesMap = 2953 DenseMap<Value *, SmallPtrSet<const TreeEntry *, 4>>; 2954 ValueToGatherNodesMap ValueToGatherNodes; 2955 2956 /// This POD struct describes one external user in the vectorized tree. 2957 struct ExternalUser { 2958 ExternalUser(Value *S, llvm::User *U, int L) 2959 : Scalar(S), User(U), Lane(L) {} 2960 2961 // Which scalar in our function. 2962 Value *Scalar; 2963 2964 // Which user that uses the scalar. 2965 llvm::User *User; 2966 2967 // Which lane does the scalar belong to. 2968 int Lane; 2969 }; 2970 using UserList = SmallVector<ExternalUser, 16>; 2971 2972 /// Checks if two instructions may access the same memory. 2973 /// 2974 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it 2975 /// is invariant in the calling loop. 2976 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1, 2977 Instruction *Inst2) { 2978 if (!Loc1.Ptr || !isSimple(Inst1) || !isSimple(Inst2)) 2979 return true; 2980 // First check if the result is already in the cache. 2981 AliasCacheKey Key = std::make_pair(Inst1, Inst2); 2982 auto It = AliasCache.find(Key); 2983 if (It != AliasCache.end()) 2984 return It->second; 2985 bool Aliased = isModOrRefSet(BatchAA.getModRefInfo(Inst2, Loc1)); 2986 // Store the result in the cache. 
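// The result is recorded under both (Inst1, Inst2) and (Inst2, Inst1), so later queries with the operands swapped also hit the cache.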
2987 AliasCache.try_emplace(Key, Aliased); 2988 AliasCache.try_emplace(std::make_pair(Inst2, Inst1), Aliased); 2989 return Aliased; 2990 } 2991 2992 using AliasCacheKey = std::pair<Instruction *, Instruction *>; 2993 2994 /// Cache for alias results. 2995 /// TODO: consider moving this to the AliasAnalysis itself. 2996 DenseMap<AliasCacheKey, bool> AliasCache; 2997 2998 // Cache for pointerMayBeCaptured calls inside AA. This is preserved 2999 // globally through SLP because we don't perform any action which 3000 // invalidates capture results. 3001 BatchAAResults BatchAA; 3002 3003 /// Temporary store for deleted instructions. Instructions will be deleted 3004 /// eventually when the BoUpSLP is destructed. The deferral is required to 3005 /// ensure that there are no incorrect collisions in the AliasCache, which 3006 /// can happen if a new instruction is allocated at the same address as a 3007 /// previously deleted instruction. 3008 DenseSet<Instruction *> DeletedInstructions; 3009 3010 /// Set of the instruction, being analyzed already for reductions. 3011 SmallPtrSet<Instruction *, 16> AnalyzedReductionsRoots; 3012 3013 /// Set of hashes for the list of reduction values already being analyzed. 3014 DenseSet<size_t> AnalyzedReductionVals; 3015 3016 /// A list of values that need to extracted out of the tree. 3017 /// This list holds pairs of (Internal Scalar : External User). External User 3018 /// can be nullptr, it means that this Internal Scalar will be used later, 3019 /// after vectorization. 3020 UserList ExternalUses; 3021 3022 /// Values used only by @llvm.assume calls. 3023 SmallPtrSet<const Value *, 32> EphValues; 3024 3025 /// Holds all of the instructions that we gathered, shuffle instructions and 3026 /// extractelements. 3027 SetVector<Instruction *> GatherShuffleExtractSeq; 3028 3029 /// A list of blocks that we are going to CSE. 3030 DenseSet<BasicBlock *> CSEBlocks; 3031 3032 /// Contains all scheduling relevant data for an instruction. 3033 /// A ScheduleData either represents a single instruction or a member of an 3034 /// instruction bundle (= a group of instructions which is combined into a 3035 /// vector instruction). 3036 struct ScheduleData { 3037 // The initial value for the dependency counters. It means that the 3038 // dependencies are not calculated yet. 
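// A negative sentinel distinguishes the not-yet-calculated state from a real dependency count of zero.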
3039 enum { InvalidDeps = -1 };
3040
3041 ScheduleData() = default;
3042
3043 void init(int BlockSchedulingRegionID, Value *OpVal) {
3044 FirstInBundle = this;
3045 NextInBundle = nullptr;
3046 NextLoadStore = nullptr;
3047 IsScheduled = false;
3048 SchedulingRegionID = BlockSchedulingRegionID;
3049 clearDependencies();
3050 OpValue = OpVal;
3051 TE = nullptr;
3052 }
3053
3054 /// Verify basic self-consistency properties.
3055 void verify() {
3056 if (hasValidDependencies()) {
3057 assert(UnscheduledDeps <= Dependencies && "invariant");
3058 } else {
3059 assert(UnscheduledDeps == Dependencies && "invariant");
3060 }
3061
3062 if (IsScheduled) {
3063 assert(isSchedulingEntity() &&
3064 "unexpected scheduled state");
3065 for (const ScheduleData *BundleMember = this; BundleMember;
3066 BundleMember = BundleMember->NextInBundle) {
3067 assert(BundleMember->hasValidDependencies() &&
3068 BundleMember->UnscheduledDeps == 0 &&
3069 "unexpected scheduled state");
3070 assert((BundleMember == this || !BundleMember->IsScheduled) &&
3071 "only bundle is marked scheduled");
3072 }
3073 }
3074
3075 assert(Inst->getParent() == FirstInBundle->Inst->getParent() &&
3076 "all bundle members must be in same basic block");
3077 }
3078
3079 /// Returns true if the dependency information has been calculated.
3080 /// Note that dependency validity can vary between instructions within
3081 /// a single bundle.
3082 bool hasValidDependencies() const { return Dependencies != InvalidDeps; }
3083
3084 /// Returns true for single instructions and for bundle representatives
3085 /// (= the head of a bundle).
3086 bool isSchedulingEntity() const { return FirstInBundle == this; }
3087
3088 /// Returns true if it represents an instruction bundle and not only a
3089 /// single instruction.
3090 bool isPartOfBundle() const {
3091 return NextInBundle != nullptr || FirstInBundle != this || TE;
3092 }
3093
3094 /// Returns true if it is ready for scheduling, i.e. it has no more
3095 /// unscheduled dependent instructions/bundles.
3096 bool isReady() const {
3097 assert(isSchedulingEntity() &&
3098 "can't consider non-scheduling entity for ready list");
3099 return unscheduledDepsInBundle() == 0 && !IsScheduled;
3100 }
3101
3102 /// Modifies the number of unscheduled dependencies for this instruction,
3103 /// and returns the number of remaining dependencies for the containing
3104 /// bundle.
3105 int incrementUnscheduledDeps(int Incr) {
3106 assert(hasValidDependencies() &&
3107 "increment of unscheduled deps would be meaningless");
3108 UnscheduledDeps += Incr;
3109 return FirstInBundle->unscheduledDepsInBundle();
3110 }
3111
3112 /// Sets the number of unscheduled dependencies to the number of
3113 /// dependencies.
3114 void resetUnscheduledDeps() {
3115 UnscheduledDeps = Dependencies;
3116 }
3117
3118 /// Clears all dependency information.
3119 void clearDependencies() { 3120 Dependencies = InvalidDeps; 3121 resetUnscheduledDeps(); 3122 MemoryDependencies.clear(); 3123 ControlDependencies.clear(); 3124 } 3125 3126 int unscheduledDepsInBundle() const { 3127 assert(isSchedulingEntity() && "only meaningful on the bundle"); 3128 int Sum = 0; 3129 for (const ScheduleData *BundleMember = this; BundleMember; 3130 BundleMember = BundleMember->NextInBundle) { 3131 if (BundleMember->UnscheduledDeps == InvalidDeps) 3132 return InvalidDeps; 3133 Sum += BundleMember->UnscheduledDeps; 3134 } 3135 return Sum; 3136 } 3137 3138 void dump(raw_ostream &os) const { 3139 if (!isSchedulingEntity()) { 3140 os << "/ " << *Inst; 3141 } else if (NextInBundle) { 3142 os << '[' << *Inst; 3143 ScheduleData *SD = NextInBundle; 3144 while (SD) { 3145 os << ';' << *SD->Inst; 3146 SD = SD->NextInBundle; 3147 } 3148 os << ']'; 3149 } else { 3150 os << *Inst; 3151 } 3152 } 3153 3154 Instruction *Inst = nullptr; 3155 3156 /// Opcode of the current instruction in the schedule data. 3157 Value *OpValue = nullptr; 3158 3159 /// The TreeEntry that this instruction corresponds to. 3160 TreeEntry *TE = nullptr; 3161 3162 /// Points to the head in an instruction bundle (and always to this for 3163 /// single instructions). 3164 ScheduleData *FirstInBundle = nullptr; 3165 3166 /// Single linked list of all instructions in a bundle. Null if it is a 3167 /// single instruction. 3168 ScheduleData *NextInBundle = nullptr; 3169 3170 /// Single linked list of all memory instructions (e.g. load, store, call) 3171 /// in the block - until the end of the scheduling region. 3172 ScheduleData *NextLoadStore = nullptr; 3173 3174 /// The dependent memory instructions. 3175 /// This list is derived on demand in calculateDependencies(). 3176 SmallVector<ScheduleData *, 4> MemoryDependencies; 3177 3178 /// List of instructions which this instruction could be control dependent 3179 /// on. Allowing such nodes to be scheduled below this one could introduce 3180 /// a runtime fault which didn't exist in the original program. 3181 /// ex: this is a load or udiv following a readonly call which inf loops 3182 SmallVector<ScheduleData *, 4> ControlDependencies; 3183 3184 /// This ScheduleData is in the current scheduling region if this matches 3185 /// the current SchedulingRegionID of BlockScheduling. 3186 int SchedulingRegionID = 0; 3187 3188 /// Used for getting a "good" final ordering of instructions. 3189 int SchedulingPriority = 0; 3190 3191 /// The number of dependencies. Constitutes of the number of users of the 3192 /// instruction plus the number of dependent memory instructions (if any). 3193 /// This value is calculated on demand. 3194 /// If InvalidDeps, the number of dependencies is not calculated yet. 3195 int Dependencies = InvalidDeps; 3196 3197 /// The number of dependencies minus the number of dependencies of scheduled 3198 /// instructions. As soon as this is zero, the instruction/bundle gets ready 3199 /// for scheduling. 3200 /// Note that this is negative as long as Dependencies is not calculated. 3201 int UnscheduledDeps = InvalidDeps; 3202 3203 /// True if this instruction is scheduled (or considered as scheduled in the 3204 /// dry-run). 
3205 bool IsScheduled = false;
3206 };
3207
3208 #ifndef NDEBUG
3209 friend inline raw_ostream &operator<<(raw_ostream &os,
3210 const BoUpSLP::ScheduleData &SD) {
3211 SD.dump(os);
3212 return os;
3213 }
3214 #endif
3215
3216 friend struct GraphTraits<BoUpSLP *>;
3217 friend struct DOTGraphTraits<BoUpSLP *>;
3218
3219 /// Contains all scheduling data for a basic block.
3220 /// It does not schedule instructions which are not memory read/write
3221 /// instructions and whose operands are either constants, arguments, phis, or
3222 /// instructions from other blocks, or whose users are phis or live in other
3223 /// blocks. The resulting vector instructions can be placed at the
3224 /// beginning of the basic block without scheduling (if the operands do not
3225 /// need to be scheduled) or at the end of the block (if the users are outside
3226 /// of the block). This saves some compile time and memory used by the
3227 /// compiler.
3228 /// ScheduleData is assigned to each instruction between the boundaries of
3229 /// the tree entry, even to those which are not part of the graph. It is
3230 /// required to correctly follow the dependencies between the instructions and
3231 /// ensure their correct scheduling. ScheduleData is not allocated for
3232 /// instructions which do not require scheduling, like phis, nodes with only
3233 /// extractelements/insertelements, or nodes whose instructions have
3234 /// uses/operands outside of the block.
3235 struct BlockScheduling {
3236 BlockScheduling(BasicBlock *BB)
3237 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {}
3238
3239 void clear() {
3240 ReadyInsts.clear();
3241 ScheduleStart = nullptr;
3242 ScheduleEnd = nullptr;
3243 FirstLoadStoreInRegion = nullptr;
3244 LastLoadStoreInRegion = nullptr;
3245 RegionHasStackSave = false;
3246
3247 // Reduce the maximum schedule region size by the size of the
3248 // previous scheduling run.
3249 ScheduleRegionSizeLimit -= ScheduleRegionSize;
3250 if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
3251 ScheduleRegionSizeLimit = MinScheduleRegionSize;
3252 ScheduleRegionSize = 0;
3253
3254 // Make a new scheduling region, i.e. all existing ScheduleData is not
3255 // in the new region yet.
3256 ++SchedulingRegionID;
3257 }
3258
3259 ScheduleData *getScheduleData(Instruction *I) {
3260 if (BB != I->getParent())
3261 // Avoid lookup if can't possibly be in map.
3262 return nullptr;
3263 ScheduleData *SD = ScheduleDataMap.lookup(I);
3264 if (SD && isInSchedulingRegion(SD))
3265 return SD;
3266 return nullptr;
3267 }
3268
3269 ScheduleData *getScheduleData(Value *V) {
3270 if (auto *I = dyn_cast<Instruction>(V))
3271 return getScheduleData(I);
3272 return nullptr;
3273 }
3274
3275 ScheduleData *getScheduleData(Value *V, Value *Key) {
3276 if (V == Key)
3277 return getScheduleData(V);
3278 auto I = ExtraScheduleDataMap.find(V);
3279 if (I != ExtraScheduleDataMap.end()) {
3280 ScheduleData *SD = I->second.lookup(Key);
3281 if (SD && isInSchedulingRegion(SD))
3282 return SD;
3283 }
3284 return nullptr;
3285 }
3286
3287 bool isInSchedulingRegion(ScheduleData *SD) const {
3288 return SD->SchedulingRegionID == SchedulingRegionID;
3289 }
3290
3291 /// Marks an instruction as scheduled and puts all dependent ready
3292 /// instructions into the ready-list.
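/// A dependent bundle is inserted into the ready list only once the unscheduled-dependency count of the whole bundle (see unscheduledDepsInBundle()) drops to zero.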
3293 template <typename ReadyListType> 3294 void schedule(ScheduleData *SD, ReadyListType &ReadyList) { 3295 SD->IsScheduled = true; 3296 LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n"); 3297 3298 for (ScheduleData *BundleMember = SD; BundleMember; 3299 BundleMember = BundleMember->NextInBundle) { 3300 if (BundleMember->Inst != BundleMember->OpValue) 3301 continue; 3302 3303 // Handle the def-use chain dependencies. 3304 3305 // Decrement the unscheduled counter and insert to ready list if ready. 3306 auto &&DecrUnsched = [this, &ReadyList](Instruction *I) { 3307 doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) { 3308 if (OpDef && OpDef->hasValidDependencies() && 3309 OpDef->incrementUnscheduledDeps(-1) == 0) { 3310 // There are no more unscheduled dependencies after 3311 // decrementing, so we can put the dependent instruction 3312 // into the ready list. 3313 ScheduleData *DepBundle = OpDef->FirstInBundle; 3314 assert(!DepBundle->IsScheduled && 3315 "already scheduled bundle gets ready"); 3316 ReadyList.insert(DepBundle); 3317 LLVM_DEBUG(dbgs() 3318 << "SLP: gets ready (def): " << *DepBundle << "\n"); 3319 } 3320 }); 3321 }; 3322 3323 // If BundleMember is a vector bundle, its operands may have been 3324 // reordered during buildTree(). We therefore need to get its operands 3325 // through the TreeEntry. 3326 if (TreeEntry *TE = BundleMember->TE) { 3327 // Need to search for the lane since the tree entry can be reordered. 3328 int Lane = std::distance(TE->Scalars.begin(), 3329 find(TE->Scalars, BundleMember->Inst)); 3330 assert(Lane >= 0 && "Lane not set"); 3331 3332 // Since vectorization tree is being built recursively this assertion 3333 // ensures that the tree entry has all operands set before reaching 3334 // this code. Couple of exceptions known at the moment are extracts 3335 // where their second (immediate) operand is not added. Since 3336 // immediates do not affect scheduler behavior this is considered 3337 // okay. 3338 auto *In = BundleMember->Inst; 3339 assert(In && 3340 (isa<ExtractValueInst, ExtractElementInst>(In) || 3341 In->getNumOperands() == TE->getNumOperands()) && 3342 "Missed TreeEntry operands?"); 3343 (void)In; // fake use to avoid build failure when assertions disabled 3344 3345 for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands(); 3346 OpIdx != NumOperands; ++OpIdx) 3347 if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane])) 3348 DecrUnsched(I); 3349 } else { 3350 // If BundleMember is a stand-alone instruction, no operand reordering 3351 // has taken place, so we directly access its operands. 3352 for (Use &U : BundleMember->Inst->operands()) 3353 if (auto *I = dyn_cast<Instruction>(U.get())) 3354 DecrUnsched(I); 3355 } 3356 // Handle the memory dependencies. 3357 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) { 3358 if (MemoryDepSD->hasValidDependencies() && 3359 MemoryDepSD->incrementUnscheduledDeps(-1) == 0) { 3360 // There are no more unscheduled dependencies after decrementing, 3361 // so we can put the dependent instruction into the ready list. 3362 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle; 3363 assert(!DepBundle->IsScheduled && 3364 "already scheduled bundle gets ready"); 3365 ReadyList.insert(DepBundle); 3366 LLVM_DEBUG(dbgs() 3367 << "SLP: gets ready (mem): " << *DepBundle << "\n"); 3368 } 3369 } 3370 // Handle the control dependencies. 
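// For example, a load or udiv that must not be scheduled above a readonly call which may never return (see ControlDependencies above).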
3371 for (ScheduleData *DepSD : BundleMember->ControlDependencies) { 3372 if (DepSD->incrementUnscheduledDeps(-1) == 0) { 3373 // There are no more unscheduled dependencies after decrementing, 3374 // so we can put the dependent instruction into the ready list. 3375 ScheduleData *DepBundle = DepSD->FirstInBundle; 3376 assert(!DepBundle->IsScheduled && 3377 "already scheduled bundle gets ready"); 3378 ReadyList.insert(DepBundle); 3379 LLVM_DEBUG(dbgs() 3380 << "SLP: gets ready (ctl): " << *DepBundle << "\n"); 3381 } 3382 } 3383 } 3384 } 3385 3386 /// Verify basic self consistency properties of the data structure. 3387 void verify() { 3388 if (!ScheduleStart) 3389 return; 3390 3391 assert(ScheduleStart->getParent() == ScheduleEnd->getParent() && 3392 ScheduleStart->comesBefore(ScheduleEnd) && 3393 "Not a valid scheduling region?"); 3394 3395 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 3396 auto *SD = getScheduleData(I); 3397 if (!SD) 3398 continue; 3399 assert(isInSchedulingRegion(SD) && 3400 "primary schedule data not in window?"); 3401 assert(isInSchedulingRegion(SD->FirstInBundle) && 3402 "entire bundle in window!"); 3403 (void)SD; 3404 doForAllOpcodes(I, [](ScheduleData *SD) { SD->verify(); }); 3405 } 3406 3407 for (auto *SD : ReadyInsts) { 3408 assert(SD->isSchedulingEntity() && SD->isReady() && 3409 "item in ready list not ready?"); 3410 (void)SD; 3411 } 3412 } 3413 3414 void doForAllOpcodes(Value *V, 3415 function_ref<void(ScheduleData *SD)> Action) { 3416 if (ScheduleData *SD = getScheduleData(V)) 3417 Action(SD); 3418 auto I = ExtraScheduleDataMap.find(V); 3419 if (I != ExtraScheduleDataMap.end()) 3420 for (auto &P : I->second) 3421 if (isInSchedulingRegion(P.second)) 3422 Action(P.second); 3423 } 3424 3425 /// Put all instructions into the ReadyList which are ready for scheduling. 3426 template <typename ReadyListType> 3427 void initialFillReadyList(ReadyListType &ReadyList) { 3428 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 3429 doForAllOpcodes(I, [&](ScheduleData *SD) { 3430 if (SD->isSchedulingEntity() && SD->hasValidDependencies() && 3431 SD->isReady()) { 3432 ReadyList.insert(SD); 3433 LLVM_DEBUG(dbgs() 3434 << "SLP: initially in ready list: " << *SD << "\n"); 3435 } 3436 }); 3437 } 3438 } 3439 3440 /// Build a bundle from the ScheduleData nodes corresponding to the 3441 /// scalar instruction for each lane. 3442 ScheduleData *buildBundle(ArrayRef<Value *> VL); 3443 3444 /// Checks if a bundle of instructions can be scheduled, i.e. has no 3445 /// cyclic dependencies. This is only a dry-run, no instructions are 3446 /// actually moved at this stage. 3447 /// \returns the scheduling bundle. The returned Optional value is not 3448 /// std::nullopt if \p VL is allowed to be scheduled. 3449 std::optional<ScheduleData *> 3450 tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 3451 const InstructionsState &S); 3452 3453 /// Un-bundles a group of instructions. 3454 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue); 3455 3456 /// Allocates schedule data chunk. 3457 ScheduleData *allocateScheduleDataChunks(); 3458 3459 /// Extends the scheduling region so that V is inside the region. 3460 /// \returns true if the region size is within the limit. 3461 bool extendSchedulingRegion(Value *V, const InstructionsState &S); 3462 3463 /// Initialize the ScheduleData structures for new instructions in the 3464 /// scheduling region. 
3465 void initScheduleData(Instruction *FromI, Instruction *ToI, 3466 ScheduleData *PrevLoadStore, 3467 ScheduleData *NextLoadStore); 3468 3469 /// Updates the dependency information of a bundle and of all instructions/ 3470 /// bundles which depend on the original bundle. 3471 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList, 3472 BoUpSLP *SLP); 3473 3474 /// Sets all instruction in the scheduling region to un-scheduled. 3475 void resetSchedule(); 3476 3477 BasicBlock *BB; 3478 3479 /// Simple memory allocation for ScheduleData. 3480 SmallVector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks; 3481 3482 /// The size of a ScheduleData array in ScheduleDataChunks. 3483 int ChunkSize; 3484 3485 /// The allocator position in the current chunk, which is the last entry 3486 /// of ScheduleDataChunks. 3487 int ChunkPos; 3488 3489 /// Attaches ScheduleData to Instruction. 3490 /// Note that the mapping survives during all vectorization iterations, i.e. 3491 /// ScheduleData structures are recycled. 3492 DenseMap<Instruction *, ScheduleData *> ScheduleDataMap; 3493 3494 /// Attaches ScheduleData to Instruction with the leading key. 3495 DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>> 3496 ExtraScheduleDataMap; 3497 3498 /// The ready-list for scheduling (only used for the dry-run). 3499 SetVector<ScheduleData *> ReadyInsts; 3500 3501 /// The first instruction of the scheduling region. 3502 Instruction *ScheduleStart = nullptr; 3503 3504 /// The first instruction _after_ the scheduling region. 3505 Instruction *ScheduleEnd = nullptr; 3506 3507 /// The first memory accessing instruction in the scheduling region 3508 /// (can be null). 3509 ScheduleData *FirstLoadStoreInRegion = nullptr; 3510 3511 /// The last memory accessing instruction in the scheduling region 3512 /// (can be null). 3513 ScheduleData *LastLoadStoreInRegion = nullptr; 3514 3515 /// Is there an llvm.stacksave or llvm.stackrestore in the scheduling 3516 /// region? Used to optimize the dependence calculation for the 3517 /// common case where there isn't. 3518 bool RegionHasStackSave = false; 3519 3520 /// The current size of the scheduling region. 3521 int ScheduleRegionSize = 0; 3522 3523 /// The maximum size allowed for the scheduling region. 3524 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget; 3525 3526 /// The ID of the scheduling region. For a new vectorization iteration this 3527 /// is incremented which "removes" all ScheduleData from the region. 3528 /// Make sure that the initial SchedulingRegionID is greater than the 3529 /// initial SchedulingRegionID in ScheduleData (which is 0). 3530 int SchedulingRegionID = 1; 3531 }; 3532 3533 /// Attaches the BlockScheduling structures to basic blocks. 3534 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules; 3535 3536 /// Performs the "real" scheduling. Done before vectorization is actually 3537 /// performed in a basic block. 3538 void scheduleBlock(BlockScheduling *BS); 3539 3540 /// List of users to ignore during scheduling and that don't need extracting. 3541 const SmallDenseSet<Value *> *UserIgnoreList = nullptr; 3542 3543 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of 3544 /// sorted SmallVectors of unsigned. 
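/// The empty and tombstone keys are single-element orders holding ~1U and ~2U, values that are not expected to appear in a real order.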
3545 struct OrdersTypeDenseMapInfo { 3546 static OrdersType getEmptyKey() { 3547 OrdersType V; 3548 V.push_back(~1U); 3549 return V; 3550 } 3551 3552 static OrdersType getTombstoneKey() { 3553 OrdersType V; 3554 V.push_back(~2U); 3555 return V; 3556 } 3557 3558 static unsigned getHashValue(const OrdersType &V) { 3559 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end())); 3560 } 3561 3562 static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) { 3563 return LHS == RHS; 3564 } 3565 }; 3566 3567 // Analysis and block reference. 3568 Function *F; 3569 ScalarEvolution *SE; 3570 TargetTransformInfo *TTI; 3571 TargetLibraryInfo *TLI; 3572 LoopInfo *LI; 3573 DominatorTree *DT; 3574 AssumptionCache *AC; 3575 DemandedBits *DB; 3576 const DataLayout *DL; 3577 OptimizationRemarkEmitter *ORE; 3578 3579 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt. 3580 unsigned MinVecRegSize; // Set by cl::opt (default: 128). 3581 3582 /// Instruction builder to construct the vectorized tree. 3583 IRBuilder<> Builder; 3584 3585 /// A map of scalar integer values to the smallest bit width with which they 3586 /// can legally be represented. The values map to (width, signed) pairs, 3587 /// where "width" indicates the minimum bit width and "signed" is True if the 3588 /// value must be signed-extended, rather than zero-extended, back to its 3589 /// original width. 3590 DenseMap<const TreeEntry *, std::pair<uint64_t, bool>> MinBWs; 3591 }; 3592 3593 } // end namespace slpvectorizer 3594 3595 template <> struct GraphTraits<BoUpSLP *> { 3596 using TreeEntry = BoUpSLP::TreeEntry; 3597 3598 /// NodeRef has to be a pointer per the GraphWriter. 3599 using NodeRef = TreeEntry *; 3600 3601 using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy; 3602 3603 /// Add the VectorizableTree to the index iterator to be able to return 3604 /// TreeEntry pointers. 3605 struct ChildIteratorType 3606 : public iterator_adaptor_base< 3607 ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> { 3608 ContainerTy &VectorizableTree; 3609 3610 ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W, 3611 ContainerTy &VT) 3612 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {} 3613 3614 NodeRef operator*() { return I->UserTE; } 3615 }; 3616 3617 static NodeRef getEntryNode(BoUpSLP &R) { 3618 return R.VectorizableTree[0].get(); 3619 } 3620 3621 static ChildIteratorType child_begin(NodeRef N) { 3622 return {N->UserTreeIndices.begin(), N->Container}; 3623 } 3624 3625 static ChildIteratorType child_end(NodeRef N) { 3626 return {N->UserTreeIndices.end(), N->Container}; 3627 } 3628 3629 /// For the node iterator we just need to turn the TreeEntry iterator into a 3630 /// TreeEntry* iterator so that it dereferences to NodeRef. 
3631 class nodes_iterator { 3632 using ItTy = ContainerTy::iterator; 3633 ItTy It; 3634 3635 public: 3636 nodes_iterator(const ItTy &It2) : It(It2) {} 3637 NodeRef operator*() { return It->get(); } 3638 nodes_iterator operator++() { 3639 ++It; 3640 return *this; 3641 } 3642 bool operator!=(const nodes_iterator &N2) const { return N2.It != It; } 3643 }; 3644 3645 static nodes_iterator nodes_begin(BoUpSLP *R) { 3646 return nodes_iterator(R->VectorizableTree.begin()); 3647 } 3648 3649 static nodes_iterator nodes_end(BoUpSLP *R) { 3650 return nodes_iterator(R->VectorizableTree.end()); 3651 } 3652 3653 static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); } 3654 }; 3655 3656 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits { 3657 using TreeEntry = BoUpSLP::TreeEntry; 3658 3659 DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {} 3660 3661 std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) { 3662 std::string Str; 3663 raw_string_ostream OS(Str); 3664 OS << Entry->Idx << ".\n"; 3665 if (isSplat(Entry->Scalars)) 3666 OS << "<splat> "; 3667 for (auto *V : Entry->Scalars) { 3668 OS << *V; 3669 if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) { 3670 return EU.Scalar == V; 3671 })) 3672 OS << " <extract>"; 3673 OS << "\n"; 3674 } 3675 return Str; 3676 } 3677 3678 static std::string getNodeAttributes(const TreeEntry *Entry, 3679 const BoUpSLP *) { 3680 if (Entry->State == TreeEntry::NeedToGather) 3681 return "color=red"; 3682 if (Entry->State == TreeEntry::ScatterVectorize || 3683 Entry->State == TreeEntry::PossibleStridedVectorize) 3684 return "color=blue"; 3685 return ""; 3686 } 3687 }; 3688 3689 } // end namespace llvm 3690 3691 BoUpSLP::~BoUpSLP() { 3692 SmallVector<WeakTrackingVH> DeadInsts; 3693 for (auto *I : DeletedInstructions) { 3694 for (Use &U : I->operands()) { 3695 auto *Op = dyn_cast<Instruction>(U.get()); 3696 if (Op && !DeletedInstructions.count(Op) && Op->hasOneUser() && 3697 wouldInstructionBeTriviallyDead(Op, TLI)) 3698 DeadInsts.emplace_back(Op); 3699 } 3700 I->dropAllReferences(); 3701 } 3702 for (auto *I : DeletedInstructions) { 3703 assert(I->use_empty() && 3704 "trying to erase instruction with users."); 3705 I->eraseFromParent(); 3706 } 3707 3708 // Cleanup any dead scalar code feeding the vectorized instructions 3709 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI); 3710 3711 #ifdef EXPENSIVE_CHECKS 3712 // If we could guarantee that this call is not extremely slow, we could 3713 // remove the ifdef limitation (see PR47712). 3714 assert(!verifyFunction(*F, &dbgs())); 3715 #endif 3716 } 3717 3718 /// Reorders the given \p Reuses mask according to the given \p Mask. \p Reuses 3719 /// contains original mask for the scalars reused in the node. Procedure 3720 /// transform this mask in accordance with the given \p Mask. 3721 static void reorderReuses(SmallVectorImpl<int> &Reuses, ArrayRef<int> Mask) { 3722 assert(!Mask.empty() && Reuses.size() == Mask.size() && 3723 "Expected non-empty mask."); 3724 SmallVector<int> Prev(Reuses.begin(), Reuses.end()); 3725 Prev.swap(Reuses); 3726 for (unsigned I = 0, E = Prev.size(); I < E; ++I) 3727 if (Mask[I] != PoisonMaskElem) 3728 Reuses[Mask[I]] = Prev[I]; 3729 } 3730 3731 /// Reorders the given \p Order according to the given \p Mask. \p Order - is 3732 /// the original order of the scalars. Procedure transforms the provided order 3733 /// in accordance with the given \p Mask. 
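For example, applying Mask = {1, 0} to Order = {1, 0} yields the identity order {0, 1}.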
If the resulting \p Order is just an
3734 /// identity order, \p Order is cleared.
3735 static void reorderOrder(SmallVectorImpl<unsigned> &Order, ArrayRef<int> Mask) {
3736 assert(!Mask.empty() && "Expected non-empty mask.");
3737 SmallVector<int> MaskOrder;
3738 if (Order.empty()) {
3739 MaskOrder.resize(Mask.size());
3740 std::iota(MaskOrder.begin(), MaskOrder.end(), 0);
3741 } else {
3742 inversePermutation(Order, MaskOrder);
3743 }
3744 reorderReuses(MaskOrder, Mask);
3745 if (ShuffleVectorInst::isIdentityMask(MaskOrder, MaskOrder.size())) {
3746 Order.clear();
3747 return;
3748 }
3749 Order.assign(Mask.size(), Mask.size());
3750 for (unsigned I = 0, E = Mask.size(); I < E; ++I)
3751 if (MaskOrder[I] != PoisonMaskElem)
3752 Order[MaskOrder[I]] = I;
3753 fixupOrderingIndices(Order);
3754 }
3755
3756 std::optional<BoUpSLP::OrdersType>
3757 BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) {
3758 assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only.");
3759 unsigned NumScalars = TE.Scalars.size();
3760 OrdersType CurrentOrder(NumScalars, NumScalars);
3761 SmallVector<int> Positions;
3762 SmallBitVector UsedPositions(NumScalars);
3763 DenseMap<const TreeEntry *, unsigned> UsedEntries;
3764 DenseMap<Value *, std::pair<const TreeEntry *, unsigned>> ValueToEntryPos;
3765 for (Value *V : TE.Scalars) {
3766 if (!isa<LoadInst, ExtractElementInst, ExtractValueInst>(V))
3767 continue;
3768 const auto *LocalSTE = getTreeEntry(V);
3769 if (!LocalSTE)
3770 continue;
3771 unsigned Lane =
3772 std::distance(LocalSTE->Scalars.begin(), find(LocalSTE->Scalars, V));
3773 if (Lane >= NumScalars)
3774 continue;
3775 ++UsedEntries.try_emplace(LocalSTE, 0).first->getSecond();
3776 ValueToEntryPos.try_emplace(V, LocalSTE, Lane);
3777 }
3778 if (UsedEntries.empty())
3779 return std::nullopt;
3780 const TreeEntry &BestSTE =
3781 *std::max_element(UsedEntries.begin(), UsedEntries.end(),
3782 [](const std::pair<const TreeEntry *, unsigned> &P1,
3783 const std::pair<const TreeEntry *, unsigned> &P2) {
3784 return P1.second < P2.second;
3785 })
3786 ->first;
3787 UsedEntries.erase(&BestSTE);
3788 const TreeEntry *SecondBestSTE = nullptr;
3789 if (!UsedEntries.empty())
3790 SecondBestSTE =
3791 std::max_element(UsedEntries.begin(), UsedEntries.end(),
3792 [](const std::pair<const TreeEntry *, unsigned> &P1,
3793 const std::pair<const TreeEntry *, unsigned> &P2) {
3794 return P1.second < P2.second;
3795 })
3796 ->first;
3797 // Try to find all gathered scalars that get vectorized in other
3798 // vectorized nodes. Here we can have only a single tree vector node to
3799 // correctly identify the order of the gathered scalars.
3800 for (unsigned I = 0; I < NumScalars; ++I) {
3801 Value *V = TE.Scalars[I];
3802 if (!isa<LoadInst, ExtractElementInst, ExtractValueInst>(V))
3803 continue;
3804 const auto [LocalSTE, Lane] = ValueToEntryPos.lookup(V);
3805 if (!LocalSTE || (LocalSTE != &BestSTE && LocalSTE != SecondBestSTE))
3806 continue;
3807 if (CurrentOrder[Lane] != NumScalars) {
3808 if ((CurrentOrder[Lane] >= BestSTE.Scalars.size() ||
3809 BestSTE.Scalars[CurrentOrder[Lane]] == V) &&
3810 (Lane != I || LocalSTE == SecondBestSTE))
3811 continue;
3812 UsedPositions.reset(CurrentOrder[Lane]);
3813 }
3814 // The partial identity (where only some elements of the gather node are
3815 // in the identity order) is good.
3816 CurrentOrder[Lane] = I; 3817 UsedPositions.set(I); 3818 } 3819 // Need to keep the order if we have a vector entry and at least 2 scalars or 3820 // the vectorized entry has just 2 scalars. 3821 if (BestSTE.Scalars.size() != 2 && UsedPositions.count() <= 1) 3822 return std::nullopt; 3823 auto IsIdentityOrder = [&](ArrayRef<unsigned> CurrentOrder) { 3824 for (unsigned I = 0; I < NumScalars; ++I) 3825 if (CurrentOrder[I] != I && CurrentOrder[I] != NumScalars) 3826 return false; 3827 return true; 3828 }; 3829 if (IsIdentityOrder(CurrentOrder)) 3830 return OrdersType(); 3831 auto *It = CurrentOrder.begin(); 3832 for (unsigned I = 0; I < NumScalars;) { 3833 if (UsedPositions.test(I)) { 3834 ++I; 3835 continue; 3836 } 3837 if (*It == NumScalars) { 3838 *It = I; 3839 ++I; 3840 } 3841 ++It; 3842 } 3843 return std::move(CurrentOrder); 3844 } 3845 3846 namespace { 3847 /// Tracks the state we can represent the loads in the given sequence. 3848 enum class LoadsState { 3849 Gather, 3850 Vectorize, 3851 ScatterVectorize, 3852 PossibleStridedVectorize 3853 }; 3854 } // anonymous namespace 3855 3856 static bool arePointersCompatible(Value *Ptr1, Value *Ptr2, 3857 const TargetLibraryInfo &TLI, 3858 bool CompareOpcodes = true) { 3859 if (getUnderlyingObject(Ptr1) != getUnderlyingObject(Ptr2)) 3860 return false; 3861 auto *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1); 3862 if (!GEP1) 3863 return false; 3864 auto *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2); 3865 if (!GEP2) 3866 return false; 3867 return GEP1->getNumOperands() == 2 && GEP2->getNumOperands() == 2 && 3868 ((isConstant(GEP1->getOperand(1)) && 3869 isConstant(GEP2->getOperand(1))) || 3870 !CompareOpcodes || 3871 getSameOpcode({GEP1->getOperand(1), GEP2->getOperand(1)}, TLI) 3872 .getOpcode()); 3873 } 3874 3875 /// Checks if the given array of loads can be represented as a vectorized, 3876 /// scatter or just simple gather. 3877 static LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0, 3878 const TargetTransformInfo &TTI, 3879 const DataLayout &DL, ScalarEvolution &SE, 3880 LoopInfo &LI, const TargetLibraryInfo &TLI, 3881 SmallVectorImpl<unsigned> &Order, 3882 SmallVectorImpl<Value *> &PointerOps) { 3883 // Check that a vectorized load would load the same memory as a scalar 3884 // load. For example, we don't want to vectorize loads that are smaller 3885 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 3886 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 3887 // from such a struct, we read/write packed bits disagreeing with the 3888 // unvectorized version. 3889 Type *ScalarTy = VL0->getType(); 3890 3891 if (DL.getTypeSizeInBits(ScalarTy) != DL.getTypeAllocSizeInBits(ScalarTy)) 3892 return LoadsState::Gather; 3893 3894 // Make sure all loads in the bundle are simple - we can't vectorize 3895 // atomic or volatile loads. 3896 PointerOps.clear(); 3897 PointerOps.resize(VL.size()); 3898 auto *POIter = PointerOps.begin(); 3899 for (Value *V : VL) { 3900 auto *L = cast<LoadInst>(V); 3901 if (!L->isSimple()) 3902 return LoadsState::Gather; 3903 *POIter = L->getPointerOperand(); 3904 ++POIter; 3905 } 3906 3907 Order.clear(); 3908 // Check the order of pointer operands or that all pointers are the same. 
3909 bool IsSorted = sortPtrAccesses(PointerOps, ScalarTy, DL, SE, Order); 3910 if (IsSorted || all_of(PointerOps, [&](Value *P) { 3911 return arePointersCompatible(P, PointerOps.front(), TLI); 3912 })) { 3913 bool IsPossibleStrided = false; 3914 if (IsSorted) { 3915 Value *Ptr0; 3916 Value *PtrN; 3917 if (Order.empty()) { 3918 Ptr0 = PointerOps.front(); 3919 PtrN = PointerOps.back(); 3920 } else { 3921 Ptr0 = PointerOps[Order.front()]; 3922 PtrN = PointerOps[Order.back()]; 3923 } 3924 std::optional<int> Diff = 3925 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, DL, SE); 3926 // Check that the sorted loads are consecutive. 3927 if (static_cast<unsigned>(*Diff) == VL.size() - 1) 3928 return LoadsState::Vectorize; 3929 // Simple check if not a strided access - clear order. 3930 IsPossibleStrided = *Diff % (VL.size() - 1) == 0; 3931 } 3932 // TODO: need to improve analysis of the pointers, if not all of them are 3933 // GEPs or have > 2 operands, we end up with a gather node, which just 3934 // increases the cost. 3935 Loop *L = LI.getLoopFor(cast<LoadInst>(VL0)->getParent()); 3936 bool ProfitableGatherPointers = 3937 static_cast<unsigned>(count_if(PointerOps, [L](Value *V) { 3938 return L && L->isLoopInvariant(V); 3939 })) <= VL.size() / 2 && VL.size() > 2; 3940 if (ProfitableGatherPointers || all_of(PointerOps, [IsSorted](Value *P) { 3941 auto *GEP = dyn_cast<GetElementPtrInst>(P); 3942 return (IsSorted && !GEP && doesNotNeedToBeScheduled(P)) || 3943 (GEP && GEP->getNumOperands() == 2); 3944 })) { 3945 Align CommonAlignment = cast<LoadInst>(VL0)->getAlign(); 3946 for (Value *V : VL) 3947 CommonAlignment = 3948 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign()); 3949 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 3950 if (TTI.isLegalMaskedGather(VecTy, CommonAlignment) && 3951 !TTI.forceScalarizeMaskedGather(VecTy, CommonAlignment)) 3952 return IsPossibleStrided ? LoadsState::PossibleStridedVectorize 3953 : LoadsState::ScatterVectorize; 3954 } 3955 } 3956 3957 return LoadsState::Gather; 3958 } 3959 3960 static bool clusterSortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy, 3961 const DataLayout &DL, ScalarEvolution &SE, 3962 SmallVectorImpl<unsigned> &SortedIndices) { 3963 assert(llvm::all_of( 3964 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) && 3965 "Expected list of pointer operands."); 3966 // Map from bases to a vector of (Ptr, Offset, OrigIdx), which we insert each 3967 // Ptr into, sort and return the sorted indices with values next to one 3968 // another. 3969 MapVector<Value *, SmallVector<std::tuple<Value *, int, unsigned>>> Bases; 3970 Bases[VL[0]].push_back(std::make_tuple(VL[0], 0U, 0U)); 3971 3972 unsigned Cnt = 1; 3973 for (Value *Ptr : VL.drop_front()) { 3974 bool Found = any_of(Bases, [&](auto &Base) { 3975 std::optional<int> Diff = 3976 getPointersDiff(ElemTy, Base.first, ElemTy, Ptr, DL, SE, 3977 /*StrictCheck=*/true); 3978 if (!Diff) 3979 return false; 3980 3981 Base.second.emplace_back(Ptr, *Diff, Cnt++); 3982 return true; 3983 }); 3984 3985 if (!Found) { 3986 // If we haven't found enough to usefully cluster, return early. 3987 if (Bases.size() > VL.size() / 2 - 1) 3988 return false; 3989 3990 // Not found already - add a new Base 3991 Bases[Ptr].emplace_back(Ptr, 0, Cnt++); 3992 } 3993 } 3994 3995 // For each of the bases sort the pointers by Offset and check if any of the 3996 // base become consecutively allocated. 
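// E.g. sorted element offsets {3, 4, 5, 6} form one consecutive run, while {3, 5, 6, 7} do not.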
3997 bool AnyConsecutive = false; 3998 for (auto &Base : Bases) { 3999 auto &Vec = Base.second; 4000 if (Vec.size() > 1) { 4001 llvm::stable_sort(Vec, [](const std::tuple<Value *, int, unsigned> &X, 4002 const std::tuple<Value *, int, unsigned> &Y) { 4003 return std::get<1>(X) < std::get<1>(Y); 4004 }); 4005 int InitialOffset = std::get<1>(Vec[0]); 4006 AnyConsecutive |= all_of(enumerate(Vec), [InitialOffset](const auto &P) { 4007 return std::get<1>(P.value()) == int(P.index()) + InitialOffset; 4008 }); 4009 } 4010 } 4011 4012 // Fill SortedIndices array only if it looks worth-while to sort the ptrs. 4013 SortedIndices.clear(); 4014 if (!AnyConsecutive) 4015 return false; 4016 4017 for (auto &Base : Bases) { 4018 for (auto &T : Base.second) 4019 SortedIndices.push_back(std::get<2>(T)); 4020 } 4021 4022 assert(SortedIndices.size() == VL.size() && 4023 "Expected SortedIndices to be the size of VL"); 4024 return true; 4025 } 4026 4027 std::optional<BoUpSLP::OrdersType> 4028 BoUpSLP::findPartiallyOrderedLoads(const BoUpSLP::TreeEntry &TE) { 4029 assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only."); 4030 Type *ScalarTy = TE.Scalars[0]->getType(); 4031 4032 SmallVector<Value *> Ptrs; 4033 Ptrs.reserve(TE.Scalars.size()); 4034 for (Value *V : TE.Scalars) { 4035 auto *L = dyn_cast<LoadInst>(V); 4036 if (!L || !L->isSimple()) 4037 return std::nullopt; 4038 Ptrs.push_back(L->getPointerOperand()); 4039 } 4040 4041 BoUpSLP::OrdersType Order; 4042 if (clusterSortPtrAccesses(Ptrs, ScalarTy, *DL, *SE, Order)) 4043 return std::move(Order); 4044 return std::nullopt; 4045 } 4046 4047 /// Check if two insertelement instructions are from the same buildvector. 4048 static bool areTwoInsertFromSameBuildVector( 4049 InsertElementInst *VU, InsertElementInst *V, 4050 function_ref<Value *(InsertElementInst *)> GetBaseOperand) { 4051 // Instructions must be from the same basic blocks. 4052 if (VU->getParent() != V->getParent()) 4053 return false; 4054 // Checks if 2 insertelements are from the same buildvector. 4055 if (VU->getType() != V->getType()) 4056 return false; 4057 // Multiple used inserts are separate nodes. 4058 if (!VU->hasOneUse() && !V->hasOneUse()) 4059 return false; 4060 auto *IE1 = VU; 4061 auto *IE2 = V; 4062 std::optional<unsigned> Idx1 = getInsertIndex(IE1); 4063 std::optional<unsigned> Idx2 = getInsertIndex(IE2); 4064 if (Idx1 == std::nullopt || Idx2 == std::nullopt) 4065 return false; 4066 // Go through the vector operand of insertelement instructions trying to find 4067 // either VU as the original vector for IE2 or V as the original vector for 4068 // IE1. 
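// Walk both chains in parallel, giving up if an insert index repeats, since a repeated index means an earlier insert into that lane was overwritten.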
4069 SmallBitVector ReusedIdx( 4070 cast<VectorType>(VU->getType())->getElementCount().getKnownMinValue()); 4071 bool IsReusedIdx = false; 4072 do { 4073 if (IE2 == VU && !IE1) 4074 return VU->hasOneUse(); 4075 if (IE1 == V && !IE2) 4076 return V->hasOneUse(); 4077 if (IE1 && IE1 != V) { 4078 unsigned Idx1 = getInsertIndex(IE1).value_or(*Idx2); 4079 IsReusedIdx |= ReusedIdx.test(Idx1); 4080 ReusedIdx.set(Idx1); 4081 if ((IE1 != VU && !IE1->hasOneUse()) || IsReusedIdx) 4082 IE1 = nullptr; 4083 else 4084 IE1 = dyn_cast_or_null<InsertElementInst>(GetBaseOperand(IE1)); 4085 } 4086 if (IE2 && IE2 != VU) { 4087 unsigned Idx2 = getInsertIndex(IE2).value_or(*Idx1); 4088 IsReusedIdx |= ReusedIdx.test(Idx2); 4089 ReusedIdx.set(Idx2); 4090 if ((IE2 != V && !IE2->hasOneUse()) || IsReusedIdx) 4091 IE2 = nullptr; 4092 else 4093 IE2 = dyn_cast_or_null<InsertElementInst>(GetBaseOperand(IE2)); 4094 } 4095 } while (!IsReusedIdx && (IE1 || IE2)); 4096 return false; 4097 } 4098 4099 std::optional<BoUpSLP::OrdersType> 4100 BoUpSLP::getReorderingData(const TreeEntry &TE, bool TopToBottom) { 4101 // No need to reorder if need to shuffle reuses, still need to shuffle the 4102 // node. 4103 if (!TE.ReuseShuffleIndices.empty()) { 4104 // Check if reuse shuffle indices can be improved by reordering. 4105 // For this, check that reuse mask is "clustered", i.e. each scalar values 4106 // is used once in each submask of size <number_of_scalars>. 4107 // Example: 4 scalar values. 4108 // ReuseShuffleIndices mask: 0, 1, 2, 3, 3, 2, 0, 1 - clustered. 4109 // 0, 1, 2, 3, 3, 3, 1, 0 - not clustered, because 4110 // element 3 is used twice in the second submask. 4111 unsigned Sz = TE.Scalars.size(); 4112 if (!ShuffleVectorInst::isOneUseSingleSourceMask(TE.ReuseShuffleIndices, 4113 Sz)) 4114 return std::nullopt; 4115 unsigned VF = TE.getVectorFactor(); 4116 // Try build correct order for extractelement instructions. 4117 SmallVector<int> ReusedMask(TE.ReuseShuffleIndices.begin(), 4118 TE.ReuseShuffleIndices.end()); 4119 if (TE.getOpcode() == Instruction::ExtractElement && !TE.isAltShuffle() && 4120 all_of(TE.Scalars, [Sz](Value *V) { 4121 std::optional<unsigned> Idx = getExtractIndex(cast<Instruction>(V)); 4122 return Idx && *Idx < Sz; 4123 })) { 4124 SmallVector<int> ReorderMask(Sz, PoisonMaskElem); 4125 if (TE.ReorderIndices.empty()) 4126 std::iota(ReorderMask.begin(), ReorderMask.end(), 0); 4127 else 4128 inversePermutation(TE.ReorderIndices, ReorderMask); 4129 for (unsigned I = 0; I < VF; ++I) { 4130 int &Idx = ReusedMask[I]; 4131 if (Idx == PoisonMaskElem) 4132 continue; 4133 Value *V = TE.Scalars[ReorderMask[Idx]]; 4134 std::optional<unsigned> EI = getExtractIndex(cast<Instruction>(V)); 4135 Idx = std::distance(ReorderMask.begin(), find(ReorderMask, *EI)); 4136 } 4137 } 4138 // Build the order of the VF size, need to reorder reuses shuffles, they are 4139 // always of VF size. 
4140 OrdersType ResOrder(VF); 4141 std::iota(ResOrder.begin(), ResOrder.end(), 0); 4142 auto *It = ResOrder.begin(); 4143 for (unsigned K = 0; K < VF; K += Sz) { 4144 OrdersType CurrentOrder(TE.ReorderIndices); 4145 SmallVector<int> SubMask{ArrayRef(ReusedMask).slice(K, Sz)}; 4146 if (SubMask.front() == PoisonMaskElem) 4147 std::iota(SubMask.begin(), SubMask.end(), 0); 4148 reorderOrder(CurrentOrder, SubMask); 4149 transform(CurrentOrder, It, [K](unsigned Pos) { return Pos + K; }); 4150 std::advance(It, Sz); 4151 } 4152 if (all_of(enumerate(ResOrder), 4153 [](const auto &Data) { return Data.index() == Data.value(); })) 4154 return std::nullopt; // No need to reorder. 4155 return std::move(ResOrder); 4156 } 4157 if ((TE.State == TreeEntry::Vectorize || 4158 TE.State == TreeEntry::PossibleStridedVectorize) && 4159 (isa<LoadInst, ExtractElementInst, ExtractValueInst>(TE.getMainOp()) || 4160 (TopToBottom && isa<StoreInst, InsertElementInst>(TE.getMainOp()))) && 4161 !TE.isAltShuffle()) 4162 return TE.ReorderIndices; 4163 if (TE.State == TreeEntry::Vectorize && TE.getOpcode() == Instruction::PHI) { 4164 auto PHICompare = [&](unsigned I1, unsigned I2) { 4165 Value *V1 = TE.Scalars[I1]; 4166 Value *V2 = TE.Scalars[I2]; 4167 if (V1 == V2) 4168 return false; 4169 if (!V1->hasOneUse() || !V2->hasOneUse()) 4170 return false; 4171 auto *FirstUserOfPhi1 = cast<Instruction>(*V1->user_begin()); 4172 auto *FirstUserOfPhi2 = cast<Instruction>(*V2->user_begin()); 4173 if (auto *IE1 = dyn_cast<InsertElementInst>(FirstUserOfPhi1)) 4174 if (auto *IE2 = dyn_cast<InsertElementInst>(FirstUserOfPhi2)) { 4175 if (!areTwoInsertFromSameBuildVector( 4176 IE1, IE2, 4177 [](InsertElementInst *II) { return II->getOperand(0); })) 4178 return false; 4179 std::optional<unsigned> Idx1 = getInsertIndex(IE1); 4180 std::optional<unsigned> Idx2 = getInsertIndex(IE2); 4181 if (Idx1 == std::nullopt || Idx2 == std::nullopt) 4182 return false; 4183 return *Idx1 < *Idx2; 4184 } 4185 if (auto *EE1 = dyn_cast<ExtractElementInst>(FirstUserOfPhi1)) 4186 if (auto *EE2 = dyn_cast<ExtractElementInst>(FirstUserOfPhi2)) { 4187 if (EE1->getOperand(0) != EE2->getOperand(0)) 4188 return false; 4189 std::optional<unsigned> Idx1 = getExtractIndex(EE1); 4190 std::optional<unsigned> Idx2 = getExtractIndex(EE2); 4191 if (Idx1 == std::nullopt || Idx2 == std::nullopt) 4192 return false; 4193 return *Idx1 < *Idx2; 4194 } 4195 return false; 4196 }; 4197 auto IsIdentityOrder = [](const OrdersType &Order) { 4198 for (unsigned Idx : seq<unsigned>(0, Order.size())) 4199 if (Idx != Order[Idx]) 4200 return false; 4201 return true; 4202 }; 4203 if (!TE.ReorderIndices.empty()) 4204 return TE.ReorderIndices; 4205 DenseMap<unsigned, unsigned> PhiToId; 4206 SmallVector<unsigned> Phis(TE.Scalars.size()); 4207 std::iota(Phis.begin(), Phis.end(), 0); 4208 OrdersType ResOrder(TE.Scalars.size()); 4209 for (unsigned Id = 0, Sz = TE.Scalars.size(); Id < Sz; ++Id) 4210 PhiToId[Id] = Id; 4211 stable_sort(Phis, PHICompare); 4212 for (unsigned Id = 0, Sz = Phis.size(); Id < Sz; ++Id) 4213 ResOrder[Id] = PhiToId[Phis[Id]]; 4214 if (IsIdentityOrder(ResOrder)) 4215 return std::nullopt; // No need to reorder. 4216 return std::move(ResOrder); 4217 } 4218 if (TE.State == TreeEntry::NeedToGather) { 4219 // TODO: add analysis of other gather nodes with extractelement 4220 // instructions and other values/instructions, not only undefs. 
4221 if (((TE.getOpcode() == Instruction::ExtractElement && 4222 !TE.isAltShuffle()) || 4223 (all_of(TE.Scalars, 4224 [](Value *V) { 4225 return isa<UndefValue, ExtractElementInst>(V); 4226 }) && 4227 any_of(TE.Scalars, 4228 [](Value *V) { return isa<ExtractElementInst>(V); }))) && 4229 all_of(TE.Scalars, 4230 [](Value *V) { 4231 auto *EE = dyn_cast<ExtractElementInst>(V); 4232 return !EE || isa<FixedVectorType>(EE->getVectorOperandType()); 4233 }) && 4234 allSameType(TE.Scalars)) { 4235 // Check that gather of extractelements can be represented as 4236 // just a shuffle of a single vector. 4237 OrdersType CurrentOrder; 4238 bool Reuse = canReuseExtract(TE.Scalars, TE.getMainOp(), CurrentOrder, 4239 /*ResizeAllowed=*/true); 4240 if (Reuse || !CurrentOrder.empty()) { 4241 if (!CurrentOrder.empty()) 4242 fixupOrderingIndices(CurrentOrder); 4243 return std::move(CurrentOrder); 4244 } 4245 } 4246 // If the gather node is <undef, v, .., poison> and 4247 // insertelement poison, v, 0 [+ permute] 4248 // is cheaper than 4249 // insertelement poison, v, n - try to reorder. 4250 // If rotating the whole graph, exclude the permute cost, the whole graph 4251 // might be transformed. 4252 int Sz = TE.Scalars.size(); 4253 if (isSplat(TE.Scalars) && !allConstant(TE.Scalars) && 4254 count_if(TE.Scalars, UndefValue::classof) == Sz - 1) { 4255 const auto *It = 4256 find_if(TE.Scalars, [](Value *V) { return !isConstant(V); }); 4257 if (It == TE.Scalars.begin()) 4258 return OrdersType(); 4259 auto *Ty = FixedVectorType::get(TE.Scalars.front()->getType(), Sz); 4260 if (It != TE.Scalars.end()) { 4261 OrdersType Order(Sz, Sz); 4262 unsigned Idx = std::distance(TE.Scalars.begin(), It); 4263 Order[Idx] = 0; 4264 fixupOrderingIndices(Order); 4265 SmallVector<int> Mask; 4266 inversePermutation(Order, Mask); 4267 InstructionCost PermuteCost = 4268 TopToBottom 4269 ? 0 4270 : TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty, Mask); 4271 InstructionCost InsertFirstCost = TTI->getVectorInstrCost( 4272 Instruction::InsertElement, Ty, TTI::TCK_RecipThroughput, 0, 4273 PoisonValue::get(Ty), *It); 4274 InstructionCost InsertIdxCost = TTI->getVectorInstrCost( 4275 Instruction::InsertElement, Ty, TTI::TCK_RecipThroughput, Idx, 4276 PoisonValue::get(Ty), *It); 4277 if (InsertFirstCost + PermuteCost < InsertIdxCost) 4278 return std::move(Order); 4279 } 4280 } 4281 if (std::optional<OrdersType> CurrentOrder = findReusedOrderedScalars(TE)) 4282 return CurrentOrder; 4283 if (TE.Scalars.size() >= 4) 4284 if (std::optional<OrdersType> Order = findPartiallyOrderedLoads(TE)) 4285 return Order; 4286 } 4287 return std::nullopt; 4288 } 4289 4290 /// Checks if the given mask is a "clustered" mask with the same clusters of 4291 /// size \p Sz, which are not identity submasks. 4292 static bool isRepeatedNonIdentityClusteredMask(ArrayRef<int> Mask, 4293 unsigned Sz) { 4294 ArrayRef<int> FirstCluster = Mask.slice(0, Sz); 4295 if (ShuffleVectorInst::isIdentityMask(FirstCluster, Sz)) 4296 return false; 4297 for (unsigned I = Sz, E = Mask.size(); I < E; I += Sz) { 4298 ArrayRef<int> Cluster = Mask.slice(I, Sz); 4299 if (Cluster != FirstCluster) 4300 return false; 4301 } 4302 return true; 4303 } 4304 4305 void BoUpSLP::reorderNodeWithReuses(TreeEntry &TE, ArrayRef<int> Mask) const { 4306 // Reorder reuses mask. 4307 reorderReuses(TE.ReuseShuffleIndices, Mask); 4308 const unsigned Sz = TE.Scalars.size(); 4309 // For vectorized and non-clustered reused no need to do anything else. 
4310 if (TE.State != TreeEntry::NeedToGather || 4311 !ShuffleVectorInst::isOneUseSingleSourceMask(TE.ReuseShuffleIndices, 4312 Sz) || 4313 !isRepeatedNonIdentityClusteredMask(TE.ReuseShuffleIndices, Sz)) 4314 return; 4315 SmallVector<int> NewMask; 4316 inversePermutation(TE.ReorderIndices, NewMask); 4317 addMask(NewMask, TE.ReuseShuffleIndices); 4318 // Clear reorder since it is going to be applied to the new mask. 4319 TE.ReorderIndices.clear(); 4320 // Try to improve gathered nodes with clustered reuses, if possible. 4321 ArrayRef<int> Slice = ArrayRef(NewMask).slice(0, Sz); 4322 SmallVector<unsigned> NewOrder(Slice.begin(), Slice.end()); 4323 inversePermutation(NewOrder, NewMask); 4324 reorderScalars(TE.Scalars, NewMask); 4325 // Fill the reuses mask with the identity submasks. 4326 for (auto *It = TE.ReuseShuffleIndices.begin(), 4327 *End = TE.ReuseShuffleIndices.end(); 4328 It != End; std::advance(It, Sz)) 4329 std::iota(It, std::next(It, Sz), 0); 4330 } 4331 4332 void BoUpSLP::reorderTopToBottom() { 4333 // Maps VF to the graph nodes. 4334 DenseMap<unsigned, SetVector<TreeEntry *>> VFToOrderedEntries; 4335 // ExtractElement gather nodes which can be vectorized and need to handle 4336 // their ordering. 4337 DenseMap<const TreeEntry *, OrdersType> GathersToOrders; 4338 4339 // Phi nodes can have preferred ordering based on their result users 4340 DenseMap<const TreeEntry *, OrdersType> PhisToOrders; 4341 4342 // AltShuffles can also have a preferred ordering that leads to fewer 4343 // instructions, e.g., the addsub instruction in x86. 4344 DenseMap<const TreeEntry *, OrdersType> AltShufflesToOrders; 4345 4346 // Maps a TreeEntry to the reorder indices of external users. 4347 DenseMap<const TreeEntry *, SmallVector<OrdersType, 1>> 4348 ExternalUserReorderMap; 4349 // FIXME: Workaround for syntax error reported by MSVC buildbots. 4350 TargetTransformInfo &TTIRef = *TTI; 4351 // Find all reorderable nodes with the given VF. 4352 // Currently the are vectorized stores,loads,extracts + some gathering of 4353 // extracts. 4354 for_each(VectorizableTree, [this, &TTIRef, &VFToOrderedEntries, 4355 &GathersToOrders, &ExternalUserReorderMap, 4356 &AltShufflesToOrders, &PhisToOrders]( 4357 const std::unique_ptr<TreeEntry> &TE) { 4358 // Look for external users that will probably be vectorized. 4359 SmallVector<OrdersType, 1> ExternalUserReorderIndices = 4360 findExternalStoreUsersReorderIndices(TE.get()); 4361 if (!ExternalUserReorderIndices.empty()) { 4362 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get()); 4363 ExternalUserReorderMap.try_emplace(TE.get(), 4364 std::move(ExternalUserReorderIndices)); 4365 } 4366 4367 // Patterns like [fadd,fsub] can be combined into a single instruction in 4368 // x86. Reordering them into [fsub,fadd] blocks this pattern. So we need 4369 // to take into account their order when looking for the most used order. 4370 if (TE->isAltShuffle()) { 4371 VectorType *VecTy = 4372 FixedVectorType::get(TE->Scalars[0]->getType(), TE->Scalars.size()); 4373 unsigned Opcode0 = TE->getOpcode(); 4374 unsigned Opcode1 = TE->getAltOpcode(); 4375 // The opcode mask selects between the two opcodes. 4376 SmallBitVector OpcodeMask(TE->Scalars.size(), false); 4377 for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size())) 4378 if (cast<Instruction>(TE->Scalars[Lane])->getOpcode() == Opcode1) 4379 OpcodeMask.set(Lane); 4380 // If this pattern is supported by the target then we consider the order. 
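// Illustrative example (hypothetical scalars): for the alternating bundle
// [fadd, fsub, fadd, fsub] with Opcode0 == FAdd and Opcode1 == FSub, the loop
// above sets OpcodeMask bits {1, 3}; as noted above, on x86 this shape can
// map to a single addsub instruction, so its order is worth preserving.
//   #include <vector>
//   // 0 stands for Opcode0 (fadd), 1 for Opcode1 (fsub) per lane (assumed).
//   std::vector<int> LaneOpcodes = {0, 1, 0, 1};
//   std::vector<bool> OpcodeMaskSketch(LaneOpcodes.size(), false);
//   for (unsigned Lane = 0; Lane < LaneOpcodes.size(); ++Lane)
//     if (LaneOpcodes[Lane] == 1)
//       OpcodeMaskSketch[Lane] = true; // lanes 1 and 3 are set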
4381 if (TTIRef.isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask)) { 4382 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get()); 4383 AltShufflesToOrders.try_emplace(TE.get(), OrdersType()); 4384 } 4385 // TODO: Check the reverse order too. 4386 } 4387 4388 if (std::optional<OrdersType> CurrentOrder = 4389 getReorderingData(*TE, /*TopToBottom=*/true)) { 4390 // Do not include ordering for nodes used in the alt opcode vectorization, 4391 // better to reorder them during bottom-to-top stage. If follow the order 4392 // here, it causes reordering of the whole graph though actually it is 4393 // profitable just to reorder the subgraph that starts from the alternate 4394 // opcode vectorization node. Such nodes already end-up with the shuffle 4395 // instruction and it is just enough to change this shuffle rather than 4396 // rotate the scalars for the whole graph. 4397 unsigned Cnt = 0; 4398 const TreeEntry *UserTE = TE.get(); 4399 while (UserTE && Cnt < RecursionMaxDepth) { 4400 if (UserTE->UserTreeIndices.size() != 1) 4401 break; 4402 if (all_of(UserTE->UserTreeIndices, [](const EdgeInfo &EI) { 4403 return EI.UserTE->State == TreeEntry::Vectorize && 4404 EI.UserTE->isAltShuffle() && EI.UserTE->Idx != 0; 4405 })) 4406 return; 4407 UserTE = UserTE->UserTreeIndices.back().UserTE; 4408 ++Cnt; 4409 } 4410 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get()); 4411 if (!(TE->State == TreeEntry::Vectorize || 4412 TE->State == TreeEntry::PossibleStridedVectorize) || 4413 !TE->ReuseShuffleIndices.empty()) 4414 GathersToOrders.try_emplace(TE.get(), *CurrentOrder); 4415 if (TE->State == TreeEntry::Vectorize && 4416 TE->getOpcode() == Instruction::PHI) 4417 PhisToOrders.try_emplace(TE.get(), *CurrentOrder); 4418 } 4419 }); 4420 4421 // Reorder the graph nodes according to their vectorization factor. 4422 for (unsigned VF = VectorizableTree.front()->getVectorFactor(); VF > 1; 4423 VF /= 2) { 4424 auto It = VFToOrderedEntries.find(VF); 4425 if (It == VFToOrderedEntries.end()) 4426 continue; 4427 // Try to find the most profitable order. We just are looking for the most 4428 // used order and reorder scalar elements in the nodes according to this 4429 // mostly used order. 4430 ArrayRef<TreeEntry *> OrderedEntries = It->second.getArrayRef(); 4431 // All operands are reordered and used only in this node - propagate the 4432 // most used order to the user node. 4433 MapVector<OrdersType, unsigned, 4434 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>> 4435 OrdersUses; 4436 // Last chance orders - scatter vectorize. Try to use their orders if no 4437 // other orders or the order is counted already. 4438 SmallVector<OrdersType> StridedVectorizeOrders; 4439 SmallPtrSet<const TreeEntry *, 4> VisitedOps; 4440 for (const TreeEntry *OpTE : OrderedEntries) { 4441 // No need to reorder this nodes, still need to extend and to use shuffle, 4442 // just need to merge reordering shuffle and the reuse shuffle. 4443 if (!OpTE->ReuseShuffleIndices.empty() && !GathersToOrders.count(OpTE)) 4444 continue; 4445 // Count number of orders uses. 
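// Illustrative sketch (standalone, hypothetical orders) of the counting
// scheme used below: each candidate order votes once per node (or per
// external use), and the order with the most votes becomes BestOrder for
// this VF.
//   #include <map>
//   #include <vector>
//   std::map<std::vector<unsigned>, unsigned> Votes;
//   ++Votes[{1, 0, 3, 2}]; // order proposed by one node
//   ++Votes[{}];           // empty order == natural/identity order
//   ++Votes[{1, 0, 3, 2}]; // another node proposes the same order
//   // {1, 0, 3, 2} has 2 votes and would win over the identity order.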
4446 const auto &Order = [OpTE, &GathersToOrders, &AltShufflesToOrders, 4447 &PhisToOrders]() -> const OrdersType & { 4448 if (OpTE->State == TreeEntry::NeedToGather || 4449 !OpTE->ReuseShuffleIndices.empty()) { 4450 auto It = GathersToOrders.find(OpTE); 4451 if (It != GathersToOrders.end()) 4452 return It->second; 4453 } 4454 if (OpTE->isAltShuffle()) { 4455 auto It = AltShufflesToOrders.find(OpTE); 4456 if (It != AltShufflesToOrders.end()) 4457 return It->second; 4458 } 4459 if (OpTE->State == TreeEntry::Vectorize && 4460 OpTE->getOpcode() == Instruction::PHI) { 4461 auto It = PhisToOrders.find(OpTE); 4462 if (It != PhisToOrders.end()) 4463 return It->second; 4464 } 4465 return OpTE->ReorderIndices; 4466 }(); 4467 // First consider the order of the external scalar users. 4468 auto It = ExternalUserReorderMap.find(OpTE); 4469 if (It != ExternalUserReorderMap.end()) { 4470 const auto &ExternalUserReorderIndices = It->second; 4471 // If the OpTE vector factor != number of scalars - use natural order, 4472 // it is an attempt to reorder node with reused scalars but with 4473 // external uses. 4474 if (OpTE->getVectorFactor() != OpTE->Scalars.size()) { 4475 OrdersUses.insert(std::make_pair(OrdersType(), 0)).first->second += 4476 ExternalUserReorderIndices.size(); 4477 } else { 4478 for (const OrdersType &ExtOrder : ExternalUserReorderIndices) 4479 ++OrdersUses.insert(std::make_pair(ExtOrder, 0)).first->second; 4480 } 4481 // No other useful reorder data in this entry. 4482 if (Order.empty()) 4483 continue; 4484 } 4485 // Postpone scatter orders. 4486 if (OpTE->State == TreeEntry::PossibleStridedVectorize) { 4487 StridedVectorizeOrders.push_back(Order); 4488 continue; 4489 } 4490 // Stores actually store the mask, not the order, need to invert. 4491 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() && 4492 OpTE->getOpcode() == Instruction::Store && !Order.empty()) { 4493 SmallVector<int> Mask; 4494 inversePermutation(Order, Mask); 4495 unsigned E = Order.size(); 4496 OrdersType CurrentOrder(E, E); 4497 transform(Mask, CurrentOrder.begin(), [E](int Idx) { 4498 return Idx == PoisonMaskElem ? E : static_cast<unsigned>(Idx); 4499 }); 4500 fixupOrderingIndices(CurrentOrder); 4501 ++OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second; 4502 } else { 4503 ++OrdersUses.insert(std::make_pair(Order, 0)).first->second; 4504 } 4505 } 4506 // Set order of the user node. 4507 if (OrdersUses.empty()) { 4508 if (StridedVectorizeOrders.empty()) 4509 continue; 4510 // Add (potentially!) strided vectorize orders. 4511 for (OrdersType &Order : StridedVectorizeOrders) 4512 ++OrdersUses.insert(std::make_pair(Order, 0)).first->second; 4513 } else { 4514 // Account (potentially!) strided vectorize orders only if it was used 4515 // already. 4516 for (OrdersType &Order : StridedVectorizeOrders) { 4517 auto *It = OrdersUses.find(Order); 4518 if (It != OrdersUses.end()) 4519 ++It->second; 4520 } 4521 } 4522 // Choose the most used order. 4523 ArrayRef<unsigned> BestOrder = OrdersUses.front().first; 4524 unsigned Cnt = OrdersUses.front().second; 4525 for (const auto &Pair : drop_begin(OrdersUses)) { 4526 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) { 4527 BestOrder = Pair.first; 4528 Cnt = Pair.second; 4529 } 4530 } 4531 // Set order of the user node. 
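// Illustrative example (hypothetical counts) for the selection just above:
// ties are broken in favour of the empty order, which stands for the
// identity and therefore requires no extra shuffling.
//   #include <map>
//   #include <vector>
//   std::map<std::vector<unsigned>, unsigned> Votes = {
//       {std::vector<unsigned>{1, 0, 3, 2}, 2}, {std::vector<unsigned>{}, 2}};
//   // Both candidates have 2 uses; the tie-break (Pair.first.empty()) keeps
//   // the empty order, so no reordering is emitted for this VF.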
4532 if (BestOrder.empty()) 4533 continue; 4534 SmallVector<int> Mask; 4535 inversePermutation(BestOrder, Mask); 4536 SmallVector<int> MaskOrder(BestOrder.size(), PoisonMaskElem); 4537 unsigned E = BestOrder.size(); 4538 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) { 4539 return I < E ? static_cast<int>(I) : PoisonMaskElem; 4540 }); 4541 // Do an actual reordering, if profitable. 4542 for (std::unique_ptr<TreeEntry> &TE : VectorizableTree) { 4543 // Just do the reordering for the nodes with the given VF. 4544 if (TE->Scalars.size() != VF) { 4545 if (TE->ReuseShuffleIndices.size() == VF) { 4546 // Need to reorder the reuses masks of the operands with smaller VF to 4547 // be able to find the match between the graph nodes and scalar 4548 // operands of the given node during vectorization/cost estimation. 4549 assert(all_of(TE->UserTreeIndices, 4550 [VF, &TE](const EdgeInfo &EI) { 4551 return EI.UserTE->Scalars.size() == VF || 4552 EI.UserTE->Scalars.size() == 4553 TE->Scalars.size(); 4554 }) && 4555 "All users must be of VF size."); 4556 // Update ordering of the operands with the smaller VF than the given 4557 // one. 4558 reorderNodeWithReuses(*TE, Mask); 4559 } 4560 continue; 4561 } 4562 if ((TE->State == TreeEntry::Vectorize || 4563 TE->State == TreeEntry::PossibleStridedVectorize) && 4564 isa<ExtractElementInst, ExtractValueInst, LoadInst, StoreInst, 4565 InsertElementInst>(TE->getMainOp()) && 4566 !TE->isAltShuffle()) { 4567 // Build correct orders for extract{element,value}, loads and 4568 // stores. 4569 reorderOrder(TE->ReorderIndices, Mask); 4570 if (isa<InsertElementInst, StoreInst>(TE->getMainOp())) 4571 TE->reorderOperands(Mask); 4572 } else { 4573 // Reorder the node and its operands. 4574 TE->reorderOperands(Mask); 4575 assert(TE->ReorderIndices.empty() && 4576 "Expected empty reorder sequence."); 4577 reorderScalars(TE->Scalars, Mask); 4578 } 4579 if (!TE->ReuseShuffleIndices.empty()) { 4580 // Apply reversed order to keep the original ordering of the reused 4581 // elements to avoid extra reorder indices shuffling. 4582 OrdersType CurrentOrder; 4583 reorderOrder(CurrentOrder, MaskOrder); 4584 SmallVector<int> NewReuses; 4585 inversePermutation(CurrentOrder, NewReuses); 4586 addMask(NewReuses, TE->ReuseShuffleIndices); 4587 TE->ReuseShuffleIndices.swap(NewReuses); 4588 } 4589 } 4590 } 4591 } 4592 4593 bool BoUpSLP::canReorderOperands( 4594 TreeEntry *UserTE, SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges, 4595 ArrayRef<TreeEntry *> ReorderableGathers, 4596 SmallVectorImpl<TreeEntry *> &GatherOps) { 4597 for (unsigned I = 0, E = UserTE->getNumOperands(); I < E; ++I) { 4598 if (any_of(Edges, [I](const std::pair<unsigned, TreeEntry *> &OpData) { 4599 return OpData.first == I && 4600 OpData.second->State == TreeEntry::Vectorize; 4601 })) 4602 continue; 4603 if (TreeEntry *TE = getVectorizedOperand(UserTE, I)) { 4604 // FIXME: Do not reorder (possible!) strided vectorized nodes, they 4605 // require reordering of the operands, which is not implemented yet. 4606 if (TE->State == TreeEntry::PossibleStridedVectorize) 4607 return false; 4608 // Do not reorder if operand node is used by many user nodes. 4609 if (any_of(TE->UserTreeIndices, 4610 [UserTE](const EdgeInfo &EI) { return EI.UserTE != UserTE; })) 4611 return false; 4612 // Add the node to the list of the ordered nodes with the identity 4613 // order. 4614 Edges.emplace_back(I, TE); 4615 // Add ScatterVectorize nodes to the list of operands, where just 4616 // reordering of the scalars is required. 
Similar to the gathers, so 4617 // simply add to the list of gathered ops. 4618 // If there are reused scalars, process this node as a regular vectorize 4619 // node, just reorder reuses mask. 4620 if (TE->State != TreeEntry::Vectorize && 4621 TE->ReuseShuffleIndices.empty() && TE->ReorderIndices.empty()) 4622 GatherOps.push_back(TE); 4623 continue; 4624 } 4625 TreeEntry *Gather = nullptr; 4626 if (count_if(ReorderableGathers, 4627 [&Gather, UserTE, I](TreeEntry *TE) { 4628 assert(TE->State != TreeEntry::Vectorize && 4629 "Only non-vectorized nodes are expected."); 4630 if (any_of(TE->UserTreeIndices, 4631 [UserTE, I](const EdgeInfo &EI) { 4632 return EI.UserTE == UserTE && EI.EdgeIdx == I; 4633 })) { 4634 assert(TE->isSame(UserTE->getOperand(I)) && 4635 "Operand entry does not match operands."); 4636 Gather = TE; 4637 return true; 4638 } 4639 return false; 4640 }) > 1 && 4641 !allConstant(UserTE->getOperand(I))) 4642 return false; 4643 if (Gather) 4644 GatherOps.push_back(Gather); 4645 } 4646 return true; 4647 } 4648 4649 void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) { 4650 SetVector<TreeEntry *> OrderedEntries; 4651 DenseMap<const TreeEntry *, OrdersType> GathersToOrders; 4652 // Find all reorderable leaf nodes with the given VF. 4653 // Currently the are vectorized loads,extracts without alternate operands + 4654 // some gathering of extracts. 4655 SmallVector<TreeEntry *> NonVectorized; 4656 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) { 4657 if (TE->State != TreeEntry::Vectorize && 4658 TE->State != TreeEntry::PossibleStridedVectorize) 4659 NonVectorized.push_back(TE.get()); 4660 if (std::optional<OrdersType> CurrentOrder = 4661 getReorderingData(*TE, /*TopToBottom=*/false)) { 4662 OrderedEntries.insert(TE.get()); 4663 if (!(TE->State == TreeEntry::Vectorize || 4664 TE->State == TreeEntry::PossibleStridedVectorize) || 4665 !TE->ReuseShuffleIndices.empty()) 4666 GathersToOrders.try_emplace(TE.get(), *CurrentOrder); 4667 } 4668 } 4669 4670 // 1. Propagate order to the graph nodes, which use only reordered nodes. 4671 // I.e., if the node has operands, that are reordered, try to make at least 4672 // one operand order in the natural order and reorder others + reorder the 4673 // user node itself. 4674 SmallPtrSet<const TreeEntry *, 4> Visited; 4675 while (!OrderedEntries.empty()) { 4676 // 1. Filter out only reordered nodes. 4677 // 2. If the entry has multiple uses - skip it and jump to the next node. 4678 DenseMap<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>> Users; 4679 SmallVector<TreeEntry *> Filtered; 4680 for (TreeEntry *TE : OrderedEntries) { 4681 if (!(TE->State == TreeEntry::Vectorize || 4682 TE->State == TreeEntry::PossibleStridedVectorize || 4683 (TE->State == TreeEntry::NeedToGather && 4684 GathersToOrders.count(TE))) || 4685 TE->UserTreeIndices.empty() || !TE->ReuseShuffleIndices.empty() || 4686 !all_of(drop_begin(TE->UserTreeIndices), 4687 [TE](const EdgeInfo &EI) { 4688 return EI.UserTE == TE->UserTreeIndices.front().UserTE; 4689 }) || 4690 !Visited.insert(TE).second) { 4691 Filtered.push_back(TE); 4692 continue; 4693 } 4694 // Build a map between user nodes and their operands order to speedup 4695 // search. The graph currently does not provide this dependency directly. 
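// Illustrative sketch (standalone; Node and the edge layout are hypothetical
// stand-ins) of the inversion done below: each operand node carries
// (user, operand index) edges, and we flip them into a
// user -> list of (operand index, operand) map for fast lookup.
//   #include <map>
//   #include <utility>
//   #include <vector>
//   struct Node { int Id; };
//   Node Operand{7}, User{1};
//   std::vector<std::pair<Node *, unsigned>> UserEdges = {{&User, 0}};
//   std::map<Node *, std::vector<std::pair<unsigned, Node *>>> UserMap;
//   for (const auto &Edge : UserEdges)
//     UserMap[Edge.first].emplace_back(Edge.second, &Operand);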
4696 for (EdgeInfo &EI : TE->UserTreeIndices) { 4697 TreeEntry *UserTE = EI.UserTE; 4698 auto It = Users.find(UserTE); 4699 if (It == Users.end()) 4700 It = Users.insert({UserTE, {}}).first; 4701 It->second.emplace_back(EI.EdgeIdx, TE); 4702 } 4703 } 4704 // Erase filtered entries. 4705 for (TreeEntry *TE : Filtered) 4706 OrderedEntries.remove(TE); 4707 SmallVector< 4708 std::pair<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>>> 4709 UsersVec(Users.begin(), Users.end()); 4710 sort(UsersVec, [](const auto &Data1, const auto &Data2) { 4711 return Data1.first->Idx > Data2.first->Idx; 4712 }); 4713 for (auto &Data : UsersVec) { 4714 // Check that operands are used only in the User node. 4715 SmallVector<TreeEntry *> GatherOps; 4716 if (!canReorderOperands(Data.first, Data.second, NonVectorized, 4717 GatherOps)) { 4718 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) 4719 OrderedEntries.remove(Op.second); 4720 continue; 4721 } 4722 // All operands are reordered and used only in this node - propagate the 4723 // most used order to the user node. 4724 MapVector<OrdersType, unsigned, 4725 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>> 4726 OrdersUses; 4727 // Last chance orders - scatter vectorize. Try to use their orders if no 4728 // other orders or the order is counted already. 4729 SmallVector<std::pair<OrdersType, unsigned>> StridedVectorizeOrders; 4730 // Do the analysis for each tree entry only once, otherwise the order of 4731 // the same node my be considered several times, though might be not 4732 // profitable. 4733 SmallPtrSet<const TreeEntry *, 4> VisitedOps; 4734 SmallPtrSet<const TreeEntry *, 4> VisitedUsers; 4735 for (const auto &Op : Data.second) { 4736 TreeEntry *OpTE = Op.second; 4737 if (!VisitedOps.insert(OpTE).second) 4738 continue; 4739 if (!OpTE->ReuseShuffleIndices.empty() && !GathersToOrders.count(OpTE)) 4740 continue; 4741 const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & { 4742 if (OpTE->State == TreeEntry::NeedToGather || 4743 !OpTE->ReuseShuffleIndices.empty()) 4744 return GathersToOrders.find(OpTE)->second; 4745 return OpTE->ReorderIndices; 4746 }(); 4747 unsigned NumOps = count_if( 4748 Data.second, [OpTE](const std::pair<unsigned, TreeEntry *> &P) { 4749 return P.second == OpTE; 4750 }); 4751 // Postpone scatter orders. 4752 if (OpTE->State == TreeEntry::PossibleStridedVectorize) { 4753 StridedVectorizeOrders.emplace_back(Order, NumOps); 4754 continue; 4755 } 4756 // Stores actually store the mask, not the order, need to invert. 4757 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() && 4758 OpTE->getOpcode() == Instruction::Store && !Order.empty()) { 4759 SmallVector<int> Mask; 4760 inversePermutation(Order, Mask); 4761 unsigned E = Order.size(); 4762 OrdersType CurrentOrder(E, E); 4763 transform(Mask, CurrentOrder.begin(), [E](int Idx) { 4764 return Idx == PoisonMaskElem ? 
E : static_cast<unsigned>(Idx); 4765 }); 4766 fixupOrderingIndices(CurrentOrder); 4767 OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second += 4768 NumOps; 4769 } else { 4770 OrdersUses.insert(std::make_pair(Order, 0)).first->second += NumOps; 4771 } 4772 auto Res = OrdersUses.insert(std::make_pair(OrdersType(), 0)); 4773 const auto &&AllowsReordering = [IgnoreReorder, &GathersToOrders]( 4774 const TreeEntry *TE) { 4775 if (!TE->ReorderIndices.empty() || !TE->ReuseShuffleIndices.empty() || 4776 (TE->State == TreeEntry::Vectorize && TE->isAltShuffle()) || 4777 (IgnoreReorder && TE->Idx == 0)) 4778 return true; 4779 if (TE->State == TreeEntry::NeedToGather) { 4780 auto It = GathersToOrders.find(TE); 4781 if (It != GathersToOrders.end()) 4782 return !It->second.empty(); 4783 return true; 4784 } 4785 return false; 4786 }; 4787 for (const EdgeInfo &EI : OpTE->UserTreeIndices) { 4788 TreeEntry *UserTE = EI.UserTE; 4789 if (!VisitedUsers.insert(UserTE).second) 4790 continue; 4791 // May reorder user node if it requires reordering, has reused 4792 // scalars, is an alternate op vectorize node or its op nodes require 4793 // reordering. 4794 if (AllowsReordering(UserTE)) 4795 continue; 4796 // Check if users allow reordering. 4797 // Currently look up just 1 level of operands to avoid increase of 4798 // the compile time. 4799 // Profitable to reorder if definitely more operands allow 4800 // reordering rather than those with natural order. 4801 ArrayRef<std::pair<unsigned, TreeEntry *>> Ops = Users[UserTE]; 4802 if (static_cast<unsigned>(count_if( 4803 Ops, [UserTE, &AllowsReordering]( 4804 const std::pair<unsigned, TreeEntry *> &Op) { 4805 return AllowsReordering(Op.second) && 4806 all_of(Op.second->UserTreeIndices, 4807 [UserTE](const EdgeInfo &EI) { 4808 return EI.UserTE == UserTE; 4809 }); 4810 })) <= Ops.size() / 2) 4811 ++Res.first->second; 4812 } 4813 } 4814 // If no orders - skip current nodes and jump to the next one, if any. 4815 if (OrdersUses.empty()) { 4816 if (StridedVectorizeOrders.empty() || 4817 (Data.first->ReorderIndices.empty() && 4818 Data.first->ReuseShuffleIndices.empty() && 4819 !(IgnoreReorder && 4820 Data.first == VectorizableTree.front().get()))) { 4821 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) 4822 OrderedEntries.remove(Op.second); 4823 continue; 4824 } 4825 // Add (potentially!) strided vectorize orders. 4826 for (std::pair<OrdersType, unsigned> &Pair : StridedVectorizeOrders) 4827 OrdersUses.insert(std::make_pair(Pair.first, 0)).first->second += 4828 Pair.second; 4829 } else { 4830 // Account (potentially!) strided vectorize orders only if it was used 4831 // already. 4832 for (std::pair<OrdersType, unsigned> &Pair : StridedVectorizeOrders) { 4833 auto *It = OrdersUses.find(Pair.first); 4834 if (It != OrdersUses.end()) 4835 It->second += Pair.second; 4836 } 4837 } 4838 // Choose the best order. 4839 ArrayRef<unsigned> BestOrder = OrdersUses.front().first; 4840 unsigned Cnt = OrdersUses.front().second; 4841 for (const auto &Pair : drop_begin(OrdersUses)) { 4842 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) { 4843 BestOrder = Pair.first; 4844 Cnt = Pair.second; 4845 } 4846 } 4847 // Set order of the user node (reordering of operands and user nodes). 4848 if (BestOrder.empty()) { 4849 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) 4850 OrderedEntries.remove(Op.second); 4851 continue; 4852 } 4853 // Erase operands from OrderedEntries list and adjust their orders. 
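// Illustrative sketch (standalone, hypothetical order): the Mask built below
// is the inverse of BestOrder, i.e. Mask[BestOrder[I]] == I, which is the
// convention inversePermutation follows elsewhere in this function as well.
//   #include <vector>
//   std::vector<unsigned> BestOrderSketch = {2, 0, 1};
//   std::vector<int> MaskSketch(BestOrderSketch.size());
//   for (unsigned I = 0; I < BestOrderSketch.size(); ++I)
//     MaskSketch[BestOrderSketch[I]] = I;
//   // BestOrderSketch {2, 0, 1} -> MaskSketch {1, 2, 0}.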
4854 VisitedOps.clear(); 4855 SmallVector<int> Mask; 4856 inversePermutation(BestOrder, Mask); 4857 SmallVector<int> MaskOrder(BestOrder.size(), PoisonMaskElem); 4858 unsigned E = BestOrder.size(); 4859 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) { 4860 return I < E ? static_cast<int>(I) : PoisonMaskElem; 4861 }); 4862 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) { 4863 TreeEntry *TE = Op.second; 4864 OrderedEntries.remove(TE); 4865 if (!VisitedOps.insert(TE).second) 4866 continue; 4867 if (TE->ReuseShuffleIndices.size() == BestOrder.size()) { 4868 reorderNodeWithReuses(*TE, Mask); 4869 continue; 4870 } 4871 // Gathers are processed separately. 4872 if (TE->State != TreeEntry::Vectorize && 4873 TE->State != TreeEntry::PossibleStridedVectorize && 4874 (TE->State != TreeEntry::ScatterVectorize || 4875 TE->ReorderIndices.empty())) 4876 continue; 4877 assert((BestOrder.size() == TE->ReorderIndices.size() || 4878 TE->ReorderIndices.empty()) && 4879 "Non-matching sizes of user/operand entries."); 4880 reorderOrder(TE->ReorderIndices, Mask); 4881 if (IgnoreReorder && TE == VectorizableTree.front().get()) 4882 IgnoreReorder = false; 4883 } 4884 // For gathers just need to reorder its scalars. 4885 for (TreeEntry *Gather : GatherOps) { 4886 assert(Gather->ReorderIndices.empty() && 4887 "Unexpected reordering of gathers."); 4888 if (!Gather->ReuseShuffleIndices.empty()) { 4889 // Just reorder reuses indices. 4890 reorderReuses(Gather->ReuseShuffleIndices, Mask); 4891 continue; 4892 } 4893 reorderScalars(Gather->Scalars, Mask); 4894 OrderedEntries.remove(Gather); 4895 } 4896 // Reorder operands of the user node and set the ordering for the user 4897 // node itself. 4898 if (Data.first->State != TreeEntry::Vectorize || 4899 !isa<ExtractElementInst, ExtractValueInst, LoadInst>( 4900 Data.first->getMainOp()) || 4901 Data.first->isAltShuffle()) 4902 Data.first->reorderOperands(Mask); 4903 if (!isa<InsertElementInst, StoreInst>(Data.first->getMainOp()) || 4904 Data.first->isAltShuffle() || 4905 Data.first->State == TreeEntry::PossibleStridedVectorize) { 4906 reorderScalars(Data.first->Scalars, Mask); 4907 reorderOrder(Data.first->ReorderIndices, MaskOrder); 4908 if (Data.first->ReuseShuffleIndices.empty() && 4909 !Data.first->ReorderIndices.empty() && 4910 !Data.first->isAltShuffle()) { 4911 // Insert user node to the list to try to sink reordering deeper in 4912 // the graph. 4913 OrderedEntries.insert(Data.first); 4914 } 4915 } else { 4916 reorderOrder(Data.first->ReorderIndices, Mask); 4917 } 4918 } 4919 } 4920 // If the reordering is unnecessary, just remove the reorder. 4921 if (IgnoreReorder && !VectorizableTree.front()->ReorderIndices.empty() && 4922 VectorizableTree.front()->ReuseShuffleIndices.empty()) 4923 VectorizableTree.front()->ReorderIndices.clear(); 4924 } 4925 4926 void BoUpSLP::buildExternalUses( 4927 const ExtraValueToDebugLocsMap &ExternallyUsedValues) { 4928 // Collect the values that we need to extract from the tree. 4929 for (auto &TEPtr : VectorizableTree) { 4930 TreeEntry *Entry = TEPtr.get(); 4931 4932 // No need to handle users of gathered values. 4933 if (Entry->State == TreeEntry::NeedToGather) 4934 continue; 4935 4936 // For each lane: 4937 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 4938 Value *Scalar = Entry->Scalars[Lane]; 4939 if (!isa<Instruction>(Scalar)) 4940 continue; 4941 int FoundLane = Entry->findLaneForValue(Scalar); 4942 4943 // Check if the scalar is externally used as an extra arg. 
4944 const auto *ExtI = ExternallyUsedValues.find(Scalar); 4945 if (ExtI != ExternallyUsedValues.end()) { 4946 LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " 4947 << Lane << " from " << *Scalar << ".\n"); 4948 ExternalUses.emplace_back(Scalar, nullptr, FoundLane); 4949 } 4950 for (User *U : Scalar->users()) { 4951 LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n"); 4952 4953 Instruction *UserInst = dyn_cast<Instruction>(U); 4954 if (!UserInst) 4955 continue; 4956 4957 if (isDeleted(UserInst)) 4958 continue; 4959 4960 // Skip in-tree scalars that become vectors 4961 if (TreeEntry *UseEntry = getTreeEntry(U)) { 4962 Value *UseScalar = UseEntry->Scalars[0]; 4963 // Some in-tree scalars will remain as scalar in vectorized 4964 // instructions. If that is the case, the one in Lane 0 will 4965 // be used. 4966 if (UseScalar != U || 4967 UseEntry->State == TreeEntry::ScatterVectorize || 4968 UseEntry->State == TreeEntry::PossibleStridedVectorize || 4969 !doesInTreeUserNeedToExtract(Scalar, UserInst, TLI)) { 4970 LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U 4971 << ".\n"); 4972 assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state"); 4973 continue; 4974 } 4975 } 4976 4977 // Ignore users in the user ignore list. 4978 if (UserIgnoreList && UserIgnoreList->contains(UserInst)) 4979 continue; 4980 4981 LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " 4982 << Lane << " from " << *Scalar << ".\n"); 4983 ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane)); 4984 } 4985 } 4986 } 4987 } 4988 4989 DenseMap<Value *, SmallVector<StoreInst *>> 4990 BoUpSLP::collectUserStores(const BoUpSLP::TreeEntry *TE) const { 4991 DenseMap<Value *, SmallVector<StoreInst *>> PtrToStoresMap; 4992 for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size())) { 4993 Value *V = TE->Scalars[Lane]; 4994 // To save compilation time we don't visit if we have too many users. 4995 static constexpr unsigned UsersLimit = 4; 4996 if (V->hasNUsesOrMore(UsersLimit)) 4997 break; 4998 4999 // Collect stores per pointer object. 5000 for (User *U : V->users()) { 5001 auto *SI = dyn_cast<StoreInst>(U); 5002 if (SI == nullptr || !SI->isSimple() || 5003 !isValidElementType(SI->getValueOperand()->getType())) 5004 continue; 5005 // Skip entry if already 5006 if (getTreeEntry(U)) 5007 continue; 5008 5009 Value *Ptr = getUnderlyingObject(SI->getPointerOperand()); 5010 auto &StoresVec = PtrToStoresMap[Ptr]; 5011 // For now just keep one store per pointer object per lane. 5012 // TODO: Extend this to support multiple stores per pointer per lane 5013 if (StoresVec.size() > Lane) 5014 continue; 5015 // Skip if in different BBs. 5016 if (!StoresVec.empty() && 5017 SI->getParent() != StoresVec.back()->getParent()) 5018 continue; 5019 // Make sure that the stores are of the same type. 5020 if (!StoresVec.empty() && 5021 SI->getValueOperand()->getType() != 5022 StoresVec.back()->getValueOperand()->getType()) 5023 continue; 5024 StoresVec.push_back(SI); 5025 } 5026 } 5027 return PtrToStoresMap; 5028 } 5029 5030 bool BoUpSLP::canFormVector(ArrayRef<StoreInst *> StoresVec, 5031 OrdersType &ReorderIndices) const { 5032 // We check whether the stores in StoreVec can form a vector by sorting them 5033 // and checking whether they are consecutive. 5034 5035 // To avoid calling getPointersDiff() while sorting we create a vector of 5036 // pairs {store, offset from first} and sort this instead. 
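// Illustrative sketch (standalone, hypothetical offsets) of the
// decorate-sort-check pattern below: compute each store's element offset
// from the first store once, sort the {store, offset} pairs, and accept the
// group only if the sorted offsets are consecutive.
//   #include <algorithm>
//   #include <utility>
//   #include <vector>
//   // {store id, element offset from the first store} (assumed values).
//   std::vector<std::pair<int, int>> Offs = {{0, 0}, {1, 2}, {2, 1}, {3, 3}};
//   std::stable_sort(Offs.begin(), Offs.end(),
//                    [](const auto &A, const auto &B) {
//                      return A.second < B.second;
//                    });
//   bool Consecutive = true;
//   for (unsigned I = 1; I < Offs.size(); ++I)
//     Consecutive &= Offs[I].second == Offs[I - 1].second + 1;
//   // Sorted offsets 0, 1, 2, 3 -> Consecutive == true.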
5037 SmallVector<std::pair<StoreInst *, int>> StoreOffsetVec(StoresVec.size()); 5038 StoreInst *S0 = StoresVec[0]; 5039 StoreOffsetVec[0] = {S0, 0}; 5040 Type *S0Ty = S0->getValueOperand()->getType(); 5041 Value *S0Ptr = S0->getPointerOperand(); 5042 for (unsigned Idx : seq<unsigned>(1, StoresVec.size())) { 5043 StoreInst *SI = StoresVec[Idx]; 5044 std::optional<int> Diff = 5045 getPointersDiff(S0Ty, S0Ptr, SI->getValueOperand()->getType(), 5046 SI->getPointerOperand(), *DL, *SE, 5047 /*StrictCheck=*/true); 5048 // We failed to compare the pointers so just abandon this StoresVec. 5049 if (!Diff) 5050 return false; 5051 StoreOffsetVec[Idx] = {StoresVec[Idx], *Diff}; 5052 } 5053 5054 // Sort the vector based on the pointers. We create a copy because we may 5055 // need the original later for calculating the reorder (shuffle) indices. 5056 stable_sort(StoreOffsetVec, [](const std::pair<StoreInst *, int> &Pair1, 5057 const std::pair<StoreInst *, int> &Pair2) { 5058 int Offset1 = Pair1.second; 5059 int Offset2 = Pair2.second; 5060 return Offset1 < Offset2; 5061 }); 5062 5063 // Check if the stores are consecutive by checking if their difference is 1. 5064 for (unsigned Idx : seq<unsigned>(1, StoreOffsetVec.size())) 5065 if (StoreOffsetVec[Idx].second != StoreOffsetVec[Idx - 1].second + 1) 5066 return false; 5067 5068 // Calculate the shuffle indices according to their offset against the sorted 5069 // StoreOffsetVec. 5070 ReorderIndices.reserve(StoresVec.size()); 5071 for (StoreInst *SI : StoresVec) { 5072 unsigned Idx = find_if(StoreOffsetVec, 5073 [SI](const std::pair<StoreInst *, int> &Pair) { 5074 return Pair.first == SI; 5075 }) - 5076 StoreOffsetVec.begin(); 5077 ReorderIndices.push_back(Idx); 5078 } 5079 // Identity order (e.g., {0,1,2,3}) is modeled as an empty OrdersType in 5080 // reorderTopToBottom() and reorderBottomToTop(), so we are following the 5081 // same convention here. 5082 auto IsIdentityOrder = [](const OrdersType &Order) { 5083 for (unsigned Idx : seq<unsigned>(0, Order.size())) 5084 if (Idx != Order[Idx]) 5085 return false; 5086 return true; 5087 }; 5088 if (IsIdentityOrder(ReorderIndices)) 5089 ReorderIndices.clear(); 5090 5091 return true; 5092 } 5093 5094 #ifndef NDEBUG 5095 LLVM_DUMP_METHOD static void dumpOrder(const BoUpSLP::OrdersType &Order) { 5096 for (unsigned Idx : Order) 5097 dbgs() << Idx << ", "; 5098 dbgs() << "\n"; 5099 } 5100 #endif 5101 5102 SmallVector<BoUpSLP::OrdersType, 1> 5103 BoUpSLP::findExternalStoreUsersReorderIndices(TreeEntry *TE) const { 5104 unsigned NumLanes = TE->Scalars.size(); 5105 5106 DenseMap<Value *, SmallVector<StoreInst *>> PtrToStoresMap = 5107 collectUserStores(TE); 5108 5109 // Holds the reorder indices for each candidate store vector that is a user of 5110 // the current TreeEntry. 5111 SmallVector<OrdersType, 1> ExternalReorderIndices; 5112 5113 // Now inspect the stores collected per pointer and look for vectorization 5114 // candidates. For each candidate calculate the reorder index vector and push 5115 // it into `ExternalReorderIndices` 5116 for (const auto &Pair : PtrToStoresMap) { 5117 auto &StoresVec = Pair.second; 5118 // If we have fewer than NumLanes stores, then we can't form a vector. 5119 if (StoresVec.size() != NumLanes) 5120 continue; 5121 5122 // If the stores are not consecutive then abandon this StoresVec. 
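// Illustrative example (hypothetical lane offsets) of the indices computed by
// canFormVector above: each lane is mapped to its position in the
// offset-sorted order, and a resulting identity order is encoded as an empty
// OrdersType.
//   #include <algorithm>
//   #include <vector>
//   std::vector<int> LaneOffsets = {0, 2, 1, 3}; // per-lane offsets (assumed)
//   std::vector<int> Sorted = LaneOffsets;
//   std::sort(Sorted.begin(), Sorted.end());
//   std::vector<unsigned> ReorderIdx;
//   for (int Off : LaneOffsets)
//     ReorderIdx.push_back(std::find(Sorted.begin(), Sorted.end(), Off) -
//                          Sorted.begin());
//   // ReorderIdx == {0, 2, 1, 3}; offsets {0, 1, 2, 3} would give the
//   // identity, which canFormVector returns as an empty order.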
5123 OrdersType ReorderIndices; 5124 if (!canFormVector(StoresVec, ReorderIndices)) 5125 continue; 5126 5127 // We now know that the scalars in StoresVec can form a vector instruction, 5128 // so set the reorder indices. 5129 ExternalReorderIndices.push_back(ReorderIndices); 5130 } 5131 return ExternalReorderIndices; 5132 } 5133 5134 void BoUpSLP::buildTree(ArrayRef<Value *> Roots, 5135 const SmallDenseSet<Value *> &UserIgnoreLst) { 5136 deleteTree(); 5137 UserIgnoreList = &UserIgnoreLst; 5138 if (!allSameType(Roots)) 5139 return; 5140 buildTree_rec(Roots, 0, EdgeInfo()); 5141 } 5142 5143 void BoUpSLP::buildTree(ArrayRef<Value *> Roots) { 5144 deleteTree(); 5145 if (!allSameType(Roots)) 5146 return; 5147 buildTree_rec(Roots, 0, EdgeInfo()); 5148 } 5149 5150 /// \return true if the specified list of values has only one instruction that 5151 /// requires scheduling, false otherwise. 5152 #ifndef NDEBUG 5153 static bool needToScheduleSingleInstruction(ArrayRef<Value *> VL) { 5154 Value *NeedsScheduling = nullptr; 5155 for (Value *V : VL) { 5156 if (doesNotNeedToBeScheduled(V)) 5157 continue; 5158 if (!NeedsScheduling) { 5159 NeedsScheduling = V; 5160 continue; 5161 } 5162 return false; 5163 } 5164 return NeedsScheduling; 5165 } 5166 #endif 5167 5168 /// Generates key/subkey pair for the given value to provide effective sorting 5169 /// of the values and better detection of the vectorizable values sequences. The 5170 /// keys/subkeys can be used for better sorting of the values themselves (keys) 5171 /// and in values subgroups (subkeys). 5172 static std::pair<size_t, size_t> generateKeySubkey( 5173 Value *V, const TargetLibraryInfo *TLI, 5174 function_ref<hash_code(size_t, LoadInst *)> LoadsSubkeyGenerator, 5175 bool AllowAlternate) { 5176 hash_code Key = hash_value(V->getValueID() + 2); 5177 hash_code SubKey = hash_value(0); 5178 // Sort the loads by the distance between the pointers. 5179 if (auto *LI = dyn_cast<LoadInst>(V)) { 5180 Key = hash_combine(LI->getType(), hash_value(Instruction::Load), Key); 5181 if (LI->isSimple()) 5182 SubKey = hash_value(LoadsSubkeyGenerator(Key, LI)); 5183 else 5184 Key = SubKey = hash_value(LI); 5185 } else if (isVectorLikeInstWithConstOps(V)) { 5186 // Sort extracts by the vector operands. 5187 if (isa<ExtractElementInst, UndefValue>(V)) 5188 Key = hash_value(Value::UndefValueVal + 1); 5189 if (auto *EI = dyn_cast<ExtractElementInst>(V)) { 5190 if (!isUndefVector(EI->getVectorOperand()).all() && 5191 !isa<UndefValue>(EI->getIndexOperand())) 5192 SubKey = hash_value(EI->getVectorOperand()); 5193 } 5194 } else if (auto *I = dyn_cast<Instruction>(V)) { 5195 // Sort other instructions just by the opcodes except for CMPInst. 5196 // For CMP also sort by the predicate kind. 5197 if ((isa<BinaryOperator, CastInst>(I)) && 5198 isValidForAlternation(I->getOpcode())) { 5199 if (AllowAlternate) 5200 Key = hash_value(isa<BinaryOperator>(I) ? 1 : 0); 5201 else 5202 Key = hash_combine(hash_value(I->getOpcode()), Key); 5203 SubKey = hash_combine( 5204 hash_value(I->getOpcode()), hash_value(I->getType()), 5205 hash_value(isa<BinaryOperator>(I) 5206 ? I->getType() 5207 : cast<CastInst>(I)->getOperand(0)->getType())); 5208 // For casts, look through the only operand to improve compile time. 
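// Illustrative sketch (standalone; the helper and its parameters are
// hypothetical stand-ins) of the two-level grouping this function implements:
// the key buckets values coarsely (opcode family, parent block), while the
// subkey refines the bucket (e.g. same load base or same compare predicate),
// so values that may form one bundle hash together.
//   #include <cstddef>
//   #include <functional>
//   #include <utility>
//   std::pair<std::size_t, std::size_t>
//   makeKeySubkey(unsigned Opcode, const void *ParentBB, std::size_t Refine) {
//     std::size_t Key =
//         std::hash<unsigned>{}(Opcode) ^ std::hash<const void *>{}(ParentBB);
//     return {Key, Refine};
//   }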
5209 if (isa<CastInst>(I)) { 5210 std::pair<size_t, size_t> OpVals = 5211 generateKeySubkey(I->getOperand(0), TLI, LoadsSubkeyGenerator, 5212 /*AllowAlternate=*/true); 5213 Key = hash_combine(OpVals.first, Key); 5214 SubKey = hash_combine(OpVals.first, SubKey); 5215 } 5216 } else if (auto *CI = dyn_cast<CmpInst>(I)) { 5217 CmpInst::Predicate Pred = CI->getPredicate(); 5218 if (CI->isCommutative()) 5219 Pred = std::min(Pred, CmpInst::getInversePredicate(Pred)); 5220 CmpInst::Predicate SwapPred = CmpInst::getSwappedPredicate(Pred); 5221 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Pred), 5222 hash_value(SwapPred), 5223 hash_value(CI->getOperand(0)->getType())); 5224 } else if (auto *Call = dyn_cast<CallInst>(I)) { 5225 Intrinsic::ID ID = getVectorIntrinsicIDForCall(Call, TLI); 5226 if (isTriviallyVectorizable(ID)) { 5227 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(ID)); 5228 } else if (!VFDatabase(*Call).getMappings(*Call).empty()) { 5229 SubKey = hash_combine(hash_value(I->getOpcode()), 5230 hash_value(Call->getCalledFunction())); 5231 } else { 5232 Key = hash_combine(hash_value(Call), Key); 5233 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Call)); 5234 } 5235 for (const CallBase::BundleOpInfo &Op : Call->bundle_op_infos()) 5236 SubKey = hash_combine(hash_value(Op.Begin), hash_value(Op.End), 5237 hash_value(Op.Tag), SubKey); 5238 } else if (auto *Gep = dyn_cast<GetElementPtrInst>(I)) { 5239 if (Gep->getNumOperands() == 2 && isa<ConstantInt>(Gep->getOperand(1))) 5240 SubKey = hash_value(Gep->getPointerOperand()); 5241 else 5242 SubKey = hash_value(Gep); 5243 } else if (BinaryOperator::isIntDivRem(I->getOpcode()) && 5244 !isa<ConstantInt>(I->getOperand(1))) { 5245 // Do not try to vectorize instructions with potentially high cost. 5246 SubKey = hash_value(I); 5247 } else { 5248 SubKey = hash_value(I->getOpcode()); 5249 } 5250 Key = hash_combine(hash_value(I->getParent()), Key); 5251 } 5252 return std::make_pair(Key, SubKey); 5253 } 5254 5255 /// Checks if the specified instruction \p I is an alternate operation for 5256 /// the given \p MainOp and \p AltOp instructions. 5257 static bool isAlternateInstruction(const Instruction *I, 5258 const Instruction *MainOp, 5259 const Instruction *AltOp, 5260 const TargetLibraryInfo &TLI); 5261 5262 BoUpSLP::TreeEntry::EntryState BoUpSLP::getScalarsVectorizationState( 5263 InstructionsState &S, ArrayRef<Value *> VL, bool IsScatterVectorizeUserTE, 5264 OrdersType &CurrentOrder, SmallVectorImpl<Value *> &PointerOps) const { 5265 assert(S.MainOp && "Expected instructions with same/alternate opcodes only."); 5266 5267 unsigned ShuffleOrOp = 5268 S.isAltShuffle() ? (unsigned)Instruction::ShuffleVector : S.getOpcode(); 5269 auto *VL0 = cast<Instruction>(S.OpValue); 5270 switch (ShuffleOrOp) { 5271 case Instruction::PHI: { 5272 // Check for terminator values (e.g. invoke). 
5273 for (Value *V : VL) 5274 for (Value *Incoming : cast<PHINode>(V)->incoming_values()) { 5275 Instruction *Term = dyn_cast<Instruction>(Incoming); 5276 if (Term && Term->isTerminator()) { 5277 LLVM_DEBUG(dbgs() 5278 << "SLP: Need to swizzle PHINodes (terminator use).\n"); 5279 return TreeEntry::NeedToGather; 5280 } 5281 } 5282 5283 return TreeEntry::Vectorize; 5284 } 5285 case Instruction::ExtractValue: 5286 case Instruction::ExtractElement: { 5287 bool Reuse = canReuseExtract(VL, VL0, CurrentOrder); 5288 if (Reuse || !CurrentOrder.empty()) 5289 return TreeEntry::Vectorize; 5290 LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n"); 5291 return TreeEntry::NeedToGather; 5292 } 5293 case Instruction::InsertElement: { 5294 // Check that we have a buildvector and not a shuffle of 2 or more 5295 // different vectors. 5296 ValueSet SourceVectors; 5297 for (Value *V : VL) { 5298 SourceVectors.insert(cast<Instruction>(V)->getOperand(0)); 5299 assert(getInsertIndex(V) != std::nullopt && 5300 "Non-constant or undef index?"); 5301 } 5302 5303 if (count_if(VL, [&SourceVectors](Value *V) { 5304 return !SourceVectors.contains(V); 5305 }) >= 2) { 5306 // Found 2nd source vector - cancel. 5307 LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with " 5308 "different source vectors.\n"); 5309 return TreeEntry::NeedToGather; 5310 } 5311 5312 return TreeEntry::Vectorize; 5313 } 5314 case Instruction::Load: { 5315 // Check that a vectorized load would load the same memory as a scalar 5316 // load. For example, we don't want to vectorize loads that are smaller 5317 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 5318 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 5319 // from such a struct, we read/write packed bits disagreeing with the 5320 // unvectorized version. 
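// Illustrative example of the "packed type" distinction used by the gather
// diagnostics below: the value size must match the allocation size
// (DL and ScalarTy here refer to the names used in the code below).
//   bool Packed = DL->getTypeSizeInBits(ScalarTy) ==
//                 DL->getTypeAllocSizeInBits(ScalarTy);
//   // i32: 32 == 32 -> packed; i2: 2 != 8 -> not packed, so such loads are
//   // gathered rather than vectorized.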
5321 switch (canVectorizeLoads(VL, VL0, *TTI, *DL, *SE, *LI, *TLI, CurrentOrder, 5322 PointerOps)) { 5323 case LoadsState::Vectorize: 5324 return TreeEntry::Vectorize; 5325 case LoadsState::ScatterVectorize: 5326 return TreeEntry::ScatterVectorize; 5327 case LoadsState::PossibleStridedVectorize: 5328 return TreeEntry::PossibleStridedVectorize; 5329 case LoadsState::Gather: 5330 #ifndef NDEBUG 5331 Type *ScalarTy = VL0->getType(); 5332 if (DL->getTypeSizeInBits(ScalarTy) != 5333 DL->getTypeAllocSizeInBits(ScalarTy)) 5334 LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n"); 5335 else if (any_of(VL, 5336 [](Value *V) { return !cast<LoadInst>(V)->isSimple(); })) 5337 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n"); 5338 else 5339 LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n"); 5340 #endif // NDEBUG 5341 return TreeEntry::NeedToGather; 5342 } 5343 llvm_unreachable("Unexpected state of loads"); 5344 } 5345 case Instruction::ZExt: 5346 case Instruction::SExt: 5347 case Instruction::FPToUI: 5348 case Instruction::FPToSI: 5349 case Instruction::FPExt: 5350 case Instruction::PtrToInt: 5351 case Instruction::IntToPtr: 5352 case Instruction::SIToFP: 5353 case Instruction::UIToFP: 5354 case Instruction::Trunc: 5355 case Instruction::FPTrunc: 5356 case Instruction::BitCast: { 5357 Type *SrcTy = VL0->getOperand(0)->getType(); 5358 for (Value *V : VL) { 5359 Type *Ty = cast<Instruction>(V)->getOperand(0)->getType(); 5360 if (Ty != SrcTy || !isValidElementType(Ty)) { 5361 LLVM_DEBUG( 5362 dbgs() << "SLP: Gathering casts with different src types.\n"); 5363 return TreeEntry::NeedToGather; 5364 } 5365 } 5366 return TreeEntry::Vectorize; 5367 } 5368 case Instruction::ICmp: 5369 case Instruction::FCmp: { 5370 // Check that all of the compares have the same predicate. 5371 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 5372 CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0); 5373 Type *ComparedTy = VL0->getOperand(0)->getType(); 5374 for (Value *V : VL) { 5375 CmpInst *Cmp = cast<CmpInst>(V); 5376 if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) || 5377 Cmp->getOperand(0)->getType() != ComparedTy) { 5378 LLVM_DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n"); 5379 return TreeEntry::NeedToGather; 5380 } 5381 } 5382 return TreeEntry::Vectorize; 5383 } 5384 case Instruction::Select: 5385 case Instruction::FNeg: 5386 case Instruction::Add: 5387 case Instruction::FAdd: 5388 case Instruction::Sub: 5389 case Instruction::FSub: 5390 case Instruction::Mul: 5391 case Instruction::FMul: 5392 case Instruction::UDiv: 5393 case Instruction::SDiv: 5394 case Instruction::FDiv: 5395 case Instruction::URem: 5396 case Instruction::SRem: 5397 case Instruction::FRem: 5398 case Instruction::Shl: 5399 case Instruction::LShr: 5400 case Instruction::AShr: 5401 case Instruction::And: 5402 case Instruction::Or: 5403 case Instruction::Xor: 5404 return TreeEntry::Vectorize; 5405 case Instruction::GetElementPtr: { 5406 // We don't combine GEPs with complicated (nested) indexing. 5407 for (Value *V : VL) { 5408 auto *I = dyn_cast<GetElementPtrInst>(V); 5409 if (!I) 5410 continue; 5411 if (I->getNumOperands() != 2) { 5412 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n"); 5413 return TreeEntry::NeedToGather; 5414 } 5415 } 5416 5417 // We can't combine several GEPs into one vector if they operate on 5418 // different types. 
5419 Type *Ty0 = cast<GEPOperator>(VL0)->getSourceElementType(); 5420 for (Value *V : VL) { 5421 auto *GEP = dyn_cast<GEPOperator>(V); 5422 if (!GEP) 5423 continue; 5424 Type *CurTy = GEP->getSourceElementType(); 5425 if (Ty0 != CurTy) { 5426 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n"); 5427 return TreeEntry::NeedToGather; 5428 } 5429 } 5430 5431 // We don't combine GEPs with non-constant indexes. 5432 Type *Ty1 = VL0->getOperand(1)->getType(); 5433 for (Value *V : VL) { 5434 auto *I = dyn_cast<GetElementPtrInst>(V); 5435 if (!I) 5436 continue; 5437 auto *Op = I->getOperand(1); 5438 if ((!IsScatterVectorizeUserTE && !isa<ConstantInt>(Op)) || 5439 (Op->getType() != Ty1 && 5440 ((IsScatterVectorizeUserTE && !isa<ConstantInt>(Op)) || 5441 Op->getType()->getScalarSizeInBits() > 5442 DL->getIndexSizeInBits( 5443 V->getType()->getPointerAddressSpace())))) { 5444 LLVM_DEBUG( 5445 dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n"); 5446 return TreeEntry::NeedToGather; 5447 } 5448 } 5449 5450 return TreeEntry::Vectorize; 5451 } 5452 case Instruction::Store: { 5453 // Check if the stores are consecutive or if we need to swizzle them. 5454 llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType(); 5455 // Avoid types that are padded when being allocated as scalars, while 5456 // being packed together in a vector (such as i1). 5457 if (DL->getTypeSizeInBits(ScalarTy) != 5458 DL->getTypeAllocSizeInBits(ScalarTy)) { 5459 LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n"); 5460 return TreeEntry::NeedToGather; 5461 } 5462 // Make sure all stores in the bundle are simple - we can't vectorize 5463 // atomic or volatile stores. 5464 for (Value *V : VL) { 5465 auto *SI = cast<StoreInst>(V); 5466 if (!SI->isSimple()) { 5467 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n"); 5468 return TreeEntry::NeedToGather; 5469 } 5470 PointerOps.push_back(SI->getPointerOperand()); 5471 } 5472 5473 // Check the order of pointer operands. 5474 if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) { 5475 Value *Ptr0; 5476 Value *PtrN; 5477 if (CurrentOrder.empty()) { 5478 Ptr0 = PointerOps.front(); 5479 PtrN = PointerOps.back(); 5480 } else { 5481 Ptr0 = PointerOps[CurrentOrder.front()]; 5482 PtrN = PointerOps[CurrentOrder.back()]; 5483 } 5484 std::optional<int> Dist = 5485 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE); 5486 // Check that the sorted pointer operands are consecutive. 5487 if (static_cast<unsigned>(*Dist) == VL.size() - 1) 5488 return TreeEntry::Vectorize; 5489 } 5490 5491 LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n"); 5492 return TreeEntry::NeedToGather; 5493 } 5494 case Instruction::Call: { 5495 // Check if the calls are all to the same vectorizable intrinsic or 5496 // library function. 
5497 CallInst *CI = cast<CallInst>(VL0); 5498 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 5499 5500 VFShape Shape = VFShape::get( 5501 CI->getFunctionType(), 5502 ElementCount::getFixed(static_cast<unsigned int>(VL.size())), 5503 false /*HasGlobalPred*/); 5504 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 5505 5506 if (!VecFunc && !isTriviallyVectorizable(ID)) { 5507 LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n"); 5508 return TreeEntry::NeedToGather; 5509 } 5510 Function *F = CI->getCalledFunction(); 5511 unsigned NumArgs = CI->arg_size(); 5512 SmallVector<Value *, 4> ScalarArgs(NumArgs, nullptr); 5513 for (unsigned J = 0; J != NumArgs; ++J) 5514 if (isVectorIntrinsicWithScalarOpAtArg(ID, J)) 5515 ScalarArgs[J] = CI->getArgOperand(J); 5516 for (Value *V : VL) { 5517 CallInst *CI2 = dyn_cast<CallInst>(V); 5518 if (!CI2 || CI2->getCalledFunction() != F || 5519 getVectorIntrinsicIDForCall(CI2, TLI) != ID || 5520 (VecFunc && 5521 VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) || 5522 !CI->hasIdenticalOperandBundleSchema(*CI2)) { 5523 LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V 5524 << "\n"); 5525 return TreeEntry::NeedToGather; 5526 } 5527 // Some intrinsics have scalar arguments and should be same in order for 5528 // them to be vectorized. 5529 for (unsigned J = 0; J != NumArgs; ++J) { 5530 if (isVectorIntrinsicWithScalarOpAtArg(ID, J)) { 5531 Value *A1J = CI2->getArgOperand(J); 5532 if (ScalarArgs[J] != A1J) { 5533 LLVM_DEBUG(dbgs() 5534 << "SLP: mismatched arguments in call:" << *CI 5535 << " argument " << ScalarArgs[J] << "!=" << A1J << "\n"); 5536 return TreeEntry::NeedToGather; 5537 } 5538 } 5539 } 5540 // Verify that the bundle operands are identical between the two calls. 5541 if (CI->hasOperandBundles() && 5542 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(), 5543 CI->op_begin() + CI->getBundleOperandsEndIndex(), 5544 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) { 5545 LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI 5546 << "!=" << *V << '\n'); 5547 return TreeEntry::NeedToGather; 5548 } 5549 } 5550 5551 return TreeEntry::Vectorize; 5552 } 5553 case Instruction::ShuffleVector: { 5554 // If this is not an alternate sequence of opcode like add-sub 5555 // then do not vectorize this instruction. 5556 if (!S.isAltShuffle()) { 5557 LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n"); 5558 return TreeEntry::NeedToGather; 5559 } 5560 return TreeEntry::Vectorize; 5561 } 5562 default: 5563 LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n"); 5564 return TreeEntry::NeedToGather; 5565 } 5566 } 5567 5568 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, 5569 const EdgeInfo &UserTreeIdx) { 5570 assert((allConstant(VL) || allSameType(VL)) && "Invalid types!"); 5571 5572 SmallVector<int> ReuseShuffleIndicies; 5573 SmallVector<Value *> UniqueValues; 5574 SmallVector<Value *> NonUniqueValueVL; 5575 auto TryToFindDuplicates = [&](const InstructionsState &S, 5576 bool DoNotFail = false) { 5577 // Check that every instruction appears once in this bundle. 5578 DenseMap<Value *, unsigned> UniquePositions(VL.size()); 5579 for (Value *V : VL) { 5580 if (isConstant(V)) { 5581 ReuseShuffleIndicies.emplace_back( 5582 isa<UndefValue>(V) ? 
PoisonMaskElem : UniqueValues.size()); 5583 UniqueValues.emplace_back(V); 5584 continue; 5585 } 5586 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 5587 ReuseShuffleIndicies.emplace_back(Res.first->second); 5588 if (Res.second) 5589 UniqueValues.emplace_back(V); 5590 } 5591 size_t NumUniqueScalarValues = UniqueValues.size(); 5592 if (NumUniqueScalarValues == VL.size()) { 5593 ReuseShuffleIndicies.clear(); 5594 } else { 5595 LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n"); 5596 if (NumUniqueScalarValues <= 1 || 5597 (UniquePositions.size() == 1 && all_of(UniqueValues, 5598 [](Value *V) { 5599 return isa<UndefValue>(V) || 5600 !isConstant(V); 5601 })) || 5602 !llvm::has_single_bit<uint32_t>(NumUniqueScalarValues)) { 5603 if (DoNotFail && UniquePositions.size() > 1 && 5604 NumUniqueScalarValues > 1 && S.MainOp->isSafeToRemove() && 5605 all_of(UniqueValues, [=](Value *V) { 5606 return isa<ExtractElementInst>(V) || 5607 areAllUsersVectorized(cast<Instruction>(V), 5608 UserIgnoreList); 5609 })) { 5610 unsigned PWSz = PowerOf2Ceil(UniqueValues.size()); 5611 if (PWSz == VL.size()) { 5612 ReuseShuffleIndicies.clear(); 5613 } else { 5614 NonUniqueValueVL.assign(UniqueValues.begin(), UniqueValues.end()); 5615 NonUniqueValueVL.append(PWSz - UniqueValues.size(), 5616 UniqueValues.back()); 5617 VL = NonUniqueValueVL; 5618 } 5619 return true; 5620 } 5621 LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); 5622 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5623 return false; 5624 } 5625 VL = UniqueValues; 5626 } 5627 return true; 5628 }; 5629 5630 InstructionsState S = getSameOpcode(VL, *TLI); 5631 5632 // Don't vectorize ephemeral values. 5633 if (!EphValues.empty()) { 5634 for (Value *V : VL) { 5635 if (EphValues.count(V)) { 5636 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 5637 << ") is ephemeral.\n"); 5638 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5639 return; 5640 } 5641 } 5642 } 5643 5644 // Gather if we hit the RecursionMaxDepth, unless this is a load (or z/sext of 5645 // a load), in which case peek through to include it in the tree, without 5646 // ballooning over-budget. 5647 if (Depth >= RecursionMaxDepth && 5648 !(S.MainOp && isa<Instruction>(S.MainOp) && S.MainOp == S.AltOp && 5649 VL.size() >= 4 && 5650 (match(S.MainOp, m_Load(m_Value())) || all_of(VL, [&S](const Value *I) { 5651 return match(I, 5652 m_OneUse(m_ZExtOrSExt(m_OneUse(m_Load(m_Value()))))) && 5653 cast<Instruction>(I)->getOpcode() == 5654 cast<Instruction>(S.MainOp)->getOpcode(); 5655 })))) { 5656 LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n"); 5657 if (TryToFindDuplicates(S)) 5658 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5659 ReuseShuffleIndicies); 5660 return; 5661 } 5662 5663 // Don't handle scalable vectors 5664 if (S.getOpcode() == Instruction::ExtractElement && 5665 isa<ScalableVectorType>( 5666 cast<ExtractElementInst>(S.OpValue)->getVectorOperandType())) { 5667 LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n"); 5668 if (TryToFindDuplicates(S)) 5669 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5670 ReuseShuffleIndicies); 5671 return; 5672 } 5673 5674 // Don't handle vectors. 
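// Illustrative example (standalone, hypothetical bundle) for
// TryToFindDuplicates above: VL = {a, b, a, c} is shrunk to
// UniqueValues = {a, b, c} and the repeats are remembered in
// ReuseShuffleIndicies = {0, 1, 0, 2}, so the bundle can still be
// materialized with a single shuffle of the unique vector.
//   #include <map>
//   #include <string>
//   #include <vector>
//   std::vector<std::string> Bundle = {"a", "b", "a", "c"};
//   std::vector<std::string> Unique;
//   std::vector<int> ReuseIdx;
//   std::map<std::string, int> Pos;
//   for (const std::string &V : Bundle) {
//     auto Res = Pos.emplace(V, Unique.size());
//     ReuseIdx.push_back(Res.first->second);
//     if (Res.second)
//       Unique.push_back(V);
//   }
//   // Unique == {"a", "b", "c"}, ReuseIdx == {0, 1, 0, 2}.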
5675 if (S.OpValue->getType()->isVectorTy() && 5676 !isa<InsertElementInst>(S.OpValue)) { 5677 LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n"); 5678 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5679 return; 5680 } 5681 5682 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) 5683 if (SI->getValueOperand()->getType()->isVectorTy()) { 5684 LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n"); 5685 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5686 return; 5687 } 5688 5689 // If all of the operands are identical or constant we have a simple solution. 5690 // If we deal with insert/extract instructions, they all must have constant 5691 // indices, otherwise we should gather them, not try to vectorize. 5692 // If alternate op node with 2 elements with gathered operands - do not 5693 // vectorize. 5694 auto &&NotProfitableForVectorization = [&S, this, 5695 Depth](ArrayRef<Value *> VL) { 5696 if (!S.getOpcode() || !S.isAltShuffle() || VL.size() > 2) 5697 return false; 5698 if (VectorizableTree.size() < MinTreeSize) 5699 return false; 5700 if (Depth >= RecursionMaxDepth - 1) 5701 return true; 5702 // Check if all operands are extracts, part of vector node or can build a 5703 // regular vectorize node. 5704 SmallVector<unsigned, 2> InstsCount(VL.size(), 0); 5705 for (Value *V : VL) { 5706 auto *I = cast<Instruction>(V); 5707 InstsCount.push_back(count_if(I->operand_values(), [](Value *Op) { 5708 return isa<Instruction>(Op) || isVectorLikeInstWithConstOps(Op); 5709 })); 5710 } 5711 bool IsCommutative = isCommutative(S.MainOp) || isCommutative(S.AltOp); 5712 if ((IsCommutative && 5713 std::accumulate(InstsCount.begin(), InstsCount.end(), 0) < 2) || 5714 (!IsCommutative && 5715 all_of(InstsCount, [](unsigned ICnt) { return ICnt < 2; }))) 5716 return true; 5717 assert(VL.size() == 2 && "Expected only 2 alternate op instructions."); 5718 SmallVector<SmallVector<std::pair<Value *, Value *>>> Candidates; 5719 auto *I1 = cast<Instruction>(VL.front()); 5720 auto *I2 = cast<Instruction>(VL.back()); 5721 for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op) 5722 Candidates.emplace_back().emplace_back(I1->getOperand(Op), 5723 I2->getOperand(Op)); 5724 if (static_cast<unsigned>(count_if( 5725 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) { 5726 return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat); 5727 })) >= S.MainOp->getNumOperands() / 2) 5728 return false; 5729 if (S.MainOp->getNumOperands() > 2) 5730 return true; 5731 if (IsCommutative) { 5732 // Check permuted operands. 
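// Illustrative example (hypothetical scalars): for the two-element bundle
// {x + y, y + x} the direct operand pairings (x, y) and (y, x) do not score
// well, but the permuted pairings built below are splats, so the bundle is
// still considered profitable and this predicate returns false.
//   #include <string>
//   #include <utility>
//   #include <vector>
//   std::vector<std::string> Ops1 = {"x", "y"}, Ops2 = {"y", "x"};
//   std::vector<std::pair<std::string, std::string>> Permuted;
//   for (unsigned Op = 0, E = Ops1.size(); Op < E; ++Op)
//     Permuted.emplace_back(Ops1[Op], Ops2[(Op + 1) % E]);
//   // Permuted == {(x, x), (y, y)}: both pairs splat, matching the
//   // LookAheadHeuristics::ScoreSplat check below.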
5733 Candidates.clear(); 5734 for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op) 5735 Candidates.emplace_back().emplace_back(I1->getOperand(Op), 5736 I2->getOperand((Op + 1) % E)); 5737 if (any_of( 5738 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) { 5739 return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat); 5740 })) 5741 return false; 5742 } 5743 return true; 5744 }; 5745 SmallVector<unsigned> SortedIndices; 5746 BasicBlock *BB = nullptr; 5747 bool IsScatterVectorizeUserTE = 5748 UserTreeIdx.UserTE && 5749 (UserTreeIdx.UserTE->State == TreeEntry::ScatterVectorize || 5750 UserTreeIdx.UserTE->State == TreeEntry::PossibleStridedVectorize); 5751 bool AreAllSameInsts = 5752 (S.getOpcode() && allSameBlock(VL)) || 5753 (S.OpValue->getType()->isPointerTy() && IsScatterVectorizeUserTE && 5754 VL.size() > 2 && 5755 all_of(VL, 5756 [&BB](Value *V) { 5757 auto *I = dyn_cast<GetElementPtrInst>(V); 5758 if (!I) 5759 return doesNotNeedToBeScheduled(V); 5760 if (!BB) 5761 BB = I->getParent(); 5762 return BB == I->getParent() && I->getNumOperands() == 2; 5763 }) && 5764 BB && 5765 sortPtrAccesses(VL, UserTreeIdx.UserTE->getMainOp()->getType(), *DL, *SE, 5766 SortedIndices)); 5767 if (!AreAllSameInsts || allConstant(VL) || isSplat(VL) || 5768 (isa<InsertElementInst, ExtractValueInst, ExtractElementInst>( 5769 S.OpValue) && 5770 !all_of(VL, isVectorLikeInstWithConstOps)) || 5771 NotProfitableForVectorization(VL)) { 5772 LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O, small shuffle. \n"); 5773 if (TryToFindDuplicates(S)) 5774 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5775 ReuseShuffleIndicies); 5776 return; 5777 } 5778 5779 // We now know that this is a vector of instructions of the same type from 5780 // the same block. 5781 5782 // Check if this is a duplicate of another entry. 5783 if (TreeEntry *E = getTreeEntry(S.OpValue)) { 5784 LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n"); 5785 if (!E->isSame(VL)) { 5786 auto It = MultiNodeScalars.find(S.OpValue); 5787 if (It != MultiNodeScalars.end()) { 5788 auto *TEIt = find_if(It->getSecond(), 5789 [&](TreeEntry *ME) { return ME->isSame(VL); }); 5790 if (TEIt != It->getSecond().end()) 5791 E = *TEIt; 5792 else 5793 E = nullptr; 5794 } else { 5795 E = nullptr; 5796 } 5797 } 5798 if (!E) { 5799 if (!doesNotNeedToBeScheduled(S.OpValue)) { 5800 LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n"); 5801 if (TryToFindDuplicates(S)) 5802 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5803 ReuseShuffleIndicies); 5804 return; 5805 } 5806 } else { 5807 // Record the reuse of the tree node. FIXME, currently this is only used 5808 // to properly draw the graph rather than for the actual vectorization. 5809 E->UserTreeIndices.push_back(UserTreeIdx); 5810 LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue 5811 << ".\n"); 5812 return; 5813 } 5814 } 5815 5816 // Check that none of the instructions in the bundle are already in the tree. 5817 for (Value *V : VL) { 5818 if ((!IsScatterVectorizeUserTE && !isa<Instruction>(V)) || 5819 doesNotNeedToBeScheduled(V)) 5820 continue; 5821 if (getTreeEntry(V)) { 5822 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 5823 << ") is already in tree.\n"); 5824 if (TryToFindDuplicates(S)) 5825 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5826 ReuseShuffleIndicies); 5827 return; 5828 } 5829 } 5830 5831 // The reduction nodes (stored in UserIgnoreList) also should stay scalar. 
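  // Background note (assumption based on how UserIgnoreList is populated by
  // the reduction vectorizer): it usually holds the scalar operations of a
  // horizontal reduction, e.g. the chain of adds feeding a sum. Those scalars
  // are replaced by the reduction emission itself, so if one of them shows up
  // in this bundle, the bundle is gathered rather than pulled into this tree.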
5832 if (UserIgnoreList && !UserIgnoreList->empty()) { 5833 for (Value *V : VL) { 5834 if (UserIgnoreList && UserIgnoreList->contains(V)) { 5835 LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n"); 5836 if (TryToFindDuplicates(S)) 5837 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5838 ReuseShuffleIndicies); 5839 return; 5840 } 5841 } 5842 } 5843 5844 // Special processing for sorted pointers for ScatterVectorize node with 5845 // constant indeces only. 5846 if (AreAllSameInsts && UserTreeIdx.UserTE && 5847 (UserTreeIdx.UserTE->State == TreeEntry::ScatterVectorize || 5848 UserTreeIdx.UserTE->State == TreeEntry::PossibleStridedVectorize) && 5849 !(S.getOpcode() && allSameBlock(VL))) { 5850 assert(S.OpValue->getType()->isPointerTy() && 5851 count_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); }) >= 5852 2 && 5853 "Expected pointers only."); 5854 // Reset S to make it GetElementPtr kind of node. 5855 const auto *It = find_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); }); 5856 assert(It != VL.end() && "Expected at least one GEP."); 5857 S = getSameOpcode(*It, *TLI); 5858 } 5859 5860 // Check that all of the users of the scalars that we want to vectorize are 5861 // schedulable. 5862 auto *VL0 = cast<Instruction>(S.OpValue); 5863 BB = VL0->getParent(); 5864 5865 if (!DT->isReachableFromEntry(BB)) { 5866 // Don't go into unreachable blocks. They may contain instructions with 5867 // dependency cycles which confuse the final scheduling. 5868 LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n"); 5869 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5870 return; 5871 } 5872 5873 // Don't go into catchswitch blocks, which can happen with PHIs. 5874 // Such blocks can only have PHIs and the catchswitch. There is no 5875 // place to insert a shuffle if we need to, so just avoid that issue. 5876 if (isa<CatchSwitchInst>(BB->getTerminator())) { 5877 LLVM_DEBUG(dbgs() << "SLP: bundle in catchswitch block.\n"); 5878 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5879 return; 5880 } 5881 5882 // Check that every instruction appears once in this bundle. 5883 if (!TryToFindDuplicates(S, /*DoNotFail=*/true)) 5884 return; 5885 5886 // Perform specific checks for each particular instruction kind. 5887 OrdersType CurrentOrder; 5888 SmallVector<Value *> PointerOps; 5889 TreeEntry::EntryState State = getScalarsVectorizationState( 5890 S, VL, IsScatterVectorizeUserTE, CurrentOrder, PointerOps); 5891 if (State == TreeEntry::NeedToGather) { 5892 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5893 ReuseShuffleIndicies); 5894 return; 5895 } 5896 5897 auto &BSRef = BlocksSchedules[BB]; 5898 if (!BSRef) 5899 BSRef = std::make_unique<BlockScheduling>(BB); 5900 5901 BlockScheduling &BS = *BSRef; 5902 5903 std::optional<ScheduleData *> Bundle = 5904 BS.tryScheduleBundle(UniqueValues, this, S); 5905 #ifdef EXPENSIVE_CHECKS 5906 // Make sure we didn't break any internal invariants 5907 BS.verify(); 5908 #endif 5909 if (!Bundle) { 5910 LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n"); 5911 assert((!BS.getScheduleData(VL0) || 5912 !BS.getScheduleData(VL0)->isPartOfBundle()) && 5913 "tryScheduleBundle should cancelScheduling on failure"); 5914 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5915 ReuseShuffleIndicies); 5916 return; 5917 } 5918 LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); 5919 5920 unsigned ShuffleOrOp = S.isAltShuffle() ? 
5921 (unsigned) Instruction::ShuffleVector : S.getOpcode(); 5922 switch (ShuffleOrOp) { 5923 case Instruction::PHI: { 5924 auto *PH = cast<PHINode>(VL0); 5925 5926 TreeEntry *TE = 5927 newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies); 5928 LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); 5929 5930 // Keeps the reordered operands to avoid code duplication. 5931 SmallVector<ValueList, 2> OperandsVec; 5932 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) { 5933 if (!DT->isReachableFromEntry(PH->getIncomingBlock(I))) { 5934 ValueList Operands(VL.size(), PoisonValue::get(PH->getType())); 5935 TE->setOperand(I, Operands); 5936 OperandsVec.push_back(Operands); 5937 continue; 5938 } 5939 ValueList Operands; 5940 // Prepare the operand vector. 5941 for (Value *V : VL) 5942 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock( 5943 PH->getIncomingBlock(I))); 5944 TE->setOperand(I, Operands); 5945 OperandsVec.push_back(Operands); 5946 } 5947 for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx) 5948 buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx}); 5949 return; 5950 } 5951 case Instruction::ExtractValue: 5952 case Instruction::ExtractElement: { 5953 if (CurrentOrder.empty()) { 5954 LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n"); 5955 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 5956 ReuseShuffleIndicies); 5957 // This is a special case, as it does not gather, but at the same time 5958 // we are not extending buildTree_rec() towards the operands. 5959 ValueList Op0; 5960 Op0.assign(VL.size(), VL0->getOperand(0)); 5961 VectorizableTree.back()->setOperand(0, Op0); 5962 return; 5963 } 5964 LLVM_DEBUG({ 5965 dbgs() << "SLP: Reusing or shuffling of reordered extract sequence " 5966 "with order"; 5967 for (unsigned Idx : CurrentOrder) 5968 dbgs() << " " << Idx; 5969 dbgs() << "\n"; 5970 }); 5971 fixupOrderingIndices(CurrentOrder); 5972 // Insert new order with initial value 0, if it does not exist, 5973 // otherwise return the iterator to the existing one. 5974 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 5975 ReuseShuffleIndicies, CurrentOrder); 5976 // This is a special case, as it does not gather, but at the same time 5977 // we are not extending buildTree_rec() towards the operands. 
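      // Clarifying note (added): the only operand recorded for an extract
      // bundle is the shared source vector/aggregate, repeated once per lane,
      // as done right below via Op0.assign(VL.size(), VL0->getOperand(0));
      // that value already exists in the IR, so there is nothing to recurse
      // into for it.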
5978 ValueList Op0; 5979 Op0.assign(VL.size(), VL0->getOperand(0)); 5980 VectorizableTree.back()->setOperand(0, Op0); 5981 return; 5982 } 5983 case Instruction::InsertElement: { 5984 assert(ReuseShuffleIndicies.empty() && "All inserts should be unique"); 5985 5986 auto OrdCompare = [](const std::pair<int, int> &P1, 5987 const std::pair<int, int> &P2) { 5988 return P1.first > P2.first; 5989 }; 5990 PriorityQueue<std::pair<int, int>, SmallVector<std::pair<int, int>>, 5991 decltype(OrdCompare)> 5992 Indices(OrdCompare); 5993 for (int I = 0, E = VL.size(); I < E; ++I) { 5994 unsigned Idx = *getInsertIndex(VL[I]); 5995 Indices.emplace(Idx, I); 5996 } 5997 OrdersType CurrentOrder(VL.size(), VL.size()); 5998 bool IsIdentity = true; 5999 for (int I = 0, E = VL.size(); I < E; ++I) { 6000 CurrentOrder[Indices.top().second] = I; 6001 IsIdentity &= Indices.top().second == I; 6002 Indices.pop(); 6003 } 6004 if (IsIdentity) 6005 CurrentOrder.clear(); 6006 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6007 std::nullopt, CurrentOrder); 6008 LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n"); 6009 6010 constexpr int NumOps = 2; 6011 ValueList VectorOperands[NumOps]; 6012 for (int I = 0; I < NumOps; ++I) { 6013 for (Value *V : VL) 6014 VectorOperands[I].push_back(cast<Instruction>(V)->getOperand(I)); 6015 6016 TE->setOperand(I, VectorOperands[I]); 6017 } 6018 buildTree_rec(VectorOperands[NumOps - 1], Depth + 1, {TE, NumOps - 1}); 6019 return; 6020 } 6021 case Instruction::Load: { 6022 // Check that a vectorized load would load the same memory as a scalar 6023 // load. For example, we don't want to vectorize loads that are smaller 6024 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 6025 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 6026 // from such a struct, we read/write packed bits disagreeing with the 6027 // unvectorized version. 6028 TreeEntry *TE = nullptr; 6029 fixupOrderingIndices(CurrentOrder); 6030 switch (State) { 6031 case TreeEntry::Vectorize: 6032 if (CurrentOrder.empty()) { 6033 // Original loads are consecutive and does not require reordering. 6034 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6035 ReuseShuffleIndicies); 6036 LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n"); 6037 } else { 6038 // Need to reorder. 6039 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6040 ReuseShuffleIndicies, CurrentOrder); 6041 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n"); 6042 } 6043 TE->setOperandsInOrder(); 6044 break; 6045 case TreeEntry::PossibleStridedVectorize: 6046 // Vectorizing non-consecutive loads with `llvm.masked.gather`. 6047 if (CurrentOrder.empty()) { 6048 TE = newTreeEntry(VL, TreeEntry::PossibleStridedVectorize, Bundle, S, 6049 UserTreeIdx, ReuseShuffleIndicies); 6050 } else { 6051 TE = newTreeEntry(VL, TreeEntry::PossibleStridedVectorize, Bundle, S, 6052 UserTreeIdx, ReuseShuffleIndicies, CurrentOrder); 6053 } 6054 TE->setOperandsInOrder(); 6055 buildTree_rec(PointerOps, Depth + 1, {TE, 0}); 6056 LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n"); 6057 break; 6058 case TreeEntry::ScatterVectorize: 6059 // Vectorizing non-consecutive loads with `llvm.masked.gather`. 
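      // Hand-written sketch (illustrative only, not emitted by the code as
      // shown): four i32 loads from unrelated addresses %p0..%p3 can later be
      // lowered to a single gather, roughly
      //   %ptrs = <4 x ptr> built from %p0..%p3
      //   %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(
      //            <4 x ptr> %ptrs, i32 4,
      //            <4 x i1> <i1 true, i1 true, i1 true, i1 true>,
      //            <4 x i32> poison)
      // instead of four scalar loads plus insertelements.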
6060 TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S, 6061 UserTreeIdx, ReuseShuffleIndicies); 6062 TE->setOperandsInOrder(); 6063 buildTree_rec(PointerOps, Depth + 1, {TE, 0}); 6064 LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n"); 6065 break; 6066 case TreeEntry::NeedToGather: 6067 llvm_unreachable("Unexpected loads state."); 6068 } 6069 return; 6070 } 6071 case Instruction::ZExt: 6072 case Instruction::SExt: 6073 case Instruction::FPToUI: 6074 case Instruction::FPToSI: 6075 case Instruction::FPExt: 6076 case Instruction::PtrToInt: 6077 case Instruction::IntToPtr: 6078 case Instruction::SIToFP: 6079 case Instruction::UIToFP: 6080 case Instruction::Trunc: 6081 case Instruction::FPTrunc: 6082 case Instruction::BitCast: { 6083 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6084 ReuseShuffleIndicies); 6085 LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n"); 6086 6087 TE->setOperandsInOrder(); 6088 for (unsigned I : seq<unsigned>(0, VL0->getNumOperands())) { 6089 ValueList Operands; 6090 // Prepare the operand vector. 6091 for (Value *V : VL) 6092 Operands.push_back(cast<Instruction>(V)->getOperand(I)); 6093 6094 buildTree_rec(Operands, Depth + 1, {TE, I}); 6095 } 6096 return; 6097 } 6098 case Instruction::ICmp: 6099 case Instruction::FCmp: { 6100 // Check that all of the compares have the same predicate. 6101 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 6102 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6103 ReuseShuffleIndicies); 6104 LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n"); 6105 6106 ValueList Left, Right; 6107 if (cast<CmpInst>(VL0)->isCommutative()) { 6108 // Commutative predicate - collect + sort operands of the instructions 6109 // so that each side is more likely to have the same opcode. 6110 assert(P0 == CmpInst::getSwappedPredicate(P0) && 6111 "Commutative Predicate mismatch"); 6112 reorderInputsAccordingToOpcode(VL, Left, Right, *TLI, *DL, *SE, *this); 6113 } else { 6114 // Collect operands - commute if it uses the swapped predicate. 6115 for (Value *V : VL) { 6116 auto *Cmp = cast<CmpInst>(V); 6117 Value *LHS = Cmp->getOperand(0); 6118 Value *RHS = Cmp->getOperand(1); 6119 if (Cmp->getPredicate() != P0) 6120 std::swap(LHS, RHS); 6121 Left.push_back(LHS); 6122 Right.push_back(RHS); 6123 } 6124 } 6125 TE->setOperand(0, Left); 6126 TE->setOperand(1, Right); 6127 buildTree_rec(Left, Depth + 1, {TE, 0}); 6128 buildTree_rec(Right, Depth + 1, {TE, 1}); 6129 return; 6130 } 6131 case Instruction::Select: 6132 case Instruction::FNeg: 6133 case Instruction::Add: 6134 case Instruction::FAdd: 6135 case Instruction::Sub: 6136 case Instruction::FSub: 6137 case Instruction::Mul: 6138 case Instruction::FMul: 6139 case Instruction::UDiv: 6140 case Instruction::SDiv: 6141 case Instruction::FDiv: 6142 case Instruction::URem: 6143 case Instruction::SRem: 6144 case Instruction::FRem: 6145 case Instruction::Shl: 6146 case Instruction::LShr: 6147 case Instruction::AShr: 6148 case Instruction::And: 6149 case Instruction::Or: 6150 case Instruction::Xor: { 6151 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6152 ReuseShuffleIndicies); 6153 LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n"); 6154 6155 // Sort operands of the instructions so that each side is more likely to 6156 // have the same opcode. 
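    // Illustrative example (not from the original source): for the bundle
    // {A[0] + B[0], B[1] + A[1]} a naive per-lane split gives
    // Left = {A[0], B[1]}, Right = {B[0], A[1]}. Since the operation is
    // commutative, reorderInputsAccordingToOpcode may swap the second lane so
    // that Left = {A[0], A[1]} and Right = {B[0], B[1]}, which gives both
    // operand bundles a much better chance to be vectorized (e.g. as
    // consecutive loads).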
6157 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) { 6158 ValueList Left, Right; 6159 reorderInputsAccordingToOpcode(VL, Left, Right, *TLI, *DL, *SE, *this); 6160 TE->setOperand(0, Left); 6161 TE->setOperand(1, Right); 6162 buildTree_rec(Left, Depth + 1, {TE, 0}); 6163 buildTree_rec(Right, Depth + 1, {TE, 1}); 6164 return; 6165 } 6166 6167 TE->setOperandsInOrder(); 6168 for (unsigned I : seq<unsigned>(0, VL0->getNumOperands())) { 6169 ValueList Operands; 6170 // Prepare the operand vector. 6171 for (Value *V : VL) 6172 Operands.push_back(cast<Instruction>(V)->getOperand(I)); 6173 6174 buildTree_rec(Operands, Depth + 1, {TE, I}); 6175 } 6176 return; 6177 } 6178 case Instruction::GetElementPtr: { 6179 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6180 ReuseShuffleIndicies); 6181 LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n"); 6182 SmallVector<ValueList, 2> Operands(2); 6183 // Prepare the operand vector for pointer operands. 6184 for (Value *V : VL) { 6185 auto *GEP = dyn_cast<GetElementPtrInst>(V); 6186 if (!GEP) { 6187 Operands.front().push_back(V); 6188 continue; 6189 } 6190 Operands.front().push_back(GEP->getPointerOperand()); 6191 } 6192 TE->setOperand(0, Operands.front()); 6193 // Need to cast all indices to the same type before vectorization to 6194 // avoid crash. 6195 // Required to be able to find correct matches between different gather 6196 // nodes and reuse the vectorized values rather than trying to gather them 6197 // again. 6198 int IndexIdx = 1; 6199 Type *VL0Ty = VL0->getOperand(IndexIdx)->getType(); 6200 Type *Ty = all_of(VL, 6201 [VL0Ty, IndexIdx](Value *V) { 6202 auto *GEP = dyn_cast<GetElementPtrInst>(V); 6203 if (!GEP) 6204 return true; 6205 return VL0Ty == GEP->getOperand(IndexIdx)->getType(); 6206 }) 6207 ? VL0Ty 6208 : DL->getIndexType(cast<GetElementPtrInst>(VL0) 6209 ->getPointerOperandType() 6210 ->getScalarType()); 6211 // Prepare the operand vector. 6212 for (Value *V : VL) { 6213 auto *I = dyn_cast<GetElementPtrInst>(V); 6214 if (!I) { 6215 Operands.back().push_back( 6216 ConstantInt::get(Ty, 0, /*isSigned=*/false)); 6217 continue; 6218 } 6219 auto *Op = I->getOperand(IndexIdx); 6220 auto *CI = dyn_cast<ConstantInt>(Op); 6221 if (!CI) 6222 Operands.back().push_back(Op); 6223 else 6224 Operands.back().push_back(ConstantFoldIntegerCast( 6225 CI, Ty, CI->getValue().isSignBitSet(), *DL)); 6226 } 6227 TE->setOperand(IndexIdx, Operands.back()); 6228 6229 for (unsigned I = 0, Ops = Operands.size(); I < Ops; ++I) 6230 buildTree_rec(Operands[I], Depth + 1, {TE, I}); 6231 return; 6232 } 6233 case Instruction::Store: { 6234 // Check if the stores are consecutive or if we need to swizzle them. 6235 ValueList Operands(VL.size()); 6236 auto *OIter = Operands.begin(); 6237 for (Value *V : VL) { 6238 auto *SI = cast<StoreInst>(V); 6239 *OIter = SI->getValueOperand(); 6240 ++OIter; 6241 } 6242 // Check that the sorted pointer operands are consecutive. 6243 if (CurrentOrder.empty()) { 6244 // Original stores are consecutive and does not require reordering. 
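        // Illustrative example (not from the original source): stores to
        // p+0, p+1, p+2, p+3 appearing in the bundle in that order reach this
        // branch with an empty CurrentOrder; if the bundle were e.g.
        // {p+2, p+0, p+3, p+1}, CurrentOrder would be non-empty and the else
        // branch below records it so the stored values can be shuffled into
        // memory order before emitting one wide store.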
6245 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6246 ReuseShuffleIndicies); 6247 TE->setOperandsInOrder(); 6248 buildTree_rec(Operands, Depth + 1, {TE, 0}); 6249 LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n"); 6250 } else { 6251 fixupOrderingIndices(CurrentOrder); 6252 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6253 ReuseShuffleIndicies, CurrentOrder); 6254 TE->setOperandsInOrder(); 6255 buildTree_rec(Operands, Depth + 1, {TE, 0}); 6256 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n"); 6257 } 6258 return; 6259 } 6260 case Instruction::Call: { 6261 // Check if the calls are all to the same vectorizable intrinsic or 6262 // library function. 6263 CallInst *CI = cast<CallInst>(VL0); 6264 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6265 6266 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6267 ReuseShuffleIndicies); 6268 TE->setOperandsInOrder(); 6269 for (unsigned I : seq<unsigned>(0, CI->arg_size())) { 6270 // For scalar operands no need to create an entry since no need to 6271 // vectorize it. 6272 if (isVectorIntrinsicWithScalarOpAtArg(ID, I)) 6273 continue; 6274 ValueList Operands; 6275 // Prepare the operand vector. 6276 for (Value *V : VL) { 6277 auto *CI2 = cast<CallInst>(V); 6278 Operands.push_back(CI2->getArgOperand(I)); 6279 } 6280 buildTree_rec(Operands, Depth + 1, {TE, I}); 6281 } 6282 return; 6283 } 6284 case Instruction::ShuffleVector: { 6285 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6286 ReuseShuffleIndicies); 6287 LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n"); 6288 6289 // Reorder operands if reordering would enable vectorization. 6290 auto *CI = dyn_cast<CmpInst>(VL0); 6291 if (isa<BinaryOperator>(VL0) || CI) { 6292 ValueList Left, Right; 6293 if (!CI || all_of(VL, [](Value *V) { 6294 return cast<CmpInst>(V)->isCommutative(); 6295 })) { 6296 reorderInputsAccordingToOpcode(VL, Left, Right, *TLI, *DL, *SE, 6297 *this); 6298 } else { 6299 auto *MainCI = cast<CmpInst>(S.MainOp); 6300 auto *AltCI = cast<CmpInst>(S.AltOp); 6301 CmpInst::Predicate MainP = MainCI->getPredicate(); 6302 CmpInst::Predicate AltP = AltCI->getPredicate(); 6303 assert(MainP != AltP && 6304 "Expected different main/alternate predicates."); 6305 // Collect operands - commute if it uses the swapped predicate or 6306 // alternate operation. 6307 for (Value *V : VL) { 6308 auto *Cmp = cast<CmpInst>(V); 6309 Value *LHS = Cmp->getOperand(0); 6310 Value *RHS = Cmp->getOperand(1); 6311 6312 if (isAlternateInstruction(Cmp, MainCI, AltCI, *TLI)) { 6313 if (AltP == CmpInst::getSwappedPredicate(Cmp->getPredicate())) 6314 std::swap(LHS, RHS); 6315 } else { 6316 if (MainP == CmpInst::getSwappedPredicate(Cmp->getPredicate())) 6317 std::swap(LHS, RHS); 6318 } 6319 Left.push_back(LHS); 6320 Right.push_back(RHS); 6321 } 6322 } 6323 TE->setOperand(0, Left); 6324 TE->setOperand(1, Right); 6325 buildTree_rec(Left, Depth + 1, {TE, 0}); 6326 buildTree_rec(Right, Depth + 1, {TE, 1}); 6327 return; 6328 } 6329 6330 TE->setOperandsInOrder(); 6331 for (unsigned I : seq<unsigned>(0, VL0->getNumOperands())) { 6332 ValueList Operands; 6333 // Prepare the operand vector. 
6334 for (Value *V : VL) 6335 Operands.push_back(cast<Instruction>(V)->getOperand(I)); 6336 6337 buildTree_rec(Operands, Depth + 1, {TE, I}); 6338 } 6339 return; 6340 } 6341 default: 6342 break; 6343 } 6344 llvm_unreachable("Unexpected vectorization of the instructions."); 6345 } 6346 6347 unsigned BoUpSLP::canMapToVector(Type *T) const { 6348 unsigned N = 1; 6349 Type *EltTy = T; 6350 6351 while (isa<StructType, ArrayType, FixedVectorType>(EltTy)) { 6352 if (auto *ST = dyn_cast<StructType>(EltTy)) { 6353 // Check that struct is homogeneous. 6354 for (const auto *Ty : ST->elements()) 6355 if (Ty != *ST->element_begin()) 6356 return 0; 6357 N *= ST->getNumElements(); 6358 EltTy = *ST->element_begin(); 6359 } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) { 6360 N *= AT->getNumElements(); 6361 EltTy = AT->getElementType(); 6362 } else { 6363 auto *VT = cast<FixedVectorType>(EltTy); 6364 N *= VT->getNumElements(); 6365 EltTy = VT->getElementType(); 6366 } 6367 } 6368 6369 if (!isValidElementType(EltTy)) 6370 return 0; 6371 uint64_t VTSize = DL->getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N)); 6372 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || 6373 VTSize != DL->getTypeStoreSizeInBits(T)) 6374 return 0; 6375 return N; 6376 } 6377 6378 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 6379 SmallVectorImpl<unsigned> &CurrentOrder, 6380 bool ResizeAllowed) const { 6381 const auto *It = find_if(VL, [](Value *V) { 6382 return isa<ExtractElementInst, ExtractValueInst>(V); 6383 }); 6384 assert(It != VL.end() && "Expected at least one extract instruction."); 6385 auto *E0 = cast<Instruction>(*It); 6386 assert(all_of(VL, 6387 [](Value *V) { 6388 return isa<UndefValue, ExtractElementInst, ExtractValueInst>( 6389 V); 6390 }) && 6391 "Invalid opcode"); 6392 // Check if all of the extracts come from the same vector and from the 6393 // correct offset. 6394 Value *Vec = E0->getOperand(0); 6395 6396 CurrentOrder.clear(); 6397 6398 // We have to extract from a vector/aggregate with the same number of elements. 6399 unsigned NElts; 6400 if (E0->getOpcode() == Instruction::ExtractValue) { 6401 NElts = canMapToVector(Vec->getType()); 6402 if (!NElts) 6403 return false; 6404 // Check if load can be rewritten as load of vector. 6405 LoadInst *LI = dyn_cast<LoadInst>(Vec); 6406 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size())) 6407 return false; 6408 } else { 6409 NElts = cast<FixedVectorType>(Vec->getType())->getNumElements(); 6410 } 6411 6412 unsigned E = VL.size(); 6413 if (!ResizeAllowed && NElts != E) 6414 return false; 6415 SmallVector<int> Indices(E, PoisonMaskElem); 6416 unsigned MinIdx = NElts, MaxIdx = 0; 6417 for (auto [I, V] : enumerate(VL)) { 6418 auto *Inst = dyn_cast<Instruction>(V); 6419 if (!Inst) 6420 continue; 6421 if (Inst->getOperand(0) != Vec) 6422 return false; 6423 if (auto *EE = dyn_cast<ExtractElementInst>(Inst)) 6424 if (isa<UndefValue>(EE->getIndexOperand())) 6425 continue; 6426 std::optional<unsigned> Idx = getExtractIndex(Inst); 6427 if (!Idx) 6428 return false; 6429 const unsigned ExtIdx = *Idx; 6430 if (ExtIdx >= NElts) 6431 continue; 6432 Indices[I] = ExtIdx; 6433 if (MinIdx > ExtIdx) 6434 MinIdx = ExtIdx; 6435 if (MaxIdx < ExtIdx) 6436 MaxIdx = ExtIdx; 6437 } 6438 if (MaxIdx - MinIdx + 1 > E) 6439 return false; 6440 if (MaxIdx + 1 <= E) 6441 MinIdx = 0; 6442 6443 // Check that all of the indices extract from the correct offset. 
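  // Worked example (illustrative, not from the original source): for a bundle
  // of four extracts reading lanes 2, 0, 3 and 1 of the same source vector,
  // Indices == {2, 0, 3, 1} and MinIdx == 0, so the loop below produces
  // CurrentOrder == {1, 3, 0, 2} and ShouldKeepOrder stays false, i.e. the
  // extracts cover the vector but must be reordered. If the lanes were read
  // in order 0, 1, 2, 3, every ExtIdx would equal its position, CurrentOrder
  // would be cleared and the original order kept.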
6444 bool ShouldKeepOrder = true; 6445 // Assign to all items the initial value E + 1 so we can check if the extract 6446 // instruction index was used already. 6447 // Also, later we can check that all the indices are used and we have a 6448 // consecutive access in the extract instructions, by checking that no 6449 // element of CurrentOrder still has value E + 1. 6450 CurrentOrder.assign(E, E); 6451 for (unsigned I = 0; I < E; ++I) { 6452 if (Indices[I] == PoisonMaskElem) 6453 continue; 6454 const unsigned ExtIdx = Indices[I] - MinIdx; 6455 if (CurrentOrder[ExtIdx] != E) { 6456 CurrentOrder.clear(); 6457 return false; 6458 } 6459 ShouldKeepOrder &= ExtIdx == I; 6460 CurrentOrder[ExtIdx] = I; 6461 } 6462 if (ShouldKeepOrder) 6463 CurrentOrder.clear(); 6464 6465 return ShouldKeepOrder; 6466 } 6467 6468 bool BoUpSLP::areAllUsersVectorized( 6469 Instruction *I, const SmallDenseSet<Value *> *VectorizedVals) const { 6470 return (I->hasOneUse() && (!VectorizedVals || VectorizedVals->contains(I))) || 6471 all_of(I->users(), [this](User *U) { 6472 return ScalarToTreeEntry.count(U) > 0 || 6473 isVectorLikeInstWithConstOps(U) || 6474 (isa<ExtractElementInst>(U) && MustGather.contains(U)); 6475 }); 6476 } 6477 6478 static std::pair<InstructionCost, InstructionCost> 6479 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy, 6480 TargetTransformInfo *TTI, TargetLibraryInfo *TLI) { 6481 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6482 6483 // Calculate the cost of the scalar and vector calls. 6484 SmallVector<Type *, 4> VecTys; 6485 for (Use &Arg : CI->args()) 6486 VecTys.push_back( 6487 FixedVectorType::get(Arg->getType(), VecTy->getNumElements())); 6488 FastMathFlags FMF; 6489 if (auto *FPCI = dyn_cast<FPMathOperator>(CI)) 6490 FMF = FPCI->getFastMathFlags(); 6491 SmallVector<const Value *> Arguments(CI->args()); 6492 IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, VecTys, FMF, 6493 dyn_cast<IntrinsicInst>(CI)); 6494 auto IntrinsicCost = 6495 TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput); 6496 6497 auto Shape = VFShape::get(CI->getFunctionType(), 6498 ElementCount::getFixed(VecTy->getNumElements()), 6499 false /*HasGlobalPred*/); 6500 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 6501 auto LibCost = IntrinsicCost; 6502 if (!CI->isNoBuiltin() && VecFunc) { 6503 // Calculate the cost of the vector library call. 6504 // If the corresponding vector call is cheaper, return its cost. 
6505 LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys, 6506 TTI::TCK_RecipThroughput); 6507 } 6508 return {IntrinsicCost, LibCost}; 6509 } 6510 6511 void BoUpSLP::TreeEntry::buildAltOpShuffleMask( 6512 const function_ref<bool(Instruction *)> IsAltOp, SmallVectorImpl<int> &Mask, 6513 SmallVectorImpl<Value *> *OpScalars, 6514 SmallVectorImpl<Value *> *AltScalars) const { 6515 unsigned Sz = Scalars.size(); 6516 Mask.assign(Sz, PoisonMaskElem); 6517 SmallVector<int> OrderMask; 6518 if (!ReorderIndices.empty()) 6519 inversePermutation(ReorderIndices, OrderMask); 6520 for (unsigned I = 0; I < Sz; ++I) { 6521 unsigned Idx = I; 6522 if (!ReorderIndices.empty()) 6523 Idx = OrderMask[I]; 6524 auto *OpInst = cast<Instruction>(Scalars[Idx]); 6525 if (IsAltOp(OpInst)) { 6526 Mask[I] = Sz + Idx; 6527 if (AltScalars) 6528 AltScalars->push_back(OpInst); 6529 } else { 6530 Mask[I] = Idx; 6531 if (OpScalars) 6532 OpScalars->push_back(OpInst); 6533 } 6534 } 6535 if (!ReuseShuffleIndices.empty()) { 6536 SmallVector<int> NewMask(ReuseShuffleIndices.size(), PoisonMaskElem); 6537 transform(ReuseShuffleIndices, NewMask.begin(), [&Mask](int Idx) { 6538 return Idx != PoisonMaskElem ? Mask[Idx] : PoisonMaskElem; 6539 }); 6540 Mask.swap(NewMask); 6541 } 6542 } 6543 6544 static bool isAlternateInstruction(const Instruction *I, 6545 const Instruction *MainOp, 6546 const Instruction *AltOp, 6547 const TargetLibraryInfo &TLI) { 6548 if (auto *MainCI = dyn_cast<CmpInst>(MainOp)) { 6549 auto *AltCI = cast<CmpInst>(AltOp); 6550 CmpInst::Predicate MainP = MainCI->getPredicate(); 6551 CmpInst::Predicate AltP = AltCI->getPredicate(); 6552 assert(MainP != AltP && "Expected different main/alternate predicates."); 6553 auto *CI = cast<CmpInst>(I); 6554 if (isCmpSameOrSwapped(MainCI, CI, TLI)) 6555 return false; 6556 if (isCmpSameOrSwapped(AltCI, CI, TLI)) 6557 return true; 6558 CmpInst::Predicate P = CI->getPredicate(); 6559 CmpInst::Predicate SwappedP = CmpInst::getSwappedPredicate(P); 6560 6561 assert((MainP == P || AltP == P || MainP == SwappedP || AltP == SwappedP) && 6562 "CmpInst expected to match either main or alternate predicate or " 6563 "their swap."); 6564 (void)AltP; 6565 return MainP != P && MainP != SwappedP; 6566 } 6567 return I->getOpcode() == AltOp->getOpcode(); 6568 } 6569 6570 TTI::OperandValueInfo BoUpSLP::getOperandInfo(ArrayRef<Value *> Ops) { 6571 assert(!Ops.empty()); 6572 const auto *Op0 = Ops.front(); 6573 6574 const bool IsConstant = all_of(Ops, [](Value *V) { 6575 // TODO: We should allow undef elements here 6576 return isConstant(V) && !isa<UndefValue>(V); 6577 }); 6578 const bool IsUniform = all_of(Ops, [=](Value *V) { 6579 // TODO: We should allow undef elements here 6580 return V == Op0; 6581 }); 6582 const bool IsPowerOfTwo = all_of(Ops, [](Value *V) { 6583 // TODO: We should allow undef elements here 6584 if (auto *CI = dyn_cast<ConstantInt>(V)) 6585 return CI->getValue().isPowerOf2(); 6586 return false; 6587 }); 6588 const bool IsNegatedPowerOfTwo = all_of(Ops, [](Value *V) { 6589 // TODO: We should allow undef elements here 6590 if (auto *CI = dyn_cast<ConstantInt>(V)) 6591 return CI->getValue().isNegatedPowerOf2(); 6592 return false; 6593 }); 6594 6595 TTI::OperandValueKind VK = TTI::OK_AnyValue; 6596 if (IsConstant && IsUniform) 6597 VK = TTI::OK_UniformConstantValue; 6598 else if (IsConstant) 6599 VK = TTI::OK_NonUniformConstantValue; 6600 else if (IsUniform) 6601 VK = TTI::OK_UniformValue; 6602 6603 TTI::OperandValueProperties VP = TTI::OP_None; 6604 VP = IsPowerOfTwo ? 
TTI::OP_PowerOf2 : VP; 6605 VP = IsNegatedPowerOfTwo ? TTI::OP_NegatedPowerOf2 : VP; 6606 6607 return {VK, VP}; 6608 } 6609 6610 namespace { 6611 /// The base class for shuffle instruction emission and shuffle cost estimation. 6612 class BaseShuffleAnalysis { 6613 protected: 6614 /// Checks if the mask is an identity mask. 6615 /// \param IsStrict if is true the function returns false if mask size does 6616 /// not match vector size. 6617 static bool isIdentityMask(ArrayRef<int> Mask, const FixedVectorType *VecTy, 6618 bool IsStrict) { 6619 int Limit = Mask.size(); 6620 int VF = VecTy->getNumElements(); 6621 int Index = -1; 6622 if (VF == Limit && ShuffleVectorInst::isIdentityMask(Mask, Limit)) 6623 return true; 6624 if (!IsStrict) { 6625 // Consider extract subvector starting from index 0. 6626 if (ShuffleVectorInst::isExtractSubvectorMask(Mask, VF, Index) && 6627 Index == 0) 6628 return true; 6629 // All VF-size submasks are identity (e.g. 6630 // <poison,poison,poison,poison,0,1,2,poison,poison,1,2,3> etc. for VF 4). 6631 if (Limit % VF == 0 && all_of(seq<int>(0, Limit / VF), [=](int Idx) { 6632 ArrayRef<int> Slice = Mask.slice(Idx * VF, VF); 6633 return all_of(Slice, [](int I) { return I == PoisonMaskElem; }) || 6634 ShuffleVectorInst::isIdentityMask(Slice, VF); 6635 })) 6636 return true; 6637 } 6638 return false; 6639 } 6640 6641 /// Tries to combine 2 different masks into single one. 6642 /// \param LocalVF Vector length of the permuted input vector. \p Mask may 6643 /// change the size of the vector, \p LocalVF is the original size of the 6644 /// shuffled vector. 6645 static void combineMasks(unsigned LocalVF, SmallVectorImpl<int> &Mask, 6646 ArrayRef<int> ExtMask) { 6647 unsigned VF = Mask.size(); 6648 SmallVector<int> NewMask(ExtMask.size(), PoisonMaskElem); 6649 for (int I = 0, Sz = ExtMask.size(); I < Sz; ++I) { 6650 if (ExtMask[I] == PoisonMaskElem) 6651 continue; 6652 int MaskedIdx = Mask[ExtMask[I] % VF]; 6653 NewMask[I] = 6654 MaskedIdx == PoisonMaskElem ? PoisonMaskElem : MaskedIdx % LocalVF; 6655 } 6656 Mask.swap(NewMask); 6657 } 6658 6659 /// Looks through shuffles trying to reduce final number of shuffles in the 6660 /// code. The function looks through the previously emitted shuffle 6661 /// instructions and properly mark indices in mask as undef. 6662 /// For example, given the code 6663 /// \code 6664 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0> 6665 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0> 6666 /// \endcode 6667 /// and if need to emit shuffle of %s1 and %s2 with mask <1, 0, 3, 2>, it will 6668 /// look through %s1 and %s2 and select vectors %0 and %1 with mask 6669 /// <0, 1, 2, 3> for the shuffle. 6670 /// If 2 operands are of different size, the smallest one will be resized and 6671 /// the mask recalculated properly. 6672 /// For example, given the code 6673 /// \code 6674 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0, 1, 0> 6675 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0, 1, 0> 6676 /// \endcode 6677 /// and if need to emit shuffle of %s1 and %s2 with mask <1, 0, 5, 4>, it will 6678 /// look through %s1 and %s2 and select vectors %0 and %1 with mask 6679 /// <0, 1, 2, 3> for the shuffle. 6680 /// So, it tries to transform permutations to simple vector merge, if 6681 /// possible. 6682 /// \param V The input vector which must be shuffled using the given \p Mask. 6683 /// If the better candidate is found, \p V is set to this best candidate 6684 /// vector. 6685 /// \param Mask The input mask for the shuffle. 
If the best candidate is found
  /// during the look-through-shuffles attempt, it is updated accordingly.
  /// \param SinglePermute true if the shuffle operation is originally a
  /// single-value-permutation. In this case the look-through-shuffles procedure
  /// may look for resizing shuffles as the best candidates.
  /// \return true if the shuffle results in the non-resizing identity shuffle
  /// (and thus can be ignored), false otherwise.
  static bool peekThroughShuffles(Value *&V, SmallVectorImpl<int> &Mask,
                                  bool SinglePermute) {
    Value *Op = V;
    ShuffleVectorInst *IdentityOp = nullptr;
    SmallVector<int> IdentityMask;
    while (auto *SV = dyn_cast<ShuffleVectorInst>(Op)) {
      // Exit if not a fixed vector type or a size-changing shuffle.
      auto *SVTy = dyn_cast<FixedVectorType>(SV->getType());
      if (!SVTy)
        break;
      // Remember the identity or broadcast mask, if it is not a resizing
      // shuffle. If no better candidates are found, this Op and Mask will be
      // used in the final shuffle.
      if (isIdentityMask(Mask, SVTy, /*IsStrict=*/false)) {
        if (!IdentityOp || !SinglePermute ||
            (isIdentityMask(Mask, SVTy, /*IsStrict=*/true) &&
             !ShuffleVectorInst::isZeroEltSplatMask(IdentityMask,
                                                    IdentityMask.size()))) {
          IdentityOp = SV;
          // Store the current mask in IdentityMask so that we do not lose
          // this info if IdentityOp is selected as the best candidate for the
          // permutation.
          IdentityMask.assign(Mask);
        }
      }
      // Remember the broadcast mask. If no better candidates are found, this Op
      // and Mask will be used in the final shuffle.
      // Zero splat can be used as identity too, since it might be used with
      // mask <0, 1, 2, ...>, i.e. identity mask without extra reshuffling.
      // E.g. if we need to shuffle the vector with the mask <3, 1, 2, 0>, which
      // is expensive, and the analysis finds out that the source vector is just
      // a broadcast, this original mask can be transformed to the identity mask
      // <0, 1, 2, 3>.
      // \code
      // %0 = shuffle %v, poison, zeroinitializer
      // %res = shuffle %0, poison, <3, 1, 2, 0>
      // \endcode
      // may be transformed to
      // \code
      // %0 = shuffle %v, poison, zeroinitializer
      // %res = shuffle %0, poison, <0, 1, 2, 3>
      // \endcode
      if (SV->isZeroEltSplat()) {
        IdentityOp = SV;
        IdentityMask.assign(Mask);
      }
      int LocalVF = Mask.size();
      if (auto *SVOpTy =
              dyn_cast<FixedVectorType>(SV->getOperand(0)->getType()))
        LocalVF = SVOpTy->getNumElements();
      SmallVector<int> ExtMask(Mask.size(), PoisonMaskElem);
      for (auto [Idx, I] : enumerate(Mask)) {
        if (I == PoisonMaskElem ||
            static_cast<unsigned>(I) >= SV->getShuffleMask().size())
          continue;
        ExtMask[Idx] = SV->getMaskValue(I);
      }
      bool IsOp1Undef =
          isUndefVector(SV->getOperand(0),
                        buildUseMask(LocalVF, ExtMask, UseMask::FirstArg))
              .all();
      bool IsOp2Undef =
          isUndefVector(SV->getOperand(1),
                        buildUseMask(LocalVF, ExtMask, UseMask::SecondArg))
              .all();
      if (!IsOp1Undef && !IsOp2Undef) {
        // Update mask and mark undef elems.
6759 for (int &I : Mask) { 6760 if (I == PoisonMaskElem) 6761 continue; 6762 if (SV->getMaskValue(I % SV->getShuffleMask().size()) == 6763 PoisonMaskElem) 6764 I = PoisonMaskElem; 6765 } 6766 break; 6767 } 6768 SmallVector<int> ShuffleMask(SV->getShuffleMask().begin(), 6769 SV->getShuffleMask().end()); 6770 combineMasks(LocalVF, ShuffleMask, Mask); 6771 Mask.swap(ShuffleMask); 6772 if (IsOp2Undef) 6773 Op = SV->getOperand(0); 6774 else 6775 Op = SV->getOperand(1); 6776 } 6777 if (auto *OpTy = dyn_cast<FixedVectorType>(Op->getType()); 6778 !OpTy || !isIdentityMask(Mask, OpTy, SinglePermute) || 6779 ShuffleVectorInst::isZeroEltSplatMask(Mask, Mask.size())) { 6780 if (IdentityOp) { 6781 V = IdentityOp; 6782 assert(Mask.size() == IdentityMask.size() && 6783 "Expected masks of same sizes."); 6784 // Clear known poison elements. 6785 for (auto [I, Idx] : enumerate(Mask)) 6786 if (Idx == PoisonMaskElem) 6787 IdentityMask[I] = PoisonMaskElem; 6788 Mask.swap(IdentityMask); 6789 auto *Shuffle = dyn_cast<ShuffleVectorInst>(V); 6790 return SinglePermute && 6791 (isIdentityMask(Mask, cast<FixedVectorType>(V->getType()), 6792 /*IsStrict=*/true) || 6793 (Shuffle && Mask.size() == Shuffle->getShuffleMask().size() && 6794 Shuffle->isZeroEltSplat() && 6795 ShuffleVectorInst::isZeroEltSplatMask(Mask, Mask.size()))); 6796 } 6797 V = Op; 6798 return false; 6799 } 6800 V = Op; 6801 return true; 6802 } 6803 6804 /// Smart shuffle instruction emission, walks through shuffles trees and 6805 /// tries to find the best matching vector for the actual shuffle 6806 /// instruction. 6807 template <typename T, typename ShuffleBuilderTy> 6808 static T createShuffle(Value *V1, Value *V2, ArrayRef<int> Mask, 6809 ShuffleBuilderTy &Builder) { 6810 assert(V1 && "Expected at least one vector value."); 6811 if (V2) 6812 Builder.resizeToMatch(V1, V2); 6813 int VF = Mask.size(); 6814 if (auto *FTy = dyn_cast<FixedVectorType>(V1->getType())) 6815 VF = FTy->getNumElements(); 6816 if (V2 && 6817 !isUndefVector(V2, buildUseMask(VF, Mask, UseMask::SecondArg)).all()) { 6818 // Peek through shuffles. 6819 Value *Op1 = V1; 6820 Value *Op2 = V2; 6821 int VF = 6822 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue(); 6823 SmallVector<int> CombinedMask1(Mask.size(), PoisonMaskElem); 6824 SmallVector<int> CombinedMask2(Mask.size(), PoisonMaskElem); 6825 for (int I = 0, E = Mask.size(); I < E; ++I) { 6826 if (Mask[I] < VF) 6827 CombinedMask1[I] = Mask[I]; 6828 else 6829 CombinedMask2[I] = Mask[I] - VF; 6830 } 6831 Value *PrevOp1; 6832 Value *PrevOp2; 6833 do { 6834 PrevOp1 = Op1; 6835 PrevOp2 = Op2; 6836 (void)peekThroughShuffles(Op1, CombinedMask1, /*SinglePermute=*/false); 6837 (void)peekThroughShuffles(Op2, CombinedMask2, /*SinglePermute=*/false); 6838 // Check if we have 2 resizing shuffles - need to peek through operands 6839 // again. 
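        // Hand-written sketch (illustrative only): this triggers when both
        // operands are widening shuffles of smaller vectors of the same type
        // whose second source is unused, e.g.
        //   %op1 = shufflevector <2 x i32> %a, <2 x i32> poison,
        //                        <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
        //   %op2 = shufflevector <2 x i32> %b, <2 x i32> poison,
        //                        <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
        // in which case the masks are folded so the final shuffle can be built
        // directly from %a and %b.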
6840 if (auto *SV1 = dyn_cast<ShuffleVectorInst>(Op1)) 6841 if (auto *SV2 = dyn_cast<ShuffleVectorInst>(Op2)) { 6842 SmallVector<int> ExtMask1(Mask.size(), PoisonMaskElem); 6843 for (auto [Idx, I] : enumerate(CombinedMask1)) { 6844 if (I == PoisonMaskElem) 6845 continue; 6846 ExtMask1[Idx] = SV1->getMaskValue(I); 6847 } 6848 SmallBitVector UseMask1 = buildUseMask( 6849 cast<FixedVectorType>(SV1->getOperand(1)->getType()) 6850 ->getNumElements(), 6851 ExtMask1, UseMask::SecondArg); 6852 SmallVector<int> ExtMask2(CombinedMask2.size(), PoisonMaskElem); 6853 for (auto [Idx, I] : enumerate(CombinedMask2)) { 6854 if (I == PoisonMaskElem) 6855 continue; 6856 ExtMask2[Idx] = SV2->getMaskValue(I); 6857 } 6858 SmallBitVector UseMask2 = buildUseMask( 6859 cast<FixedVectorType>(SV2->getOperand(1)->getType()) 6860 ->getNumElements(), 6861 ExtMask2, UseMask::SecondArg); 6862 if (SV1->getOperand(0)->getType() == 6863 SV2->getOperand(0)->getType() && 6864 SV1->getOperand(0)->getType() != SV1->getType() && 6865 isUndefVector(SV1->getOperand(1), UseMask1).all() && 6866 isUndefVector(SV2->getOperand(1), UseMask2).all()) { 6867 Op1 = SV1->getOperand(0); 6868 Op2 = SV2->getOperand(0); 6869 SmallVector<int> ShuffleMask1(SV1->getShuffleMask().begin(), 6870 SV1->getShuffleMask().end()); 6871 int LocalVF = ShuffleMask1.size(); 6872 if (auto *FTy = dyn_cast<FixedVectorType>(Op1->getType())) 6873 LocalVF = FTy->getNumElements(); 6874 combineMasks(LocalVF, ShuffleMask1, CombinedMask1); 6875 CombinedMask1.swap(ShuffleMask1); 6876 SmallVector<int> ShuffleMask2(SV2->getShuffleMask().begin(), 6877 SV2->getShuffleMask().end()); 6878 LocalVF = ShuffleMask2.size(); 6879 if (auto *FTy = dyn_cast<FixedVectorType>(Op2->getType())) 6880 LocalVF = FTy->getNumElements(); 6881 combineMasks(LocalVF, ShuffleMask2, CombinedMask2); 6882 CombinedMask2.swap(ShuffleMask2); 6883 } 6884 } 6885 } while (PrevOp1 != Op1 || PrevOp2 != Op2); 6886 Builder.resizeToMatch(Op1, Op2); 6887 VF = std::max(cast<VectorType>(Op1->getType()) 6888 ->getElementCount() 6889 .getKnownMinValue(), 6890 cast<VectorType>(Op2->getType()) 6891 ->getElementCount() 6892 .getKnownMinValue()); 6893 for (int I = 0, E = Mask.size(); I < E; ++I) { 6894 if (CombinedMask2[I] != PoisonMaskElem) { 6895 assert(CombinedMask1[I] == PoisonMaskElem && 6896 "Expected undefined mask element"); 6897 CombinedMask1[I] = CombinedMask2[I] + (Op1 == Op2 ? 0 : VF); 6898 } 6899 } 6900 if (Op1 == Op2 && 6901 (ShuffleVectorInst::isIdentityMask(CombinedMask1, VF) || 6902 (ShuffleVectorInst::isZeroEltSplatMask(CombinedMask1, VF) && 6903 isa<ShuffleVectorInst>(Op1) && 6904 cast<ShuffleVectorInst>(Op1)->getShuffleMask() == 6905 ArrayRef(CombinedMask1)))) 6906 return Builder.createIdentity(Op1); 6907 return Builder.createShuffleVector( 6908 Op1, Op1 == Op2 ? PoisonValue::get(Op1->getType()) : Op2, 6909 CombinedMask1); 6910 } 6911 if (isa<PoisonValue>(V1)) 6912 return Builder.createPoison( 6913 cast<VectorType>(V1->getType())->getElementType(), Mask.size()); 6914 SmallVector<int> NewMask(Mask.begin(), Mask.end()); 6915 bool IsIdentity = peekThroughShuffles(V1, NewMask, /*SinglePermute=*/true); 6916 assert(V1 && "Expected non-null value after looking through shuffles."); 6917 6918 if (!IsIdentity) 6919 return Builder.createShuffleVector(V1, NewMask); 6920 return Builder.createIdentity(V1); 6921 } 6922 }; 6923 } // namespace 6924 6925 /// Merges shuffle masks and emits final shuffle instruction, if required. It 6926 /// supports shuffling of 2 input vectors. 
It implements lazy shuffle emission:
/// the actual shuffle instruction is generated only if it is really required.
/// Otherwise, shuffle instruction emission is delayed till the end of the
/// process, to reduce the number of emitted instructions and to simplify
/// further analysis/transformations.
class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
  bool IsFinalized = false;
  SmallVector<int> CommonMask;
  SmallVector<PointerUnion<Value *, const TreeEntry *>, 2> InVectors;
  const TargetTransformInfo &TTI;
  InstructionCost Cost = 0;
  SmallDenseSet<Value *> VectorizedVals;
  BoUpSLP &R;
  SmallPtrSetImpl<Value *> &CheckedExtracts;
  constexpr static TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  /// While set, we are still trying to estimate the cost for the same nodes,
  /// so the actual cost estimation (virtual shuffle instruction emission) can
  /// be delayed. May help to better estimate the cost if the same nodes must
  /// be permuted and allows moving most of the long shuffle cost estimation
  /// to TTI.
  bool SameNodesEstimated = true;

  static Constant *getAllOnesValue(const DataLayout &DL, Type *Ty) {
    if (Ty->getScalarType()->isPointerTy()) {
      Constant *Res = ConstantExpr::getIntToPtr(
          ConstantInt::getAllOnesValue(
              IntegerType::get(Ty->getContext(),
                               DL.getTypeStoreSizeInBits(Ty->getScalarType()))),
          Ty->getScalarType());
      if (auto *VTy = dyn_cast<VectorType>(Ty))
        Res = ConstantVector::getSplat(VTy->getElementCount(), Res);
      return Res;
    }
    return Constant::getAllOnesValue(Ty);
  }

  InstructionCost getBuildVectorCost(ArrayRef<Value *> VL, Value *Root) {
    if ((!Root && allConstant(VL)) || all_of(VL, UndefValue::classof))
      return TTI::TCC_Free;
    auto *VecTy = FixedVectorType::get(VL.front()->getType(), VL.size());
    InstructionCost GatherCost = 0;
    SmallVector<Value *> Gathers(VL.begin(), VL.end());
    // Improve gather cost for gather of loads, if we can group some of the
    // loads into vector loads.
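    // Illustrative example (not from the original source): when gathering
    // eight scalars of which the first four happen to be consecutive loads,
    // the code below prices that half as one vector load (plus, possibly, a
    // subvector insert) rather than four insertelement operations, and then
    // subtracts the cost of the scalar loads that become dead, which usually
    // makes the whole gather cheaper.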
6969 InstructionsState S = getSameOpcode(VL, *R.TLI); 6970 const unsigned Sz = R.DL->getTypeSizeInBits(VL.front()->getType()); 6971 unsigned MinVF = R.getMinVF(2 * Sz); 6972 if (VL.size() > 2 && 6973 ((S.getOpcode() == Instruction::Load && !S.isAltShuffle()) || 6974 (InVectors.empty() && 6975 any_of(seq<unsigned>(0, VL.size() / MinVF), 6976 [&](unsigned Idx) { 6977 ArrayRef<Value *> SubVL = VL.slice(Idx * MinVF, MinVF); 6978 InstructionsState S = getSameOpcode(SubVL, *R.TLI); 6979 return S.getOpcode() == Instruction::Load && 6980 !S.isAltShuffle(); 6981 }))) && 6982 !all_of(Gathers, [&](Value *V) { return R.getTreeEntry(V); }) && 6983 !isSplat(Gathers)) { 6984 SetVector<Value *> VectorizedLoads; 6985 SmallVector<LoadInst *> VectorizedStarts; 6986 SmallVector<std::pair<unsigned, unsigned>> ScatterVectorized; 6987 unsigned StartIdx = 0; 6988 unsigned VF = VL.size() / 2; 6989 for (; VF >= MinVF; VF /= 2) { 6990 for (unsigned Cnt = StartIdx, End = VL.size(); Cnt + VF <= End; 6991 Cnt += VF) { 6992 ArrayRef<Value *> Slice = VL.slice(Cnt, VF); 6993 if (S.getOpcode() != Instruction::Load || S.isAltShuffle()) { 6994 InstructionsState SliceS = getSameOpcode(Slice, *R.TLI); 6995 if (SliceS.getOpcode() != Instruction::Load || 6996 SliceS.isAltShuffle()) 6997 continue; 6998 } 6999 if (!VectorizedLoads.count(Slice.front()) && 7000 !VectorizedLoads.count(Slice.back()) && allSameBlock(Slice)) { 7001 SmallVector<Value *> PointerOps; 7002 OrdersType CurrentOrder; 7003 LoadsState LS = 7004 canVectorizeLoads(Slice, Slice.front(), TTI, *R.DL, *R.SE, 7005 *R.LI, *R.TLI, CurrentOrder, PointerOps); 7006 switch (LS) { 7007 case LoadsState::Vectorize: 7008 case LoadsState::ScatterVectorize: 7009 case LoadsState::PossibleStridedVectorize: 7010 // Mark the vectorized loads so that we don't vectorize them 7011 // again. 7012 // TODO: better handling of loads with reorders. 7013 if (LS == LoadsState::Vectorize && CurrentOrder.empty()) 7014 VectorizedStarts.push_back(cast<LoadInst>(Slice.front())); 7015 else 7016 ScatterVectorized.emplace_back(Cnt, VF); 7017 VectorizedLoads.insert(Slice.begin(), Slice.end()); 7018 // If we vectorized initial block, no need to try to vectorize 7019 // it again. 7020 if (Cnt == StartIdx) 7021 StartIdx += VF; 7022 break; 7023 case LoadsState::Gather: 7024 break; 7025 } 7026 } 7027 } 7028 // Check if the whole array was vectorized already - exit. 7029 if (StartIdx >= VL.size()) 7030 break; 7031 // Found vectorizable parts - exit. 7032 if (!VectorizedLoads.empty()) 7033 break; 7034 } 7035 if (!VectorizedLoads.empty()) { 7036 unsigned NumParts = TTI.getNumberOfParts(VecTy); 7037 bool NeedInsertSubvectorAnalysis = 7038 !NumParts || (VL.size() / VF) > NumParts; 7039 // Get the cost for gathered loads. 7040 for (unsigned I = 0, End = VL.size(); I < End; I += VF) { 7041 if (VectorizedLoads.contains(VL[I])) 7042 continue; 7043 GatherCost += getBuildVectorCost(VL.slice(I, VF), Root); 7044 } 7045 // Exclude potentially vectorized loads from list of gathered 7046 // scalars. 7047 Gathers.assign(Gathers.size(), PoisonValue::get(VL.front()->getType())); 7048 // The cost for vectorized loads. 
7049 InstructionCost ScalarsCost = 0; 7050 for (Value *V : VectorizedLoads) { 7051 auto *LI = cast<LoadInst>(V); 7052 ScalarsCost += 7053 TTI.getMemoryOpCost(Instruction::Load, LI->getType(), 7054 LI->getAlign(), LI->getPointerAddressSpace(), 7055 CostKind, TTI::OperandValueInfo(), LI); 7056 } 7057 auto *LoadTy = FixedVectorType::get(VL.front()->getType(), VF); 7058 for (LoadInst *LI : VectorizedStarts) { 7059 Align Alignment = LI->getAlign(); 7060 GatherCost += 7061 TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, 7062 LI->getPointerAddressSpace(), CostKind, 7063 TTI::OperandValueInfo(), LI); 7064 } 7065 for (std::pair<unsigned, unsigned> P : ScatterVectorized) { 7066 auto *LI0 = cast<LoadInst>(VL[P.first]); 7067 Align CommonAlignment = LI0->getAlign(); 7068 for (Value *V : VL.slice(P.first + 1, VF - 1)) 7069 CommonAlignment = 7070 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign()); 7071 GatherCost += TTI.getGatherScatterOpCost( 7072 Instruction::Load, LoadTy, LI0->getPointerOperand(), 7073 /*VariableMask=*/false, CommonAlignment, CostKind, LI0); 7074 } 7075 if (NeedInsertSubvectorAnalysis) { 7076 // Add the cost for the subvectors insert. 7077 for (int I = VF, E = VL.size(); I < E; I += VF) 7078 GatherCost += TTI.getShuffleCost(TTI::SK_InsertSubvector, VecTy, 7079 std::nullopt, CostKind, I, LoadTy); 7080 } 7081 GatherCost -= ScalarsCost; 7082 } 7083 } else if (!Root && isSplat(VL)) { 7084 // Found the broadcasting of the single scalar, calculate the cost as 7085 // the broadcast. 7086 const auto *It = 7087 find_if(VL, [](Value *V) { return !isa<UndefValue>(V); }); 7088 assert(It != VL.end() && "Expected at least one non-undef value."); 7089 // Add broadcast for non-identity shuffle only. 7090 bool NeedShuffle = 7091 count(VL, *It) > 1 && 7092 (VL.front() != *It || !all_of(VL.drop_front(), UndefValue::classof)); 7093 InstructionCost InsertCost = TTI.getVectorInstrCost( 7094 Instruction::InsertElement, VecTy, CostKind, 7095 NeedShuffle ? 0 : std::distance(VL.begin(), It), 7096 PoisonValue::get(VecTy), *It); 7097 return InsertCost + 7098 (NeedShuffle ? TTI.getShuffleCost( 7099 TargetTransformInfo::SK_Broadcast, VecTy, 7100 /*Mask=*/std::nullopt, CostKind, /*Index=*/0, 7101 /*SubTp=*/nullptr, /*Args=*/*It) 7102 : TTI::TCC_Free); 7103 } 7104 return GatherCost + 7105 (all_of(Gathers, UndefValue::classof) 7106 ? TTI::TCC_Free 7107 : R.getGatherCost(Gathers, !Root && VL.equals(Gathers))); 7108 }; 7109 7110 /// Compute the cost of creating a vector containing the extracted values from 7111 /// \p VL. 7112 InstructionCost 7113 computeExtractCost(ArrayRef<Value *> VL, ArrayRef<int> Mask, 7114 ArrayRef<std::optional<TTI::ShuffleKind>> ShuffleKinds, 7115 unsigned NumParts) { 7116 assert(VL.size() > NumParts && "Unexpected scalarized shuffle."); 7117 unsigned NumElts = 7118 std::accumulate(VL.begin(), VL.end(), 0, [](unsigned Sz, Value *V) { 7119 auto *EE = dyn_cast<ExtractElementInst>(V); 7120 if (!EE) 7121 return Sz; 7122 auto *VecTy = cast<FixedVectorType>(EE->getVectorOperandType()); 7123 return std::max(Sz, VecTy->getNumElements()); 7124 }); 7125 unsigned NumSrcRegs = TTI.getNumberOfParts( 7126 FixedVectorType::get(VL.front()->getType(), NumElts)); 7127 if (NumSrcRegs == 0) 7128 NumSrcRegs = 1; 7129 // FIXME: this must be moved to TTI for better estimation. 
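    // Worked example (illustrative; the register counts are assumed): with
    // VL.size() == 8 extracts, NumParts == 2, a widest source vector of
    // NumElts == 16 elements and NumSrcRegs == 4, the formula below yields
    //   EltsPerVector = PowerOf2Ceil(max(ceil(8 / 2), ceil(16 / 4)))
    //                 = PowerOf2Ceil(4) = 4,
    // so the extract mask is analyzed in chunks of four elements per register.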
7130 unsigned EltsPerVector = PowerOf2Ceil(std::max( 7131 divideCeil(VL.size(), NumParts), divideCeil(NumElts, NumSrcRegs))); 7132 auto CheckPerRegistersShuffle = 7133 [&](MutableArrayRef<int> Mask) -> std::optional<TTI::ShuffleKind> { 7134 DenseSet<int> RegIndices; 7135 // Check that if trying to permute same single/2 input vectors. 7136 TTI::ShuffleKind ShuffleKind = TTI::SK_PermuteSingleSrc; 7137 int FirstRegId = -1; 7138 for (int &I : Mask) { 7139 if (I == PoisonMaskElem) 7140 continue; 7141 int RegId = (I / NumElts) * NumParts + (I % NumElts) / EltsPerVector; 7142 if (FirstRegId < 0) 7143 FirstRegId = RegId; 7144 RegIndices.insert(RegId); 7145 if (RegIndices.size() > 2) 7146 return std::nullopt; 7147 if (RegIndices.size() == 2) 7148 ShuffleKind = TTI::SK_PermuteTwoSrc; 7149 I = (I % NumElts) % EltsPerVector + 7150 (RegId == FirstRegId ? 0 : EltsPerVector); 7151 } 7152 return ShuffleKind; 7153 }; 7154 InstructionCost Cost = 0; 7155 7156 // Process extracts in blocks of EltsPerVector to check if the source vector 7157 // operand can be re-used directly. If not, add the cost of creating a 7158 // shuffle to extract the values into a vector register. 7159 for (unsigned Part = 0; Part < NumParts; ++Part) { 7160 if (!ShuffleKinds[Part]) 7161 continue; 7162 ArrayRef<int> MaskSlice = 7163 Mask.slice(Part * EltsPerVector, 7164 (Part == NumParts - 1 && Mask.size() % EltsPerVector != 0) 7165 ? Mask.size() % EltsPerVector 7166 : EltsPerVector); 7167 SmallVector<int> SubMask(EltsPerVector, PoisonMaskElem); 7168 copy(MaskSlice, SubMask.begin()); 7169 std::optional<TTI::ShuffleKind> RegShuffleKind = 7170 CheckPerRegistersShuffle(SubMask); 7171 if (!RegShuffleKind) { 7172 Cost += TTI.getShuffleCost( 7173 *ShuffleKinds[Part], 7174 FixedVectorType::get(VL.front()->getType(), NumElts), MaskSlice); 7175 continue; 7176 } 7177 if (*RegShuffleKind != TTI::SK_PermuteSingleSrc || 7178 !ShuffleVectorInst::isIdentityMask(SubMask, EltsPerVector)) { 7179 Cost += TTI.getShuffleCost( 7180 *RegShuffleKind, 7181 FixedVectorType::get(VL.front()->getType(), EltsPerVector), 7182 SubMask); 7183 } 7184 } 7185 return Cost; 7186 } 7187 /// Transforms mask \p CommonMask per given \p Mask to make proper set after 7188 /// shuffle emission. 7189 static void transformMaskAfterShuffle(MutableArrayRef<int> CommonMask, 7190 ArrayRef<int> Mask) { 7191 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 7192 if (Mask[Idx] != PoisonMaskElem) 7193 CommonMask[Idx] = Idx; 7194 } 7195 /// Adds the cost of reshuffling \p E1 and \p E2 (if present), using given 7196 /// mask \p Mask, register number \p Part, that includes \p SliceSize 7197 /// elements. 7198 void estimateNodesPermuteCost(const TreeEntry &E1, const TreeEntry *E2, 7199 ArrayRef<int> Mask, unsigned Part, 7200 unsigned SliceSize) { 7201 if (SameNodesEstimated) { 7202 // Delay the cost estimation if the same nodes are reshuffling. 7203 // If we already requested the cost of reshuffling of E1 and E2 before, no 7204 // need to estimate another cost with the sub-Mask, instead include this 7205 // sub-Mask into the CommonMask to estimate it later and avoid double cost 7206 // estimation. 
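      // Clarifying note (added): e.g. if mask parts 0 and 1 both reshuffle the
      // same pair of nodes (E1, E2), part 0 only copies its sub-mask into
      // CommonMask here and returns; the TTI query is then issued once for the
      // accumulated mask instead of paying for two partial shuffles.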
7207 if ((InVectors.size() == 2 && 7208 InVectors.front().get<const TreeEntry *>() == &E1 && 7209 InVectors.back().get<const TreeEntry *>() == E2) || 7210 (!E2 && InVectors.front().get<const TreeEntry *>() == &E1)) { 7211 assert(all_of(ArrayRef(CommonMask).slice(Part * SliceSize, SliceSize), 7212 [](int Idx) { return Idx == PoisonMaskElem; }) && 7213 "Expected all poisoned elements."); 7214 ArrayRef<int> SubMask = 7215 ArrayRef(Mask).slice(Part * SliceSize, SliceSize); 7216 copy(SubMask, std::next(CommonMask.begin(), SliceSize * Part)); 7217 return; 7218 } 7219 // Found non-matching nodes - need to estimate the cost for the matched 7220 // and transform mask. 7221 Cost += createShuffle(InVectors.front(), 7222 InVectors.size() == 1 ? nullptr : InVectors.back(), 7223 CommonMask); 7224 transformMaskAfterShuffle(CommonMask, CommonMask); 7225 } 7226 SameNodesEstimated = false; 7227 Cost += createShuffle(&E1, E2, Mask); 7228 transformMaskAfterShuffle(CommonMask, Mask); 7229 } 7230 7231 class ShuffleCostBuilder { 7232 const TargetTransformInfo &TTI; 7233 7234 static bool isEmptyOrIdentity(ArrayRef<int> Mask, unsigned VF) { 7235 int Index = -1; 7236 return Mask.empty() || 7237 (VF == Mask.size() && 7238 ShuffleVectorInst::isIdentityMask(Mask, VF)) || 7239 (ShuffleVectorInst::isExtractSubvectorMask(Mask, VF, Index) && 7240 Index == 0); 7241 } 7242 7243 public: 7244 ShuffleCostBuilder(const TargetTransformInfo &TTI) : TTI(TTI) {} 7245 ~ShuffleCostBuilder() = default; 7246 InstructionCost createShuffleVector(Value *V1, Value *, 7247 ArrayRef<int> Mask) const { 7248 // Empty mask or identity mask are free. 7249 unsigned VF = 7250 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue(); 7251 if (isEmptyOrIdentity(Mask, VF)) 7252 return TTI::TCC_Free; 7253 return TTI.getShuffleCost(TTI::SK_PermuteTwoSrc, 7254 cast<VectorType>(V1->getType()), Mask); 7255 } 7256 InstructionCost createShuffleVector(Value *V1, ArrayRef<int> Mask) const { 7257 // Empty mask or identity mask are free. 7258 unsigned VF = 7259 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue(); 7260 if (isEmptyOrIdentity(Mask, VF)) 7261 return TTI::TCC_Free; 7262 return TTI.getShuffleCost(TTI::SK_PermuteSingleSrc, 7263 cast<VectorType>(V1->getType()), Mask); 7264 } 7265 InstructionCost createIdentity(Value *) const { return TTI::TCC_Free; } 7266 InstructionCost createPoison(Type *Ty, unsigned VF) const { 7267 return TTI::TCC_Free; 7268 } 7269 void resizeToMatch(Value *&, Value *&) const {} 7270 }; 7271 7272 /// Smart shuffle instruction emission, walks through shuffles trees and 7273 /// tries to find the best matching vector for the actual shuffle 7274 /// instruction. 7275 InstructionCost 7276 createShuffle(const PointerUnion<Value *, const TreeEntry *> &P1, 7277 const PointerUnion<Value *, const TreeEntry *> &P2, 7278 ArrayRef<int> Mask) { 7279 ShuffleCostBuilder Builder(TTI); 7280 SmallVector<int> CommonMask(Mask.begin(), Mask.end()); 7281 Value *V1 = P1.dyn_cast<Value *>(), *V2 = P2.dyn_cast<Value *>(); 7282 unsigned CommonVF = Mask.size(); 7283 if (!V1 && !V2 && !P2.isNull()) { 7284 // Shuffle 2 entry nodes. 
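      // Clarifying note (added): neither operand has been materialized yet, so
      // two typed placeholder constants of the common vector factor (a zero
      // vector and an all-ones vector) are created below purely to feed the
      // TTI shuffle-cost query; this is cost modelling only, and they never
      // reach the emitted IR.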
7285 const TreeEntry *E = P1.get<const TreeEntry *>(); 7286 unsigned VF = E->getVectorFactor(); 7287 const TreeEntry *E2 = P2.get<const TreeEntry *>(); 7288 CommonVF = std::max(VF, E2->getVectorFactor()); 7289 assert(all_of(Mask, 7290 [=](int Idx) { 7291 return Idx < 2 * static_cast<int>(CommonVF); 7292 }) && 7293 "All elements in mask must be less than 2 * CommonVF."); 7294 if (E->Scalars.size() == E2->Scalars.size()) { 7295 SmallVector<int> EMask = E->getCommonMask(); 7296 SmallVector<int> E2Mask = E2->getCommonMask(); 7297 if (!EMask.empty() || !E2Mask.empty()) { 7298 for (int &Idx : CommonMask) { 7299 if (Idx == PoisonMaskElem) 7300 continue; 7301 if (Idx < static_cast<int>(CommonVF) && !EMask.empty()) 7302 Idx = EMask[Idx]; 7303 else if (Idx >= static_cast<int>(CommonVF)) 7304 Idx = (E2Mask.empty() ? Idx - CommonVF : E2Mask[Idx - CommonVF]) + 7305 E->Scalars.size(); 7306 } 7307 } 7308 CommonVF = E->Scalars.size(); 7309 } 7310 V1 = Constant::getNullValue( 7311 FixedVectorType::get(E->Scalars.front()->getType(), CommonVF)); 7312 V2 = getAllOnesValue( 7313 *R.DL, FixedVectorType::get(E->Scalars.front()->getType(), CommonVF)); 7314 } else if (!V1 && P2.isNull()) { 7315 // Shuffle single entry node. 7316 const TreeEntry *E = P1.get<const TreeEntry *>(); 7317 unsigned VF = E->getVectorFactor(); 7318 CommonVF = VF; 7319 assert( 7320 all_of(Mask, 7321 [=](int Idx) { return Idx < static_cast<int>(CommonVF); }) && 7322 "All elements in mask must be less than CommonVF."); 7323 if (E->Scalars.size() == Mask.size() && VF != Mask.size()) { 7324 SmallVector<int> EMask = E->getCommonMask(); 7325 assert(!EMask.empty() && "Expected non-empty common mask."); 7326 for (int &Idx : CommonMask) { 7327 if (Idx != PoisonMaskElem) 7328 Idx = EMask[Idx]; 7329 } 7330 CommonVF = E->Scalars.size(); 7331 } 7332 V1 = Constant::getNullValue( 7333 FixedVectorType::get(E->Scalars.front()->getType(), CommonVF)); 7334 } else if (V1 && P2.isNull()) { 7335 // Shuffle single vector. 7336 CommonVF = cast<FixedVectorType>(V1->getType())->getNumElements(); 7337 assert( 7338 all_of(Mask, 7339 [=](int Idx) { return Idx < static_cast<int>(CommonVF); }) && 7340 "All elements in mask must be less than CommonVF."); 7341 } else if (V1 && !V2) { 7342 // Shuffle vector and tree node. 7343 unsigned VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 7344 const TreeEntry *E2 = P2.get<const TreeEntry *>(); 7345 CommonVF = std::max(VF, E2->getVectorFactor()); 7346 assert(all_of(Mask, 7347 [=](int Idx) { 7348 return Idx < 2 * static_cast<int>(CommonVF); 7349 }) && 7350 "All elements in mask must be less than 2 * CommonVF."); 7351 if (E2->Scalars.size() == VF && VF != CommonVF) { 7352 SmallVector<int> E2Mask = E2->getCommonMask(); 7353 assert(!E2Mask.empty() && "Expected non-empty common mask."); 7354 for (int &Idx : CommonMask) { 7355 if (Idx == PoisonMaskElem) 7356 continue; 7357 if (Idx >= static_cast<int>(CommonVF)) 7358 Idx = E2Mask[Idx - CommonVF] + VF; 7359 } 7360 CommonVF = VF; 7361 } 7362 V1 = Constant::getNullValue( 7363 FixedVectorType::get(E2->Scalars.front()->getType(), CommonVF)); 7364 V2 = getAllOnesValue( 7365 *R.DL, 7366 FixedVectorType::get(E2->Scalars.front()->getType(), CommonVF)); 7367 } else if (!V1 && V2) { 7368 // Shuffle vector and tree node. 
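// Mirror of the previous branch with the operands swapped: here P1 is the
// tree entry and P2 holds the actual vector value.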
7369 unsigned VF = cast<FixedVectorType>(V2->getType())->getNumElements(); 7370 const TreeEntry *E1 = P1.get<const TreeEntry *>(); 7371 CommonVF = std::max(VF, E1->getVectorFactor()); 7372 assert(all_of(Mask, 7373 [=](int Idx) { 7374 return Idx < 2 * static_cast<int>(CommonVF); 7375 }) && 7376 "All elements in mask must be less than 2 * CommonVF."); 7377 if (E1->Scalars.size() == VF && VF != CommonVF) { 7378 SmallVector<int> E1Mask = E1->getCommonMask(); 7379 assert(!E1Mask.empty() && "Expected non-empty common mask."); 7380 for (int &Idx : CommonMask) { 7381 if (Idx == PoisonMaskElem) 7382 continue; 7383 if (Idx >= static_cast<int>(CommonVF)) 7384 Idx = E1Mask[Idx - CommonVF] + VF; 7385 } 7386 CommonVF = VF; 7387 } 7388 V1 = Constant::getNullValue( 7389 FixedVectorType::get(E1->Scalars.front()->getType(), CommonVF)); 7390 V2 = getAllOnesValue( 7391 *R.DL, 7392 FixedVectorType::get(E1->Scalars.front()->getType(), CommonVF)); 7393 } else { 7394 assert(V1 && V2 && "Expected both vectors."); 7395 unsigned VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 7396 CommonVF = 7397 std::max(VF, cast<FixedVectorType>(V2->getType())->getNumElements()); 7398 assert(all_of(Mask, 7399 [=](int Idx) { 7400 return Idx < 2 * static_cast<int>(CommonVF); 7401 }) && 7402 "All elements in mask must be less than 2 * CommonVF."); 7403 if (V1->getType() != V2->getType()) { 7404 V1 = Constant::getNullValue(FixedVectorType::get( 7405 cast<FixedVectorType>(V1->getType())->getElementType(), CommonVF)); 7406 V2 = getAllOnesValue( 7407 *R.DL, FixedVectorType::get( 7408 cast<FixedVectorType>(V1->getType())->getElementType(), 7409 CommonVF)); 7410 } 7411 } 7412 InVectors.front() = Constant::getNullValue(FixedVectorType::get( 7413 cast<FixedVectorType>(V1->getType())->getElementType(), 7414 CommonMask.size())); 7415 if (InVectors.size() == 2) 7416 InVectors.pop_back(); 7417 return BaseShuffleAnalysis::createShuffle<InstructionCost>( 7418 V1, V2, CommonMask, Builder); 7419 } 7420 7421 public: 7422 ShuffleCostEstimator(TargetTransformInfo &TTI, 7423 ArrayRef<Value *> VectorizedVals, BoUpSLP &R, 7424 SmallPtrSetImpl<Value *> &CheckedExtracts) 7425 : TTI(TTI), VectorizedVals(VectorizedVals.begin(), VectorizedVals.end()), 7426 R(R), CheckedExtracts(CheckedExtracts) {} 7427 Value *adjustExtracts(const TreeEntry *E, MutableArrayRef<int> Mask, 7428 ArrayRef<std::optional<TTI::ShuffleKind>> ShuffleKinds, 7429 unsigned NumParts, bool &UseVecBaseAsInput) { 7430 UseVecBaseAsInput = false; 7431 if (Mask.empty()) 7432 return nullptr; 7433 Value *VecBase = nullptr; 7434 ArrayRef<Value *> VL = E->Scalars; 7435 // If the resulting type is scalarized, do not adjust the cost. 7436 if (NumParts == VL.size()) 7437 return nullptr; 7438 // Check if it can be considered reused if same extractelements were 7439 // vectorized already. 
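// PrevNodeFound below is set when an earlier tree entry already contains the
// same extracted scalars; in that case the per-register shuffle cost is not
// charged again (see the !PrevNodeFound guard before computeExtractCost).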
7440 bool PrevNodeFound = any_of( 7441 ArrayRef(R.VectorizableTree).take_front(E->Idx), 7442 [&](const std::unique_ptr<TreeEntry> &TE) { 7443 return ((!TE->isAltShuffle() && 7444 TE->getOpcode() == Instruction::ExtractElement) || 7445 TE->State == TreeEntry::NeedToGather) && 7446 all_of(enumerate(TE->Scalars), [&](auto &&Data) { 7447 return VL.size() > Data.index() && 7448 (Mask[Data.index()] == PoisonMaskElem || 7449 isa<UndefValue>(VL[Data.index()]) || 7450 Data.value() == VL[Data.index()]); 7451 }); 7452 }); 7453 SmallPtrSet<Value *, 4> UniqueBases; 7454 unsigned SliceSize = VL.size() / NumParts; 7455 for (unsigned Part = 0; Part < NumParts; ++Part) { 7456 ArrayRef<int> SubMask = Mask.slice(Part * SliceSize, SliceSize); 7457 for (auto [I, V] : enumerate(VL.slice(Part * SliceSize, SliceSize))) { 7458 // Ignore non-extractelement scalars. 7459 if (isa<UndefValue>(V) || 7460 (!SubMask.empty() && SubMask[I] == PoisonMaskElem)) 7461 continue; 7462 // If all users of instruction are going to be vectorized and this 7463 // instruction itself is not going to be vectorized, consider this 7464 // instruction as dead and remove its cost from the final cost of the 7465 // vectorized tree. 7466 // Also, avoid adjusting the cost for extractelements with multiple uses 7467 // in different graph entries. 7468 auto *EE = cast<ExtractElementInst>(V); 7469 VecBase = EE->getVectorOperand(); 7470 UniqueBases.insert(VecBase); 7471 const TreeEntry *VE = R.getTreeEntry(V); 7472 if (!CheckedExtracts.insert(V).second || 7473 !R.areAllUsersVectorized(cast<Instruction>(V), &VectorizedVals) || 7474 (VE && VE != E)) 7475 continue; 7476 std::optional<unsigned> EEIdx = getExtractIndex(EE); 7477 if (!EEIdx) 7478 continue; 7479 unsigned Idx = *EEIdx; 7480 // Take credit for instruction that will become dead. 7481 if (EE->hasOneUse() || !PrevNodeFound) { 7482 Instruction *Ext = EE->user_back(); 7483 if (isa<SExtInst, ZExtInst>(Ext) && all_of(Ext->users(), [](User *U) { 7484 return isa<GetElementPtrInst>(U); 7485 })) { 7486 // Use getExtractWithExtendCost() to calculate the cost of 7487 // extractelement/ext pair. 7488 Cost -= 7489 TTI.getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(), 7490 EE->getVectorOperandType(), Idx); 7491 // Add back the cost of s|zext which is subtracted separately. 7492 Cost += TTI.getCastInstrCost( 7493 Ext->getOpcode(), Ext->getType(), EE->getType(), 7494 TTI::getCastContextHint(Ext), CostKind, Ext); 7495 continue; 7496 } 7497 } 7498 Cost -= TTI.getVectorInstrCost(*EE, EE->getVectorOperandType(), 7499 CostKind, Idx); 7500 } 7501 } 7502 // Check that gather of extractelements can be represented as just a 7503 // shuffle of a single/two vectors the scalars are extracted from. 7504 // Found the bunch of extractelement instructions that must be gathered 7505 // into a vector and can be represented as a permutation elements in a 7506 // single input vector or of 2 input vectors. 7507 // Done for reused if same extractelements were vectorized already. 
7508 if (!PrevNodeFound) 7509 Cost += computeExtractCost(VL, Mask, ShuffleKinds, NumParts); 7510 InVectors.assign(1, E); 7511 CommonMask.assign(Mask.begin(), Mask.end()); 7512 transformMaskAfterShuffle(CommonMask, CommonMask); 7513 SameNodesEstimated = false; 7514 if (NumParts != 1 && UniqueBases.size() != 1) { 7515 UseVecBaseAsInput = true; 7516 VecBase = Constant::getNullValue( 7517 FixedVectorType::get(VL.front()->getType(), CommonMask.size())); 7518 } 7519 return VecBase; 7520 } 7521 /// Checks if the specified entry \p E needs to be delayed because of its 7522 /// dependency nodes. 7523 std::optional<InstructionCost> 7524 needToDelay(const TreeEntry *, 7525 ArrayRef<SmallVector<const TreeEntry *>>) const { 7526 // No need to delay the cost estimation during analysis. 7527 return std::nullopt; 7528 } 7529 void add(const TreeEntry &E1, const TreeEntry &E2, ArrayRef<int> Mask) { 7530 if (&E1 == &E2) { 7531 assert(all_of(Mask, 7532 [&](int Idx) { 7533 return Idx < static_cast<int>(E1.getVectorFactor()); 7534 }) && 7535 "Expected single vector shuffle mask."); 7536 add(E1, Mask); 7537 return; 7538 } 7539 if (InVectors.empty()) { 7540 CommonMask.assign(Mask.begin(), Mask.end()); 7541 InVectors.assign({&E1, &E2}); 7542 return; 7543 } 7544 assert(!CommonMask.empty() && "Expected non-empty common mask."); 7545 auto *MaskVecTy = 7546 FixedVectorType::get(E1.Scalars.front()->getType(), Mask.size()); 7547 unsigned NumParts = TTI.getNumberOfParts(MaskVecTy); 7548 if (NumParts == 0 || NumParts >= Mask.size()) 7549 NumParts = 1; 7550 unsigned SliceSize = Mask.size() / NumParts; 7551 const auto *It = 7552 find_if(Mask, [](int Idx) { return Idx != PoisonMaskElem; }); 7553 unsigned Part = std::distance(Mask.begin(), It) / SliceSize; 7554 estimateNodesPermuteCost(E1, &E2, Mask, Part, SliceSize); 7555 } 7556 void add(const TreeEntry &E1, ArrayRef<int> Mask) { 7557 if (InVectors.empty()) { 7558 CommonMask.assign(Mask.begin(), Mask.end()); 7559 InVectors.assign(1, &E1); 7560 return; 7561 } 7562 assert(!CommonMask.empty() && "Expected non-empty common mask."); 7563 auto *MaskVecTy = 7564 FixedVectorType::get(E1.Scalars.front()->getType(), Mask.size()); 7565 unsigned NumParts = TTI.getNumberOfParts(MaskVecTy); 7566 if (NumParts == 0 || NumParts >= Mask.size()) 7567 NumParts = 1; 7568 unsigned SliceSize = Mask.size() / NumParts; 7569 const auto *It = 7570 find_if(Mask, [](int Idx) { return Idx != PoisonMaskElem; }); 7571 unsigned Part = std::distance(Mask.begin(), It) / SliceSize; 7572 estimateNodesPermuteCost(E1, nullptr, Mask, Part, SliceSize); 7573 if (!SameNodesEstimated && InVectors.size() == 1) 7574 InVectors.emplace_back(&E1); 7575 } 7576 /// Adds 2 input vectors and the mask for their shuffling. 7577 void add(Value *V1, Value *V2, ArrayRef<int> Mask) { 7578 // May come only for shuffling of 2 vectors with extractelements, already 7579 // handled in adjustExtracts. 7580 assert(InVectors.size() == 1 && 7581 all_of(enumerate(CommonMask), 7582 [&](auto P) { 7583 if (P.value() == PoisonMaskElem) 7584 return Mask[P.index()] == PoisonMaskElem; 7585 auto *EI = 7586 cast<ExtractElementInst>(InVectors.front() 7587 .get<const TreeEntry *>() 7588 ->Scalars[P.index()]); 7589 return EI->getVectorOperand() == V1 || 7590 EI->getVectorOperand() == V2; 7591 }) && 7592 "Expected extractelement vectors."); 7593 } 7594 /// Adds another one input vector and the mask for the shuffling. 
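/// When \p ForExtracts is true the vector was already accounted for by
/// adjustExtracts, so only internal consistency is asserted here.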
7595 void add(Value *V1, ArrayRef<int> Mask, bool ForExtracts = false) { 7596 if (InVectors.empty()) { 7597 assert(CommonMask.empty() && !ForExtracts && 7598 "Expected empty input mask/vectors."); 7599 CommonMask.assign(Mask.begin(), Mask.end()); 7600 InVectors.assign(1, V1); 7601 return; 7602 } 7603 if (ForExtracts) { 7604 // No need to add vectors here, already handled them in adjustExtracts. 7605 assert(InVectors.size() == 1 && 7606 InVectors.front().is<const TreeEntry *>() && !CommonMask.empty() && 7607 all_of(enumerate(CommonMask), 7608 [&](auto P) { 7609 Value *Scalar = InVectors.front() 7610 .get<const TreeEntry *>() 7611 ->Scalars[P.index()]; 7612 if (P.value() == PoisonMaskElem) 7613 return P.value() == Mask[P.index()] || 7614 isa<UndefValue>(Scalar); 7615 if (isa<Constant>(V1)) 7616 return true; 7617 auto *EI = cast<ExtractElementInst>(Scalar); 7618 return EI->getVectorOperand() == V1; 7619 }) && 7620 "Expected only tree entry for extractelement vectors."); 7621 return; 7622 } 7623 assert(!InVectors.empty() && !CommonMask.empty() && 7624 "Expected only tree entries from extracts/reused buildvectors."); 7625 unsigned VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 7626 if (InVectors.size() == 2) { 7627 Cost += createShuffle(InVectors.front(), InVectors.back(), CommonMask); 7628 transformMaskAfterShuffle(CommonMask, CommonMask); 7629 VF = std::max<unsigned>(VF, CommonMask.size()); 7630 } else if (const auto *InTE = 7631 InVectors.front().dyn_cast<const TreeEntry *>()) { 7632 VF = std::max(VF, InTE->getVectorFactor()); 7633 } else { 7634 VF = std::max( 7635 VF, cast<FixedVectorType>(InVectors.front().get<Value *>()->getType()) 7636 ->getNumElements()); 7637 } 7638 InVectors.push_back(V1); 7639 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 7640 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem) 7641 CommonMask[Idx] = Mask[Idx] + VF; 7642 } 7643 Value *gather(ArrayRef<Value *> VL, unsigned MaskVF = 0, 7644 Value *Root = nullptr) { 7645 Cost += getBuildVectorCost(VL, Root); 7646 if (!Root) { 7647 // FIXME: Need to find a way to avoid use of getNullValue here. 7648 SmallVector<Constant *> Vals; 7649 unsigned VF = VL.size(); 7650 if (MaskVF != 0) 7651 VF = std::min(VF, MaskVF); 7652 for (Value *V : VL.take_front(VF)) { 7653 if (isa<UndefValue>(V)) { 7654 Vals.push_back(cast<Constant>(V)); 7655 continue; 7656 } 7657 Vals.push_back(Constant::getNullValue(V->getType())); 7658 } 7659 return ConstantVector::get(Vals); 7660 } 7661 return ConstantVector::getSplat( 7662 ElementCount::getFixed( 7663 cast<FixedVectorType>(Root->getType())->getNumElements()), 7664 getAllOnesValue(*R.DL, VL.front()->getType())); 7665 } 7666 InstructionCost createFreeze(InstructionCost Cost) { return Cost; } 7667 /// Finalize emission of the shuffles. 
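/// If \p Action is provided it is run on the current (placeholder) vector
/// first. Returns the total estimated cost, including a final shuffle against
/// \p ExtMask when the accumulated common mask is not empty.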
7668 InstructionCost 7669 finalize(ArrayRef<int> ExtMask, unsigned VF = 0, 7670 function_ref<void(Value *&, SmallVectorImpl<int> &)> Action = {}) { 7671 IsFinalized = true; 7672 if (Action) { 7673 const PointerUnion<Value *, const TreeEntry *> &Vec = InVectors.front(); 7674 if (InVectors.size() == 2) 7675 Cost += createShuffle(Vec, InVectors.back(), CommonMask); 7676 else 7677 Cost += createShuffle(Vec, nullptr, CommonMask); 7678 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 7679 if (CommonMask[Idx] != PoisonMaskElem) 7680 CommonMask[Idx] = Idx; 7681 assert(VF > 0 && 7682 "Expected vector length for the final value before action."); 7683 Value *V = Vec.get<Value *>(); 7684 Action(V, CommonMask); 7685 InVectors.front() = V; 7686 } 7687 ::addMask(CommonMask, ExtMask, /*ExtendingManyInputs=*/true); 7688 if (CommonMask.empty()) { 7689 assert(InVectors.size() == 1 && "Expected only one vector with no mask"); 7690 return Cost; 7691 } 7692 return Cost + 7693 createShuffle(InVectors.front(), 7694 InVectors.size() == 2 ? InVectors.back() : nullptr, 7695 CommonMask); 7696 } 7697 7698 ~ShuffleCostEstimator() { 7699 assert((IsFinalized || CommonMask.empty()) && 7700 "Shuffle construction must be finalized."); 7701 } 7702 }; 7703 7704 const BoUpSLP::TreeEntry *BoUpSLP::getOperandEntry(const TreeEntry *E, 7705 unsigned Idx) const { 7706 Value *Op = E->getOperand(Idx).front(); 7707 if (const TreeEntry *TE = getTreeEntry(Op)) { 7708 if (find_if(E->UserTreeIndices, [&](const EdgeInfo &EI) { 7709 return EI.EdgeIdx == Idx && EI.UserTE == E; 7710 }) != TE->UserTreeIndices.end()) 7711 return TE; 7712 auto MIt = MultiNodeScalars.find(Op); 7713 if (MIt != MultiNodeScalars.end()) { 7714 for (const TreeEntry *TE : MIt->second) { 7715 if (find_if(TE->UserTreeIndices, [&](const EdgeInfo &EI) { 7716 return EI.EdgeIdx == Idx && EI.UserTE == E; 7717 }) != TE->UserTreeIndices.end()) 7718 return TE; 7719 } 7720 } 7721 } 7722 const auto *It = 7723 find_if(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) { 7724 return TE->State == TreeEntry::NeedToGather && 7725 find_if(TE->UserTreeIndices, [&](const EdgeInfo &EI) { 7726 return EI.EdgeIdx == Idx && EI.UserTE == E; 7727 }) != TE->UserTreeIndices.end(); 7728 }); 7729 assert(It != VectorizableTree.end() && "Expected vectorizable entry."); 7730 return It->get(); 7731 } 7732 7733 InstructionCost 7734 BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals, 7735 SmallPtrSetImpl<Value *> &CheckedExtracts) { 7736 ArrayRef<Value *> VL = E->Scalars; 7737 7738 Type *ScalarTy = VL[0]->getType(); 7739 if (E->State != TreeEntry::NeedToGather) { 7740 if (auto *SI = dyn_cast<StoreInst>(VL[0])) 7741 ScalarTy = SI->getValueOperand()->getType(); 7742 else if (auto *CI = dyn_cast<CmpInst>(VL[0])) 7743 ScalarTy = CI->getOperand(0)->getType(); 7744 else if (auto *IE = dyn_cast<InsertElementInst>(VL[0])) 7745 ScalarTy = IE->getOperand(1)->getType(); 7746 } 7747 if (!FixedVectorType::isValidElementType(ScalarTy)) 7748 return InstructionCost::getInvalid(); 7749 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 7750 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7751 7752 // If we have computed a smaller type for the expression, update VecTy so 7753 // that the costs will be accurate. 
7754 auto It = MinBWs.find(E); 7755 if (It != MinBWs.end()) { 7756 ScalarTy = IntegerType::get(F->getContext(), It->second.first); 7757 VecTy = FixedVectorType::get(ScalarTy, VL.size()); 7758 } 7759 unsigned EntryVF = E->getVectorFactor(); 7760 auto *FinalVecTy = FixedVectorType::get(ScalarTy, EntryVF); 7761 7762 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); 7763 if (E->State == TreeEntry::NeedToGather) { 7764 if (allConstant(VL)) 7765 return 0; 7766 if (isa<InsertElementInst>(VL[0])) 7767 return InstructionCost::getInvalid(); 7768 return processBuildVector<ShuffleCostEstimator, InstructionCost>( 7769 E, *TTI, VectorizedVals, *this, CheckedExtracts); 7770 } 7771 InstructionCost CommonCost = 0; 7772 SmallVector<int> Mask; 7773 if (!E->ReorderIndices.empty() && 7774 E->State != TreeEntry::PossibleStridedVectorize) { 7775 SmallVector<int> NewMask; 7776 if (E->getOpcode() == Instruction::Store) { 7777 // For stores the order is actually a mask. 7778 NewMask.resize(E->ReorderIndices.size()); 7779 copy(E->ReorderIndices, NewMask.begin()); 7780 } else { 7781 inversePermutation(E->ReorderIndices, NewMask); 7782 } 7783 ::addMask(Mask, NewMask); 7784 } 7785 if (NeedToShuffleReuses) 7786 ::addMask(Mask, E->ReuseShuffleIndices); 7787 if (!Mask.empty() && !ShuffleVectorInst::isIdentityMask(Mask, Mask.size())) 7788 CommonCost = 7789 TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FinalVecTy, Mask); 7790 assert((E->State == TreeEntry::Vectorize || 7791 E->State == TreeEntry::ScatterVectorize || 7792 E->State == TreeEntry::PossibleStridedVectorize) && 7793 "Unhandled state"); 7794 assert(E->getOpcode() && 7795 ((allSameType(VL) && allSameBlock(VL)) || 7796 (E->getOpcode() == Instruction::GetElementPtr && 7797 E->getMainOp()->getType()->isPointerTy())) && 7798 "Invalid VL"); 7799 Instruction *VL0 = E->getMainOp(); 7800 unsigned ShuffleOrOp = 7801 E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode(); 7802 SetVector<Value *> UniqueValues(VL.begin(), VL.end()); 7803 const unsigned Sz = UniqueValues.size(); 7804 SmallBitVector UsedScalars(Sz, false); 7805 for (unsigned I = 0; I < Sz; ++I) { 7806 if (getTreeEntry(UniqueValues[I]) == E) 7807 continue; 7808 UsedScalars.set(I); 7809 } 7810 auto GetCastContextHint = [&](Value *V) { 7811 if (const TreeEntry *OpTE = getTreeEntry(V)) { 7812 if (OpTE->State == TreeEntry::ScatterVectorize) 7813 return TTI::CastContextHint::GatherScatter; 7814 if (OpTE->State == TreeEntry::Vectorize && 7815 OpTE->getOpcode() == Instruction::Load && !OpTE->isAltShuffle()) { 7816 if (OpTE->ReorderIndices.empty()) 7817 return TTI::CastContextHint::Normal; 7818 SmallVector<int> Mask; 7819 inversePermutation(OpTE->ReorderIndices, Mask); 7820 if (ShuffleVectorInst::isReverseMask(Mask, Mask.size())) 7821 return TTI::CastContextHint::Reversed; 7822 } 7823 } else { 7824 InstructionsState SrcState = getSameOpcode(E->getOperand(0), *TLI); 7825 if (SrcState.getOpcode() == Instruction::Load && !SrcState.isAltShuffle()) 7826 return TTI::CastContextHint::GatherScatter; 7827 } 7828 return TTI::CastContextHint::None; 7829 }; 7830 auto GetCostDiff = 7831 [=](function_ref<InstructionCost(unsigned)> ScalarEltCost, 7832 function_ref<InstructionCost(InstructionCost)> VectorCost) { 7833 // Calculate the cost of this instruction. 
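// GetCostDiff returns VecCost - ScalarCost, so a negative result means the
// vectorized form of this node is expected to be cheaper than the scalars it
// replaces.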
7834 InstructionCost ScalarCost = 0; 7835 if (isa<CastInst, CmpInst, SelectInst, CallInst>(VL0)) { 7836 // For some of the instructions no need to calculate cost for each 7837 // particular instruction, we can use the cost of the single 7838 // instruction x total number of scalar instructions. 7839 ScalarCost = (Sz - UsedScalars.count()) * ScalarEltCost(0); 7840 } else { 7841 for (unsigned I = 0; I < Sz; ++I) { 7842 if (UsedScalars.test(I)) 7843 continue; 7844 ScalarCost += ScalarEltCost(I); 7845 } 7846 } 7847 7848 InstructionCost VecCost = VectorCost(CommonCost); 7849 // Check if the current node must be resized, if the parent node is not 7850 // resized. 7851 if (!UnaryInstruction::isCast(E->getOpcode()) && E->Idx != 0) { 7852 const EdgeInfo &EI = E->UserTreeIndices.front(); 7853 if ((EI.UserTE->getOpcode() != Instruction::Select || 7854 EI.EdgeIdx != 0) && 7855 It != MinBWs.end()) { 7856 auto UserBWIt = MinBWs.find(EI.UserTE); 7857 Type *UserScalarTy = 7858 EI.UserTE->getOperand(EI.EdgeIdx).front()->getType(); 7859 if (UserBWIt != MinBWs.end()) 7860 UserScalarTy = IntegerType::get(ScalarTy->getContext(), 7861 UserBWIt->second.first); 7862 if (ScalarTy != UserScalarTy) { 7863 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy); 7864 unsigned SrcBWSz = DL->getTypeSizeInBits(UserScalarTy); 7865 unsigned VecOpcode; 7866 auto *SrcVecTy = 7867 FixedVectorType::get(UserScalarTy, E->getVectorFactor()); 7868 if (BWSz > SrcBWSz) 7869 VecOpcode = Instruction::Trunc; 7870 else 7871 VecOpcode = 7872 It->second.second ? Instruction::SExt : Instruction::ZExt; 7873 TTI::CastContextHint CCH = GetCastContextHint(VL0); 7874 VecCost += TTI->getCastInstrCost(VecOpcode, VecTy, SrcVecTy, CCH, 7875 CostKind); 7876 ScalarCost += 7877 Sz * TTI->getCastInstrCost(VecOpcode, ScalarTy, UserScalarTy, 7878 CCH, CostKind); 7879 } 7880 } 7881 } 7882 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost - CommonCost, 7883 ScalarCost, "Calculated costs for Tree")); 7884 return VecCost - ScalarCost; 7885 }; 7886 // Calculate cost difference from vectorizing set of GEPs. 7887 // Negative value means vectorizing is profitable. 7888 auto GetGEPCostDiff = [=](ArrayRef<Value *> Ptrs, Value *BasePtr) { 7889 InstructionCost ScalarCost = 0; 7890 InstructionCost VecCost = 0; 7891 // Here we differentiate two cases: (1) when Ptrs represent a regular 7892 // vectorization tree node (as they are pointer arguments of scattered 7893 // loads) or (2) when Ptrs are the arguments of loads or stores being 7894 // vectorized as plane wide unit-stride load/store since all the 7895 // loads/stores are known to be from/to adjacent locations. 7896 assert(E->State == TreeEntry::Vectorize && 7897 "Entry state expected to be Vectorize here."); 7898 if (isa<LoadInst, StoreInst>(VL0)) { 7899 // Case 2: estimate costs for pointer related costs when vectorizing to 7900 // a wide load/store. 7901 // Scalar cost is estimated as a set of pointers with known relationship 7902 // between them. 7903 // For vector code we will use BasePtr as argument for the wide load/store 7904 // but we also need to account all the instructions which are going to 7905 // stay in vectorized code due to uses outside of these scalar 7906 // loads/stores. 
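// Scalar side: a unit-stride chain of pointers. Vector side: only the
// pointers collected into PtrsRetainedInVecCode below (the base plus anything
// that is not a single-use GEP) are charged, as the remaining GEPs are
// expected to disappear once the wide load/store is formed.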
7907 ScalarCost = TTI->getPointersChainCost( 7908 Ptrs, BasePtr, TTI::PointersChainInfo::getUnitStride(), ScalarTy, 7909 CostKind); 7910 7911 SmallVector<const Value *> PtrsRetainedInVecCode; 7912 for (Value *V : Ptrs) { 7913 if (V == BasePtr) { 7914 PtrsRetainedInVecCode.push_back(V); 7915 continue; 7916 } 7917 auto *Ptr = dyn_cast<GetElementPtrInst>(V); 7918 // For simplicity assume Ptr to stay in vectorized code if it's not a 7919 // GEP instruction. We don't care since it's cost considered free. 7920 // TODO: We should check for any uses outside of vectorizable tree 7921 // rather than just single use. 7922 if (!Ptr || !Ptr->hasOneUse()) 7923 PtrsRetainedInVecCode.push_back(V); 7924 } 7925 7926 if (PtrsRetainedInVecCode.size() == Ptrs.size()) { 7927 // If all pointers stay in vectorized code then we don't have 7928 // any savings on that. 7929 LLVM_DEBUG(dumpTreeCosts(E, 0, ScalarCost, ScalarCost, 7930 "Calculated GEPs cost for Tree")); 7931 return InstructionCost{TTI::TCC_Free}; 7932 } 7933 VecCost = TTI->getPointersChainCost( 7934 PtrsRetainedInVecCode, BasePtr, 7935 TTI::PointersChainInfo::getKnownStride(), VecTy, CostKind); 7936 } else { 7937 // Case 1: Ptrs are the arguments of loads that we are going to transform 7938 // into masked gather load intrinsic. 7939 // All the scalar GEPs will be removed as a result of vectorization. 7940 // For any external uses of some lanes extract element instructions will 7941 // be generated (which cost is estimated separately). 7942 TTI::PointersChainInfo PtrsInfo = 7943 all_of(Ptrs, 7944 [](const Value *V) { 7945 auto *Ptr = dyn_cast<GetElementPtrInst>(V); 7946 return Ptr && !Ptr->hasAllConstantIndices(); 7947 }) 7948 ? TTI::PointersChainInfo::getUnknownStride() 7949 : TTI::PointersChainInfo::getKnownStride(); 7950 7951 ScalarCost = TTI->getPointersChainCost(Ptrs, BasePtr, PtrsInfo, ScalarTy, 7952 CostKind); 7953 if (auto *BaseGEP = dyn_cast<GEPOperator>(BasePtr)) { 7954 SmallVector<const Value *> Indices(BaseGEP->indices()); 7955 VecCost = TTI->getGEPCost(BaseGEP->getSourceElementType(), 7956 BaseGEP->getPointerOperand(), Indices, VecTy, 7957 CostKind); 7958 } 7959 } 7960 7961 LLVM_DEBUG(dumpTreeCosts(E, 0, VecCost, ScalarCost, 7962 "Calculated GEPs cost for Tree")); 7963 7964 return VecCost - ScalarCost; 7965 }; 7966 7967 switch (ShuffleOrOp) { 7968 case Instruction::PHI: { 7969 // Count reused scalars. 
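// A vectorized PHI adds nothing beyond CommonCost; the scalar saving modelled
// here is TTI::TCC_Basic for every extra lane that an operand entry re-uses
// via its ReuseShuffleIndices.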
7970 InstructionCost ScalarCost = 0; 7971 SmallPtrSet<const TreeEntry *, 4> CountedOps; 7972 for (Value *V : UniqueValues) { 7973 auto *PHI = dyn_cast<PHINode>(V); 7974 if (!PHI) 7975 continue; 7976 7977 ValueList Operands(PHI->getNumIncomingValues(), nullptr); 7978 for (unsigned I = 0, N = PHI->getNumIncomingValues(); I < N; ++I) { 7979 Value *Op = PHI->getIncomingValue(I); 7980 Operands[I] = Op; 7981 } 7982 if (const TreeEntry *OpTE = getTreeEntry(Operands.front())) 7983 if (OpTE->isSame(Operands) && CountedOps.insert(OpTE).second) 7984 if (!OpTE->ReuseShuffleIndices.empty()) 7985 ScalarCost += TTI::TCC_Basic * (OpTE->ReuseShuffleIndices.size() - 7986 OpTE->Scalars.size()); 7987 } 7988 7989 return CommonCost - ScalarCost; 7990 } 7991 case Instruction::ExtractValue: 7992 case Instruction::ExtractElement: { 7993 auto GetScalarCost = [&](unsigned Idx) { 7994 auto *I = cast<Instruction>(UniqueValues[Idx]); 7995 VectorType *SrcVecTy; 7996 if (ShuffleOrOp == Instruction::ExtractElement) { 7997 auto *EE = cast<ExtractElementInst>(I); 7998 SrcVecTy = EE->getVectorOperandType(); 7999 } else { 8000 auto *EV = cast<ExtractValueInst>(I); 8001 Type *AggregateTy = EV->getAggregateOperand()->getType(); 8002 unsigned NumElts; 8003 if (auto *ATy = dyn_cast<ArrayType>(AggregateTy)) 8004 NumElts = ATy->getNumElements(); 8005 else 8006 NumElts = AggregateTy->getStructNumElements(); 8007 SrcVecTy = FixedVectorType::get(ScalarTy, NumElts); 8008 } 8009 if (I->hasOneUse()) { 8010 Instruction *Ext = I->user_back(); 8011 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 8012 all_of(Ext->users(), 8013 [](User *U) { return isa<GetElementPtrInst>(U); })) { 8014 // Use getExtractWithExtendCost() to calculate the cost of 8015 // extractelement/ext pair. 8016 InstructionCost Cost = TTI->getExtractWithExtendCost( 8017 Ext->getOpcode(), Ext->getType(), SrcVecTy, *getExtractIndex(I)); 8018 // Subtract the cost of s|zext which is subtracted separately. 
8019 Cost -= TTI->getCastInstrCost( 8020 Ext->getOpcode(), Ext->getType(), I->getType(), 8021 TTI::getCastContextHint(Ext), CostKind, Ext); 8022 return Cost; 8023 } 8024 } 8025 return TTI->getVectorInstrCost(Instruction::ExtractElement, SrcVecTy, 8026 CostKind, *getExtractIndex(I)); 8027 }; 8028 auto GetVectorCost = [](InstructionCost CommonCost) { return CommonCost; }; 8029 return GetCostDiff(GetScalarCost, GetVectorCost); 8030 } 8031 case Instruction::InsertElement: { 8032 assert(E->ReuseShuffleIndices.empty() && 8033 "Unique insertelements only are expected."); 8034 auto *SrcVecTy = cast<FixedVectorType>(VL0->getType()); 8035 unsigned const NumElts = SrcVecTy->getNumElements(); 8036 unsigned const NumScalars = VL.size(); 8037 8038 unsigned NumOfParts = TTI->getNumberOfParts(SrcVecTy); 8039 8040 SmallVector<int> InsertMask(NumElts, PoisonMaskElem); 8041 unsigned OffsetBeg = *getInsertIndex(VL.front()); 8042 unsigned OffsetEnd = OffsetBeg; 8043 InsertMask[OffsetBeg] = 0; 8044 for (auto [I, V] : enumerate(VL.drop_front())) { 8045 unsigned Idx = *getInsertIndex(V); 8046 if (OffsetBeg > Idx) 8047 OffsetBeg = Idx; 8048 else if (OffsetEnd < Idx) 8049 OffsetEnd = Idx; 8050 InsertMask[Idx] = I + 1; 8051 } 8052 unsigned VecScalarsSz = PowerOf2Ceil(NumElts); 8053 if (NumOfParts > 0) 8054 VecScalarsSz = PowerOf2Ceil((NumElts + NumOfParts - 1) / NumOfParts); 8055 unsigned VecSz = (1 + OffsetEnd / VecScalarsSz - OffsetBeg / VecScalarsSz) * 8056 VecScalarsSz; 8057 unsigned Offset = VecScalarsSz * (OffsetBeg / VecScalarsSz); 8058 unsigned InsertVecSz = std::min<unsigned>( 8059 PowerOf2Ceil(OffsetEnd - OffsetBeg + 1), 8060 ((OffsetEnd - OffsetBeg + VecScalarsSz) / VecScalarsSz) * VecScalarsSz); 8061 bool IsWholeSubvector = 8062 OffsetBeg == Offset && ((OffsetEnd + 1) % VecScalarsSz == 0); 8063 // Check if we can safely insert a subvector. If it is not possible, just 8064 // generate a whole-sized vector and shuffle the source vector and the new 8065 // subvector. 8066 if (OffsetBeg + InsertVecSz > VecSz) { 8067 // Align OffsetBeg to generate correct mask. 8068 OffsetBeg = alignDown(OffsetBeg, VecSz, Offset); 8069 InsertVecSz = VecSz; 8070 } 8071 8072 APInt DemandedElts = APInt::getZero(NumElts); 8073 // TODO: Add support for Instruction::InsertValue. 8074 SmallVector<int> Mask; 8075 if (!E->ReorderIndices.empty()) { 8076 inversePermutation(E->ReorderIndices, Mask); 8077 Mask.append(InsertVecSz - Mask.size(), PoisonMaskElem); 8078 } else { 8079 Mask.assign(VecSz, PoisonMaskElem); 8080 std::iota(Mask.begin(), std::next(Mask.begin(), InsertVecSz), 0); 8081 } 8082 bool IsIdentity = true; 8083 SmallVector<int> PrevMask(InsertVecSz, PoisonMaskElem); 8084 Mask.swap(PrevMask); 8085 for (unsigned I = 0; I < NumScalars; ++I) { 8086 unsigned InsertIdx = *getInsertIndex(VL[PrevMask[I]]); 8087 DemandedElts.setBit(InsertIdx); 8088 IsIdentity &= InsertIdx - OffsetBeg == I; 8089 Mask[InsertIdx - OffsetBeg] = I; 8090 } 8091 assert(Offset < NumElts && "Failed to find vector index offset"); 8092 8093 InstructionCost Cost = 0; 8094 Cost -= TTI->getScalarizationOverhead(SrcVecTy, DemandedElts, 8095 /*Insert*/ true, /*Extract*/ false, 8096 CostKind); 8097 8098 // First cost - resize to actual vector size if not identity shuffle or 8099 // need to shift the vector. 8100 // Do not calculate the cost if the actual size is the register size and 8101 // we can merge this shuffle with the following SK_Select. 
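// InsertVecSz/VecSz computed above describe, roughly, the smallest
// register-sized window covering all insert positions; a single-source
// permute is charged below only when the scalars are not already inserted in
// order (IsIdentity is false).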
8102 auto *InsertVecTy = FixedVectorType::get(ScalarTy, InsertVecSz); 8103 if (!IsIdentity) 8104 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, 8105 InsertVecTy, Mask); 8106 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) { 8107 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0)); 8108 })); 8109 // Second cost - permutation with subvector, if some elements are from the 8110 // initial vector or inserting a subvector. 8111 // TODO: Implement the analysis of the FirstInsert->getOperand(0) 8112 // subvector of ActualVecTy. 8113 SmallBitVector InMask = 8114 isUndefVector(FirstInsert->getOperand(0), 8115 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask)); 8116 if (!InMask.all() && NumScalars != NumElts && !IsWholeSubvector) { 8117 if (InsertVecSz != VecSz) { 8118 auto *ActualVecTy = FixedVectorType::get(ScalarTy, VecSz); 8119 Cost += TTI->getShuffleCost(TTI::SK_InsertSubvector, ActualVecTy, 8120 std::nullopt, CostKind, OffsetBeg - Offset, 8121 InsertVecTy); 8122 } else { 8123 for (unsigned I = 0, End = OffsetBeg - Offset; I < End; ++I) 8124 Mask[I] = InMask.test(I) ? PoisonMaskElem : I; 8125 for (unsigned I = OffsetBeg - Offset, End = OffsetEnd - Offset; 8126 I <= End; ++I) 8127 if (Mask[I] != PoisonMaskElem) 8128 Mask[I] = I + VecSz; 8129 for (unsigned I = OffsetEnd + 1 - Offset; I < VecSz; ++I) 8130 Mask[I] = 8131 ((I >= InMask.size()) || InMask.test(I)) ? PoisonMaskElem : I; 8132 Cost += TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, InsertVecTy, Mask); 8133 } 8134 } 8135 return Cost; 8136 } 8137 case Instruction::ZExt: 8138 case Instruction::SExt: 8139 case Instruction::FPToUI: 8140 case Instruction::FPToSI: 8141 case Instruction::FPExt: 8142 case Instruction::PtrToInt: 8143 case Instruction::IntToPtr: 8144 case Instruction::SIToFP: 8145 case Instruction::UIToFP: 8146 case Instruction::Trunc: 8147 case Instruction::FPTrunc: 8148 case Instruction::BitCast: { 8149 auto SrcIt = MinBWs.find(getOperandEntry(E, 0)); 8150 Type *SrcScalarTy = VL0->getOperand(0)->getType(); 8151 auto *SrcVecTy = FixedVectorType::get(SrcScalarTy, VL.size()); 8152 unsigned Opcode = ShuffleOrOp; 8153 unsigned VecOpcode = Opcode; 8154 if (!ScalarTy->isFloatingPointTy() && !SrcScalarTy->isFloatingPointTy() && 8155 (SrcIt != MinBWs.end() || It != MinBWs.end())) { 8156 // Check if the values are candidates to demote. 8157 unsigned SrcBWSz = DL->getTypeSizeInBits(SrcScalarTy); 8158 if (SrcIt != MinBWs.end()) { 8159 SrcBWSz = SrcIt->second.first; 8160 SrcScalarTy = IntegerType::get(F->getContext(), SrcBWSz); 8161 SrcVecTy = FixedVectorType::get(SrcScalarTy, VL.size()); 8162 } 8163 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy); 8164 if (BWSz == SrcBWSz) { 8165 VecOpcode = Instruction::BitCast; 8166 } else if (BWSz < SrcBWSz) { 8167 VecOpcode = Instruction::Trunc; 8168 } else if (It != MinBWs.end()) { 8169 assert(BWSz > SrcBWSz && "Invalid cast!"); 8170 VecOpcode = It->second.second ? Instruction::SExt : Instruction::ZExt; 8171 } 8172 } 8173 auto GetScalarCost = [&](unsigned Idx) -> InstructionCost { 8174 // Do not count cost here if minimum bitwidth is in effect and it is just 8175 // a bitcast (here it is just a noop). 8176 if (VecOpcode != Opcode && VecOpcode == Instruction::BitCast) 8177 return TTI::TCC_Free; 8178 auto *VI = VL0->getOpcode() == Opcode 8179 ? 
cast<Instruction>(UniqueValues[Idx]) 8180 : nullptr; 8181 return TTI->getCastInstrCost(Opcode, VL0->getType(), 8182 VL0->getOperand(0)->getType(), 8183 TTI::getCastContextHint(VI), CostKind, VI); 8184 }; 8185 auto GetVectorCost = [=](InstructionCost CommonCost) { 8186 // Do not count cost here if minimum bitwidth is in effect and it is just 8187 // a bitcast (here it is just a noop). 8188 if (VecOpcode != Opcode && VecOpcode == Instruction::BitCast) 8189 return CommonCost; 8190 auto *VI = VL0->getOpcode() == Opcode ? VL0 : nullptr; 8191 TTI::CastContextHint CCH = GetCastContextHint(VL0->getOperand(0)); 8192 return CommonCost + 8193 TTI->getCastInstrCost(VecOpcode, VecTy, SrcVecTy, CCH, CostKind, 8194 VecOpcode == Opcode ? VI : nullptr); 8195 }; 8196 return GetCostDiff(GetScalarCost, GetVectorCost); 8197 } 8198 case Instruction::FCmp: 8199 case Instruction::ICmp: 8200 case Instruction::Select: { 8201 CmpInst::Predicate VecPred, SwappedVecPred; 8202 auto MatchCmp = m_Cmp(VecPred, m_Value(), m_Value()); 8203 if (match(VL0, m_Select(MatchCmp, m_Value(), m_Value())) || 8204 match(VL0, MatchCmp)) 8205 SwappedVecPred = CmpInst::getSwappedPredicate(VecPred); 8206 else 8207 SwappedVecPred = VecPred = ScalarTy->isFloatingPointTy() 8208 ? CmpInst::BAD_FCMP_PREDICATE 8209 : CmpInst::BAD_ICMP_PREDICATE; 8210 auto GetScalarCost = [&](unsigned Idx) { 8211 auto *VI = cast<Instruction>(UniqueValues[Idx]); 8212 CmpInst::Predicate CurrentPred = ScalarTy->isFloatingPointTy() 8213 ? CmpInst::BAD_FCMP_PREDICATE 8214 : CmpInst::BAD_ICMP_PREDICATE; 8215 auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value()); 8216 if ((!match(VI, m_Select(MatchCmp, m_Value(), m_Value())) && 8217 !match(VI, MatchCmp)) || 8218 (CurrentPred != VecPred && CurrentPred != SwappedVecPred)) 8219 VecPred = SwappedVecPred = ScalarTy->isFloatingPointTy() 8220 ? CmpInst::BAD_FCMP_PREDICATE 8221 : CmpInst::BAD_ICMP_PREDICATE; 8222 8223 return TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy, 8224 Builder.getInt1Ty(), CurrentPred, CostKind, 8225 VI); 8226 }; 8227 auto GetVectorCost = [&](InstructionCost CommonCost) { 8228 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size()); 8229 8230 InstructionCost VecCost = TTI->getCmpSelInstrCost( 8231 E->getOpcode(), VecTy, MaskTy, VecPred, CostKind, VL0); 8232 // Check if it is possible and profitable to use min/max for selects 8233 // in VL. 8234 // 8235 auto IntrinsicAndUse = canConvertToMinOrMaxIntrinsic(VL); 8236 if (IntrinsicAndUse.first != Intrinsic::not_intrinsic) { 8237 IntrinsicCostAttributes CostAttrs(IntrinsicAndUse.first, VecTy, 8238 {VecTy, VecTy}); 8239 InstructionCost IntrinsicCost = 8240 TTI->getIntrinsicInstrCost(CostAttrs, CostKind); 8241 // If the selects are the only uses of the compares, they will be 8242 // dead and we can adjust the cost by removing their cost. 
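// IntrinsicAndUse.second reports that situation: when it is set, the vector
// compare cost is deducted below from the min/max intrinsic estimate.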
8243 if (IntrinsicAndUse.second) 8244 IntrinsicCost -= TTI->getCmpSelInstrCost(Instruction::ICmp, VecTy, 8245 MaskTy, VecPred, CostKind); 8246 VecCost = std::min(VecCost, IntrinsicCost); 8247 } 8248 return VecCost + CommonCost; 8249 }; 8250 return GetCostDiff(GetScalarCost, GetVectorCost); 8251 } 8252 case Instruction::FNeg: 8253 case Instruction::Add: 8254 case Instruction::FAdd: 8255 case Instruction::Sub: 8256 case Instruction::FSub: 8257 case Instruction::Mul: 8258 case Instruction::FMul: 8259 case Instruction::UDiv: 8260 case Instruction::SDiv: 8261 case Instruction::FDiv: 8262 case Instruction::URem: 8263 case Instruction::SRem: 8264 case Instruction::FRem: 8265 case Instruction::Shl: 8266 case Instruction::LShr: 8267 case Instruction::AShr: 8268 case Instruction::And: 8269 case Instruction::Or: 8270 case Instruction::Xor: { 8271 auto GetScalarCost = [&](unsigned Idx) { 8272 auto *VI = cast<Instruction>(UniqueValues[Idx]); 8273 unsigned OpIdx = isa<UnaryOperator>(VI) ? 0 : 1; 8274 TTI::OperandValueInfo Op1Info = TTI::getOperandInfo(VI->getOperand(0)); 8275 TTI::OperandValueInfo Op2Info = 8276 TTI::getOperandInfo(VI->getOperand(OpIdx)); 8277 SmallVector<const Value *> Operands(VI->operand_values()); 8278 return TTI->getArithmeticInstrCost(ShuffleOrOp, ScalarTy, CostKind, 8279 Op1Info, Op2Info, Operands, VI); 8280 }; 8281 auto GetVectorCost = [=](InstructionCost CommonCost) { 8282 unsigned OpIdx = isa<UnaryOperator>(VL0) ? 0 : 1; 8283 TTI::OperandValueInfo Op1Info = getOperandInfo(E->getOperand(0)); 8284 TTI::OperandValueInfo Op2Info = getOperandInfo(E->getOperand(OpIdx)); 8285 return TTI->getArithmeticInstrCost(ShuffleOrOp, VecTy, CostKind, Op1Info, 8286 Op2Info) + 8287 CommonCost; 8288 }; 8289 return GetCostDiff(GetScalarCost, GetVectorCost); 8290 } 8291 case Instruction::GetElementPtr: { 8292 return CommonCost + GetGEPCostDiff(VL, VL0); 8293 } 8294 case Instruction::Load: { 8295 auto GetScalarCost = [&](unsigned Idx) { 8296 auto *VI = cast<LoadInst>(UniqueValues[Idx]); 8297 return TTI->getMemoryOpCost(Instruction::Load, ScalarTy, VI->getAlign(), 8298 VI->getPointerAddressSpace(), CostKind, 8299 TTI::OperandValueInfo(), VI); 8300 }; 8301 auto *LI0 = cast<LoadInst>(VL0); 8302 auto GetVectorCost = [&](InstructionCost CommonCost) { 8303 InstructionCost VecLdCost; 8304 if (E->State == TreeEntry::Vectorize) { 8305 VecLdCost = TTI->getMemoryOpCost( 8306 Instruction::Load, VecTy, LI0->getAlign(), 8307 LI0->getPointerAddressSpace(), CostKind, TTI::OperandValueInfo()); 8308 } else { 8309 assert((E->State == TreeEntry::ScatterVectorize || 8310 E->State == TreeEntry::PossibleStridedVectorize) && 8311 "Unknown EntryState"); 8312 Align CommonAlignment = LI0->getAlign(); 8313 for (Value *V : UniqueValues) 8314 CommonAlignment = 8315 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign()); 8316 VecLdCost = TTI->getGatherScatterOpCost( 8317 Instruction::Load, VecTy, LI0->getPointerOperand(), 8318 /*VariableMask=*/false, CommonAlignment, CostKind); 8319 } 8320 return VecLdCost + CommonCost; 8321 }; 8322 8323 InstructionCost Cost = GetCostDiff(GetScalarCost, GetVectorCost); 8324 // If this node generates masked gather load then it is not a terminal node. 8325 // Hence address operand cost is estimated separately. 8326 if (E->State == TreeEntry::ScatterVectorize || 8327 E->State == TreeEntry::PossibleStridedVectorize) 8328 return Cost; 8329 8330 // Estimate cost of GEPs since this tree node is a terminator. 
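// For a vectorized consecutive load the per-lane scalar address computations
// are usually no longer needed; GetGEPCostDiff compares the scalar
// pointer-chain cost against keeping only the base pointer for the wide load.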
8331 SmallVector<Value *> PointerOps(VL.size()); 8332 for (auto [I, V] : enumerate(VL)) 8333 PointerOps[I] = cast<LoadInst>(V)->getPointerOperand(); 8334 return Cost + GetGEPCostDiff(PointerOps, LI0->getPointerOperand()); 8335 } 8336 case Instruction::Store: { 8337 bool IsReorder = !E->ReorderIndices.empty(); 8338 auto GetScalarCost = [=](unsigned Idx) { 8339 auto *VI = cast<StoreInst>(VL[Idx]); 8340 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(VI->getValueOperand()); 8341 return TTI->getMemoryOpCost(Instruction::Store, ScalarTy, VI->getAlign(), 8342 VI->getPointerAddressSpace(), CostKind, 8343 OpInfo, VI); 8344 }; 8345 auto *BaseSI = 8346 cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0); 8347 auto GetVectorCost = [=](InstructionCost CommonCost) { 8348 // We know that we can merge the stores. Calculate the cost. 8349 TTI::OperandValueInfo OpInfo = getOperandInfo(E->getOperand(0)); 8350 return TTI->getMemoryOpCost(Instruction::Store, VecTy, BaseSI->getAlign(), 8351 BaseSI->getPointerAddressSpace(), CostKind, 8352 OpInfo) + 8353 CommonCost; 8354 }; 8355 SmallVector<Value *> PointerOps(VL.size()); 8356 for (auto [I, V] : enumerate(VL)) { 8357 unsigned Idx = IsReorder ? E->ReorderIndices[I] : I; 8358 PointerOps[Idx] = cast<StoreInst>(V)->getPointerOperand(); 8359 } 8360 8361 return GetCostDiff(GetScalarCost, GetVectorCost) + 8362 GetGEPCostDiff(PointerOps, BaseSI->getPointerOperand()); 8363 } 8364 case Instruction::Call: { 8365 auto GetScalarCost = [&](unsigned Idx) { 8366 auto *CI = cast<CallInst>(UniqueValues[Idx]); 8367 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8368 if (ID != Intrinsic::not_intrinsic) { 8369 IntrinsicCostAttributes CostAttrs(ID, *CI, 1); 8370 return TTI->getIntrinsicInstrCost(CostAttrs, CostKind); 8371 } 8372 return TTI->getCallInstrCost(CI->getCalledFunction(), 8373 CI->getFunctionType()->getReturnType(), 8374 CI->getFunctionType()->params(), CostKind); 8375 }; 8376 auto GetVectorCost = [=](InstructionCost CommonCost) { 8377 auto *CI = cast<CallInst>(VL0); 8378 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); 8379 return std::min(VecCallCosts.first, VecCallCosts.second) + CommonCost; 8380 }; 8381 return GetCostDiff(GetScalarCost, GetVectorCost); 8382 } 8383 case Instruction::ShuffleVector: { 8384 assert(E->isAltShuffle() && 8385 ((Instruction::isBinaryOp(E->getOpcode()) && 8386 Instruction::isBinaryOp(E->getAltOpcode())) || 8387 (Instruction::isCast(E->getOpcode()) && 8388 Instruction::isCast(E->getAltOpcode())) || 8389 (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) && 8390 "Invalid Shuffle Vector Operand"); 8391 // Try to find the previous shuffle node with the same operands and same 8392 // main/alternate ops. 8393 auto TryFindNodeWithEqualOperands = [=]() { 8394 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) { 8395 if (TE.get() == E) 8396 break; 8397 if (TE->isAltShuffle() && 8398 ((TE->getOpcode() == E->getOpcode() && 8399 TE->getAltOpcode() == E->getAltOpcode()) || 8400 (TE->getOpcode() == E->getAltOpcode() && 8401 TE->getAltOpcode() == E->getOpcode())) && 8402 TE->hasEqualOperands(*E)) 8403 return true; 8404 } 8405 return false; 8406 }; 8407 auto GetScalarCost = [&](unsigned Idx) { 8408 auto *VI = cast<Instruction>(UniqueValues[Idx]); 8409 assert(E->isOpcodeOrAlt(VI) && "Unexpected main/alternate opcode"); 8410 (void)E; 8411 return TTI->getInstructionCost(VI, CostKind); 8412 }; 8413 // Need to clear CommonCost since the final shuffle cost is included into 8414 // vector cost. 
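// The vector cost below is the sum of the two alternate vector operations
// (skipped when an equivalent "diamond" node already exists) plus the
// two-source shuffle that blends their lanes.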
8415 auto GetVectorCost = [&](InstructionCost) { 8416 // VecCost is equal to sum of the cost of creating 2 vectors 8417 // and the cost of creating shuffle. 8418 InstructionCost VecCost = 0; 8419 if (TryFindNodeWithEqualOperands()) { 8420 LLVM_DEBUG({ 8421 dbgs() << "SLP: diamond match for alternate node found.\n"; 8422 E->dump(); 8423 }); 8424 // No need to add new vector costs here since we're going to reuse 8425 // same main/alternate vector ops, just do different shuffling. 8426 } else if (Instruction::isBinaryOp(E->getOpcode())) { 8427 VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind); 8428 VecCost += 8429 TTI->getArithmeticInstrCost(E->getAltOpcode(), VecTy, CostKind); 8430 } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) { 8431 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size()); 8432 VecCost = TTI->getCmpSelInstrCost(E->getOpcode(), VecTy, MaskTy, 8433 CI0->getPredicate(), CostKind, VL0); 8434 VecCost += TTI->getCmpSelInstrCost( 8435 E->getOpcode(), VecTy, MaskTy, 8436 cast<CmpInst>(E->getAltOp())->getPredicate(), CostKind, 8437 E->getAltOp()); 8438 } else { 8439 Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType(); 8440 Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType(); 8441 auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size()); 8442 auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size()); 8443 VecCost = TTI->getCastInstrCost(E->getOpcode(), VecTy, Src0Ty, 8444 TTI::CastContextHint::None, CostKind); 8445 VecCost += TTI->getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty, 8446 TTI::CastContextHint::None, CostKind); 8447 } 8448 SmallVector<int> Mask; 8449 E->buildAltOpShuffleMask( 8450 [E](Instruction *I) { 8451 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 8452 return I->getOpcode() == E->getAltOpcode(); 8453 }, 8454 Mask); 8455 VecCost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, 8456 FinalVecTy, Mask); 8457 return VecCost; 8458 }; 8459 return GetCostDiff(GetScalarCost, GetVectorCost); 8460 } 8461 default: 8462 llvm_unreachable("Unknown instruction"); 8463 } 8464 } 8465 8466 bool BoUpSLP::isFullyVectorizableTinyTree(bool ForReduction) const { 8467 LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height " 8468 << VectorizableTree.size() << " is fully vectorizable .\n"); 8469 8470 auto &&AreVectorizableGathers = [this](const TreeEntry *TE, unsigned Limit) { 8471 SmallVector<int> Mask; 8472 return TE->State == TreeEntry::NeedToGather && 8473 !any_of(TE->Scalars, 8474 [this](Value *V) { return EphValues.contains(V); }) && 8475 (allConstant(TE->Scalars) || isSplat(TE->Scalars) || 8476 TE->Scalars.size() < Limit || 8477 ((TE->getOpcode() == Instruction::ExtractElement || 8478 all_of(TE->Scalars, 8479 [](Value *V) { 8480 return isa<ExtractElementInst, UndefValue>(V); 8481 })) && 8482 isFixedVectorShuffle(TE->Scalars, Mask)) || 8483 (TE->State == TreeEntry::NeedToGather && 8484 TE->getOpcode() == Instruction::Load && !TE->isAltShuffle())); 8485 }; 8486 8487 // We only handle trees of heights 1 and 2. 8488 if (VectorizableTree.size() == 1 && 8489 (VectorizableTree[0]->State == TreeEntry::Vectorize || 8490 (ForReduction && 8491 AreVectorizableGathers(VectorizableTree[0].get(), 8492 VectorizableTree[0]->Scalars.size()) && 8493 VectorizableTree[0]->getVectorFactor() > 2))) 8494 return true; 8495 8496 if (VectorizableTree.size() != 2) 8497 return false; 8498 8499 // Handle splat and all-constants stores. 
Also try to vectorize tiny trees 8500 // with the second gather nodes if they have less scalar operands rather than 8501 // the initial tree element (may be profitable to shuffle the second gather) 8502 // or they are extractelements, which form shuffle. 8503 SmallVector<int> Mask; 8504 if (VectorizableTree[0]->State == TreeEntry::Vectorize && 8505 AreVectorizableGathers(VectorizableTree[1].get(), 8506 VectorizableTree[0]->Scalars.size())) 8507 return true; 8508 8509 // Gathering cost would be too much for tiny trees. 8510 if (VectorizableTree[0]->State == TreeEntry::NeedToGather || 8511 (VectorizableTree[1]->State == TreeEntry::NeedToGather && 8512 VectorizableTree[0]->State != TreeEntry::ScatterVectorize && 8513 VectorizableTree[0]->State != TreeEntry::PossibleStridedVectorize)) 8514 return false; 8515 8516 return true; 8517 } 8518 8519 static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts, 8520 TargetTransformInfo *TTI, 8521 bool MustMatchOrInst) { 8522 // Look past the root to find a source value. Arbitrarily follow the 8523 // path through operand 0 of any 'or'. Also, peek through optional 8524 // shift-left-by-multiple-of-8-bits. 8525 Value *ZextLoad = Root; 8526 const APInt *ShAmtC; 8527 bool FoundOr = false; 8528 while (!isa<ConstantExpr>(ZextLoad) && 8529 (match(ZextLoad, m_Or(m_Value(), m_Value())) || 8530 (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) && 8531 ShAmtC->urem(8) == 0))) { 8532 auto *BinOp = cast<BinaryOperator>(ZextLoad); 8533 ZextLoad = BinOp->getOperand(0); 8534 if (BinOp->getOpcode() == Instruction::Or) 8535 FoundOr = true; 8536 } 8537 // Check if the input is an extended load of the required or/shift expression. 8538 Value *Load; 8539 if ((MustMatchOrInst && !FoundOr) || ZextLoad == Root || 8540 !match(ZextLoad, m_ZExt(m_Value(Load))) || !isa<LoadInst>(Load)) 8541 return false; 8542 8543 // Require that the total load bit width is a legal integer type. 8544 // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target. 8545 // But <16 x i8> --> i128 is not, so the backend probably can't reduce it. 8546 Type *SrcTy = Load->getType(); 8547 unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts; 8548 if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth))) 8549 return false; 8550 8551 // Everything matched - assume that we can fold the whole sequence using 8552 // load combining. 8553 LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at " 8554 << *(cast<Instruction>(Root)) << "\n"); 8555 8556 return true; 8557 } 8558 8559 bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const { 8560 if (RdxKind != RecurKind::Or) 8561 return false; 8562 8563 unsigned NumElts = VectorizableTree[0]->Scalars.size(); 8564 Value *FirstReduced = VectorizableTree[0]->Scalars[0]; 8565 return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI, 8566 /* MatchOr */ false); 8567 } 8568 8569 bool BoUpSLP::isLoadCombineCandidate() const { 8570 // Peek through a final sequence of stores and check if all operations are 8571 // likely to be load-combined. 8572 unsigned NumElts = VectorizableTree[0]->Scalars.size(); 8573 for (Value *Scalar : VectorizableTree[0]->Scalars) { 8574 Value *X; 8575 if (!match(Scalar, m_Store(m_Value(X), m_Value())) || 8576 !isLoadCombineCandidateImpl(X, NumElts, TTI, /* MatchOr */ true)) 8577 return false; 8578 } 8579 return true; 8580 } 8581 8582 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable(bool ForReduction) const { 8583 // No need to vectorize inserts of gathered values. 
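// Returning true from this function means the tree is treated as tiny and
// not worth vectorizing.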
8584 if (VectorizableTree.size() == 2 && 8585 isa<InsertElementInst>(VectorizableTree[0]->Scalars[0]) && 8586 VectorizableTree[1]->State == TreeEntry::NeedToGather && 8587 (VectorizableTree[1]->getVectorFactor() <= 2 || 8588 !(isSplat(VectorizableTree[1]->Scalars) || 8589 allConstant(VectorizableTree[1]->Scalars)))) 8590 return true; 8591 8592 // If the graph includes only PHI nodes and gathers, it is definitely not 8593 // profitable for vectorization, so we can skip it if the cost threshold is the 8594 // default. The cost of vectorized PHI nodes is almost always 0 + the cost of 8595 // gathers/buildvectors. 8596 constexpr int Limit = 4; 8597 if (!ForReduction && !SLPCostThreshold.getNumOccurrences() && 8598 !VectorizableTree.empty() && 8599 all_of(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) { 8600 return (TE->State == TreeEntry::NeedToGather && 8601 TE->getOpcode() != Instruction::ExtractElement && 8602 count_if(TE->Scalars, 8603 [](Value *V) { return isa<ExtractElementInst>(V); }) <= 8604 Limit) || 8605 TE->getOpcode() == Instruction::PHI; 8606 })) 8607 return true; 8608 8609 // We can vectorize the tree if its size is greater than or equal to the 8610 // minimum size specified by the MinTreeSize command line option. 8611 if (VectorizableTree.size() >= MinTreeSize) 8612 return false; 8613 8614 // If we have a tiny tree (a tree whose size is less than MinTreeSize), we 8615 // can vectorize it if we can prove it fully vectorizable. 8616 if (isFullyVectorizableTinyTree(ForReduction)) 8617 return false; 8618 8619 assert(VectorizableTree.empty() 8620 ? ExternalUses.empty() 8621 : true && "We shouldn't have any external users"); 8622 8623 // Otherwise, we can't vectorize the tree. It is both tiny and not fully 8624 // vectorizable. 8625 return true; 8626 } 8627 8628 InstructionCost BoUpSLP::getSpillCost() const { 8629 // Walk from the bottom of the tree to the top, tracking which values are 8630 // live. When we see a call instruction that is not part of our tree, 8631 // query TTI to see if there is a cost to keeping values live over it 8632 // (for example, if spills and fills are required). 8633 unsigned BundleWidth = VectorizableTree.front()->Scalars.size(); 8634 InstructionCost Cost = 0; 8635 8636 SmallPtrSet<Instruction *, 4> LiveValues; 8637 Instruction *PrevInst = nullptr; 8638 8639 // The entries in VectorizableTree are not necessarily ordered by their 8640 // position in basic blocks. Collect them and order them by dominance so later 8641 // instructions are guaranteed to be visited first. For instructions in 8642 // different basic blocks, we only scan to the beginning of the block, so 8643 // their order does not matter, as long as all instructions in a basic block 8644 // are grouped together. Using dominance ensures a deterministic order.
8645 SmallVector<Instruction *, 16> OrderedScalars; 8646 for (const auto &TEPtr : VectorizableTree) { 8647 if (TEPtr->State != TreeEntry::Vectorize) 8648 continue; 8649 Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]); 8650 if (!Inst) 8651 continue; 8652 OrderedScalars.push_back(Inst); 8653 } 8654 llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) { 8655 auto *NodeA = DT->getNode(A->getParent()); 8656 auto *NodeB = DT->getNode(B->getParent()); 8657 assert(NodeA && "Should only process reachable instructions"); 8658 assert(NodeB && "Should only process reachable instructions"); 8659 assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) && 8660 "Different nodes should have different DFS numbers"); 8661 if (NodeA != NodeB) 8662 return NodeA->getDFSNumIn() > NodeB->getDFSNumIn(); 8663 return B->comesBefore(A); 8664 }); 8665 8666 for (Instruction *Inst : OrderedScalars) { 8667 if (!PrevInst) { 8668 PrevInst = Inst; 8669 continue; 8670 } 8671 8672 // Update LiveValues. 8673 LiveValues.erase(PrevInst); 8674 for (auto &J : PrevInst->operands()) { 8675 if (isa<Instruction>(&*J) && getTreeEntry(&*J)) 8676 LiveValues.insert(cast<Instruction>(&*J)); 8677 } 8678 8679 LLVM_DEBUG({ 8680 dbgs() << "SLP: #LV: " << LiveValues.size(); 8681 for (auto *X : LiveValues) 8682 dbgs() << " " << X->getName(); 8683 dbgs() << ", Looking at "; 8684 Inst->dump(); 8685 }); 8686 8687 // Now find the sequence of instructions between PrevInst and Inst. 8688 unsigned NumCalls = 0; 8689 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(), 8690 PrevInstIt = 8691 PrevInst->getIterator().getReverse(); 8692 while (InstIt != PrevInstIt) { 8693 if (PrevInstIt == PrevInst->getParent()->rend()) { 8694 PrevInstIt = Inst->getParent()->rbegin(); 8695 continue; 8696 } 8697 8698 auto NoCallIntrinsic = [this](Instruction *I) { 8699 if (auto *II = dyn_cast<IntrinsicInst>(I)) { 8700 if (II->isAssumeLikeIntrinsic()) 8701 return true; 8702 FastMathFlags FMF; 8703 SmallVector<Type *, 4> Tys; 8704 for (auto &ArgOp : II->args()) 8705 Tys.push_back(ArgOp->getType()); 8706 if (auto *FPMO = dyn_cast<FPMathOperator>(II)) 8707 FMF = FPMO->getFastMathFlags(); 8708 IntrinsicCostAttributes ICA(II->getIntrinsicID(), II->getType(), Tys, 8709 FMF); 8710 InstructionCost IntrCost = 8711 TTI->getIntrinsicInstrCost(ICA, TTI::TCK_RecipThroughput); 8712 InstructionCost CallCost = TTI->getCallInstrCost( 8713 nullptr, II->getType(), Tys, TTI::TCK_RecipThroughput); 8714 if (IntrCost < CallCost) 8715 return true; 8716 } 8717 return false; 8718 }; 8719 8720 // Debug information does not impact spill cost. 8721 if (isa<CallBase>(&*PrevInstIt) && !NoCallIntrinsic(&*PrevInstIt) && 8722 &*PrevInstIt != PrevInst) 8723 NumCalls++; 8724 8725 ++PrevInstIt; 8726 } 8727 8728 if (NumCalls) { 8729 SmallVector<Type *, 4> V; 8730 for (auto *II : LiveValues) { 8731 auto *ScalarTy = II->getType(); 8732 if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy)) 8733 ScalarTy = VectorTy->getElementType(); 8734 V.push_back(FixedVectorType::get(ScalarTy, BundleWidth)); 8735 } 8736 Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V); 8737 } 8738 8739 PrevInst = Inst; 8740 } 8741 8742 return Cost; 8743 } 8744 8745 /// Checks if the \p IE1 instructions is followed by \p IE2 instruction in the 8746 /// buildvector sequence. 
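/// I.e. returns true when \p IE1 can be reached from \p IE2 through the chain
/// of operand-0 insertelements, meaning \p IE1 was emitted earlier in the same
/// buildvector sequence.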
static bool isFirstInsertElement(const InsertElementInst *IE1,
                                 const InsertElementInst *IE2) {
  if (IE1 == IE2)
    return false;
  const auto *I1 = IE1;
  const auto *I2 = IE2;
  const InsertElementInst *PrevI1;
  const InsertElementInst *PrevI2;
  unsigned Idx1 = *getInsertIndex(IE1);
  unsigned Idx2 = *getInsertIndex(IE2);
  do {
    if (I2 == IE1)
      return true;
    if (I1 == IE2)
      return false;
    PrevI1 = I1;
    PrevI2 = I2;
    if (I1 && (I1 == IE1 || I1->hasOneUse()) &&
        getInsertIndex(I1).value_or(Idx2) != Idx2)
      I1 = dyn_cast<InsertElementInst>(I1->getOperand(0));
    if (I2 && ((I2 == IE2 || I2->hasOneUse())) &&
        getInsertIndex(I2).value_or(Idx1) != Idx1)
      I2 = dyn_cast<InsertElementInst>(I2->getOperand(0));
  } while ((I1 && PrevI1 != I1) || (I2 && PrevI2 != I2));
  llvm_unreachable("Two different buildvectors not expected.");
}

namespace {
/// Returns the incoming Value *, if the requested type is Value * too, or a
/// default value otherwise.
struct ValueSelect {
  template <typename U>
  static std::enable_if_t<std::is_same_v<Value *, U>, Value *> get(Value *V) {
    return V;
  }
  template <typename U>
  static std::enable_if_t<!std::is_same_v<Value *, U>, U> get(Value *) {
    return U();
  }
};
} // namespace

/// Does the analysis of the provided shuffle masks and performs the requested
/// actions on the vectors with the given shuffle masks. It tries to do it in
/// several steps.
/// 1. If the Base vector is not an undef vector, resize the very first mask to
/// have a common VF and perform the action for 2 input vectors (including the
/// non-undef Base). Other shuffle masks are combined with the result of this
/// first stage and processed as a shuffle of 2 elements.
/// 2. If the Base is an undef vector and there is only 1 shuffle mask, perform
/// the action only for 1 vector with the given mask, if it is not the identity
/// mask.
/// 3. If > 2 masks are used, perform the remaining shuffle actions for 2
/// vectors, combining the masks properly between the steps.
template <typename T>
static T *performExtractsShuffleAction(
    MutableArrayRef<std::pair<T *, SmallVector<int>>> ShuffleMask, Value *Base,
    function_ref<unsigned(T *)> GetVF,
    function_ref<std::pair<T *, bool>(T *, ArrayRef<int>, bool)> ResizeAction,
    function_ref<T *(ArrayRef<int>, ArrayRef<T *>)> Action) {
  assert(!ShuffleMask.empty() && "Empty list of shuffles for inserts.");
  SmallVector<int> Mask(ShuffleMask.begin()->second);
  auto VMIt = std::next(ShuffleMask.begin());
  T *Prev = nullptr;
  SmallBitVector UseMask =
      buildUseMask(Mask.size(), Mask, UseMask::UndefsAsMask);
  SmallBitVector IsBaseUndef = isUndefVector(Base, UseMask);
  if (!IsBaseUndef.all()) {
    // Base is not undef, need to combine it with the next subvectors.
    std::pair<T *, bool> Res =
        ResizeAction(ShuffleMask.begin()->first, Mask, /*ForSingleMask=*/false);
    SmallBitVector IsBasePoison = isUndefVector<true>(Base, UseMask);
    for (unsigned Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
      if (Mask[Idx] == PoisonMaskElem)
        Mask[Idx] = IsBasePoison.test(Idx) ? PoisonMaskElem : Idx;
      else
        Mask[Idx] = (Res.second ?
Idx : Mask[Idx]) + VF; 8824 } 8825 auto *V = ValueSelect::get<T *>(Base); 8826 (void)V; 8827 assert((!V || GetVF(V) == Mask.size()) && 8828 "Expected base vector of VF number of elements."); 8829 Prev = Action(Mask, {nullptr, Res.first}); 8830 } else if (ShuffleMask.size() == 1) { 8831 // Base is undef and only 1 vector is shuffled - perform the action only for 8832 // single vector, if the mask is not the identity mask. 8833 std::pair<T *, bool> Res = ResizeAction(ShuffleMask.begin()->first, Mask, 8834 /*ForSingleMask=*/true); 8835 if (Res.second) 8836 // Identity mask is found. 8837 Prev = Res.first; 8838 else 8839 Prev = Action(Mask, {ShuffleMask.begin()->first}); 8840 } else { 8841 // Base is undef and at least 2 input vectors shuffled - perform 2 vectors 8842 // shuffles step by step, combining shuffle between the steps. 8843 unsigned Vec1VF = GetVF(ShuffleMask.begin()->first); 8844 unsigned Vec2VF = GetVF(VMIt->first); 8845 if (Vec1VF == Vec2VF) { 8846 // No need to resize the input vectors since they are of the same size, we 8847 // can shuffle them directly. 8848 ArrayRef<int> SecMask = VMIt->second; 8849 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) { 8850 if (SecMask[I] != PoisonMaskElem) { 8851 assert(Mask[I] == PoisonMaskElem && "Multiple uses of scalars."); 8852 Mask[I] = SecMask[I] + Vec1VF; 8853 } 8854 } 8855 Prev = Action(Mask, {ShuffleMask.begin()->first, VMIt->first}); 8856 } else { 8857 // Vectors of different sizes - resize and reshuffle. 8858 std::pair<T *, bool> Res1 = ResizeAction(ShuffleMask.begin()->first, Mask, 8859 /*ForSingleMask=*/false); 8860 std::pair<T *, bool> Res2 = 8861 ResizeAction(VMIt->first, VMIt->second, /*ForSingleMask=*/false); 8862 ArrayRef<int> SecMask = VMIt->second; 8863 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) { 8864 if (Mask[I] != PoisonMaskElem) { 8865 assert(SecMask[I] == PoisonMaskElem && "Multiple uses of scalars."); 8866 if (Res1.second) 8867 Mask[I] = I; 8868 } else if (SecMask[I] != PoisonMaskElem) { 8869 assert(Mask[I] == PoisonMaskElem && "Multiple uses of scalars."); 8870 Mask[I] = (Res2.second ? I : SecMask[I]) + VF; 8871 } 8872 } 8873 Prev = Action(Mask, {Res1.first, Res2.first}); 8874 } 8875 VMIt = std::next(VMIt); 8876 } 8877 bool IsBaseNotUndef = !IsBaseUndef.all(); 8878 (void)IsBaseNotUndef; 8879 // Perform requested actions for the remaining masks/vectors. 8880 for (auto E = ShuffleMask.end(); VMIt != E; ++VMIt) { 8881 // Shuffle other input vectors, if any. 8882 std::pair<T *, bool> Res = 8883 ResizeAction(VMIt->first, VMIt->second, /*ForSingleMask=*/false); 8884 ArrayRef<int> SecMask = VMIt->second; 8885 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) { 8886 if (SecMask[I] != PoisonMaskElem) { 8887 assert((Mask[I] == PoisonMaskElem || IsBaseNotUndef) && 8888 "Multiple uses of scalars."); 8889 Mask[I] = (Res.second ? 
I : SecMask[I]) + VF; 8890 } else if (Mask[I] != PoisonMaskElem) { 8891 Mask[I] = I; 8892 } 8893 } 8894 Prev = Action(Mask, {Prev, Res.first}); 8895 } 8896 return Prev; 8897 } 8898 8899 InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) { 8900 InstructionCost Cost = 0; 8901 LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size " 8902 << VectorizableTree.size() << ".\n"); 8903 8904 unsigned BundleWidth = VectorizableTree[0]->Scalars.size(); 8905 8906 SmallPtrSet<Value *, 4> CheckedExtracts; 8907 for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) { 8908 TreeEntry &TE = *VectorizableTree[I]; 8909 if (TE.State == TreeEntry::NeedToGather) { 8910 if (const TreeEntry *E = getTreeEntry(TE.getMainOp()); 8911 E && E->getVectorFactor() == TE.getVectorFactor() && 8912 E->isSame(TE.Scalars)) { 8913 // Some gather nodes might be absolutely the same as some vectorizable 8914 // nodes after reordering, need to handle it. 8915 LLVM_DEBUG(dbgs() << "SLP: Adding cost 0 for bundle " 8916 << shortBundleName(TE.Scalars) << ".\n" 8917 << "SLP: Current total cost = " << Cost << "\n"); 8918 continue; 8919 } 8920 } 8921 8922 InstructionCost C = getEntryCost(&TE, VectorizedVals, CheckedExtracts); 8923 Cost += C; 8924 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle " 8925 << shortBundleName(TE.Scalars) << ".\n" 8926 << "SLP: Current total cost = " << Cost << "\n"); 8927 } 8928 8929 SmallPtrSet<Value *, 16> ExtractCostCalculated; 8930 InstructionCost ExtractCost = 0; 8931 SmallVector<MapVector<const TreeEntry *, SmallVector<int>>> ShuffleMasks; 8932 SmallVector<std::pair<Value *, const TreeEntry *>> FirstUsers; 8933 SmallVector<APInt> DemandedElts; 8934 SmallDenseSet<Value *, 4> UsedInserts; 8935 DenseSet<Value *> VectorCasts; 8936 for (ExternalUser &EU : ExternalUses) { 8937 // We only add extract cost once for the same scalar. 8938 if (!isa_and_nonnull<InsertElementInst>(EU.User) && 8939 !ExtractCostCalculated.insert(EU.Scalar).second) 8940 continue; 8941 8942 // Uses by ephemeral values are free (because the ephemeral value will be 8943 // removed prior to code generation, and so the extraction will be 8944 // removed as well). 8945 if (EphValues.count(EU.User)) 8946 continue; 8947 8948 // No extract cost for vector "scalar" 8949 if (isa<FixedVectorType>(EU.Scalar->getType())) 8950 continue; 8951 8952 // If found user is an insertelement, do not calculate extract cost but try 8953 // to detect it as a final shuffled/identity match. 8954 if (auto *VU = dyn_cast_or_null<InsertElementInst>(EU.User)) { 8955 if (auto *FTy = dyn_cast<FixedVectorType>(VU->getType())) { 8956 if (!UsedInserts.insert(VU).second) 8957 continue; 8958 std::optional<unsigned> InsertIdx = getInsertIndex(VU); 8959 if (InsertIdx) { 8960 const TreeEntry *ScalarTE = getTreeEntry(EU.Scalar); 8961 auto *It = find_if( 8962 FirstUsers, 8963 [this, VU](const std::pair<Value *, const TreeEntry *> &Pair) { 8964 return areTwoInsertFromSameBuildVector( 8965 VU, cast<InsertElementInst>(Pair.first), 8966 [this](InsertElementInst *II) -> Value * { 8967 Value *Op0 = II->getOperand(0); 8968 if (getTreeEntry(II) && !getTreeEntry(Op0)) 8969 return nullptr; 8970 return Op0; 8971 }); 8972 }); 8973 int VecId = -1; 8974 if (It == FirstUsers.end()) { 8975 (void)ShuffleMasks.emplace_back(); 8976 SmallVectorImpl<int> &Mask = ShuffleMasks.back()[ScalarTE]; 8977 if (Mask.empty()) 8978 Mask.assign(FTy->getNumElements(), PoisonMaskElem); 8979 // Find the insertvector, vectorized in tree, if any. 
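            // Sketch of the walk below (an informal summary): starting from
            // VU, follow operand 0 through the insertelement chain; once an
            // insertelement that belongs to a vectorized tree entry is found,
            // identity indices for that vectorized sub-chain are recorded in
            // Mask, so those lanes are treated as already produced by the
            // vectorized buildvector when the final shuffle cost is estimated.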
8980 Value *Base = VU; 8981 while (auto *IEBase = dyn_cast<InsertElementInst>(Base)) { 8982 if (IEBase != EU.User && 8983 (!IEBase->hasOneUse() || 8984 getInsertIndex(IEBase).value_or(*InsertIdx) == *InsertIdx)) 8985 break; 8986 // Build the mask for the vectorized insertelement instructions. 8987 if (const TreeEntry *E = getTreeEntry(IEBase)) { 8988 VU = IEBase; 8989 do { 8990 IEBase = cast<InsertElementInst>(Base); 8991 int Idx = *getInsertIndex(IEBase); 8992 assert(Mask[Idx] == PoisonMaskElem && 8993 "InsertElementInstruction used already."); 8994 Mask[Idx] = Idx; 8995 Base = IEBase->getOperand(0); 8996 } while (E == getTreeEntry(Base)); 8997 break; 8998 } 8999 Base = cast<InsertElementInst>(Base)->getOperand(0); 9000 } 9001 FirstUsers.emplace_back(VU, ScalarTE); 9002 DemandedElts.push_back(APInt::getZero(FTy->getNumElements())); 9003 VecId = FirstUsers.size() - 1; 9004 auto It = MinBWs.find(ScalarTE); 9005 if (It != MinBWs.end() && VectorCasts.insert(EU.Scalar).second) { 9006 unsigned BWSz = It->second.second; 9007 unsigned SrcBWSz = DL->getTypeSizeInBits(FTy->getElementType()); 9008 unsigned VecOpcode; 9009 if (BWSz < SrcBWSz) 9010 VecOpcode = Instruction::Trunc; 9011 else 9012 VecOpcode = 9013 It->second.second ? Instruction::SExt : Instruction::ZExt; 9014 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 9015 InstructionCost C = TTI->getCastInstrCost( 9016 VecOpcode, FTy, 9017 FixedVectorType::get( 9018 IntegerType::get(FTy->getContext(), It->second.first), 9019 FTy->getNumElements()), 9020 TTI::CastContextHint::None, CostKind); 9021 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 9022 << " for extending externally used vector with " 9023 "non-equal minimum bitwidth.\n"); 9024 Cost += C; 9025 } 9026 } else { 9027 if (isFirstInsertElement(VU, cast<InsertElementInst>(It->first))) 9028 It->first = VU; 9029 VecId = std::distance(FirstUsers.begin(), It); 9030 } 9031 int InIdx = *InsertIdx; 9032 SmallVectorImpl<int> &Mask = ShuffleMasks[VecId][ScalarTE]; 9033 if (Mask.empty()) 9034 Mask.assign(FTy->getNumElements(), PoisonMaskElem); 9035 Mask[InIdx] = EU.Lane; 9036 DemandedElts[VecId].setBit(InIdx); 9037 continue; 9038 } 9039 } 9040 } 9041 9042 // If we plan to rewrite the tree in a smaller type, we will need to sign 9043 // extend the extracted value back to the original type. Here, we account 9044 // for the extract and the added cost of the sign extend if needed. 9045 auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth); 9046 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 9047 auto It = MinBWs.find(getTreeEntry(EU.Scalar)); 9048 if (It != MinBWs.end()) { 9049 auto *MinTy = IntegerType::get(F->getContext(), It->second.first); 9050 unsigned Extend = 9051 It->second.second ? Instruction::SExt : Instruction::ZExt; 9052 VecTy = FixedVectorType::get(MinTy, BundleWidth); 9053 ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(), 9054 VecTy, EU.Lane); 9055 } else { 9056 ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, 9057 CostKind, EU.Lane); 9058 } 9059 } 9060 // Add reduced value cost, if resized. 9061 if (!VectorizedVals.empty()) { 9062 auto BWIt = MinBWs.find(VectorizableTree.front().get()); 9063 if (BWIt != MinBWs.end()) { 9064 Type *DstTy = VectorizableTree.front()->Scalars.front()->getType(); 9065 unsigned OriginalSz = DL->getTypeSizeInBits(DstTy); 9066 unsigned Opcode = Instruction::Trunc; 9067 if (OriginalSz < BWIt->second.first) 9068 Opcode = BWIt->second.second ? 
Instruction::SExt : Instruction::ZExt; 9069 Type *SrcTy = IntegerType::get(DstTy->getContext(), BWIt->second.first); 9070 Cost += TTI->getCastInstrCost(Opcode, DstTy, SrcTy, 9071 TTI::CastContextHint::None, 9072 TTI::TCK_RecipThroughput); 9073 } 9074 } 9075 9076 InstructionCost SpillCost = getSpillCost(); 9077 Cost += SpillCost + ExtractCost; 9078 auto &&ResizeToVF = [this, &Cost](const TreeEntry *TE, ArrayRef<int> Mask, 9079 bool) { 9080 InstructionCost C = 0; 9081 unsigned VF = Mask.size(); 9082 unsigned VecVF = TE->getVectorFactor(); 9083 if (VF != VecVF && 9084 (any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); }) || 9085 !ShuffleVectorInst::isIdentityMask(Mask, VF))) { 9086 SmallVector<int> OrigMask(VecVF, PoisonMaskElem); 9087 std::copy(Mask.begin(), std::next(Mask.begin(), std::min(VF, VecVF)), 9088 OrigMask.begin()); 9089 C = TTI->getShuffleCost( 9090 TTI::SK_PermuteSingleSrc, 9091 FixedVectorType::get(TE->getMainOp()->getType(), VecVF), OrigMask); 9092 LLVM_DEBUG( 9093 dbgs() << "SLP: Adding cost " << C 9094 << " for final shuffle of insertelement external users.\n"; 9095 TE->dump(); dbgs() << "SLP: Current total cost = " << Cost << "\n"); 9096 Cost += C; 9097 return std::make_pair(TE, true); 9098 } 9099 return std::make_pair(TE, false); 9100 }; 9101 // Calculate the cost of the reshuffled vectors, if any. 9102 for (int I = 0, E = FirstUsers.size(); I < E; ++I) { 9103 Value *Base = cast<Instruction>(FirstUsers[I].first)->getOperand(0); 9104 auto Vector = ShuffleMasks[I].takeVector(); 9105 unsigned VF = 0; 9106 auto EstimateShufflesCost = [&](ArrayRef<int> Mask, 9107 ArrayRef<const TreeEntry *> TEs) { 9108 assert((TEs.size() == 1 || TEs.size() == 2) && 9109 "Expected exactly 1 or 2 tree entries."); 9110 if (TEs.size() == 1) { 9111 if (VF == 0) 9112 VF = TEs.front()->getVectorFactor(); 9113 auto *FTy = 9114 FixedVectorType::get(TEs.back()->Scalars.front()->getType(), VF); 9115 if (!ShuffleVectorInst::isIdentityMask(Mask, VF) && 9116 !all_of(enumerate(Mask), [=](const auto &Data) { 9117 return Data.value() == PoisonMaskElem || 9118 (Data.index() < VF && 9119 static_cast<int>(Data.index()) == Data.value()); 9120 })) { 9121 InstructionCost C = 9122 TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FTy, Mask); 9123 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 9124 << " for final shuffle of insertelement " 9125 "external users.\n"; 9126 TEs.front()->dump(); 9127 dbgs() << "SLP: Current total cost = " << Cost << "\n"); 9128 Cost += C; 9129 } 9130 } else { 9131 if (VF == 0) { 9132 if (TEs.front() && 9133 TEs.front()->getVectorFactor() == TEs.back()->getVectorFactor()) 9134 VF = TEs.front()->getVectorFactor(); 9135 else 9136 VF = Mask.size(); 9137 } 9138 auto *FTy = 9139 FixedVectorType::get(TEs.back()->Scalars.front()->getType(), VF); 9140 InstructionCost C = 9141 TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, FTy, Mask); 9142 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 9143 << " for final shuffle of vector node and external " 9144 "insertelement users.\n"; 9145 if (TEs.front()) { TEs.front()->dump(); } TEs.back()->dump(); 9146 dbgs() << "SLP: Current total cost = " << Cost << "\n"); 9147 Cost += C; 9148 } 9149 VF = Mask.size(); 9150 return TEs.back(); 9151 }; 9152 (void)performExtractsShuffleAction<const TreeEntry>( 9153 MutableArrayRef(Vector.data(), Vector.size()), Base, 9154 [](const TreeEntry *E) { return E->getVectorFactor(); }, ResizeToVF, 9155 EstimateShufflesCost); 9156 InstructionCost InsertCost = TTI->getScalarizationOverhead( 9157 
cast<FixedVectorType>(FirstUsers[I].first->getType()), DemandedElts[I], 9158 /*Insert*/ true, /*Extract*/ false, TTI::TCK_RecipThroughput); 9159 Cost -= InsertCost; 9160 } 9161 9162 #ifndef NDEBUG 9163 SmallString<256> Str; 9164 { 9165 raw_svector_ostream OS(Str); 9166 OS << "SLP: Spill Cost = " << SpillCost << ".\n" 9167 << "SLP: Extract Cost = " << ExtractCost << ".\n" 9168 << "SLP: Total Cost = " << Cost << ".\n"; 9169 } 9170 LLVM_DEBUG(dbgs() << Str); 9171 if (ViewSLPTree) 9172 ViewGraph(this, "SLP" + F->getName(), false, Str); 9173 #endif 9174 9175 return Cost; 9176 } 9177 9178 /// Tries to find extractelement instructions with constant indices from fixed 9179 /// vector type and gather such instructions into a bunch, which highly likely 9180 /// might be detected as a shuffle of 1 or 2 input vectors. If this attempt was 9181 /// successful, the matched scalars are replaced by poison values in \p VL for 9182 /// future analysis. 9183 std::optional<TTI::ShuffleKind> 9184 BoUpSLP::tryToGatherSingleRegisterExtractElements( 9185 MutableArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) const { 9186 // Scan list of gathered scalars for extractelements that can be represented 9187 // as shuffles. 9188 MapVector<Value *, SmallVector<int>> VectorOpToIdx; 9189 SmallVector<int> UndefVectorExtracts; 9190 for (int I = 0, E = VL.size(); I < E; ++I) { 9191 auto *EI = dyn_cast<ExtractElementInst>(VL[I]); 9192 if (!EI) { 9193 if (isa<UndefValue>(VL[I])) 9194 UndefVectorExtracts.push_back(I); 9195 continue; 9196 } 9197 auto *VecTy = dyn_cast<FixedVectorType>(EI->getVectorOperandType()); 9198 if (!VecTy || !isa<ConstantInt, UndefValue>(EI->getIndexOperand())) 9199 continue; 9200 std::optional<unsigned> Idx = getExtractIndex(EI); 9201 // Undefined index. 9202 if (!Idx) { 9203 UndefVectorExtracts.push_back(I); 9204 continue; 9205 } 9206 SmallBitVector ExtractMask(VecTy->getNumElements(), true); 9207 ExtractMask.reset(*Idx); 9208 if (isUndefVector(EI->getVectorOperand(), ExtractMask).all()) { 9209 UndefVectorExtracts.push_back(I); 9210 continue; 9211 } 9212 VectorOpToIdx[EI->getVectorOperand()].push_back(I); 9213 } 9214 // Sort the vector operands by the maximum number of uses in extractelements. 9215 MapVector<unsigned, SmallVector<Value *>> VFToVector; 9216 for (const auto &Data : VectorOpToIdx) 9217 VFToVector[cast<FixedVectorType>(Data.first->getType())->getNumElements()] 9218 .push_back(Data.first); 9219 for (auto &Data : VFToVector) { 9220 stable_sort(Data.second, [&VectorOpToIdx](Value *V1, Value *V2) { 9221 return VectorOpToIdx.find(V1)->second.size() > 9222 VectorOpToIdx.find(V2)->second.size(); 9223 }); 9224 } 9225 // Find the best pair of the vectors with the same number of elements or a 9226 // single vector. 
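  // Hypothetical example (pseudo-notation for extractelements): for
  // VL = { %v0[0], %v0[1], %v1[2], undef } with %v0 and %v1 of the same
  // element count, the single source %v0 covers 2 + 1 (undef) lanes while the
  // pair (%v0, %v1) covers all 4 lanes, so the pair is preferred by the
  // checks below.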
9227 const int UndefSz = UndefVectorExtracts.size(); 9228 unsigned SingleMax = 0; 9229 Value *SingleVec = nullptr; 9230 unsigned PairMax = 0; 9231 std::pair<Value *, Value *> PairVec(nullptr, nullptr); 9232 for (auto &Data : VFToVector) { 9233 Value *V1 = Data.second.front(); 9234 if (SingleMax < VectorOpToIdx[V1].size() + UndefSz) { 9235 SingleMax = VectorOpToIdx[V1].size() + UndefSz; 9236 SingleVec = V1; 9237 } 9238 Value *V2 = nullptr; 9239 if (Data.second.size() > 1) 9240 V2 = *std::next(Data.second.begin()); 9241 if (V2 && PairMax < VectorOpToIdx[V1].size() + VectorOpToIdx[V2].size() + 9242 UndefSz) { 9243 PairMax = VectorOpToIdx[V1].size() + VectorOpToIdx[V2].size() + UndefSz; 9244 PairVec = std::make_pair(V1, V2); 9245 } 9246 } 9247 if (SingleMax == 0 && PairMax == 0 && UndefSz == 0) 9248 return std::nullopt; 9249 // Check if better to perform a shuffle of 2 vectors or just of a single 9250 // vector. 9251 SmallVector<Value *> SavedVL(VL.begin(), VL.end()); 9252 SmallVector<Value *> GatheredExtracts( 9253 VL.size(), PoisonValue::get(VL.front()->getType())); 9254 if (SingleMax >= PairMax && SingleMax) { 9255 for (int Idx : VectorOpToIdx[SingleVec]) 9256 std::swap(GatheredExtracts[Idx], VL[Idx]); 9257 } else { 9258 for (Value *V : {PairVec.first, PairVec.second}) 9259 for (int Idx : VectorOpToIdx[V]) 9260 std::swap(GatheredExtracts[Idx], VL[Idx]); 9261 } 9262 // Add extracts from undefs too. 9263 for (int Idx : UndefVectorExtracts) 9264 std::swap(GatheredExtracts[Idx], VL[Idx]); 9265 // Check that gather of extractelements can be represented as just a 9266 // shuffle of a single/two vectors the scalars are extracted from. 9267 std::optional<TTI::ShuffleKind> Res = 9268 isFixedVectorShuffle(GatheredExtracts, Mask); 9269 if (!Res) { 9270 // TODO: try to check other subsets if possible. 9271 // Restore the original VL if attempt was not successful. 9272 copy(SavedVL, VL.begin()); 9273 return std::nullopt; 9274 } 9275 // Restore unused scalars from mask, if some of the extractelements were not 9276 // selected for shuffle. 9277 for (int I = 0, E = GatheredExtracts.size(); I < E; ++I) { 9278 if (Mask[I] == PoisonMaskElem && !isa<PoisonValue>(GatheredExtracts[I]) && 9279 isa<UndefValue>(GatheredExtracts[I])) { 9280 std::swap(VL[I], GatheredExtracts[I]); 9281 continue; 9282 } 9283 auto *EI = dyn_cast<ExtractElementInst>(VL[I]); 9284 if (!EI || !isa<FixedVectorType>(EI->getVectorOperandType()) || 9285 !isa<ConstantInt, UndefValue>(EI->getIndexOperand()) || 9286 is_contained(UndefVectorExtracts, I)) 9287 continue; 9288 } 9289 return Res; 9290 } 9291 9292 /// Tries to find extractelement instructions with constant indices from fixed 9293 /// vector type and gather such instructions into a bunch, which highly likely 9294 /// might be detected as a shuffle of 1 or 2 input vectors. If this attempt was 9295 /// successful, the matched scalars are replaced by poison values in \p VL for 9296 /// future analysis. 9297 SmallVector<std::optional<TTI::ShuffleKind>> 9298 BoUpSLP::tryToGatherExtractElements(SmallVectorImpl<Value *> &VL, 9299 SmallVectorImpl<int> &Mask, 9300 unsigned NumParts) const { 9301 assert(NumParts > 0 && "NumParts expected be greater than or equal to 1."); 9302 SmallVector<std::optional<TTI::ShuffleKind>> ShufflesRes(NumParts); 9303 Mask.assign(VL.size(), PoisonMaskElem); 9304 unsigned SliceSize = VL.size() / NumParts; 9305 for (unsigned Part = 0; Part < NumParts; ++Part) { 9306 // Scan list of gathered scalars for extractelements that can be represented 9307 // as shuffles. 
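    // Each part spans SliceSize contiguous scalars, i.e. roughly one vector
    // register's worth of lanes; the per-part sub-mask is copied back into the
    // combined Mask at the matching offset.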
9308 MutableArrayRef<Value *> SubVL = 9309 MutableArrayRef(VL).slice(Part * SliceSize, SliceSize); 9310 SmallVector<int> SubMask; 9311 std::optional<TTI::ShuffleKind> Res = 9312 tryToGatherSingleRegisterExtractElements(SubVL, SubMask); 9313 ShufflesRes[Part] = Res; 9314 copy(SubMask, std::next(Mask.begin(), Part * SliceSize)); 9315 } 9316 if (none_of(ShufflesRes, [](const std::optional<TTI::ShuffleKind> &Res) { 9317 return Res.has_value(); 9318 })) 9319 ShufflesRes.clear(); 9320 return ShufflesRes; 9321 } 9322 9323 std::optional<TargetTransformInfo::ShuffleKind> 9324 BoUpSLP::isGatherShuffledSingleRegisterEntry( 9325 const TreeEntry *TE, ArrayRef<Value *> VL, MutableArrayRef<int> Mask, 9326 SmallVectorImpl<const TreeEntry *> &Entries, unsigned Part) { 9327 Entries.clear(); 9328 // TODO: currently checking only for Scalars in the tree entry, need to count 9329 // reused elements too for better cost estimation. 9330 const EdgeInfo &TEUseEI = TE->UserTreeIndices.front(); 9331 const Instruction *TEInsertPt = &getLastInstructionInBundle(TEUseEI.UserTE); 9332 const BasicBlock *TEInsertBlock = nullptr; 9333 // Main node of PHI entries keeps the correct order of operands/incoming 9334 // blocks. 9335 if (auto *PHI = dyn_cast<PHINode>(TEUseEI.UserTE->getMainOp())) { 9336 TEInsertBlock = PHI->getIncomingBlock(TEUseEI.EdgeIdx); 9337 TEInsertPt = TEInsertBlock->getTerminator(); 9338 } else { 9339 TEInsertBlock = TEInsertPt->getParent(); 9340 } 9341 auto *NodeUI = DT->getNode(TEInsertBlock); 9342 assert(NodeUI && "Should only process reachable instructions"); 9343 SmallPtrSet<Value *, 4> GatheredScalars(VL.begin(), VL.end()); 9344 auto CheckOrdering = [&](const Instruction *InsertPt) { 9345 // Argument InsertPt is an instruction where vector code for some other 9346 // tree entry (one that shares one or more scalars with TE) is going to be 9347 // generated. This lambda returns true if insertion point of vector code 9348 // for the TE dominates that point (otherwise dependency is the other way 9349 // around). The other node is not limited to be of a gather kind. Gather 9350 // nodes are not scheduled and their vector code is inserted before their 9351 // first user. If user is PHI, that is supposed to be at the end of a 9352 // predecessor block. Otherwise it is the last instruction among scalars of 9353 // the user node. So, instead of checking dependency between instructions 9354 // themselves, we check dependency between their insertion points for vector 9355 // code (since each scalar instruction ends up as a lane of a vector 9356 // instruction). 9357 const BasicBlock *InsertBlock = InsertPt->getParent(); 9358 auto *NodeEUI = DT->getNode(InsertBlock); 9359 if (!NodeEUI) 9360 return false; 9361 assert((NodeUI == NodeEUI) == 9362 (NodeUI->getDFSNumIn() == NodeEUI->getDFSNumIn()) && 9363 "Different nodes should have different DFS numbers"); 9364 // Check the order of the gather nodes users. 9365 if (TEInsertPt->getParent() != InsertBlock && 9366 (DT->dominates(NodeUI, NodeEUI) || !DT->dominates(NodeEUI, NodeUI))) 9367 return false; 9368 if (TEInsertPt->getParent() == InsertBlock && 9369 TEInsertPt->comesBefore(InsertPt)) 9370 return false; 9371 return true; 9372 }; 9373 // Find all tree entries used by the gathered values. If no common entries 9374 // found - not a shuffle. 9375 // Here we build a set of tree nodes for each gathered value and trying to 9376 // find the intersection between these sets. 
If we have at least one common 9377 // tree node for each gathered value - we have just a permutation of the 9378 // single vector. If we have 2 different sets, we're in situation where we 9379 // have a permutation of 2 input vectors. 9380 SmallVector<SmallPtrSet<const TreeEntry *, 4>> UsedTEs; 9381 DenseMap<Value *, int> UsedValuesEntry; 9382 for (Value *V : VL) { 9383 if (isConstant(V)) 9384 continue; 9385 // Build a list of tree entries where V is used. 9386 SmallPtrSet<const TreeEntry *, 4> VToTEs; 9387 for (const TreeEntry *TEPtr : ValueToGatherNodes.find(V)->second) { 9388 if (TEPtr == TE) 9389 continue; 9390 assert(any_of(TEPtr->Scalars, 9391 [&](Value *V) { return GatheredScalars.contains(V); }) && 9392 "Must contain at least single gathered value."); 9393 assert(TEPtr->UserTreeIndices.size() == 1 && 9394 "Expected only single user of a gather node."); 9395 const EdgeInfo &UseEI = TEPtr->UserTreeIndices.front(); 9396 9397 PHINode *UserPHI = dyn_cast<PHINode>(UseEI.UserTE->getMainOp()); 9398 const Instruction *InsertPt = 9399 UserPHI ? UserPHI->getIncomingBlock(UseEI.EdgeIdx)->getTerminator() 9400 : &getLastInstructionInBundle(UseEI.UserTE); 9401 if (TEInsertPt == InsertPt) { 9402 // If 2 gathers are operands of the same entry (regardless of whether 9403 // user is PHI or else), compare operands indices, use the earlier one 9404 // as the base. 9405 if (TEUseEI.UserTE == UseEI.UserTE && TEUseEI.EdgeIdx < UseEI.EdgeIdx) 9406 continue; 9407 // If the user instruction is used for some reason in different 9408 // vectorized nodes - make it depend on index. 9409 if (TEUseEI.UserTE != UseEI.UserTE && 9410 TEUseEI.UserTE->Idx < UseEI.UserTE->Idx) 9411 continue; 9412 } 9413 9414 // Check if the user node of the TE comes after user node of TEPtr, 9415 // otherwise TEPtr depends on TE. 9416 if ((TEInsertBlock != InsertPt->getParent() || 9417 TEUseEI.EdgeIdx < UseEI.EdgeIdx || TEUseEI.UserTE != UseEI.UserTE) && 9418 !CheckOrdering(InsertPt)) 9419 continue; 9420 VToTEs.insert(TEPtr); 9421 } 9422 if (const TreeEntry *VTE = getTreeEntry(V)) { 9423 Instruction &LastBundleInst = getLastInstructionInBundle(VTE); 9424 if (&LastBundleInst == TEInsertPt || !CheckOrdering(&LastBundleInst)) 9425 continue; 9426 auto It = MinBWs.find(VTE); 9427 // If vectorize node is demoted - do not match. 9428 if (It != MinBWs.end() && 9429 It->second.first != DL->getTypeSizeInBits(V->getType())) 9430 continue; 9431 VToTEs.insert(VTE); 9432 } 9433 if (VToTEs.empty()) 9434 continue; 9435 if (UsedTEs.empty()) { 9436 // The first iteration, just insert the list of nodes to vector. 9437 UsedTEs.push_back(VToTEs); 9438 UsedValuesEntry.try_emplace(V, 0); 9439 } else { 9440 // Need to check if there are any previously used tree nodes which use V. 9441 // If there are no such nodes, consider that we have another one input 9442 // vector. 9443 SmallPtrSet<const TreeEntry *, 4> SavedVToTEs(VToTEs); 9444 unsigned Idx = 0; 9445 for (SmallPtrSet<const TreeEntry *, 4> &Set : UsedTEs) { 9446 // Do we have a non-empty intersection of previously listed tree entries 9447 // and tree entries using current V? 9448 set_intersect(VToTEs, Set); 9449 if (!VToTEs.empty()) { 9450 // Yes, write the new subset and continue analysis for the next 9451 // scalar. 9452 Set.swap(VToTEs); 9453 break; 9454 } 9455 VToTEs = SavedVToTEs; 9456 ++Idx; 9457 } 9458 // No non-empty intersection found - need to add a second set of possible 9459 // source vectors. 
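      // Informal sketch of the bookkeeping: UsedTEs[0] holds the tree entries
      // that may supply the first source vector and UsedTEs[1] (if created
      // here) the second one; UsedValuesEntry records which of the two sets
      // each gathered scalar belongs to, and that set index later becomes the
      // per-vector offset in the shuffle mask.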
9460 if (Idx == UsedTEs.size()) { 9461 // If the number of input vectors is greater than 2 - not a permutation, 9462 // fallback to the regular gather. 9463 // TODO: support multiple reshuffled nodes. 9464 if (UsedTEs.size() == 2) 9465 continue; 9466 UsedTEs.push_back(SavedVToTEs); 9467 Idx = UsedTEs.size() - 1; 9468 } 9469 UsedValuesEntry.try_emplace(V, Idx); 9470 } 9471 } 9472 9473 if (UsedTEs.empty()) { 9474 Entries.clear(); 9475 return std::nullopt; 9476 } 9477 9478 unsigned VF = 0; 9479 if (UsedTEs.size() == 1) { 9480 // Keep the order to avoid non-determinism. 9481 SmallVector<const TreeEntry *> FirstEntries(UsedTEs.front().begin(), 9482 UsedTEs.front().end()); 9483 sort(FirstEntries, [](const TreeEntry *TE1, const TreeEntry *TE2) { 9484 return TE1->Idx < TE2->Idx; 9485 }); 9486 // Try to find the perfect match in another gather node at first. 9487 auto *It = find_if(FirstEntries, [=](const TreeEntry *EntryPtr) { 9488 return EntryPtr->isSame(VL) || EntryPtr->isSame(TE->Scalars); 9489 }); 9490 if (It != FirstEntries.end() && 9491 ((*It)->getVectorFactor() == VL.size() || 9492 ((*It)->getVectorFactor() == TE->Scalars.size() && 9493 TE->ReuseShuffleIndices.size() == VL.size() && 9494 (*It)->isSame(TE->Scalars)))) { 9495 Entries.push_back(*It); 9496 if ((*It)->getVectorFactor() == VL.size()) { 9497 std::iota(std::next(Mask.begin(), Part * VL.size()), 9498 std::next(Mask.begin(), (Part + 1) * VL.size()), 0); 9499 } else { 9500 SmallVector<int> CommonMask = TE->getCommonMask(); 9501 copy(CommonMask, Mask.begin()); 9502 } 9503 // Clear undef scalars. 9504 for (int I = 0, Sz = VL.size(); I < Sz; ++I) 9505 if (isa<PoisonValue>(VL[I])) 9506 Mask[I] = PoisonMaskElem; 9507 return TargetTransformInfo::SK_PermuteSingleSrc; 9508 } 9509 // No perfect match, just shuffle, so choose the first tree node from the 9510 // tree. 9511 Entries.push_back(FirstEntries.front()); 9512 } else { 9513 // Try to find nodes with the same vector factor. 9514 assert(UsedTEs.size() == 2 && "Expected at max 2 permuted entries."); 9515 // Keep the order of tree nodes to avoid non-determinism. 9516 DenseMap<int, const TreeEntry *> VFToTE; 9517 for (const TreeEntry *TE : UsedTEs.front()) { 9518 unsigned VF = TE->getVectorFactor(); 9519 auto It = VFToTE.find(VF); 9520 if (It != VFToTE.end()) { 9521 if (It->second->Idx > TE->Idx) 9522 It->getSecond() = TE; 9523 continue; 9524 } 9525 VFToTE.try_emplace(VF, TE); 9526 } 9527 // Same, keep the order to avoid non-determinism. 9528 SmallVector<const TreeEntry *> SecondEntries(UsedTEs.back().begin(), 9529 UsedTEs.back().end()); 9530 sort(SecondEntries, [](const TreeEntry *TE1, const TreeEntry *TE2) { 9531 return TE1->Idx < TE2->Idx; 9532 }); 9533 for (const TreeEntry *TE : SecondEntries) { 9534 auto It = VFToTE.find(TE->getVectorFactor()); 9535 if (It != VFToTE.end()) { 9536 VF = It->first; 9537 Entries.push_back(It->second); 9538 Entries.push_back(TE); 9539 break; 9540 } 9541 } 9542 // No 2 source vectors with the same vector factor - just choose 2 with max 9543 // index. 
9544 if (Entries.empty()) { 9545 Entries.push_back( 9546 *std::max_element(UsedTEs.front().begin(), UsedTEs.front().end(), 9547 [](const TreeEntry *TE1, const TreeEntry *TE2) { 9548 return TE1->Idx < TE2->Idx; 9549 })); 9550 Entries.push_back(SecondEntries.front()); 9551 VF = std::max(Entries.front()->getVectorFactor(), 9552 Entries.back()->getVectorFactor()); 9553 } 9554 } 9555 9556 bool IsSplatOrUndefs = isSplat(VL) || all_of(VL, UndefValue::classof); 9557 // Checks if the 2 PHIs are compatible in terms of high possibility to be 9558 // vectorized. 9559 auto AreCompatiblePHIs = [&](Value *V, Value *V1) { 9560 auto *PHI = cast<PHINode>(V); 9561 auto *PHI1 = cast<PHINode>(V1); 9562 // Check that all incoming values are compatible/from same parent (if they 9563 // are instructions). 9564 // The incoming values are compatible if they all are constants, or 9565 // instruction with the same/alternate opcodes from the same basic block. 9566 for (int I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) { 9567 Value *In = PHI->getIncomingValue(I); 9568 Value *In1 = PHI1->getIncomingValue(I); 9569 if (isConstant(In) && isConstant(In1)) 9570 continue; 9571 if (!getSameOpcode({In, In1}, *TLI).getOpcode()) 9572 return false; 9573 if (cast<Instruction>(In)->getParent() != 9574 cast<Instruction>(In1)->getParent()) 9575 return false; 9576 } 9577 return true; 9578 }; 9579 // Check if the value can be ignored during analysis for shuffled gathers. 9580 // We suppose it is better to ignore instruction, which do not form splats, 9581 // are not vectorized/not extractelements (these instructions will be handled 9582 // by extractelements processing) or may form vector node in future. 9583 auto MightBeIgnored = [=](Value *V) { 9584 auto *I = dyn_cast<Instruction>(V); 9585 return I && !IsSplatOrUndefs && !ScalarToTreeEntry.count(I) && 9586 !isVectorLikeInstWithConstOps(I) && 9587 !areAllUsersVectorized(I, UserIgnoreList) && isSimple(I); 9588 }; 9589 // Check that the neighbor instruction may form a full vector node with the 9590 // current instruction V. It is possible, if they have same/alternate opcode 9591 // and same parent basic block. 9592 auto NeighborMightBeIgnored = [&](Value *V, int Idx) { 9593 Value *V1 = VL[Idx]; 9594 bool UsedInSameVTE = false; 9595 auto It = UsedValuesEntry.find(V1); 9596 if (It != UsedValuesEntry.end()) 9597 UsedInSameVTE = It->second == UsedValuesEntry.find(V)->second; 9598 return V != V1 && MightBeIgnored(V1) && !UsedInSameVTE && 9599 getSameOpcode({V, V1}, *TLI).getOpcode() && 9600 cast<Instruction>(V)->getParent() == 9601 cast<Instruction>(V1)->getParent() && 9602 (!isa<PHINode>(V1) || AreCompatiblePHIs(V, V1)); 9603 }; 9604 // Build a shuffle mask for better cost estimation and vector emission. 9605 SmallBitVector UsedIdxs(Entries.size()); 9606 SmallVector<std::pair<unsigned, int>> EntryLanes; 9607 for (int I = 0, E = VL.size(); I < E; ++I) { 9608 Value *V = VL[I]; 9609 auto It = UsedValuesEntry.find(V); 9610 if (It == UsedValuesEntry.end()) 9611 continue; 9612 // Do not try to shuffle scalars, if they are constants, or instructions 9613 // that can be vectorized as a result of the following vector build 9614 // vectorization. 
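    // For instance (informally), a scalar whose neighbour in VL has the same
    // opcode and parent block is likely to be revectorized as part of a
    // future node, so it is cheaper to leave it to the gather than to force
    // it into this shuffle.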
9615 if (isConstant(V) || (MightBeIgnored(V) && 9616 ((I > 0 && NeighborMightBeIgnored(V, I - 1)) || 9617 (I != E - 1 && NeighborMightBeIgnored(V, I + 1))))) 9618 continue; 9619 unsigned Idx = It->second; 9620 EntryLanes.emplace_back(Idx, I); 9621 UsedIdxs.set(Idx); 9622 } 9623 // Iterate through all shuffled scalars and select entries, which can be used 9624 // for final shuffle. 9625 SmallVector<const TreeEntry *> TempEntries; 9626 for (unsigned I = 0, Sz = Entries.size(); I < Sz; ++I) { 9627 if (!UsedIdxs.test(I)) 9628 continue; 9629 // Fix the entry number for the given scalar. If it is the first entry, set 9630 // Pair.first to 0, otherwise to 1 (currently select at max 2 nodes). 9631 // These indices are used when calculating final shuffle mask as the vector 9632 // offset. 9633 for (std::pair<unsigned, int> &Pair : EntryLanes) 9634 if (Pair.first == I) 9635 Pair.first = TempEntries.size(); 9636 TempEntries.push_back(Entries[I]); 9637 } 9638 Entries.swap(TempEntries); 9639 if (EntryLanes.size() == Entries.size() && 9640 !VL.equals(ArrayRef(TE->Scalars) 9641 .slice(Part * VL.size(), 9642 std::min<int>(VL.size(), TE->Scalars.size())))) { 9643 // We may have here 1 or 2 entries only. If the number of scalars is equal 9644 // to the number of entries, no need to do the analysis, it is not very 9645 // profitable. Since VL is not the same as TE->Scalars, it means we already 9646 // have some shuffles before. Cut off not profitable case. 9647 Entries.clear(); 9648 return std::nullopt; 9649 } 9650 // Build the final mask, check for the identity shuffle, if possible. 9651 bool IsIdentity = Entries.size() == 1; 9652 // Pair.first is the offset to the vector, while Pair.second is the index of 9653 // scalar in the list. 9654 for (const std::pair<unsigned, int> &Pair : EntryLanes) { 9655 unsigned Idx = Part * VL.size() + Pair.second; 9656 Mask[Idx] = Pair.first * VF + 9657 Entries[Pair.first]->findLaneForValue(VL[Pair.second]); 9658 IsIdentity &= Mask[Idx] == Pair.second; 9659 } 9660 switch (Entries.size()) { 9661 case 1: 9662 if (IsIdentity || EntryLanes.size() > 1 || VL.size() <= 2) 9663 return TargetTransformInfo::SK_PermuteSingleSrc; 9664 break; 9665 case 2: 9666 if (EntryLanes.size() > 2 || VL.size() <= 2) 9667 return TargetTransformInfo::SK_PermuteTwoSrc; 9668 break; 9669 default: 9670 break; 9671 } 9672 Entries.clear(); 9673 // Clear the corresponding mask elements. 9674 std::fill(std::next(Mask.begin(), Part * VL.size()), 9675 std::next(Mask.begin(), (Part + 1) * VL.size()), PoisonMaskElem); 9676 return std::nullopt; 9677 } 9678 9679 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> 9680 BoUpSLP::isGatherShuffledEntry( 9681 const TreeEntry *TE, ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask, 9682 SmallVectorImpl<SmallVector<const TreeEntry *>> &Entries, 9683 unsigned NumParts) { 9684 assert(NumParts > 0 && NumParts < VL.size() && 9685 "Expected positive number of registers."); 9686 Entries.clear(); 9687 // No need to check for the topmost gather node. 
9688 if (TE == VectorizableTree.front().get()) 9689 return {}; 9690 Mask.assign(VL.size(), PoisonMaskElem); 9691 assert(TE->UserTreeIndices.size() == 1 && 9692 "Expected only single user of the gather node."); 9693 assert(VL.size() % NumParts == 0 && 9694 "Number of scalars must be divisible by NumParts."); 9695 unsigned SliceSize = VL.size() / NumParts; 9696 SmallVector<std::optional<TTI::ShuffleKind>> Res; 9697 for (unsigned Part = 0; Part < NumParts; ++Part) { 9698 ArrayRef<Value *> SubVL = VL.slice(Part * SliceSize, SliceSize); 9699 SmallVectorImpl<const TreeEntry *> &SubEntries = Entries.emplace_back(); 9700 std::optional<TTI::ShuffleKind> SubRes = 9701 isGatherShuffledSingleRegisterEntry(TE, SubVL, Mask, SubEntries, Part); 9702 if (!SubRes) 9703 SubEntries.clear(); 9704 Res.push_back(SubRes); 9705 if (SubEntries.size() == 1 && *SubRes == TTI::SK_PermuteSingleSrc && 9706 SubEntries.front()->getVectorFactor() == VL.size() && 9707 (SubEntries.front()->isSame(TE->Scalars) || 9708 SubEntries.front()->isSame(VL))) { 9709 SmallVector<const TreeEntry *> LocalSubEntries; 9710 LocalSubEntries.swap(SubEntries); 9711 Entries.clear(); 9712 Res.clear(); 9713 std::iota(Mask.begin(), Mask.end(), 0); 9714 // Clear undef scalars. 9715 for (int I = 0, Sz = VL.size(); I < Sz; ++I) 9716 if (isa<PoisonValue>(VL[I])) 9717 Mask[I] = PoisonMaskElem; 9718 Entries.emplace_back(1, LocalSubEntries.front()); 9719 Res.push_back(TargetTransformInfo::SK_PermuteSingleSrc); 9720 return Res; 9721 } 9722 } 9723 if (all_of(Res, 9724 [](const std::optional<TTI::ShuffleKind> &SK) { return !SK; })) { 9725 Entries.clear(); 9726 return {}; 9727 } 9728 return Res; 9729 } 9730 9731 InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL, 9732 bool ForPoisonSrc) const { 9733 // Find the type of the operands in VL. 9734 Type *ScalarTy = VL[0]->getType(); 9735 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 9736 ScalarTy = SI->getValueOperand()->getType(); 9737 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 9738 bool DuplicateNonConst = false; 9739 // Find the cost of inserting/extracting values from the vector. 9740 // Check if the same elements are inserted several times and count them as 9741 // shuffle candidates. 9742 APInt ShuffledElements = APInt::getZero(VL.size()); 9743 DenseSet<Value *> UniqueElements; 9744 constexpr TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 9745 InstructionCost Cost; 9746 auto EstimateInsertCost = [&](unsigned I, Value *V) { 9747 if (!ForPoisonSrc) 9748 Cost += 9749 TTI->getVectorInstrCost(Instruction::InsertElement, VecTy, CostKind, 9750 I, Constant::getNullValue(VecTy), V); 9751 }; 9752 for (unsigned I = 0, E = VL.size(); I < E; ++I) { 9753 Value *V = VL[I]; 9754 // No need to shuffle duplicates for constants. 9755 if ((ForPoisonSrc && isConstant(V)) || isa<UndefValue>(V)) { 9756 ShuffledElements.setBit(I); 9757 continue; 9758 } 9759 if (!UniqueElements.insert(V).second) { 9760 DuplicateNonConst = true; 9761 ShuffledElements.setBit(I); 9762 continue; 9763 } 9764 EstimateInsertCost(I, V); 9765 } 9766 if (ForPoisonSrc) 9767 Cost = 9768 TTI->getScalarizationOverhead(VecTy, ~ShuffledElements, /*Insert*/ true, 9769 /*Extract*/ false, CostKind); 9770 if (DuplicateNonConst) 9771 Cost += 9772 TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy); 9773 return Cost; 9774 } 9775 9776 // Perform operand reordering on the instructions in VL and return the reordered 9777 // operands in Left and Right. 
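// Illustrative example with hypothetical values: for VL = { add %a0, %b0 ;
// add %b1, %a1 }, where %a0/%a1 and %b0/%b1 are pairwise isomorphic (e.g.
// loads from the same base pointers), the reordering may swap the operands of
// the second add so that Left = { %a0, %a1 } and Right = { %b0, %b1 }, making
// each operand bundle itself a good candidate for vectorization.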
9778 void BoUpSLP::reorderInputsAccordingToOpcode( 9779 ArrayRef<Value *> VL, SmallVectorImpl<Value *> &Left, 9780 SmallVectorImpl<Value *> &Right, const TargetLibraryInfo &TLI, 9781 const DataLayout &DL, ScalarEvolution &SE, const BoUpSLP &R) { 9782 if (VL.empty()) 9783 return; 9784 VLOperands Ops(VL, TLI, DL, SE, R); 9785 // Reorder the operands in place. 9786 Ops.reorder(); 9787 Left = Ops.getVL(0); 9788 Right = Ops.getVL(1); 9789 } 9790 9791 Instruction &BoUpSLP::getLastInstructionInBundle(const TreeEntry *E) { 9792 auto &Res = EntryToLastInstruction.FindAndConstruct(E); 9793 if (Res.second) 9794 return *Res.second; 9795 // Get the basic block this bundle is in. All instructions in the bundle 9796 // should be in this block (except for extractelement-like instructions with 9797 // constant indeces). 9798 auto *Front = E->getMainOp(); 9799 auto *BB = Front->getParent(); 9800 assert(llvm::all_of(E->Scalars, [=](Value *V) -> bool { 9801 if (E->getOpcode() == Instruction::GetElementPtr && 9802 !isa<GetElementPtrInst>(V)) 9803 return true; 9804 auto *I = cast<Instruction>(V); 9805 return !E->isOpcodeOrAlt(I) || I->getParent() == BB || 9806 isVectorLikeInstWithConstOps(I); 9807 })); 9808 9809 auto FindLastInst = [&]() { 9810 Instruction *LastInst = Front; 9811 for (Value *V : E->Scalars) { 9812 auto *I = dyn_cast<Instruction>(V); 9813 if (!I) 9814 continue; 9815 if (LastInst->getParent() == I->getParent()) { 9816 if (LastInst->comesBefore(I)) 9817 LastInst = I; 9818 continue; 9819 } 9820 assert(((E->getOpcode() == Instruction::GetElementPtr && 9821 !isa<GetElementPtrInst>(I)) || 9822 (isVectorLikeInstWithConstOps(LastInst) && 9823 isVectorLikeInstWithConstOps(I))) && 9824 "Expected vector-like or non-GEP in GEP node insts only."); 9825 if (!DT->isReachableFromEntry(LastInst->getParent())) { 9826 LastInst = I; 9827 continue; 9828 } 9829 if (!DT->isReachableFromEntry(I->getParent())) 9830 continue; 9831 auto *NodeA = DT->getNode(LastInst->getParent()); 9832 auto *NodeB = DT->getNode(I->getParent()); 9833 assert(NodeA && "Should only process reachable instructions"); 9834 assert(NodeB && "Should only process reachable instructions"); 9835 assert((NodeA == NodeB) == 9836 (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) && 9837 "Different nodes should have different DFS numbers"); 9838 if (NodeA->getDFSNumIn() < NodeB->getDFSNumIn()) 9839 LastInst = I; 9840 } 9841 BB = LastInst->getParent(); 9842 return LastInst; 9843 }; 9844 9845 auto FindFirstInst = [&]() { 9846 Instruction *FirstInst = Front; 9847 for (Value *V : E->Scalars) { 9848 auto *I = dyn_cast<Instruction>(V); 9849 if (!I) 9850 continue; 9851 if (FirstInst->getParent() == I->getParent()) { 9852 if (I->comesBefore(FirstInst)) 9853 FirstInst = I; 9854 continue; 9855 } 9856 assert(((E->getOpcode() == Instruction::GetElementPtr && 9857 !isa<GetElementPtrInst>(I)) || 9858 (isVectorLikeInstWithConstOps(FirstInst) && 9859 isVectorLikeInstWithConstOps(I))) && 9860 "Expected vector-like or non-GEP in GEP node insts only."); 9861 if (!DT->isReachableFromEntry(FirstInst->getParent())) { 9862 FirstInst = I; 9863 continue; 9864 } 9865 if (!DT->isReachableFromEntry(I->getParent())) 9866 continue; 9867 auto *NodeA = DT->getNode(FirstInst->getParent()); 9868 auto *NodeB = DT->getNode(I->getParent()); 9869 assert(NodeA && "Should only process reachable instructions"); 9870 assert(NodeB && "Should only process reachable instructions"); 9871 assert((NodeA == NodeB) == 9872 (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) && 9873 "Different nodes should have 
different DFS numbers"); 9874 if (NodeA->getDFSNumIn() > NodeB->getDFSNumIn()) 9875 FirstInst = I; 9876 } 9877 return FirstInst; 9878 }; 9879 9880 // Set the insert point to the beginning of the basic block if the entry 9881 // should not be scheduled. 9882 if (doesNotNeedToSchedule(E->Scalars) || 9883 (E->State != TreeEntry::NeedToGather && 9884 all_of(E->Scalars, isVectorLikeInstWithConstOps))) { 9885 if ((E->getOpcode() == Instruction::GetElementPtr && 9886 any_of(E->Scalars, 9887 [](Value *V) { 9888 return !isa<GetElementPtrInst>(V) && isa<Instruction>(V); 9889 })) || 9890 all_of(E->Scalars, [](Value *V) { 9891 return !isVectorLikeInstWithConstOps(V) && isUsedOutsideBlock(V); 9892 })) 9893 Res.second = FindLastInst(); 9894 else 9895 Res.second = FindFirstInst(); 9896 return *Res.second; 9897 } 9898 9899 // Find the last instruction. The common case should be that BB has been 9900 // scheduled, and the last instruction is VL.back(). So we start with 9901 // VL.back() and iterate over schedule data until we reach the end of the 9902 // bundle. The end of the bundle is marked by null ScheduleData. 9903 if (BlocksSchedules.count(BB)) { 9904 Value *V = E->isOneOf(E->Scalars.back()); 9905 if (doesNotNeedToBeScheduled(V)) 9906 V = *find_if_not(E->Scalars, doesNotNeedToBeScheduled); 9907 auto *Bundle = BlocksSchedules[BB]->getScheduleData(V); 9908 if (Bundle && Bundle->isPartOfBundle()) 9909 for (; Bundle; Bundle = Bundle->NextInBundle) 9910 if (Bundle->OpValue == Bundle->Inst) 9911 Res.second = Bundle->Inst; 9912 } 9913 9914 // LastInst can still be null at this point if there's either not an entry 9915 // for BB in BlocksSchedules or there's no ScheduleData available for 9916 // VL.back(). This can be the case if buildTree_rec aborts for various 9917 // reasons (e.g., the maximum recursion depth is reached, the maximum region 9918 // size is reached, etc.). ScheduleData is initialized in the scheduling 9919 // "dry-run". 9920 // 9921 // If this happens, we can still find the last instruction by brute force. We 9922 // iterate forwards from Front (inclusive) until we either see all 9923 // instructions in the bundle or reach the end of the block. If Front is the 9924 // last instruction in program order, LastInst will be set to Front, and we 9925 // will visit all the remaining instructions in the block. 9926 // 9927 // One of the reasons we exit early from buildTree_rec is to place an upper 9928 // bound on compile-time. Thus, taking an additional compile-time hit here is 9929 // not ideal. However, this should be exceedingly rare since it requires that 9930 // we both exit early from buildTree_rec and that the bundle be out-of-order 9931 // (causing us to iterate all the way to the end of the block). 9932 if (!Res.second) 9933 Res.second = FindLastInst(); 9934 assert(Res.second && "Failed to find last instruction in bundle"); 9935 return *Res.second; 9936 } 9937 9938 void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) { 9939 auto *Front = E->getMainOp(); 9940 Instruction *LastInst = &getLastInstructionInBundle(E); 9941 assert(LastInst && "Failed to find last instruction in bundle"); 9942 BasicBlock::iterator LastInstIt = LastInst->getIterator(); 9943 // If the instruction is PHI, set the insert point after all the PHIs. 
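  // (PHI nodes must stay grouped at the top of their block, so vector code
  // for a PHI bundle is emitted at the first non-PHI position instead.)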
9944 bool IsPHI = isa<PHINode>(LastInst); 9945 if (IsPHI) 9946 LastInstIt = LastInst->getParent()->getFirstNonPHIIt(); 9947 if (IsPHI || (E->State != TreeEntry::NeedToGather && 9948 doesNotNeedToSchedule(E->Scalars))) { 9949 Builder.SetInsertPoint(LastInst->getParent(), LastInstIt); 9950 } else { 9951 // Set the insertion point after the last instruction in the bundle. Set the 9952 // debug location to Front. 9953 Builder.SetInsertPoint( 9954 LastInst->getParent(), 9955 LastInst->getNextNonDebugInstruction()->getIterator()); 9956 } 9957 Builder.SetCurrentDebugLocation(Front->getDebugLoc()); 9958 } 9959 9960 Value *BoUpSLP::gather(ArrayRef<Value *> VL, Value *Root) { 9961 // List of instructions/lanes from current block and/or the blocks which are 9962 // part of the current loop. These instructions will be inserted at the end to 9963 // make it possible to optimize loops and hoist invariant instructions out of 9964 // the loops body with better chances for success. 9965 SmallVector<std::pair<Value *, unsigned>, 4> PostponedInsts; 9966 SmallSet<int, 4> PostponedIndices; 9967 Loop *L = LI->getLoopFor(Builder.GetInsertBlock()); 9968 auto &&CheckPredecessor = [](BasicBlock *InstBB, BasicBlock *InsertBB) { 9969 SmallPtrSet<BasicBlock *, 4> Visited; 9970 while (InsertBB && InsertBB != InstBB && Visited.insert(InsertBB).second) 9971 InsertBB = InsertBB->getSinglePredecessor(); 9972 return InsertBB && InsertBB == InstBB; 9973 }; 9974 for (int I = 0, E = VL.size(); I < E; ++I) { 9975 if (auto *Inst = dyn_cast<Instruction>(VL[I])) 9976 if ((CheckPredecessor(Inst->getParent(), Builder.GetInsertBlock()) || 9977 getTreeEntry(Inst) || 9978 (L && (!Root || L->isLoopInvariant(Root)) && L->contains(Inst))) && 9979 PostponedIndices.insert(I).second) 9980 PostponedInsts.emplace_back(Inst, I); 9981 } 9982 9983 auto &&CreateInsertElement = [this](Value *Vec, Value *V, unsigned Pos) { 9984 Vec = Builder.CreateInsertElement(Vec, V, Builder.getInt32(Pos)); 9985 auto *InsElt = dyn_cast<InsertElementInst>(Vec); 9986 if (!InsElt) 9987 return Vec; 9988 GatherShuffleExtractSeq.insert(InsElt); 9989 CSEBlocks.insert(InsElt->getParent()); 9990 // Add to our 'need-to-extract' list. 9991 if (isa<Instruction>(V)) { 9992 if (TreeEntry *Entry = getTreeEntry(V)) { 9993 // Find which lane we need to extract. 9994 unsigned FoundLane = Entry->findLaneForValue(V); 9995 ExternalUses.emplace_back(V, InsElt, FoundLane); 9996 } 9997 } 9998 return Vec; 9999 }; 10000 Value *Val0 = 10001 isa<StoreInst>(VL[0]) ? cast<StoreInst>(VL[0])->getValueOperand() : VL[0]; 10002 FixedVectorType *VecTy = FixedVectorType::get(Val0->getType(), VL.size()); 10003 Value *Vec = Root ? Root : PoisonValue::get(VecTy); 10004 SmallVector<int> NonConsts; 10005 // Insert constant values at first. 10006 for (int I = 0, E = VL.size(); I < E; ++I) { 10007 if (PostponedIndices.contains(I)) 10008 continue; 10009 if (!isConstant(VL[I])) { 10010 NonConsts.push_back(I); 10011 continue; 10012 } 10013 if (Root) { 10014 if (!isa<UndefValue>(VL[I])) { 10015 NonConsts.push_back(I); 10016 continue; 10017 } 10018 if (isa<PoisonValue>(VL[I])) 10019 continue; 10020 if (auto *SV = dyn_cast<ShuffleVectorInst>(Root)) { 10021 if (SV->getMaskValue(I) == PoisonMaskElem) 10022 continue; 10023 } 10024 } 10025 Vec = CreateInsertElement(Vec, VL[I], I); 10026 } 10027 // Insert non-constant values. 
  for (int I : NonConsts)
    Vec = CreateInsertElement(Vec, VL[I], I);
  // Append instructions, which are/may be part of the loop, at the end to make
  // it possible to hoist non-loop-based instructions.
  for (const std::pair<Value *, unsigned> &Pair : PostponedInsts)
    Vec = CreateInsertElement(Vec, Pair.first, Pair.second);

  return Vec;
}

/// Merges shuffle masks and emits the final shuffle instruction, if required.
/// It supports shuffling of 2 input vectors. It implements lazy shuffle
/// emission: the actual shuffle instruction is generated only if it is really
/// required; otherwise, the shuffle instruction emission is delayed till the
/// end of the process, to reduce the number of emitted instructions and
/// further analysis/transformations.
/// The class will also look through the previously emitted shuffle
/// instructions and properly mark indices in the mask as undef.
/// For example, given the code
/// \code
/// %s1 = shufflevector <2 x ty> %0, poison, <1, 0>
/// %s2 = shufflevector <2 x ty> %1, poison, <1, 0>
/// \endcode
/// and if we need to emit a shuffle of %s1 and %s2 with mask <1, 0, 3, 2>, it
/// will look through %s1 and %s2 and emit
/// \code
/// %res = shufflevector <2 x ty> %0, %1, <0, 1, 2, 3>
/// \endcode
/// instead.
/// If 2 operands are of different sizes, the smallest one will be resized and
/// the mask recalculated properly.
/// For example, given the code
/// \code
/// %s1 = shufflevector <2 x ty> %0, poison, <1, 0, 1, 0>
/// %s2 = shufflevector <2 x ty> %1, poison, <1, 0, 1, 0>
/// \endcode
/// and if we need to emit a shuffle of %s1 and %s2 with mask <1, 0, 5, 4>, it
/// will look through %s1 and %s2 and emit
/// \code
/// %res = shufflevector <2 x ty> %0, %1, <0, 1, 2, 3>
/// \endcode
/// instead.
class BoUpSLP::ShuffleInstructionBuilder final : public BaseShuffleAnalysis {
  bool IsFinalized = false;
  /// Combined mask for all applied operands and masks. It is built during
  /// analysis and actual emission of shuffle vector instructions.
  SmallVector<int> CommonMask;
  /// List of operands for the shuffle vector instruction. It holds at most 2
  /// operands; if a 3rd one is going to be added, the first 2 are combined
  /// into a shuffle with the \p CommonMask mask, the first operand is set to
  /// be the resulting shuffle and the second operand is set to be the newly
  /// added operand. The \p CommonMask is transformed in the proper way after
  /// that.
  SmallVector<Value *, 2> InVectors;
  IRBuilderBase &Builder;
  BoUpSLP &R;

  class ShuffleIRBuilder {
    IRBuilderBase &Builder;
    /// Holds all of the instructions that we gathered.
    SetVector<Instruction *> &GatherShuffleExtractSeq;
    /// A list of blocks that we are going to CSE.
    DenseSet<BasicBlock *> &CSEBlocks;

  public:
    ShuffleIRBuilder(IRBuilderBase &Builder,
                     SetVector<Instruction *> &GatherShuffleExtractSeq,
                     DenseSet<BasicBlock *> &CSEBlocks)
        : Builder(Builder), GatherShuffleExtractSeq(GatherShuffleExtractSeq),
          CSEBlocks(CSEBlocks) {}
    ~ShuffleIRBuilder() = default;
    /// Creates a shufflevector for the 2 operands with the given mask.
10099 Value *createShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask) { 10100 Value *Vec = Builder.CreateShuffleVector(V1, V2, Mask); 10101 if (auto *I = dyn_cast<Instruction>(Vec)) { 10102 GatherShuffleExtractSeq.insert(I); 10103 CSEBlocks.insert(I->getParent()); 10104 } 10105 return Vec; 10106 } 10107 /// Creates permutation of the single vector operand with the given mask, if 10108 /// it is not identity mask. 10109 Value *createShuffleVector(Value *V1, ArrayRef<int> Mask) { 10110 if (Mask.empty()) 10111 return V1; 10112 unsigned VF = Mask.size(); 10113 unsigned LocalVF = cast<FixedVectorType>(V1->getType())->getNumElements(); 10114 if (VF == LocalVF && ShuffleVectorInst::isIdentityMask(Mask, VF)) 10115 return V1; 10116 Value *Vec = Builder.CreateShuffleVector(V1, Mask); 10117 if (auto *I = dyn_cast<Instruction>(Vec)) { 10118 GatherShuffleExtractSeq.insert(I); 10119 CSEBlocks.insert(I->getParent()); 10120 } 10121 return Vec; 10122 } 10123 Value *createIdentity(Value *V) { return V; } 10124 Value *createPoison(Type *Ty, unsigned VF) { 10125 return PoisonValue::get(FixedVectorType::get(Ty, VF)); 10126 } 10127 /// Resizes 2 input vector to match the sizes, if the they are not equal 10128 /// yet. The smallest vector is resized to the size of the larger vector. 10129 void resizeToMatch(Value *&V1, Value *&V2) { 10130 if (V1->getType() == V2->getType()) 10131 return; 10132 int V1VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 10133 int V2VF = cast<FixedVectorType>(V2->getType())->getNumElements(); 10134 int VF = std::max(V1VF, V2VF); 10135 int MinVF = std::min(V1VF, V2VF); 10136 SmallVector<int> IdentityMask(VF, PoisonMaskElem); 10137 std::iota(IdentityMask.begin(), std::next(IdentityMask.begin(), MinVF), 10138 0); 10139 Value *&Op = MinVF == V1VF ? V1 : V2; 10140 Op = Builder.CreateShuffleVector(Op, IdentityMask); 10141 if (auto *I = dyn_cast<Instruction>(Op)) { 10142 GatherShuffleExtractSeq.insert(I); 10143 CSEBlocks.insert(I->getParent()); 10144 } 10145 if (MinVF == V1VF) 10146 V1 = Op; 10147 else 10148 V2 = Op; 10149 } 10150 }; 10151 10152 /// Smart shuffle instruction emission, walks through shuffles trees and 10153 /// tries to find the best matching vector for the actual shuffle 10154 /// instruction. 10155 Value *createShuffle(Value *V1, Value *V2, ArrayRef<int> Mask) { 10156 assert(V1 && "Expected at least one vector value."); 10157 ShuffleIRBuilder ShuffleBuilder(Builder, R.GatherShuffleExtractSeq, 10158 R.CSEBlocks); 10159 return BaseShuffleAnalysis::createShuffle<Value *>(V1, V2, Mask, 10160 ShuffleBuilder); 10161 } 10162 10163 /// Transforms mask \p CommonMask per given \p Mask to make proper set after 10164 /// shuffle emission. 10165 static void transformMaskAfterShuffle(MutableArrayRef<int> CommonMask, 10166 ArrayRef<int> Mask) { 10167 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10168 if (Mask[Idx] != PoisonMaskElem) 10169 CommonMask[Idx] = Idx; 10170 } 10171 10172 public: 10173 ShuffleInstructionBuilder(IRBuilderBase &Builder, BoUpSLP &R) 10174 : Builder(Builder), R(R) {} 10175 10176 /// Adjusts extractelements after reusing them. 
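  /// Roughly (an informal summary, not a contract): the extracts' source
  /// vectors are replaced by their vectorized values, extracts whose only
  /// remaining use is vectorized are erased, and, when the extracts span
  /// several registers, the per-register pieces are shuffled back together
  /// into one wide vector that can then be used as the gather input.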
10177 Value *adjustExtracts(const TreeEntry *E, MutableArrayRef<int> Mask, 10178 ArrayRef<std::optional<TTI::ShuffleKind>> ShuffleKinds, 10179 unsigned NumParts, bool &UseVecBaseAsInput) { 10180 UseVecBaseAsInput = false; 10181 SmallPtrSet<Value *, 4> UniqueBases; 10182 Value *VecBase = nullptr; 10183 for (int I = 0, Sz = Mask.size(); I < Sz; ++I) { 10184 int Idx = Mask[I]; 10185 if (Idx == PoisonMaskElem) 10186 continue; 10187 auto *EI = cast<ExtractElementInst>(E->Scalars[I]); 10188 VecBase = EI->getVectorOperand(); 10189 if (const TreeEntry *TE = R.getTreeEntry(VecBase)) 10190 VecBase = TE->VectorizedValue; 10191 assert(VecBase && "Expected vectorized value."); 10192 UniqueBases.insert(VecBase); 10193 // If the only one use is vectorized - can delete the extractelement 10194 // itself. 10195 if (!EI->hasOneUse() || any_of(EI->users(), [&](User *U) { 10196 return !R.ScalarToTreeEntry.count(U); 10197 })) 10198 continue; 10199 R.eraseInstruction(EI); 10200 } 10201 if (NumParts == 1 || UniqueBases.size() == 1) 10202 return VecBase; 10203 UseVecBaseAsInput = true; 10204 auto TransformToIdentity = [](MutableArrayRef<int> Mask) { 10205 for (auto [I, Idx] : enumerate(Mask)) 10206 if (Idx != PoisonMaskElem) 10207 Idx = I; 10208 }; 10209 // Perform multi-register vector shuffle, joining them into a single virtual 10210 // long vector. 10211 // Need to shuffle each part independently and then insert all this parts 10212 // into a long virtual vector register, forming the original vector. 10213 Value *Vec = nullptr; 10214 SmallVector<int> VecMask(Mask.size(), PoisonMaskElem); 10215 unsigned SliceSize = E->Scalars.size() / NumParts; 10216 for (unsigned Part = 0; Part < NumParts; ++Part) { 10217 ArrayRef<Value *> VL = 10218 ArrayRef(E->Scalars).slice(Part * SliceSize, SliceSize); 10219 MutableArrayRef<int> SubMask = Mask.slice(Part * SliceSize, SliceSize); 10220 constexpr int MaxBases = 2; 10221 SmallVector<Value *, MaxBases> Bases(MaxBases); 10222 #ifndef NDEBUG 10223 int PrevSize = 0; 10224 #endif // NDEBUG 10225 for (const auto [I, V]: enumerate(VL)) { 10226 if (SubMask[I] == PoisonMaskElem) 10227 continue; 10228 Value *VecOp = cast<ExtractElementInst>(V)->getVectorOperand(); 10229 if (const TreeEntry *TE = R.getTreeEntry(VecOp)) 10230 VecOp = TE->VectorizedValue; 10231 assert(VecOp && "Expected vectorized value."); 10232 const int Size = 10233 cast<FixedVectorType>(VecOp->getType())->getNumElements(); 10234 #ifndef NDEBUG 10235 assert((PrevSize == Size || PrevSize == 0) && 10236 "Expected vectors of the same size."); 10237 PrevSize = Size; 10238 #endif // NDEBUG 10239 Bases[SubMask[I] < Size ? 
0 : 1] = VecOp; 10240 } 10241 if (!Bases.front()) 10242 continue; 10243 Value *SubVec; 10244 if (Bases.back()) { 10245 SubVec = createShuffle(Bases.front(), Bases.back(), SubMask); 10246 TransformToIdentity(SubMask); 10247 } else { 10248 SubVec = Bases.front(); 10249 } 10250 if (!Vec) { 10251 Vec = SubVec; 10252 assert((Part == 0 || all_of(seq<unsigned>(0, Part), 10253 [&](unsigned P) { 10254 ArrayRef<int> SubMask = 10255 Mask.slice(P * SliceSize, SliceSize); 10256 return all_of(SubMask, [](int Idx) { 10257 return Idx == PoisonMaskElem; 10258 }); 10259 })) && 10260 "Expected first part or all previous parts masked."); 10261 copy(SubMask, std::next(VecMask.begin(), Part * SliceSize)); 10262 } else { 10263 unsigned VF = cast<FixedVectorType>(Vec->getType())->getNumElements(); 10264 if (Vec->getType() != SubVec->getType()) { 10265 unsigned SubVecVF = 10266 cast<FixedVectorType>(SubVec->getType())->getNumElements(); 10267 VF = std::max(VF, SubVecVF); 10268 } 10269 // Adjust SubMask. 10270 for (auto [I, Idx] : enumerate(SubMask)) 10271 if (Idx != PoisonMaskElem) 10272 Idx += VF; 10273 copy(SubMask, std::next(VecMask.begin(), Part * SliceSize)); 10274 Vec = createShuffle(Vec, SubVec, VecMask); 10275 TransformToIdentity(VecMask); 10276 } 10277 } 10278 copy(VecMask, Mask.begin()); 10279 return Vec; 10280 } 10281 /// Checks if the specified entry \p E needs to be delayed because of its 10282 /// dependency nodes. 10283 std::optional<Value *> 10284 needToDelay(const TreeEntry *E, 10285 ArrayRef<SmallVector<const TreeEntry *>> Deps) const { 10286 // No need to delay emission if all deps are ready. 10287 if (all_of(Deps, [](ArrayRef<const TreeEntry *> TEs) { 10288 return all_of( 10289 TEs, [](const TreeEntry *TE) { return TE->VectorizedValue; }); 10290 })) 10291 return std::nullopt; 10292 // Postpone gather emission, will be emitted after the end of the 10293 // process to keep correct order. 10294 auto *VecTy = FixedVectorType::get(E->Scalars.front()->getType(), 10295 E->getVectorFactor()); 10296 return Builder.CreateAlignedLoad( 10297 VecTy, PoisonValue::get(PointerType::getUnqual(VecTy->getContext())), 10298 MaybeAlign()); 10299 } 10300 /// Adds 2 input vectors (in form of tree entries) and the mask for their 10301 /// shuffling. 10302 void add(const TreeEntry &E1, const TreeEntry &E2, ArrayRef<int> Mask) { 10303 add(E1.VectorizedValue, E2.VectorizedValue, Mask); 10304 } 10305 /// Adds single input vector (in form of tree entry) and the mask for its 10306 /// shuffling. 10307 void add(const TreeEntry &E1, ArrayRef<int> Mask) { 10308 add(E1.VectorizedValue, Mask); 10309 } 10310 /// Adds 2 input vectors and the mask for their shuffling. 
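/// A minimal sketch of the lazy behavior (the operands are hypothetical): the
/// first call only records the pair and the mask, e.g.
/// \code
/// add(%a, %b, <0, 5, poison, poison>); // CommonMask = <0, 5, P, P>, no IR yet
/// add(%c, %d, <poison, poison, 1, 4>); // folds %a/%b per CommonMask, shuffles
///                                      // %c/%d, and lanes 2-3 of CommonMask
///                                      // now point into the new pair
///                                      // (index + mask size)
/// \endcode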
10311 void add(Value *V1, Value *V2, ArrayRef<int> Mask) { 10312 assert(V1 && V2 && !Mask.empty() && "Expected non-empty input vectors."); 10313 if (InVectors.empty()) { 10314 InVectors.push_back(V1); 10315 InVectors.push_back(V2); 10316 CommonMask.assign(Mask.begin(), Mask.end()); 10317 return; 10318 } 10319 Value *Vec = InVectors.front(); 10320 if (InVectors.size() == 2) { 10321 Vec = createShuffle(Vec, InVectors.back(), CommonMask); 10322 transformMaskAfterShuffle(CommonMask, CommonMask); 10323 } else if (cast<FixedVectorType>(Vec->getType())->getNumElements() != 10324 Mask.size()) { 10325 Vec = createShuffle(Vec, nullptr, CommonMask); 10326 transformMaskAfterShuffle(CommonMask, CommonMask); 10327 } 10328 V1 = createShuffle(V1, V2, Mask); 10329 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10330 if (Mask[Idx] != PoisonMaskElem) 10331 CommonMask[Idx] = Idx + Sz; 10332 InVectors.front() = Vec; 10333 if (InVectors.size() == 2) 10334 InVectors.back() = V1; 10335 else 10336 InVectors.push_back(V1); 10337 } 10338 /// Adds another one input vector and the mask for the shuffling. 10339 void add(Value *V1, ArrayRef<int> Mask, bool = false) { 10340 if (InVectors.empty()) { 10341 if (!isa<FixedVectorType>(V1->getType())) { 10342 V1 = createShuffle(V1, nullptr, CommonMask); 10343 CommonMask.assign(Mask.size(), PoisonMaskElem); 10344 transformMaskAfterShuffle(CommonMask, Mask); 10345 } 10346 InVectors.push_back(V1); 10347 CommonMask.assign(Mask.begin(), Mask.end()); 10348 return; 10349 } 10350 const auto *It = find(InVectors, V1); 10351 if (It == InVectors.end()) { 10352 if (InVectors.size() == 2 || 10353 InVectors.front()->getType() != V1->getType() || 10354 !isa<FixedVectorType>(V1->getType())) { 10355 Value *V = InVectors.front(); 10356 if (InVectors.size() == 2) { 10357 V = createShuffle(InVectors.front(), InVectors.back(), CommonMask); 10358 transformMaskAfterShuffle(CommonMask, CommonMask); 10359 } else if (cast<FixedVectorType>(V->getType())->getNumElements() != 10360 CommonMask.size()) { 10361 V = createShuffle(InVectors.front(), nullptr, CommonMask); 10362 transformMaskAfterShuffle(CommonMask, CommonMask); 10363 } 10364 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10365 if (CommonMask[Idx] == PoisonMaskElem && Mask[Idx] != PoisonMaskElem) 10366 CommonMask[Idx] = 10367 V->getType() != V1->getType() 10368 ? Idx + Sz 10369 : Mask[Idx] + cast<FixedVectorType>(V1->getType()) 10370 ->getNumElements(); 10371 if (V->getType() != V1->getType()) 10372 V1 = createShuffle(V1, nullptr, Mask); 10373 InVectors.front() = V; 10374 if (InVectors.size() == 2) 10375 InVectors.back() = V1; 10376 else 10377 InVectors.push_back(V1); 10378 return; 10379 } 10380 // Check if second vector is required if the used elements are already 10381 // used from the first one. 10382 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10383 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem) { 10384 InVectors.push_back(V1); 10385 break; 10386 } 10387 } 10388 int VF = CommonMask.size(); 10389 if (auto *FTy = dyn_cast<FixedVectorType>(V1->getType())) 10390 VF = FTy->getNumElements(); 10391 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10392 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem) 10393 CommonMask[Idx] = Mask[Idx] + (It == InVectors.begin() ? 0 : VF); 10394 } 10395 /// Adds another one input vector and the mask for the shuffling. 
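/// For illustration (hypothetical order): Order = {2, 0, 1} is inverted via
/// inversePermutation into a regular shuffle mask,
/// \code
/// NewMask[Order[I]] = I; // -> NewMask = {1, 2, 0}
/// \endcode
/// which is then handed to add() like any other mask.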
10396 void addOrdered(Value *V1, ArrayRef<unsigned> Order) { 10397 SmallVector<int> NewMask; 10398 inversePermutation(Order, NewMask); 10399 add(V1, NewMask); 10400 } 10401 Value *gather(ArrayRef<Value *> VL, unsigned MaskVF = 0, 10402 Value *Root = nullptr) { 10403 return R.gather(VL, Root); 10404 } 10405 Value *createFreeze(Value *V) { return Builder.CreateFreeze(V); } 10406 /// Finalize emission of the shuffles. 10407 /// \param Action the action (if any) to be performed before final applying of 10408 /// the \p ExtMask mask. 10409 Value * 10410 finalize(ArrayRef<int> ExtMask, unsigned VF = 0, 10411 function_ref<void(Value *&, SmallVectorImpl<int> &)> Action = {}) { 10412 IsFinalized = true; 10413 if (Action) { 10414 Value *Vec = InVectors.front(); 10415 if (InVectors.size() == 2) { 10416 Vec = createShuffle(Vec, InVectors.back(), CommonMask); 10417 InVectors.pop_back(); 10418 } else { 10419 Vec = createShuffle(Vec, nullptr, CommonMask); 10420 } 10421 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10422 if (CommonMask[Idx] != PoisonMaskElem) 10423 CommonMask[Idx] = Idx; 10424 assert(VF > 0 && 10425 "Expected vector length for the final value before action."); 10426 unsigned VecVF = cast<FixedVectorType>(Vec->getType())->getNumElements(); 10427 if (VecVF < VF) { 10428 SmallVector<int> ResizeMask(VF, PoisonMaskElem); 10429 std::iota(ResizeMask.begin(), std::next(ResizeMask.begin(), VecVF), 0); 10430 Vec = createShuffle(Vec, nullptr, ResizeMask); 10431 } 10432 Action(Vec, CommonMask); 10433 InVectors.front() = Vec; 10434 } 10435 if (!ExtMask.empty()) { 10436 if (CommonMask.empty()) { 10437 CommonMask.assign(ExtMask.begin(), ExtMask.end()); 10438 } else { 10439 SmallVector<int> NewMask(ExtMask.size(), PoisonMaskElem); 10440 for (int I = 0, Sz = ExtMask.size(); I < Sz; ++I) { 10441 if (ExtMask[I] == PoisonMaskElem) 10442 continue; 10443 NewMask[I] = CommonMask[ExtMask[I]]; 10444 } 10445 CommonMask.swap(NewMask); 10446 } 10447 } 10448 if (CommonMask.empty()) { 10449 assert(InVectors.size() == 1 && "Expected only one vector with no mask"); 10450 return InVectors.front(); 10451 } 10452 if (InVectors.size() == 2) 10453 return createShuffle(InVectors.front(), InVectors.back(), CommonMask); 10454 return createShuffle(InVectors.front(), nullptr, CommonMask); 10455 } 10456 10457 ~ShuffleInstructionBuilder() { 10458 assert((IsFinalized || CommonMask.empty()) && 10459 "Shuffle construction must be finalized."); 10460 } 10461 }; 10462 10463 Value *BoUpSLP::vectorizeOperand(TreeEntry *E, unsigned NodeIdx, 10464 bool PostponedPHIs) { 10465 ValueList &VL = E->getOperand(NodeIdx); 10466 if (E->State == TreeEntry::PossibleStridedVectorize && 10467 !E->ReorderIndices.empty()) { 10468 SmallVector<int> Mask(E->ReorderIndices.begin(), E->ReorderIndices.end()); 10469 reorderScalars(VL, Mask); 10470 } 10471 const unsigned VF = VL.size(); 10472 InstructionsState S = getSameOpcode(VL, *TLI); 10473 // Special processing for GEPs bundle, which may include non-gep values. 
10474 if (!S.getOpcode() && VL.front()->getType()->isPointerTy()) { 10475 const auto *It = 10476 find_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); }); 10477 if (It != VL.end()) 10478 S = getSameOpcode(*It, *TLI); 10479 } 10480 if (S.getOpcode()) { 10481 auto CheckSameVE = [&](const TreeEntry *VE) { 10482 return VE->isSame(VL) && 10483 (any_of(VE->UserTreeIndices, 10484 [E, NodeIdx](const EdgeInfo &EI) { 10485 return EI.UserTE == E && EI.EdgeIdx == NodeIdx; 10486 }) || 10487 any_of(VectorizableTree, 10488 [E, NodeIdx, VE](const std::unique_ptr<TreeEntry> &TE) { 10489 return TE->isOperandGatherNode({E, NodeIdx}) && 10490 VE->isSame(TE->Scalars); 10491 })); 10492 }; 10493 TreeEntry *VE = getTreeEntry(S.OpValue); 10494 bool IsSameVE = VE && CheckSameVE(VE); 10495 if (!IsSameVE) { 10496 auto It = MultiNodeScalars.find(S.OpValue); 10497 if (It != MultiNodeScalars.end()) { 10498 auto *I = find_if(It->getSecond(), [&](const TreeEntry *TE) { 10499 return TE != VE && CheckSameVE(TE); 10500 }); 10501 if (I != It->getSecond().end()) { 10502 VE = *I; 10503 IsSameVE = true; 10504 } 10505 } 10506 } 10507 if (IsSameVE) { 10508 auto FinalShuffle = [&](Value *V, ArrayRef<int> Mask) { 10509 ShuffleInstructionBuilder ShuffleBuilder(Builder, *this); 10510 ShuffleBuilder.add(V, Mask); 10511 return ShuffleBuilder.finalize(std::nullopt); 10512 }; 10513 Value *V = vectorizeTree(VE, PostponedPHIs); 10514 if (VF != cast<FixedVectorType>(V->getType())->getNumElements()) { 10515 if (!VE->ReuseShuffleIndices.empty()) { 10516 // Reshuffle to get only unique values. 10517 // If some of the scalars are duplicated in the vectorization 10518 // tree entry, we do not vectorize them but instead generate a 10519 // mask for the reuses. But if there are several users of the 10520 // same entry, they may have different vectorization factors. 10521 // This is especially important for PHI nodes. In this case, we 10522 // need to adapt the resulting instruction for the user 10523 // vectorization factor and have to reshuffle it again to take 10524 // only unique elements of the vector. Without this code the 10525 // function incorrectly returns reduced vector instruction with 10526 // the same elements, not with the unique ones. 10527 10528 // block: 10529 // %phi = phi <2 x > { .., %entry} {%shuffle, %block} 10530 // %2 = shuffle <2 x > %phi, poison, <4 x > <1, 1, 0, 0> 10531 // ... (use %2) 10532 // %shuffle = shuffle <2 x> %2, poison, <2 x> {2, 0} 10533 // br %block 10534 SmallVector<int> UniqueIdxs(VF, PoisonMaskElem); 10535 SmallSet<int, 4> UsedIdxs; 10536 int Pos = 0; 10537 for (int Idx : VE->ReuseShuffleIndices) { 10538 if (Idx != static_cast<int>(VF) && Idx != PoisonMaskElem && 10539 UsedIdxs.insert(Idx).second) 10540 UniqueIdxs[Idx] = Pos; 10541 ++Pos; 10542 } 10543 assert(VF >= UsedIdxs.size() && "Expected vectorization factor " 10544 "less than original vector size."); 10545 UniqueIdxs.append(VF - UsedIdxs.size(), PoisonMaskElem); 10546 V = FinalShuffle(V, UniqueIdxs); 10547 } else { 10548 assert(VF < cast<FixedVectorType>(V->getType())->getNumElements() && 10549 "Expected vectorization factor less " 10550 "than original vector size."); 10551 SmallVector<int> UniformMask(VF, 0); 10552 std::iota(UniformMask.begin(), UniformMask.end(), 0); 10553 V = FinalShuffle(V, UniformMask); 10554 } 10555 } 10556 // Need to update the operand gather node, if actually the operand is not a 10557 // vectorized node, but the buildvector/gather node, which matches one of 10558 // the vectorized nodes. 
10559 if (find_if(VE->UserTreeIndices, [&](const EdgeInfo &EI) { 10560 return EI.UserTE == E && EI.EdgeIdx == NodeIdx; 10561 }) == VE->UserTreeIndices.end()) { 10562 auto *It = find_if( 10563 VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) { 10564 return TE->State == TreeEntry::NeedToGather && 10565 TE->UserTreeIndices.front().UserTE == E && 10566 TE->UserTreeIndices.front().EdgeIdx == NodeIdx; 10567 }); 10568 assert(It != VectorizableTree.end() && "Expected gather node operand."); 10569 (*It)->VectorizedValue = V; 10570 } 10571 return V; 10572 } 10573 } 10574 10575 // Find the corresponding gather entry and vectorize it. 10576 // Allows to be more accurate with tree/graph transformations, checks for the 10577 // correctness of the transformations in many cases. 10578 auto *I = find_if(VectorizableTree, 10579 [E, NodeIdx](const std::unique_ptr<TreeEntry> &TE) { 10580 return TE->isOperandGatherNode({E, NodeIdx}); 10581 }); 10582 assert(I != VectorizableTree.end() && "Gather node is not in the graph."); 10583 assert(I->get()->UserTreeIndices.size() == 1 && 10584 "Expected only single user for the gather node."); 10585 assert(I->get()->isSame(VL) && "Expected same list of scalars."); 10586 return vectorizeTree(I->get(), PostponedPHIs); 10587 } 10588 10589 template <typename BVTy, typename ResTy, typename... Args> 10590 ResTy BoUpSLP::processBuildVector(const TreeEntry *E, Args &...Params) { 10591 assert(E->State == TreeEntry::NeedToGather && "Expected gather node."); 10592 unsigned VF = E->getVectorFactor(); 10593 10594 bool NeedFreeze = false; 10595 SmallVector<int> ReuseShuffleIndicies(E->ReuseShuffleIndices.begin(), 10596 E->ReuseShuffleIndices.end()); 10597 SmallVector<Value *> GatheredScalars(E->Scalars.begin(), E->Scalars.end()); 10598 // Build a mask out of the reorder indices and reorder scalars per this 10599 // mask. 
10600 SmallVector<int> ReorderMask; 10601 inversePermutation(E->ReorderIndices, ReorderMask); 10602 if (!ReorderMask.empty()) 10603 reorderScalars(GatheredScalars, ReorderMask); 10604 auto FindReusedSplat = [&](MutableArrayRef<int> Mask, unsigned InputVF) { 10605 if (!isSplat(E->Scalars) || none_of(E->Scalars, [](Value *V) { 10606 return isa<UndefValue>(V) && !isa<PoisonValue>(V); 10607 })) 10608 return false; 10609 TreeEntry *UserTE = E->UserTreeIndices.back().UserTE; 10610 unsigned EdgeIdx = E->UserTreeIndices.back().EdgeIdx; 10611 if (UserTE->getNumOperands() != 2) 10612 return false; 10613 auto *It = 10614 find_if(VectorizableTree, [=](const std::unique_ptr<TreeEntry> &TE) { 10615 return find_if(TE->UserTreeIndices, [=](const EdgeInfo &EI) { 10616 return EI.UserTE == UserTE && EI.EdgeIdx != EdgeIdx; 10617 }) != TE->UserTreeIndices.end(); 10618 }); 10619 if (It == VectorizableTree.end()) 10620 return false; 10621 int Idx; 10622 if ((Mask.size() < InputVF && 10623 ShuffleVectorInst::isExtractSubvectorMask(Mask, InputVF, Idx) && 10624 Idx == 0) || 10625 (Mask.size() == InputVF && 10626 ShuffleVectorInst::isIdentityMask(Mask, Mask.size()))) { 10627 std::iota(Mask.begin(), Mask.end(), 0); 10628 } else { 10629 unsigned I = 10630 *find_if_not(Mask, [](int Idx) { return Idx == PoisonMaskElem; }); 10631 std::fill(Mask.begin(), Mask.end(), I); 10632 } 10633 return true; 10634 }; 10635 BVTy ShuffleBuilder(Params...); 10636 ResTy Res = ResTy(); 10637 SmallVector<int> Mask; 10638 SmallVector<int> ExtractMask(GatheredScalars.size(), PoisonMaskElem); 10639 SmallVector<std::optional<TTI::ShuffleKind>> ExtractShuffles; 10640 Value *ExtractVecBase = nullptr; 10641 bool UseVecBaseAsInput = false; 10642 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> GatherShuffles; 10643 SmallVector<SmallVector<const TreeEntry *>> Entries; 10644 Type *ScalarTy = GatheredScalars.front()->getType(); 10645 auto *VecTy = FixedVectorType::get(ScalarTy, GatheredScalars.size()); 10646 unsigned NumParts = TTI->getNumberOfParts(VecTy); 10647 if (NumParts == 0 || NumParts >= GatheredScalars.size()) 10648 NumParts = 1; 10649 if (!all_of(GatheredScalars, UndefValue::classof)) { 10650 // Check for gathered extracts. 10651 bool Resized = false; 10652 ExtractShuffles = 10653 tryToGatherExtractElements(GatheredScalars, ExtractMask, NumParts); 10654 if (!ExtractShuffles.empty()) { 10655 SmallVector<const TreeEntry *> ExtractEntries; 10656 for (auto [Idx, I] : enumerate(ExtractMask)) { 10657 if (I == PoisonMaskElem) 10658 continue; 10659 if (const auto *TE = getTreeEntry( 10660 cast<ExtractElementInst>(E->Scalars[Idx])->getVectorOperand())) 10661 ExtractEntries.push_back(TE); 10662 } 10663 if (std::optional<ResTy> Delayed = 10664 ShuffleBuilder.needToDelay(E, ExtractEntries)) { 10665 // Delay emission of gathers which are not ready yet. 10666 PostponedGathers.insert(E); 10667 // Postpone gather emission, will be emitted after the end of the 10668 // process to keep correct order. 
10669 return *Delayed; 10670 } 10671 if (Value *VecBase = ShuffleBuilder.adjustExtracts( 10672 E, ExtractMask, ExtractShuffles, NumParts, UseVecBaseAsInput)) { 10673 ExtractVecBase = VecBase; 10674 if (auto *VecBaseTy = dyn_cast<FixedVectorType>(VecBase->getType())) 10675 if (VF == VecBaseTy->getNumElements() && 10676 GatheredScalars.size() != VF) { 10677 Resized = true; 10678 GatheredScalars.append(VF - GatheredScalars.size(), 10679 PoisonValue::get(ScalarTy)); 10680 } 10681 } 10682 } 10683 // Gather extracts after we check for full matched gathers only. 10684 if (!ExtractShuffles.empty() || E->getOpcode() != Instruction::Load || 10685 E->isAltShuffle() || 10686 all_of(E->Scalars, [this](Value *V) { return getTreeEntry(V); }) || 10687 isSplat(E->Scalars) || 10688 (E->Scalars != GatheredScalars && GatheredScalars.size() <= 2)) { 10689 GatherShuffles = 10690 isGatherShuffledEntry(E, GatheredScalars, Mask, Entries, NumParts); 10691 } 10692 if (!GatherShuffles.empty()) { 10693 if (std::optional<ResTy> Delayed = 10694 ShuffleBuilder.needToDelay(E, Entries)) { 10695 // Delay emission of gathers which are not ready yet. 10696 PostponedGathers.insert(E); 10697 // Postpone gather emission, will be emitted after the end of the 10698 // process to keep correct order. 10699 return *Delayed; 10700 } 10701 if (GatherShuffles.size() == 1 && 10702 *GatherShuffles.front() == TTI::SK_PermuteSingleSrc && 10703 Entries.front().front()->isSame(E->Scalars)) { 10704 // Perfect match in the graph, will reuse the previously vectorized 10705 // node. Cost is 0. 10706 LLVM_DEBUG( 10707 dbgs() 10708 << "SLP: perfect diamond match for gather bundle " 10709 << shortBundleName(E->Scalars) << ".\n"); 10710 // Restore the mask for previous partially matched values. 10711 Mask.resize(E->Scalars.size()); 10712 const TreeEntry *FrontTE = Entries.front().front(); 10713 if (FrontTE->ReorderIndices.empty() && 10714 ((FrontTE->ReuseShuffleIndices.empty() && 10715 E->Scalars.size() == FrontTE->Scalars.size()) || 10716 (E->Scalars.size() == FrontTE->ReuseShuffleIndices.size()))) { 10717 std::iota(Mask.begin(), Mask.end(), 0); 10718 } else { 10719 for (auto [I, V] : enumerate(E->Scalars)) { 10720 if (isa<PoisonValue>(V)) { 10721 Mask[I] = PoisonMaskElem; 10722 continue; 10723 } 10724 Mask[I] = FrontTE->findLaneForValue(V); 10725 } 10726 } 10727 ShuffleBuilder.add(*FrontTE, Mask); 10728 Res = ShuffleBuilder.finalize(E->getCommonMask()); 10729 return Res; 10730 } 10731 if (!Resized) { 10732 if (GatheredScalars.size() != VF && 10733 any_of(Entries, [&](ArrayRef<const TreeEntry *> TEs) { 10734 return any_of(TEs, [&](const TreeEntry *TE) { 10735 return TE->getVectorFactor() == VF; 10736 }); 10737 })) 10738 GatheredScalars.append(VF - GatheredScalars.size(), 10739 PoisonValue::get(ScalarTy)); 10740 } 10741 // Remove shuffled elements from list of gathers. 10742 for (int I = 0, Sz = Mask.size(); I < Sz; ++I) { 10743 if (Mask[I] != PoisonMaskElem) 10744 GatheredScalars[I] = PoisonValue::get(ScalarTy); 10745 } 10746 } 10747 } 10748 auto TryPackScalars = [&](SmallVectorImpl<Value *> &Scalars, 10749 SmallVectorImpl<int> &ReuseMask, 10750 bool IsRootPoison) { 10751 // For splats with can emit broadcasts instead of gathers, so try to find 10752 // such sequences. 
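// Illustrative sketch (types are hypothetical): gathering {%x, %x, %x, %x} can
// end up as roughly
//   %ins = insertelement <4 x i32> poison, i32 %x, i32 0
//   %splat = shufflevector <4 x i32> %ins, <4 x i32> poison, <4 x i32> zeroinitializer
// instead of four separate insertelement instructions.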
10753 bool IsSplat = IsRootPoison && isSplat(Scalars) && 10754 (Scalars.size() > 2 || Scalars.front() == Scalars.back()); 10755 Scalars.append(VF - Scalars.size(), PoisonValue::get(ScalarTy)); 10756 SmallVector<int> UndefPos; 10757 DenseMap<Value *, unsigned> UniquePositions; 10758 // Gather unique non-const values and all constant values. 10759 // For repeated values, just shuffle them. 10760 int NumNonConsts = 0; 10761 int SinglePos = 0; 10762 for (auto [I, V] : enumerate(Scalars)) { 10763 if (isa<UndefValue>(V)) { 10764 if (!isa<PoisonValue>(V)) { 10765 ReuseMask[I] = I; 10766 UndefPos.push_back(I); 10767 } 10768 continue; 10769 } 10770 if (isConstant(V)) { 10771 ReuseMask[I] = I; 10772 continue; 10773 } 10774 ++NumNonConsts; 10775 SinglePos = I; 10776 Value *OrigV = V; 10777 Scalars[I] = PoisonValue::get(ScalarTy); 10778 if (IsSplat) { 10779 Scalars.front() = OrigV; 10780 ReuseMask[I] = 0; 10781 } else { 10782 const auto Res = UniquePositions.try_emplace(OrigV, I); 10783 Scalars[Res.first->second] = OrigV; 10784 ReuseMask[I] = Res.first->second; 10785 } 10786 } 10787 if (NumNonConsts == 1) { 10788 // Restore single insert element. 10789 if (IsSplat) { 10790 ReuseMask.assign(VF, PoisonMaskElem); 10791 std::swap(Scalars.front(), Scalars[SinglePos]); 10792 if (!UndefPos.empty() && UndefPos.front() == 0) 10793 Scalars.front() = UndefValue::get(ScalarTy); 10794 } 10795 ReuseMask[SinglePos] = SinglePos; 10796 } else if (!UndefPos.empty() && IsSplat) { 10797 // For undef values, try to replace them with the simple broadcast. 10798 // We can do it if the broadcasted value is guaranteed to be 10799 // non-poisonous, or by freezing the incoming scalar value first. 10800 auto *It = find_if(Scalars, [this, E](Value *V) { 10801 return !isa<UndefValue>(V) && 10802 (getTreeEntry(V) || isGuaranteedNotToBePoison(V) || 10803 (E->UserTreeIndices.size() == 1 && 10804 any_of(V->uses(), [E](const Use &U) { 10805 // Check if the value already used in the same operation in 10806 // one of the nodes already. 10807 return E->UserTreeIndices.front().EdgeIdx != 10808 U.getOperandNo() && 10809 is_contained( 10810 E->UserTreeIndices.front().UserTE->Scalars, 10811 U.getUser()); 10812 }))); 10813 }); 10814 if (It != Scalars.end()) { 10815 // Replace undefs by the non-poisoned scalars and emit broadcast. 10816 int Pos = std::distance(Scalars.begin(), It); 10817 for (int I : UndefPos) { 10818 // Set the undef position to the non-poisoned scalar. 10819 ReuseMask[I] = Pos; 10820 // Replace the undef by the poison, in the mask it is replaced by 10821 // non-poisoned scalar already. 10822 if (I != Pos) 10823 Scalars[I] = PoisonValue::get(ScalarTy); 10824 } 10825 } else { 10826 // Replace undefs by the poisons, emit broadcast and then emit 10827 // freeze. 10828 for (int I : UndefPos) { 10829 ReuseMask[I] = PoisonMaskElem; 10830 if (isa<UndefValue>(Scalars[I])) 10831 Scalars[I] = PoisonValue::get(ScalarTy); 10832 } 10833 NeedFreeze = true; 10834 } 10835 } 10836 }; 10837 if (!ExtractShuffles.empty() || !GatherShuffles.empty()) { 10838 bool IsNonPoisoned = true; 10839 bool IsUsedInExpr = true; 10840 Value *Vec1 = nullptr; 10841 if (!ExtractShuffles.empty()) { 10842 // Gather of extractelements can be represented as just a shuffle of 10843 // a single/two vectors the scalars are extracted from. 10844 // Find input vectors. 
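// E.g. (illustrative), if the scalars were extracted from two already
// vectorized values %v0 and %v1:
//   %e0 = extractelement <2 x i32> %v0, i32 1
//   %e1 = extractelement <2 x i32> %v1, i32 0
// the gather is just "shufflevector %v0, %v1, <1, 2>", where second-source
// lanes are offset by the width of the first source.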
10845 Value *Vec2 = nullptr; 10846 for (unsigned I = 0, Sz = ExtractMask.size(); I < Sz; ++I) { 10847 if (!Mask.empty() && Mask[I] != PoisonMaskElem) 10848 ExtractMask[I] = PoisonMaskElem; 10849 } 10850 if (UseVecBaseAsInput) { 10851 Vec1 = ExtractVecBase; 10852 } else { 10853 for (unsigned I = 0, Sz = ExtractMask.size(); I < Sz; ++I) { 10854 if (ExtractMask[I] == PoisonMaskElem) 10855 continue; 10856 if (isa<UndefValue>(E->Scalars[I])) 10857 continue; 10858 auto *EI = cast<ExtractElementInst>(E->Scalars[I]); 10859 Value *VecOp = EI->getVectorOperand(); 10860 if (const auto *TE = getTreeEntry(VecOp)) 10861 if (TE->VectorizedValue) 10862 VecOp = TE->VectorizedValue; 10863 if (!Vec1) { 10864 Vec1 = VecOp; 10865 } else if (Vec1 != EI->getVectorOperand()) { 10866 assert((!Vec2 || Vec2 == EI->getVectorOperand()) && 10867 "Expected only 1 or 2 vectors shuffle."); 10868 Vec2 = VecOp; 10869 } 10870 } 10871 } 10872 if (Vec2) { 10873 IsUsedInExpr = false; 10874 IsNonPoisoned &= 10875 isGuaranteedNotToBePoison(Vec1) && isGuaranteedNotToBePoison(Vec2); 10876 ShuffleBuilder.add(Vec1, Vec2, ExtractMask); 10877 } else if (Vec1) { 10878 IsUsedInExpr &= FindReusedSplat( 10879 ExtractMask, 10880 cast<FixedVectorType>(Vec1->getType())->getNumElements()); 10881 ShuffleBuilder.add(Vec1, ExtractMask, /*ForExtracts=*/true); 10882 IsNonPoisoned &= isGuaranteedNotToBePoison(Vec1); 10883 } else { 10884 IsUsedInExpr = false; 10885 ShuffleBuilder.add(PoisonValue::get(FixedVectorType::get( 10886 ScalarTy, GatheredScalars.size())), 10887 ExtractMask, /*ForExtracts=*/true); 10888 } 10889 } 10890 if (!GatherShuffles.empty()) { 10891 unsigned SliceSize = E->Scalars.size() / NumParts; 10892 SmallVector<int> VecMask(Mask.size(), PoisonMaskElem); 10893 for (const auto [I, TEs] : enumerate(Entries)) { 10894 if (TEs.empty()) { 10895 assert(!GatherShuffles[I] && 10896 "No shuffles with empty entries list expected."); 10897 continue; 10898 } 10899 assert((TEs.size() == 1 || TEs.size() == 2) && 10900 "Expected shuffle of 1 or 2 entries."); 10901 auto SubMask = ArrayRef(Mask).slice(I * SliceSize, SliceSize); 10902 VecMask.assign(VecMask.size(), PoisonMaskElem); 10903 copy(SubMask, std::next(VecMask.begin(), I * SliceSize)); 10904 if (TEs.size() == 1) { 10905 IsUsedInExpr &= 10906 FindReusedSplat(VecMask, TEs.front()->getVectorFactor()); 10907 ShuffleBuilder.add(*TEs.front(), VecMask); 10908 if (TEs.front()->VectorizedValue) 10909 IsNonPoisoned &= 10910 isGuaranteedNotToBePoison(TEs.front()->VectorizedValue); 10911 } else { 10912 IsUsedInExpr = false; 10913 ShuffleBuilder.add(*TEs.front(), *TEs.back(), VecMask); 10914 if (TEs.front()->VectorizedValue && TEs.back()->VectorizedValue) 10915 IsNonPoisoned &= 10916 isGuaranteedNotToBePoison(TEs.front()->VectorizedValue) && 10917 isGuaranteedNotToBePoison(TEs.back()->VectorizedValue); 10918 } 10919 } 10920 } 10921 // Try to figure out best way to combine values: build a shuffle and insert 10922 // elements or just build several shuffles. 10923 // Insert non-constant scalars. 10924 SmallVector<Value *> NonConstants(GatheredScalars); 10925 int EMSz = ExtractMask.size(); 10926 int MSz = Mask.size(); 10927 // Try to build constant vector and shuffle with it only if currently we 10928 // have a single permutation and more than 1 scalar constants. 
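// Illustrative example (hypothetical scalars), assuming the checks below pass:
// for gathered scalars {7, %x, 13, %y} the split gives
//   GatheredScalars = {7, poison, 13, poison}  // becomes a constant vector
//   NonConstants    = {poison, %x, poison, %y} // inserted/shuffled afterwards
// so the constant part can be merged with a single extra shuffle.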
10929 bool IsSingleShuffle = ExtractShuffles.empty() || GatherShuffles.empty(); 10930 bool IsIdentityShuffle = 10931 ((UseVecBaseAsInput || 10932 all_of(ExtractShuffles, 10933 [](const std::optional<TTI::ShuffleKind> &SK) { 10934 return SK.value_or(TTI::SK_PermuteTwoSrc) == 10935 TTI::SK_PermuteSingleSrc; 10936 })) && 10937 none_of(ExtractMask, [&](int I) { return I >= EMSz; }) && 10938 ShuffleVectorInst::isIdentityMask(ExtractMask, EMSz)) || 10939 (!GatherShuffles.empty() && 10940 all_of(GatherShuffles, 10941 [](const std::optional<TTI::ShuffleKind> &SK) { 10942 return SK.value_or(TTI::SK_PermuteTwoSrc) == 10943 TTI::SK_PermuteSingleSrc; 10944 }) && 10945 none_of(Mask, [&](int I) { return I >= MSz; }) && 10946 ShuffleVectorInst::isIdentityMask(Mask, MSz)); 10947 bool EnoughConstsForShuffle = 10948 IsSingleShuffle && 10949 (none_of(GatheredScalars, 10950 [](Value *V) { 10951 return isa<UndefValue>(V) && !isa<PoisonValue>(V); 10952 }) || 10953 any_of(GatheredScalars, 10954 [](Value *V) { 10955 return isa<Constant>(V) && !isa<UndefValue>(V); 10956 })) && 10957 (!IsIdentityShuffle || 10958 (GatheredScalars.size() == 2 && 10959 any_of(GatheredScalars, 10960 [](Value *V) { return !isa<UndefValue>(V); })) || 10961 count_if(GatheredScalars, [](Value *V) { 10962 return isa<Constant>(V) && !isa<PoisonValue>(V); 10963 }) > 1); 10964 // NonConstants array contains just non-constant values, GatheredScalars 10965 // contains only constant to build final vector and then shuffle. 10966 for (int I = 0, Sz = GatheredScalars.size(); I < Sz; ++I) { 10967 if (EnoughConstsForShuffle && isa<Constant>(GatheredScalars[I])) 10968 NonConstants[I] = PoisonValue::get(ScalarTy); 10969 else 10970 GatheredScalars[I] = PoisonValue::get(ScalarTy); 10971 } 10972 // Generate constants for final shuffle and build a mask for them. 10973 if (!all_of(GatheredScalars, PoisonValue::classof)) { 10974 SmallVector<int> BVMask(GatheredScalars.size(), PoisonMaskElem); 10975 TryPackScalars(GatheredScalars, BVMask, /*IsRootPoison=*/true); 10976 Value *BV = ShuffleBuilder.gather(GatheredScalars, BVMask.size()); 10977 ShuffleBuilder.add(BV, BVMask); 10978 } 10979 if (all_of(NonConstants, [=](Value *V) { 10980 return isa<PoisonValue>(V) || 10981 (IsSingleShuffle && ((IsIdentityShuffle && 10982 IsNonPoisoned) || IsUsedInExpr) && isa<UndefValue>(V)); 10983 })) 10984 Res = ShuffleBuilder.finalize(E->ReuseShuffleIndices); 10985 else 10986 Res = ShuffleBuilder.finalize( 10987 E->ReuseShuffleIndices, E->Scalars.size(), 10988 [&](Value *&Vec, SmallVectorImpl<int> &Mask) { 10989 TryPackScalars(NonConstants, Mask, /*IsRootPoison=*/false); 10990 Vec = ShuffleBuilder.gather(NonConstants, Mask.size(), Vec); 10991 }); 10992 } else if (!allConstant(GatheredScalars)) { 10993 // Gather unique scalars and all constants. 10994 SmallVector<int> ReuseMask(GatheredScalars.size(), PoisonMaskElem); 10995 TryPackScalars(GatheredScalars, ReuseMask, /*IsRootPoison=*/true); 10996 Value *BV = ShuffleBuilder.gather(GatheredScalars, ReuseMask.size()); 10997 ShuffleBuilder.add(BV, ReuseMask); 10998 Res = ShuffleBuilder.finalize(E->ReuseShuffleIndices); 10999 } else { 11000 // Gather all constants. 
11001 SmallVector<int> Mask(E->Scalars.size(), PoisonMaskElem); 11002 for (auto [I, V] : enumerate(E->Scalars)) { 11003 if (!isa<PoisonValue>(V)) 11004 Mask[I] = I; 11005 } 11006 Value *BV = ShuffleBuilder.gather(E->Scalars); 11007 ShuffleBuilder.add(BV, Mask); 11008 Res = ShuffleBuilder.finalize(E->ReuseShuffleIndices); 11009 } 11010 11011 if (NeedFreeze) 11012 Res = ShuffleBuilder.createFreeze(Res); 11013 return Res; 11014 } 11015 11016 Value *BoUpSLP::createBuildVector(const TreeEntry *E) { 11017 return processBuildVector<ShuffleInstructionBuilder, Value *>(E, Builder, 11018 *this); 11019 } 11020 11021 Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) { 11022 IRBuilder<>::InsertPointGuard Guard(Builder); 11023 11024 if (E->VectorizedValue && 11025 (E->State != TreeEntry::Vectorize || E->getOpcode() != Instruction::PHI || 11026 E->isAltShuffle())) { 11027 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n"); 11028 return E->VectorizedValue; 11029 } 11030 11031 if (E->State == TreeEntry::NeedToGather) { 11032 // Set insert point for non-reduction initial nodes. 11033 if (E->getMainOp() && E->Idx == 0 && !UserIgnoreList) 11034 setInsertPointAfterBundle(E); 11035 Value *Vec = createBuildVector(E); 11036 E->VectorizedValue = Vec; 11037 return Vec; 11038 } 11039 11040 auto FinalShuffle = [&](Value *V, const TreeEntry *E, VectorType *VecTy, 11041 bool IsSigned) { 11042 if (V->getType() != VecTy) 11043 V = Builder.CreateIntCast(V, VecTy, IsSigned); 11044 ShuffleInstructionBuilder ShuffleBuilder(Builder, *this); 11045 if (E->getOpcode() == Instruction::Store) { 11046 ArrayRef<int> Mask = 11047 ArrayRef(reinterpret_cast<const int *>(E->ReorderIndices.begin()), 11048 E->ReorderIndices.size()); 11049 ShuffleBuilder.add(V, Mask); 11050 } else if (E->State == TreeEntry::PossibleStridedVectorize) { 11051 ShuffleBuilder.addOrdered(V, std::nullopt); 11052 } else { 11053 ShuffleBuilder.addOrdered(V, E->ReorderIndices); 11054 } 11055 return ShuffleBuilder.finalize(E->ReuseShuffleIndices); 11056 }; 11057 11058 assert((E->State == TreeEntry::Vectorize || 11059 E->State == TreeEntry::ScatterVectorize || 11060 E->State == TreeEntry::PossibleStridedVectorize) && 11061 "Unhandled state"); 11062 unsigned ShuffleOrOp = 11063 E->isAltShuffle() ? 
(unsigned)Instruction::ShuffleVector : E->getOpcode(); 11064 Instruction *VL0 = E->getMainOp(); 11065 Type *ScalarTy = VL0->getType(); 11066 if (auto *Store = dyn_cast<StoreInst>(VL0)) 11067 ScalarTy = Store->getValueOperand()->getType(); 11068 else if (auto *IE = dyn_cast<InsertElementInst>(VL0)) 11069 ScalarTy = IE->getOperand(1)->getType(); 11070 bool IsSigned = false; 11071 auto It = MinBWs.find(E); 11072 if (It != MinBWs.end()) { 11073 ScalarTy = IntegerType::get(F->getContext(), It->second.first); 11074 IsSigned = It->second.second; 11075 } 11076 auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size()); 11077 switch (ShuffleOrOp) { 11078 case Instruction::PHI: { 11079 assert((E->ReorderIndices.empty() || 11080 E != VectorizableTree.front().get() || 11081 !E->UserTreeIndices.empty()) && 11082 "PHI reordering is free."); 11083 if (PostponedPHIs && E->VectorizedValue) 11084 return E->VectorizedValue; 11085 auto *PH = cast<PHINode>(VL0); 11086 Builder.SetInsertPoint(PH->getParent(), 11087 PH->getParent()->getFirstNonPHIIt()); 11088 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 11089 if (PostponedPHIs || !E->VectorizedValue) { 11090 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues()); 11091 E->PHI = NewPhi; 11092 Value *V = NewPhi; 11093 11094 // Adjust insertion point once all PHI's have been generated. 11095 Builder.SetInsertPoint(PH->getParent(), 11096 PH->getParent()->getFirstInsertionPt()); 11097 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 11098 11099 V = FinalShuffle(V, E, VecTy, IsSigned); 11100 11101 E->VectorizedValue = V; 11102 if (PostponedPHIs) 11103 return V; 11104 } 11105 PHINode *NewPhi = cast<PHINode>(E->PHI); 11106 // If phi node is fully emitted - exit. 11107 if (NewPhi->getNumIncomingValues() != 0) 11108 return NewPhi; 11109 11110 // PHINodes may have multiple entries from the same block. We want to 11111 // visit every block once. 11112 SmallPtrSet<BasicBlock *, 4> VisitedBBs; 11113 11114 for (unsigned I : seq<unsigned>(0, PH->getNumIncomingValues())) { 11115 ValueList Operands; 11116 BasicBlock *IBB = PH->getIncomingBlock(I); 11117 11118 // Stop emission if all incoming values are generated. 
11119 if (NewPhi->getNumIncomingValues() == PH->getNumIncomingValues()) { 11120 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11121 return NewPhi; 11122 } 11123 11124 if (!VisitedBBs.insert(IBB).second) { 11125 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB); 11126 continue; 11127 } 11128 11129 Builder.SetInsertPoint(IBB->getTerminator()); 11130 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 11131 Value *Vec = vectorizeOperand(E, I, /*PostponedPHIs=*/true); 11132 if (VecTy != Vec->getType()) { 11133 assert(MinBWs.contains(getOperandEntry(E, I)) && 11134 "Expected item in MinBWs."); 11135 Vec = Builder.CreateIntCast(Vec, VecTy, It->second.second); 11136 } 11137 NewPhi->addIncoming(Vec, IBB); 11138 } 11139 11140 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() && 11141 "Invalid number of incoming values"); 11142 return NewPhi; 11143 } 11144 11145 case Instruction::ExtractElement: { 11146 Value *V = E->getSingleOperand(0); 11147 setInsertPointAfterBundle(E); 11148 V = FinalShuffle(V, E, VecTy, IsSigned); 11149 E->VectorizedValue = V; 11150 return V; 11151 } 11152 case Instruction::ExtractValue: { 11153 auto *LI = cast<LoadInst>(E->getSingleOperand(0)); 11154 Builder.SetInsertPoint(LI); 11155 Value *Ptr = LI->getPointerOperand(); 11156 LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign()); 11157 Value *NewV = propagateMetadata(V, E->Scalars); 11158 NewV = FinalShuffle(NewV, E, VecTy, IsSigned); 11159 E->VectorizedValue = NewV; 11160 return NewV; 11161 } 11162 case Instruction::InsertElement: { 11163 assert(E->ReuseShuffleIndices.empty() && "All inserts should be unique"); 11164 Builder.SetInsertPoint(cast<Instruction>(E->Scalars.back())); 11165 Value *V = vectorizeOperand(E, 1, PostponedPHIs); 11166 ArrayRef<Value *> Op = E->getOperand(1); 11167 Type *ScalarTy = Op.front()->getType(); 11168 if (cast<VectorType>(V->getType())->getElementType() != ScalarTy) { 11169 assert(ScalarTy->isIntegerTy() && "Expected item in MinBWs."); 11170 std::pair<unsigned, bool> Res = MinBWs.lookup(getOperandEntry(E, 1)); 11171 assert(Res.first > 0 && "Expected item in MinBWs."); 11172 V = Builder.CreateIntCast( 11173 V, 11174 FixedVectorType::get( 11175 ScalarTy, 11176 cast<FixedVectorType>(V->getType())->getNumElements()), 11177 Res.second); 11178 } 11179 11180 // Create InsertVector shuffle if necessary 11181 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) { 11182 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0)); 11183 })); 11184 const unsigned NumElts = 11185 cast<FixedVectorType>(FirstInsert->getType())->getNumElements(); 11186 const unsigned NumScalars = E->Scalars.size(); 11187 11188 unsigned Offset = *getInsertIndex(VL0); 11189 assert(Offset < NumElts && "Failed to find vector index offset"); 11190 11191 // Create shuffle to resize vector 11192 SmallVector<int> Mask; 11193 if (!E->ReorderIndices.empty()) { 11194 inversePermutation(E->ReorderIndices, Mask); 11195 Mask.append(NumElts - NumScalars, PoisonMaskElem); 11196 } else { 11197 Mask.assign(NumElts, PoisonMaskElem); 11198 std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0); 11199 } 11200 // Create InsertVector shuffle if necessary 11201 bool IsIdentity = true; 11202 SmallVector<int> PrevMask(NumElts, PoisonMaskElem); 11203 Mask.swap(PrevMask); 11204 for (unsigned I = 0; I < NumScalars; ++I) { 11205 Value *Scalar = E->Scalars[PrevMask[I]]; 11206 unsigned InsertIdx = *getInsertIndex(Scalar); 11207 IsIdentity &= InsertIdx - 
Offset == I; 11208 Mask[InsertIdx - Offset] = I; 11209 } 11210 if (!IsIdentity || NumElts != NumScalars) { 11211 Value *V2 = nullptr; 11212 bool IsVNonPoisonous = isGuaranteedNotToBePoison(V) && !isConstant(V); 11213 SmallVector<int> InsertMask(Mask); 11214 if (NumElts != NumScalars && Offset == 0) { 11215 // Follow all insert element instructions from the current buildvector 11216 // sequence. 11217 InsertElementInst *Ins = cast<InsertElementInst>(VL0); 11218 do { 11219 std::optional<unsigned> InsertIdx = getInsertIndex(Ins); 11220 if (!InsertIdx) 11221 break; 11222 if (InsertMask[*InsertIdx] == PoisonMaskElem) 11223 InsertMask[*InsertIdx] = *InsertIdx; 11224 if (!Ins->hasOneUse()) 11225 break; 11226 Ins = dyn_cast_or_null<InsertElementInst>( 11227 Ins->getUniqueUndroppableUser()); 11228 } while (Ins); 11229 SmallBitVector UseMask = 11230 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask); 11231 SmallBitVector IsFirstPoison = 11232 isUndefVector<true>(FirstInsert->getOperand(0), UseMask); 11233 SmallBitVector IsFirstUndef = 11234 isUndefVector(FirstInsert->getOperand(0), UseMask); 11235 if (!IsFirstPoison.all()) { 11236 unsigned Idx = 0; 11237 for (unsigned I = 0; I < NumElts; I++) { 11238 if (InsertMask[I] == PoisonMaskElem && !IsFirstPoison.test(I) && 11239 IsFirstUndef.test(I)) { 11240 if (IsVNonPoisonous) { 11241 InsertMask[I] = I < NumScalars ? I : 0; 11242 continue; 11243 } 11244 if (!V2) 11245 V2 = UndefValue::get(V->getType()); 11246 if (Idx >= NumScalars) 11247 Idx = NumScalars - 1; 11248 InsertMask[I] = NumScalars + Idx; 11249 ++Idx; 11250 } else if (InsertMask[I] != PoisonMaskElem && 11251 Mask[I] == PoisonMaskElem) { 11252 InsertMask[I] = PoisonMaskElem; 11253 } 11254 } 11255 } else { 11256 InsertMask = Mask; 11257 } 11258 } 11259 if (!V2) 11260 V2 = PoisonValue::get(V->getType()); 11261 V = Builder.CreateShuffleVector(V, V2, InsertMask); 11262 if (auto *I = dyn_cast<Instruction>(V)) { 11263 GatherShuffleExtractSeq.insert(I); 11264 CSEBlocks.insert(I->getParent()); 11265 } 11266 } 11267 11268 SmallVector<int> InsertMask(NumElts, PoisonMaskElem); 11269 for (unsigned I = 0; I < NumElts; I++) { 11270 if (Mask[I] != PoisonMaskElem) 11271 InsertMask[Offset + I] = I; 11272 } 11273 SmallBitVector UseMask = 11274 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask); 11275 SmallBitVector IsFirstUndef = 11276 isUndefVector(FirstInsert->getOperand(0), UseMask); 11277 if ((!IsIdentity || Offset != 0 || !IsFirstUndef.all()) && 11278 NumElts != NumScalars) { 11279 if (IsFirstUndef.all()) { 11280 if (!ShuffleVectorInst::isIdentityMask(InsertMask, NumElts)) { 11281 SmallBitVector IsFirstPoison = 11282 isUndefVector<true>(FirstInsert->getOperand(0), UseMask); 11283 if (!IsFirstPoison.all()) { 11284 for (unsigned I = 0; I < NumElts; I++) { 11285 if (InsertMask[I] == PoisonMaskElem && !IsFirstPoison.test(I)) 11286 InsertMask[I] = I + NumElts; 11287 } 11288 } 11289 V = Builder.CreateShuffleVector( 11290 V, 11291 IsFirstPoison.all() ? PoisonValue::get(V->getType()) 11292 : FirstInsert->getOperand(0), 11293 InsertMask, cast<Instruction>(E->Scalars.back())->getName()); 11294 if (auto *I = dyn_cast<Instruction>(V)) { 11295 GatherShuffleExtractSeq.insert(I); 11296 CSEBlocks.insert(I->getParent()); 11297 } 11298 } 11299 } else { 11300 SmallBitVector IsFirstPoison = 11301 isUndefVector<true>(FirstInsert->getOperand(0), UseMask); 11302 for (unsigned I = 0; I < NumElts; I++) { 11303 if (InsertMask[I] == PoisonMaskElem) 11304 InsertMask[I] = IsFirstPoison.test(I) ? 
PoisonMaskElem : I; 11305 else 11306 InsertMask[I] += NumElts; 11307 } 11308 V = Builder.CreateShuffleVector( 11309 FirstInsert->getOperand(0), V, InsertMask, 11310 cast<Instruction>(E->Scalars.back())->getName()); 11311 if (auto *I = dyn_cast<Instruction>(V)) { 11312 GatherShuffleExtractSeq.insert(I); 11313 CSEBlocks.insert(I->getParent()); 11314 } 11315 } 11316 } 11317 11318 ++NumVectorInstructions; 11319 E->VectorizedValue = V; 11320 return V; 11321 } 11322 case Instruction::ZExt: 11323 case Instruction::SExt: 11324 case Instruction::FPToUI: 11325 case Instruction::FPToSI: 11326 case Instruction::FPExt: 11327 case Instruction::PtrToInt: 11328 case Instruction::IntToPtr: 11329 case Instruction::SIToFP: 11330 case Instruction::UIToFP: 11331 case Instruction::Trunc: 11332 case Instruction::FPTrunc: 11333 case Instruction::BitCast: { 11334 setInsertPointAfterBundle(E); 11335 11336 Value *InVec = vectorizeOperand(E, 0, PostponedPHIs); 11337 if (E->VectorizedValue) { 11338 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11339 return E->VectorizedValue; 11340 } 11341 11342 auto *CI = cast<CastInst>(VL0); 11343 Instruction::CastOps VecOpcode = CI->getOpcode(); 11344 Type *SrcScalarTy = VL0->getOperand(0)->getType(); 11345 auto SrcIt = MinBWs.find(getOperandEntry(E, 0)); 11346 if (!ScalarTy->isFloatingPointTy() && !SrcScalarTy->isFloatingPointTy() && 11347 (SrcIt != MinBWs.end() || It != MinBWs.end())) { 11348 // Check if the values are candidates to demote. 11349 unsigned SrcBWSz = DL->getTypeSizeInBits(SrcScalarTy); 11350 if (SrcIt != MinBWs.end()) 11351 SrcBWSz = SrcIt->second.first; 11352 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy); 11353 if (BWSz == SrcBWSz) { 11354 VecOpcode = Instruction::BitCast; 11355 } else if (BWSz < SrcBWSz) { 11356 VecOpcode = Instruction::Trunc; 11357 } else if (It != MinBWs.end()) { 11358 assert(BWSz > SrcBWSz && "Invalid cast!"); 11359 VecOpcode = It->second.second ? Instruction::SExt : Instruction::ZExt; 11360 } 11361 } 11362 Value *V = (VecOpcode != ShuffleOrOp && VecOpcode == Instruction::BitCast) 11363 ? InVec 11364 : Builder.CreateCast(VecOpcode, InVec, VecTy); 11365 V = FinalShuffle(V, E, VecTy, IsSigned); 11366 11367 E->VectorizedValue = V; 11368 ++NumVectorInstructions; 11369 return V; 11370 } 11371 case Instruction::FCmp: 11372 case Instruction::ICmp: { 11373 setInsertPointAfterBundle(E); 11374 11375 Value *L = vectorizeOperand(E, 0, PostponedPHIs); 11376 if (E->VectorizedValue) { 11377 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11378 return E->VectorizedValue; 11379 } 11380 Value *R = vectorizeOperand(E, 1, PostponedPHIs); 11381 if (E->VectorizedValue) { 11382 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11383 return E->VectorizedValue; 11384 } 11385 if (L->getType() != R->getType()) { 11386 assert((MinBWs.contains(getOperandEntry(E, 0)) || 11387 MinBWs.contains(getOperandEntry(E, 1))) && 11388 "Expected item in MinBWs."); 11389 L = Builder.CreateIntCast(L, VecTy, IsSigned); 11390 R = Builder.CreateIntCast(R, VecTy, IsSigned); 11391 } 11392 11393 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 11394 Value *V = Builder.CreateCmp(P0, L, R); 11395 propagateIRFlags(V, E->Scalars, VL0); 11396 // Do not cast for cmps. 
11397 VecTy = cast<FixedVectorType>(V->getType()); 11398 V = FinalShuffle(V, E, VecTy, IsSigned); 11399 11400 E->VectorizedValue = V; 11401 ++NumVectorInstructions; 11402 return V; 11403 } 11404 case Instruction::Select: { 11405 setInsertPointAfterBundle(E); 11406 11407 Value *Cond = vectorizeOperand(E, 0, PostponedPHIs); 11408 if (E->VectorizedValue) { 11409 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11410 return E->VectorizedValue; 11411 } 11412 Value *True = vectorizeOperand(E, 1, PostponedPHIs); 11413 if (E->VectorizedValue) { 11414 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11415 return E->VectorizedValue; 11416 } 11417 Value *False = vectorizeOperand(E, 2, PostponedPHIs); 11418 if (E->VectorizedValue) { 11419 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11420 return E->VectorizedValue; 11421 } 11422 if (True->getType() != False->getType()) { 11423 assert((MinBWs.contains(getOperandEntry(E, 1)) || 11424 MinBWs.contains(getOperandEntry(E, 2))) && 11425 "Expected item in MinBWs."); 11426 True = Builder.CreateIntCast(True, VecTy, IsSigned); 11427 False = Builder.CreateIntCast(False, VecTy, IsSigned); 11428 } 11429 11430 Value *V = Builder.CreateSelect(Cond, True, False); 11431 V = FinalShuffle(V, E, VecTy, IsSigned); 11432 11433 E->VectorizedValue = V; 11434 ++NumVectorInstructions; 11435 return V; 11436 } 11437 case Instruction::FNeg: { 11438 setInsertPointAfterBundle(E); 11439 11440 Value *Op = vectorizeOperand(E, 0, PostponedPHIs); 11441 11442 if (E->VectorizedValue) { 11443 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11444 return E->VectorizedValue; 11445 } 11446 11447 Value *V = Builder.CreateUnOp( 11448 static_cast<Instruction::UnaryOps>(E->getOpcode()), Op); 11449 propagateIRFlags(V, E->Scalars, VL0); 11450 if (auto *I = dyn_cast<Instruction>(V)) 11451 V = propagateMetadata(I, E->Scalars); 11452 11453 V = FinalShuffle(V, E, VecTy, IsSigned); 11454 11455 E->VectorizedValue = V; 11456 ++NumVectorInstructions; 11457 11458 return V; 11459 } 11460 case Instruction::Add: 11461 case Instruction::FAdd: 11462 case Instruction::Sub: 11463 case Instruction::FSub: 11464 case Instruction::Mul: 11465 case Instruction::FMul: 11466 case Instruction::UDiv: 11467 case Instruction::SDiv: 11468 case Instruction::FDiv: 11469 case Instruction::URem: 11470 case Instruction::SRem: 11471 case Instruction::FRem: 11472 case Instruction::Shl: 11473 case Instruction::LShr: 11474 case Instruction::AShr: 11475 case Instruction::And: 11476 case Instruction::Or: 11477 case Instruction::Xor: { 11478 setInsertPointAfterBundle(E); 11479 11480 Value *LHS = vectorizeOperand(E, 0, PostponedPHIs); 11481 if (E->VectorizedValue) { 11482 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11483 return E->VectorizedValue; 11484 } 11485 Value *RHS = vectorizeOperand(E, 1, PostponedPHIs); 11486 if (E->VectorizedValue) { 11487 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11488 return E->VectorizedValue; 11489 } 11490 if (LHS->getType() != RHS->getType()) { 11491 assert((MinBWs.contains(getOperandEntry(E, 0)) || 11492 MinBWs.contains(getOperandEntry(E, 1))) && 11493 "Expected item in MinBWs."); 11494 LHS = Builder.CreateIntCast(LHS, VecTy, IsSigned); 11495 RHS = Builder.CreateIntCast(RHS, VecTy, IsSigned); 11496 } 11497 11498 Value *V = Builder.CreateBinOp( 11499 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, 11500 RHS); 11501 propagateIRFlags(V, E->Scalars, VL0); 11502 if (auto *I = 
dyn_cast<Instruction>(V)) 11503 V = propagateMetadata(I, E->Scalars); 11504 11505 V = FinalShuffle(V, E, VecTy, IsSigned); 11506 11507 E->VectorizedValue = V; 11508 ++NumVectorInstructions; 11509 11510 return V; 11511 } 11512 case Instruction::Load: { 11513 // Loads are inserted at the head of the tree because we don't want to 11514 // sink them all the way down past store instructions. 11515 setInsertPointAfterBundle(E); 11516 11517 LoadInst *LI = cast<LoadInst>(VL0); 11518 Instruction *NewLI; 11519 Value *PO = LI->getPointerOperand(); 11520 if (E->State == TreeEntry::Vectorize) { 11521 NewLI = Builder.CreateAlignedLoad(VecTy, PO, LI->getAlign()); 11522 11523 // The pointer operand uses an in-tree scalar so we add the new 11524 // LoadInst to ExternalUses list to make sure that an extract will 11525 // be generated in the future. 11526 if (isa<Instruction>(PO)) { 11527 if (TreeEntry *Entry = getTreeEntry(PO)) { 11528 // Find which lane we need to extract. 11529 unsigned FoundLane = Entry->findLaneForValue(PO); 11530 ExternalUses.emplace_back(PO, NewLI, FoundLane); 11531 } 11532 } 11533 } else { 11534 assert((E->State == TreeEntry::ScatterVectorize || 11535 E->State == TreeEntry::PossibleStridedVectorize) && 11536 "Unhandled state"); 11537 Value *VecPtr = vectorizeOperand(E, 0, PostponedPHIs); 11538 if (E->VectorizedValue) { 11539 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11540 return E->VectorizedValue; 11541 } 11542 // Use the minimum alignment of the gathered loads. 11543 Align CommonAlignment = LI->getAlign(); 11544 for (Value *V : E->Scalars) 11545 CommonAlignment = 11546 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign()); 11547 NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment); 11548 } 11549 Value *V = propagateMetadata(NewLI, E->Scalars); 11550 11551 V = FinalShuffle(V, E, VecTy, IsSigned); 11552 E->VectorizedValue = V; 11553 ++NumVectorInstructions; 11554 return V; 11555 } 11556 case Instruction::Store: { 11557 auto *SI = cast<StoreInst>(VL0); 11558 11559 setInsertPointAfterBundle(E); 11560 11561 Value *VecValue = vectorizeOperand(E, 0, PostponedPHIs); 11562 VecValue = FinalShuffle(VecValue, E, VecTy, IsSigned); 11563 11564 Value *Ptr = SI->getPointerOperand(); 11565 StoreInst *ST = 11566 Builder.CreateAlignedStore(VecValue, Ptr, SI->getAlign()); 11567 11568 // The pointer operand uses an in-tree scalar, so add the new StoreInst to 11569 // ExternalUses to make sure that an extract will be generated in the 11570 // future. 11571 if (isa<Instruction>(Ptr)) { 11572 if (TreeEntry *Entry = getTreeEntry(Ptr)) { 11573 // Find which lane we need to extract. 
11574 unsigned FoundLane = Entry->findLaneForValue(Ptr); 11575 ExternalUses.push_back(ExternalUser(Ptr, ST, FoundLane)); 11576 } 11577 } 11578 11579 Value *V = propagateMetadata(ST, E->Scalars); 11580 11581 E->VectorizedValue = V; 11582 ++NumVectorInstructions; 11583 return V; 11584 } 11585 case Instruction::GetElementPtr: { 11586 auto *GEP0 = cast<GetElementPtrInst>(VL0); 11587 setInsertPointAfterBundle(E); 11588 11589 Value *Op0 = vectorizeOperand(E, 0, PostponedPHIs); 11590 if (E->VectorizedValue) { 11591 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11592 return E->VectorizedValue; 11593 } 11594 11595 SmallVector<Value *> OpVecs; 11596 for (int J = 1, N = GEP0->getNumOperands(); J < N; ++J) { 11597 Value *OpVec = vectorizeOperand(E, J, PostponedPHIs); 11598 if (E->VectorizedValue) { 11599 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11600 return E->VectorizedValue; 11601 } 11602 OpVecs.push_back(OpVec); 11603 } 11604 11605 Value *V = Builder.CreateGEP(GEP0->getSourceElementType(), Op0, OpVecs); 11606 if (Instruction *I = dyn_cast<GetElementPtrInst>(V)) { 11607 SmallVector<Value *> GEPs; 11608 for (Value *V : E->Scalars) { 11609 if (isa<GetElementPtrInst>(V)) 11610 GEPs.push_back(V); 11611 } 11612 V = propagateMetadata(I, GEPs); 11613 } 11614 11615 V = FinalShuffle(V, E, VecTy, IsSigned); 11616 11617 E->VectorizedValue = V; 11618 ++NumVectorInstructions; 11619 11620 return V; 11621 } 11622 case Instruction::Call: { 11623 CallInst *CI = cast<CallInst>(VL0); 11624 setInsertPointAfterBundle(E); 11625 11626 Intrinsic::ID IID = Intrinsic::not_intrinsic; 11627 if (Function *FI = CI->getCalledFunction()) 11628 IID = FI->getIntrinsicID(); 11629 11630 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 11631 11632 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); 11633 bool UseIntrinsic = ID != Intrinsic::not_intrinsic && 11634 VecCallCosts.first <= VecCallCosts.second; 11635 11636 Value *ScalarArg = nullptr; 11637 SmallVector<Value *> OpVecs; 11638 SmallVector<Type *, 2> TysForDecl; 11639 // Add return type if intrinsic is overloaded on it. 11640 if (isVectorIntrinsicWithOverloadTypeAtArg(IID, -1)) 11641 TysForDecl.push_back( 11642 FixedVectorType::get(CI->getType(), E->Scalars.size())); 11643 for (unsigned I : seq<unsigned>(0, CI->arg_size())) { 11644 ValueList OpVL; 11645 // Some intrinsics have scalar arguments. This argument should not be 11646 // vectorized. 
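// For example (illustrative), when widening a call to llvm.powi the i32
// exponent operand stays scalar:
//   %r = call <4 x float> @llvm.powi.v4f32.i32(<4 x float> %x, i32 %n)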
11647 if (UseIntrinsic && isVectorIntrinsicWithScalarOpAtArg(IID, I)) { 11648 CallInst *CEI = cast<CallInst>(VL0); 11649 ScalarArg = CEI->getArgOperand(I); 11650 OpVecs.push_back(CEI->getArgOperand(I)); 11651 if (isVectorIntrinsicWithOverloadTypeAtArg(IID, I)) 11652 TysForDecl.push_back(ScalarArg->getType()); 11653 continue; 11654 } 11655 11656 Value *OpVec = vectorizeOperand(E, I, PostponedPHIs); 11657 if (E->VectorizedValue) { 11658 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11659 return E->VectorizedValue; 11660 } 11661 LLVM_DEBUG(dbgs() << "SLP: OpVec[" << I << "]: " << *OpVec << "\n"); 11662 OpVecs.push_back(OpVec); 11663 if (isVectorIntrinsicWithOverloadTypeAtArg(IID, I)) 11664 TysForDecl.push_back(OpVec->getType()); 11665 } 11666 11667 Function *CF; 11668 if (!UseIntrinsic) { 11669 VFShape Shape = 11670 VFShape::get(CI->getFunctionType(), 11671 ElementCount::getFixed( 11672 static_cast<unsigned>(VecTy->getNumElements())), 11673 false /*HasGlobalPred*/); 11674 CF = VFDatabase(*CI).getVectorizedFunction(Shape); 11675 } else { 11676 CF = Intrinsic::getDeclaration(F->getParent(), ID, TysForDecl); 11677 } 11678 11679 SmallVector<OperandBundleDef, 1> OpBundles; 11680 CI->getOperandBundlesAsDefs(OpBundles); 11681 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles); 11682 11683 // The scalar argument uses an in-tree scalar so we add the new vectorized 11684 // call to ExternalUses list to make sure that an extract will be 11685 // generated in the future. 11686 if (isa_and_present<Instruction>(ScalarArg)) { 11687 if (TreeEntry *Entry = getTreeEntry(ScalarArg)) { 11688 // Find which lane we need to extract. 11689 unsigned FoundLane = Entry->findLaneForValue(ScalarArg); 11690 ExternalUses.push_back( 11691 ExternalUser(ScalarArg, cast<User>(V), FoundLane)); 11692 } 11693 } 11694 11695 propagateIRFlags(V, E->Scalars, VL0); 11696 V = FinalShuffle(V, E, VecTy, IsSigned); 11697 11698 E->VectorizedValue = V; 11699 ++NumVectorInstructions; 11700 return V; 11701 } 11702 case Instruction::ShuffleVector: { 11703 assert(E->isAltShuffle() && 11704 ((Instruction::isBinaryOp(E->getOpcode()) && 11705 Instruction::isBinaryOp(E->getAltOpcode())) || 11706 (Instruction::isCast(E->getOpcode()) && 11707 Instruction::isCast(E->getAltOpcode())) || 11708 (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) && 11709 "Invalid Shuffle Vector Operand"); 11710 11711 Value *LHS = nullptr, *RHS = nullptr; 11712 if (Instruction::isBinaryOp(E->getOpcode()) || isa<CmpInst>(VL0)) { 11713 setInsertPointAfterBundle(E); 11714 LHS = vectorizeOperand(E, 0, PostponedPHIs); 11715 if (E->VectorizedValue) { 11716 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11717 return E->VectorizedValue; 11718 } 11719 RHS = vectorizeOperand(E, 1, PostponedPHIs); 11720 } else { 11721 setInsertPointAfterBundle(E); 11722 LHS = vectorizeOperand(E, 0, PostponedPHIs); 11723 } 11724 if (E->VectorizedValue) { 11725 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11726 return E->VectorizedValue; 11727 } 11728 if (LHS && RHS && LHS->getType() != RHS->getType()) { 11729 assert((MinBWs.contains(getOperandEntry(E, 0)) || 11730 MinBWs.contains(getOperandEntry(E, 1))) && 11731 "Expected item in MinBWs."); 11732 LHS = Builder.CreateIntCast(LHS, VecTy, IsSigned); 11733 RHS = Builder.CreateIntCast(RHS, VecTy, IsSigned); 11734 } 11735 11736 Value *V0, *V1; 11737 if (Instruction::isBinaryOp(E->getOpcode())) { 11738 V0 = Builder.CreateBinOp( 11739 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, 
RHS); 11740 V1 = Builder.CreateBinOp( 11741 static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS); 11742 } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) { 11743 V0 = Builder.CreateCmp(CI0->getPredicate(), LHS, RHS); 11744 auto *AltCI = cast<CmpInst>(E->getAltOp()); 11745 CmpInst::Predicate AltPred = AltCI->getPredicate(); 11746 V1 = Builder.CreateCmp(AltPred, LHS, RHS); 11747 } else { 11748 V0 = Builder.CreateCast( 11749 static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy); 11750 V1 = Builder.CreateCast( 11751 static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy); 11752 } 11753 // Add V0 and V1 to later analysis to try to find and remove matching 11754 // instruction, if any. 11755 for (Value *V : {V0, V1}) { 11756 if (auto *I = dyn_cast<Instruction>(V)) { 11757 GatherShuffleExtractSeq.insert(I); 11758 CSEBlocks.insert(I->getParent()); 11759 } 11760 } 11761 11762 // Create shuffle to take alternate operations from the vector. 11763 // Also, gather up main and alt scalar ops to propagate IR flags to 11764 // each vector operation. 11765 ValueList OpScalars, AltScalars; 11766 SmallVector<int> Mask; 11767 E->buildAltOpShuffleMask( 11768 [E, this](Instruction *I) { 11769 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 11770 return isAlternateInstruction(I, E->getMainOp(), E->getAltOp(), 11771 *TLI); 11772 }, 11773 Mask, &OpScalars, &AltScalars); 11774 11775 propagateIRFlags(V0, OpScalars); 11776 propagateIRFlags(V1, AltScalars); 11777 11778 Value *V = Builder.CreateShuffleVector(V0, V1, Mask); 11779 if (auto *I = dyn_cast<Instruction>(V)) { 11780 V = propagateMetadata(I, E->Scalars); 11781 GatherShuffleExtractSeq.insert(I); 11782 CSEBlocks.insert(I->getParent()); 11783 } 11784 11785 if (V->getType() != VecTy && !isa<CmpInst>(VL0)) 11786 V = Builder.CreateIntCast( 11787 V, FixedVectorType::get(ScalarTy, E->getVectorFactor()), IsSigned); 11788 E->VectorizedValue = V; 11789 ++NumVectorInstructions; 11790 11791 return V; 11792 } 11793 default: 11794 llvm_unreachable("unknown inst"); 11795 } 11796 return nullptr; 11797 } 11798 11799 Value *BoUpSLP::vectorizeTree() { 11800 ExtraValueToDebugLocsMap ExternallyUsedValues; 11801 SmallVector<std::pair<Value *, Value *>> ReplacedExternals; 11802 return vectorizeTree(ExternallyUsedValues, ReplacedExternals); 11803 } 11804 11805 namespace { 11806 /// Data type for handling buildvector sequences with the reused scalars from 11807 /// other tree entries. 11808 struct ShuffledInsertData { 11809 /// List of insertelements to be replaced by shuffles. 11810 SmallVector<InsertElementInst *> InsertElements; 11811 /// The parent vectors and shuffle mask for the given list of inserts. 11812 MapVector<Value *, SmallVector<int>> ValueMasks; 11813 }; 11814 } // namespace 11815 11816 Value *BoUpSLP::vectorizeTree( 11817 const ExtraValueToDebugLocsMap &ExternallyUsedValues, 11818 SmallVectorImpl<std::pair<Value *, Value *>> &ReplacedExternals, 11819 Instruction *ReductionRoot) { 11820 // All blocks must be scheduled before any instructions are inserted. 11821 for (auto &BSIter : BlocksSchedules) { 11822 scheduleBlock(BSIter.second.get()); 11823 } 11824 // Clean Entry-to-LastInstruction table. It can be affected after scheduling, 11825 // need to rebuild it. 
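// (Scheduling may have moved instructions within their blocks, so any
// cached last-instruction positions computed earlier may be stale; the
// table is repopulated lazily as entries are revisited below.)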
11826 EntryToLastInstruction.clear();
11827
11828 if (ReductionRoot)
11829 Builder.SetInsertPoint(ReductionRoot->getParent(),
11830 ReductionRoot->getIterator());
11831 else
11832 Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin());
11833
11834 // Postpone emission of PHI operands to avoid cyclic dependency issues.
11835 (void)vectorizeTree(VectorizableTree[0].get(), /*PostponedPHIs=*/true);
11836 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree)
11837 if (TE->State == TreeEntry::Vectorize &&
11838 TE->getOpcode() == Instruction::PHI && !TE->isAltShuffle() &&
11839 TE->VectorizedValue)
11840 (void)vectorizeTree(TE.get(), /*PostponedPHIs=*/false);
11841 // Run through the list of postponed gathers and emit them, replacing the temp
11842 // emitted allocas with actual vector instructions.
11843 ArrayRef<const TreeEntry *> PostponedNodes = PostponedGathers.getArrayRef();
11844 DenseMap<Value *, SmallVector<TreeEntry *>> PostponedValues;
11845 for (const TreeEntry *E : PostponedNodes) {
11846 auto *TE = const_cast<TreeEntry *>(E);
11847 if (auto *VecTE = getTreeEntry(TE->Scalars.front()))
11848 if (VecTE->isSame(TE->UserTreeIndices.front().UserTE->getOperand(
11849 TE->UserTreeIndices.front().EdgeIdx)))
11850 // Found a gather node which is absolutely the same as one of the
11851 // vectorized nodes. It may happen after reordering.
11852 continue;
11853 auto *PrevVec = cast<Instruction>(TE->VectorizedValue);
11854 TE->VectorizedValue = nullptr;
11855 auto *UserI =
11856 cast<Instruction>(TE->UserTreeIndices.front().UserTE->VectorizedValue);
11857 // If the user is a PHI node, its vector code has to be inserted right before
11858 // the block terminator. Since the node was delayed, there were some unresolved
11859 // dependencies at the moment when the stub instruction was emitted. If any of
11860 // these dependencies turns out to be an operand of another PHI coming from
11861 // this same block, the position of the stub instruction becomes invalid. This
11862 // is because the source vector that is supposed to feed this gather node was
11863 // inserted at the end of the block [after the stub instruction]. So we need
11864 // to adjust the insertion point again to the end of the block.
11865 if (isa<PHINode>(UserI)) {
11866 // Insert before all users.
11867 Instruction *InsertPt = PrevVec->getParent()->getTerminator();
11868 for (User *U : PrevVec->users()) {
11869 if (U == UserI)
11870 continue;
11871 auto *UI = dyn_cast<Instruction>(U);
11872 if (!UI || isa<PHINode>(UI) || UI->getParent() != InsertPt->getParent())
11873 continue;
11874 if (UI->comesBefore(InsertPt))
11875 InsertPt = UI;
11876 }
11877 Builder.SetInsertPoint(InsertPt);
11878 } else {
11879 Builder.SetInsertPoint(PrevVec);
11880 }
11881 Builder.SetCurrentDebugLocation(UserI->getDebugLoc());
11882 Value *Vec = vectorizeTree(TE, /*PostponedPHIs=*/false);
11883 PrevVec->replaceAllUsesWith(Vec);
11884 PostponedValues.try_emplace(Vec).first->second.push_back(TE);
11885 // Replace the stub vector node, if it was used before for one of the
11886 // buildvector nodes already.
11887 auto It = PostponedValues.find(PrevVec); 11888 if (It != PostponedValues.end()) { 11889 for (TreeEntry *VTE : It->getSecond()) 11890 VTE->VectorizedValue = Vec; 11891 } 11892 eraseInstruction(PrevVec); 11893 } 11894 11895 LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() 11896 << " values .\n"); 11897 11898 SmallVector<ShuffledInsertData> ShuffledInserts; 11899 // Maps vector instruction to original insertelement instruction 11900 DenseMap<Value *, InsertElementInst *> VectorToInsertElement; 11901 // Maps extract Scalar to the corresponding extractelement instruction in the 11902 // basic block. Only one extractelement per block should be emitted. 11903 DenseMap<Value *, DenseMap<BasicBlock *, Instruction *>> ScalarToEEs; 11904 SmallDenseSet<Value *, 4> UsedInserts; 11905 DenseMap<Value *, Value *> VectorCasts; 11906 // Extract all of the elements with the external uses. 11907 for (const auto &ExternalUse : ExternalUses) { 11908 Value *Scalar = ExternalUse.Scalar; 11909 llvm::User *User = ExternalUse.User; 11910 11911 // Skip users that we already RAUW. This happens when one instruction 11912 // has multiple uses of the same value. 11913 if (User && !is_contained(Scalar->users(), User)) 11914 continue; 11915 TreeEntry *E = getTreeEntry(Scalar); 11916 assert(E && "Invalid scalar"); 11917 assert(E->State != TreeEntry::NeedToGather && 11918 "Extracting from a gather list"); 11919 // Non-instruction pointers are not deleted, just skip them. 11920 if (E->getOpcode() == Instruction::GetElementPtr && 11921 !isa<GetElementPtrInst>(Scalar)) 11922 continue; 11923 11924 Value *Vec = E->VectorizedValue; 11925 assert(Vec && "Can't find vectorizable value"); 11926 11927 Value *Lane = Builder.getInt32(ExternalUse.Lane); 11928 auto ExtractAndExtendIfNeeded = [&](Value *Vec) { 11929 if (Scalar->getType() != Vec->getType()) { 11930 Value *Ex = nullptr; 11931 auto It = ScalarToEEs.find(Scalar); 11932 if (It != ScalarToEEs.end()) { 11933 // No need to emit many extracts, just move the only one in the 11934 // current block. 11935 auto EEIt = It->second.find(Builder.GetInsertBlock()); 11936 if (EEIt != It->second.end()) { 11937 Instruction *I = EEIt->second; 11938 if (Builder.GetInsertPoint() != Builder.GetInsertBlock()->end() && 11939 Builder.GetInsertPoint()->comesBefore(I)) 11940 I->moveBefore(*Builder.GetInsertPoint()->getParent(), 11941 Builder.GetInsertPoint()); 11942 Ex = I; 11943 } 11944 } 11945 if (!Ex) { 11946 // "Reuse" the existing extract to improve final codegen. 11947 if (auto *ES = dyn_cast<ExtractElementInst>(Scalar)) { 11948 Ex = Builder.CreateExtractElement(ES->getOperand(0), 11949 ES->getOperand(1)); 11950 } else { 11951 Ex = Builder.CreateExtractElement(Vec, Lane); 11952 } 11953 if (auto *I = dyn_cast<Instruction>(Ex)) 11954 ScalarToEEs[Scalar].try_emplace(Builder.GetInsertBlock(), I); 11955 } 11956 // The then branch of the previous if may produce constants, since 0 11957 // operand might be a constant. 11958 if (auto *ExI = dyn_cast<Instruction>(Ex)) { 11959 GatherShuffleExtractSeq.insert(ExI); 11960 CSEBlocks.insert(ExI->getParent()); 11961 } 11962 // If necessary, sign-extend or zero-extend ScalarRoot 11963 // to the larger type. 
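// E.g. (illustrative) if the tree was narrowed to <4 x i8> via MinBWs but
// the external user still expects the original i32 scalar, the extracted
// i8 element is sign- or zero-extended back to i32 here.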
11964 if (Scalar->getType() != Ex->getType()) 11965 return Builder.CreateIntCast(Ex, Scalar->getType(), 11966 MinBWs.find(E)->second.second); 11967 return Ex; 11968 } 11969 assert(isa<FixedVectorType>(Scalar->getType()) && 11970 isa<InsertElementInst>(Scalar) && 11971 "In-tree scalar of vector type is not insertelement?"); 11972 auto *IE = cast<InsertElementInst>(Scalar); 11973 VectorToInsertElement.try_emplace(Vec, IE); 11974 return Vec; 11975 }; 11976 // If User == nullptr, the Scalar is used as extra arg. Generate 11977 // ExtractElement instruction and update the record for this scalar in 11978 // ExternallyUsedValues. 11979 if (!User) { 11980 assert(ExternallyUsedValues.count(Scalar) && 11981 "Scalar with nullptr as an external user must be registered in " 11982 "ExternallyUsedValues map"); 11983 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 11984 if (auto *PHI = dyn_cast<PHINode>(VecI)) 11985 Builder.SetInsertPoint(PHI->getParent(), 11986 PHI->getParent()->getFirstNonPHIIt()); 11987 else 11988 Builder.SetInsertPoint(VecI->getParent(), 11989 std::next(VecI->getIterator())); 11990 } else { 11991 Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin()); 11992 } 11993 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 11994 // Required to update internally referenced instructions. 11995 Scalar->replaceAllUsesWith(NewInst); 11996 ReplacedExternals.emplace_back(Scalar, NewInst); 11997 continue; 11998 } 11999 12000 if (auto *VU = dyn_cast<InsertElementInst>(User)) { 12001 // Skip if the scalar is another vector op or Vec is not an instruction. 12002 if (!Scalar->getType()->isVectorTy() && isa<Instruction>(Vec)) { 12003 if (auto *FTy = dyn_cast<FixedVectorType>(User->getType())) { 12004 if (!UsedInserts.insert(VU).second) 12005 continue; 12006 // Need to use original vector, if the root is truncated. 12007 auto BWIt = MinBWs.find(E); 12008 if (BWIt != MinBWs.end() && Vec->getType() != VU->getType()) { 12009 auto VecIt = VectorCasts.find(Scalar); 12010 if (VecIt == VectorCasts.end()) { 12011 IRBuilder<>::InsertPointGuard Guard(Builder); 12012 if (auto *IVec = dyn_cast<Instruction>(Vec)) 12013 Builder.SetInsertPoint(IVec->getNextNonDebugInstruction()); 12014 Vec = Builder.CreateIntCast(Vec, VU->getType(), 12015 BWIt->second.second); 12016 VectorCasts.try_emplace(Scalar, Vec); 12017 } else { 12018 Vec = VecIt->second; 12019 } 12020 } 12021 12022 std::optional<unsigned> InsertIdx = getInsertIndex(VU); 12023 if (InsertIdx) { 12024 auto *It = 12025 find_if(ShuffledInserts, [VU](const ShuffledInsertData &Data) { 12026 // Checks if 2 insertelements are from the same buildvector. 12027 InsertElementInst *VecInsert = Data.InsertElements.front(); 12028 return areTwoInsertFromSameBuildVector( 12029 VU, VecInsert, 12030 [](InsertElementInst *II) { return II->getOperand(0); }); 12031 }); 12032 unsigned Idx = *InsertIdx; 12033 if (It == ShuffledInserts.end()) { 12034 (void)ShuffledInserts.emplace_back(); 12035 It = std::next(ShuffledInserts.begin(), 12036 ShuffledInserts.size() - 1); 12037 SmallVectorImpl<int> &Mask = It->ValueMasks[Vec]; 12038 if (Mask.empty()) 12039 Mask.assign(FTy->getNumElements(), PoisonMaskElem); 12040 // Find the insertvector, vectorized in tree, if any. 12041 Value *Base = VU; 12042 while (auto *IEBase = dyn_cast<InsertElementInst>(Base)) { 12043 if (IEBase != User && 12044 (!IEBase->hasOneUse() || 12045 getInsertIndex(IEBase).value_or(Idx) == Idx)) 12046 break; 12047 // Build the mask for the vectorized insertelement instructions. 
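// E.g. (illustrative) for a buildvector chain
//   %i0 = insertelement <4 x float> poison, float %a, i32 0
//   %i1 = insertelement <4 x float> %i0, float %b, i32 1
// each insert position that was already vectorized is recorded in the mask
// at its own index.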
12048 if (const TreeEntry *E = getTreeEntry(IEBase)) { 12049 do { 12050 IEBase = cast<InsertElementInst>(Base); 12051 int IEIdx = *getInsertIndex(IEBase); 12052 assert(Mask[Idx] == PoisonMaskElem && 12053 "InsertElementInstruction used already."); 12054 Mask[IEIdx] = IEIdx; 12055 Base = IEBase->getOperand(0); 12056 } while (E == getTreeEntry(Base)); 12057 break; 12058 } 12059 Base = cast<InsertElementInst>(Base)->getOperand(0); 12060 // After the vectorization the def-use chain has changed, need 12061 // to look through original insertelement instructions, if they 12062 // get replaced by vector instructions. 12063 auto It = VectorToInsertElement.find(Base); 12064 if (It != VectorToInsertElement.end()) 12065 Base = It->second; 12066 } 12067 } 12068 SmallVectorImpl<int> &Mask = It->ValueMasks[Vec]; 12069 if (Mask.empty()) 12070 Mask.assign(FTy->getNumElements(), PoisonMaskElem); 12071 Mask[Idx] = ExternalUse.Lane; 12072 It->InsertElements.push_back(cast<InsertElementInst>(User)); 12073 continue; 12074 } 12075 } 12076 } 12077 } 12078 12079 // Generate extracts for out-of-tree users. 12080 // Find the insertion point for the extractelement lane. 12081 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 12082 if (PHINode *PH = dyn_cast<PHINode>(User)) { 12083 for (unsigned I : seq<unsigned>(0, PH->getNumIncomingValues())) { 12084 if (PH->getIncomingValue(I) == Scalar) { 12085 Instruction *IncomingTerminator = 12086 PH->getIncomingBlock(I)->getTerminator(); 12087 if (isa<CatchSwitchInst>(IncomingTerminator)) { 12088 Builder.SetInsertPoint(VecI->getParent(), 12089 std::next(VecI->getIterator())); 12090 } else { 12091 Builder.SetInsertPoint(PH->getIncomingBlock(I)->getTerminator()); 12092 } 12093 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 12094 PH->setOperand(I, NewInst); 12095 } 12096 } 12097 } else { 12098 Builder.SetInsertPoint(cast<Instruction>(User)); 12099 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 12100 User->replaceUsesOfWith(Scalar, NewInst); 12101 } 12102 } else { 12103 Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin()); 12104 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 12105 User->replaceUsesOfWith(Scalar, NewInst); 12106 } 12107 12108 LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); 12109 } 12110 12111 auto CreateShuffle = [&](Value *V1, Value *V2, ArrayRef<int> Mask) { 12112 SmallVector<int> CombinedMask1(Mask.size(), PoisonMaskElem); 12113 SmallVector<int> CombinedMask2(Mask.size(), PoisonMaskElem); 12114 int VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 12115 for (int I = 0, E = Mask.size(); I < E; ++I) { 12116 if (Mask[I] < VF) 12117 CombinedMask1[I] = Mask[I]; 12118 else 12119 CombinedMask2[I] = Mask[I] - VF; 12120 } 12121 ShuffleInstructionBuilder ShuffleBuilder(Builder, *this); 12122 ShuffleBuilder.add(V1, CombinedMask1); 12123 if (V2) 12124 ShuffleBuilder.add(V2, CombinedMask2); 12125 return ShuffleBuilder.finalize(std::nullopt); 12126 }; 12127 12128 auto &&ResizeToVF = [&CreateShuffle](Value *Vec, ArrayRef<int> Mask, 12129 bool ForSingleMask) { 12130 unsigned VF = Mask.size(); 12131 unsigned VecVF = cast<FixedVectorType>(Vec->getType())->getNumElements(); 12132 if (VF != VecVF) { 12133 if (any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); })) { 12134 Vec = CreateShuffle(Vec, nullptr, Mask); 12135 return std::make_pair(Vec, true); 12136 } 12137 if (!ForSingleMask) { 12138 SmallVector<int> ResizeMask(VF, PoisonMaskElem); 12139 for (unsigned I = 0; I < VF; ++I) { 12140 if (Mask[I] != PoisonMaskElem) 12141 
ResizeMask[Mask[I]] = Mask[I]; 12142 } 12143 Vec = CreateShuffle(Vec, nullptr, ResizeMask); 12144 } 12145 } 12146 12147 return std::make_pair(Vec, false); 12148 }; 12149 // Perform shuffling of the vectorize tree entries for better handling of 12150 // external extracts. 12151 for (int I = 0, E = ShuffledInserts.size(); I < E; ++I) { 12152 // Find the first and the last instruction in the list of insertelements. 12153 sort(ShuffledInserts[I].InsertElements, isFirstInsertElement); 12154 InsertElementInst *FirstInsert = ShuffledInserts[I].InsertElements.front(); 12155 InsertElementInst *LastInsert = ShuffledInserts[I].InsertElements.back(); 12156 Builder.SetInsertPoint(LastInsert); 12157 auto Vector = ShuffledInserts[I].ValueMasks.takeVector(); 12158 Value *NewInst = performExtractsShuffleAction<Value>( 12159 MutableArrayRef(Vector.data(), Vector.size()), 12160 FirstInsert->getOperand(0), 12161 [](Value *Vec) { 12162 return cast<VectorType>(Vec->getType()) 12163 ->getElementCount() 12164 .getKnownMinValue(); 12165 }, 12166 ResizeToVF, 12167 [FirstInsert, &CreateShuffle](ArrayRef<int> Mask, 12168 ArrayRef<Value *> Vals) { 12169 assert((Vals.size() == 1 || Vals.size() == 2) && 12170 "Expected exactly 1 or 2 input values."); 12171 if (Vals.size() == 1) { 12172 // Do not create shuffle if the mask is a simple identity 12173 // non-resizing mask. 12174 if (Mask.size() != cast<FixedVectorType>(Vals.front()->getType()) 12175 ->getNumElements() || 12176 !ShuffleVectorInst::isIdentityMask(Mask, Mask.size())) 12177 return CreateShuffle(Vals.front(), nullptr, Mask); 12178 return Vals.front(); 12179 } 12180 return CreateShuffle(Vals.front() ? Vals.front() 12181 : FirstInsert->getOperand(0), 12182 Vals.back(), Mask); 12183 }); 12184 auto It = ShuffledInserts[I].InsertElements.rbegin(); 12185 // Rebuild buildvector chain. 12186 InsertElementInst *II = nullptr; 12187 if (It != ShuffledInserts[I].InsertElements.rend()) 12188 II = *It; 12189 SmallVector<Instruction *> Inserts; 12190 while (It != ShuffledInserts[I].InsertElements.rend()) { 12191 assert(II && "Must be an insertelement instruction."); 12192 if (*It == II) 12193 ++It; 12194 else 12195 Inserts.push_back(cast<Instruction>(II)); 12196 II = dyn_cast<InsertElementInst>(II->getOperand(0)); 12197 } 12198 for (Instruction *II : reverse(Inserts)) { 12199 II->replaceUsesOfWith(II->getOperand(0), NewInst); 12200 if (auto *NewI = dyn_cast<Instruction>(NewInst)) 12201 if (II->getParent() == NewI->getParent() && II->comesBefore(NewI)) 12202 II->moveAfter(NewI); 12203 NewInst = II; 12204 } 12205 LastInsert->replaceAllUsesWith(NewInst); 12206 for (InsertElementInst *IE : reverse(ShuffledInserts[I].InsertElements)) { 12207 IE->replaceUsesOfWith(IE->getOperand(0), 12208 PoisonValue::get(IE->getOperand(0)->getType())); 12209 IE->replaceUsesOfWith(IE->getOperand(1), 12210 PoisonValue::get(IE->getOperand(1)->getType())); 12211 eraseInstruction(IE); 12212 } 12213 CSEBlocks.insert(LastInsert->getParent()); 12214 } 12215 12216 SmallVector<Instruction *> RemovedInsts; 12217 // For each vectorized value: 12218 for (auto &TEPtr : VectorizableTree) { 12219 TreeEntry *Entry = TEPtr.get(); 12220 12221 // No need to handle users of gathered values. 
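// (Their scalars stay in place; only scalars that became part of a
// vectorized bundle are erased below.)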
12222 if (Entry->State == TreeEntry::NeedToGather) 12223 continue; 12224 12225 assert(Entry->VectorizedValue && "Can't find vectorizable value"); 12226 12227 // For each lane: 12228 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 12229 Value *Scalar = Entry->Scalars[Lane]; 12230 12231 if (Entry->getOpcode() == Instruction::GetElementPtr && 12232 !isa<GetElementPtrInst>(Scalar)) 12233 continue; 12234 #ifndef NDEBUG 12235 Type *Ty = Scalar->getType(); 12236 if (!Ty->isVoidTy()) { 12237 for (User *U : Scalar->users()) { 12238 LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); 12239 12240 // It is legal to delete users in the ignorelist. 12241 assert((getTreeEntry(U) || 12242 (UserIgnoreList && UserIgnoreList->contains(U)) || 12243 (isa_and_nonnull<Instruction>(U) && 12244 isDeleted(cast<Instruction>(U)))) && 12245 "Deleting out-of-tree value"); 12246 } 12247 } 12248 #endif 12249 LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); 12250 eraseInstruction(cast<Instruction>(Scalar)); 12251 // Retain to-be-deleted instructions for some debug-info 12252 // bookkeeping. NOTE: eraseInstruction only marks the instruction for 12253 // deletion - instructions are not deleted until later. 12254 RemovedInsts.push_back(cast<Instruction>(Scalar)); 12255 } 12256 } 12257 12258 // Merge the DIAssignIDs from the about-to-be-deleted instructions into the 12259 // new vector instruction. 12260 if (auto *V = dyn_cast<Instruction>(VectorizableTree[0]->VectorizedValue)) 12261 V->mergeDIAssignID(RemovedInsts); 12262 12263 Builder.ClearInsertionPoint(); 12264 InstrElementSize.clear(); 12265 12266 return VectorizableTree[0]->VectorizedValue; 12267 } 12268 12269 void BoUpSLP::optimizeGatherSequence() { 12270 LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherShuffleExtractSeq.size() 12271 << " gather sequences instructions.\n"); 12272 // LICM InsertElementInst sequences. 12273 for (Instruction *I : GatherShuffleExtractSeq) { 12274 if (isDeleted(I)) 12275 continue; 12276 12277 // Check if this block is inside a loop. 12278 Loop *L = LI->getLoopFor(I->getParent()); 12279 if (!L) 12280 continue; 12281 12282 // Check if it has a preheader. 12283 BasicBlock *PreHeader = L->getLoopPreheader(); 12284 if (!PreHeader) 12285 continue; 12286 12287 // If the vector or the element that we insert into it are 12288 // instructions that are defined in this basic block then we can't 12289 // hoist this instruction. 12290 if (any_of(I->operands(), [L](Value *V) { 12291 auto *OpI = dyn_cast<Instruction>(V); 12292 return OpI && L->contains(OpI); 12293 })) 12294 continue; 12295 12296 // We can hoist this instruction. Move it to the pre-header. 12297 I->moveBefore(PreHeader->getTerminator()); 12298 CSEBlocks.insert(PreHeader); 12299 } 12300 12301 // Make a list of all reachable blocks in our CSE queue. 12302 SmallVector<const DomTreeNode *, 8> CSEWorkList; 12303 CSEWorkList.reserve(CSEBlocks.size()); 12304 for (BasicBlock *BB : CSEBlocks) 12305 if (DomTreeNode *N = DT->getNode(BB)) { 12306 assert(DT->isReachableFromEntry(N)); 12307 CSEWorkList.push_back(N); 12308 } 12309 12310 // Sort blocks by domination. This ensures we visit a block after all blocks 12311 // dominating it are visited. 
12312 llvm::sort(CSEWorkList, [](const DomTreeNode *A, const DomTreeNode *B) {
12313 assert((A == B) == (A->getDFSNumIn() == B->getDFSNumIn()) &&
12314 "Different nodes should have different DFS numbers");
12315 return A->getDFSNumIn() < B->getDFSNumIn();
12316 });
12317
12318 // Less defined shuffles can be replaced by the more defined copies.
12319 // Of two shuffles, one is less defined if it has the same vector operands
12320 // and its mask indices are the same as in the other one or undefs. E.g.
12321 // shuffle %0, poison, <0, 0, 0, undef> is less defined than shuffle %0,
12322 // poison, <0, 0, 0, 0>.
12323 auto &&IsIdenticalOrLessDefined = [this](Instruction *I1, Instruction *I2,
12324 SmallVectorImpl<int> &NewMask) {
12325 if (I1->getType() != I2->getType())
12326 return false;
12327 auto *SI1 = dyn_cast<ShuffleVectorInst>(I1);
12328 auto *SI2 = dyn_cast<ShuffleVectorInst>(I2);
12329 if (!SI1 || !SI2)
12330 return I1->isIdenticalTo(I2);
12331 if (SI1->isIdenticalTo(SI2))
12332 return true;
12333 for (int I = 0, E = SI1->getNumOperands(); I < E; ++I)
12334 if (SI1->getOperand(I) != SI2->getOperand(I))
12335 return false;
12336 // Check if the second instruction is more defined than the first one.
12337 NewMask.assign(SI2->getShuffleMask().begin(), SI2->getShuffleMask().end());
12338 ArrayRef<int> SM1 = SI1->getShuffleMask();
12339 // Count trailing undefs in the mask to check the final number of used
12340 // registers.
12341 unsigned LastUndefsCnt = 0;
12342 for (int I = 0, E = NewMask.size(); I < E; ++I) {
12343 if (SM1[I] == PoisonMaskElem)
12344 ++LastUndefsCnt;
12345 else
12346 LastUndefsCnt = 0;
12347 if (NewMask[I] != PoisonMaskElem && SM1[I] != PoisonMaskElem &&
12348 NewMask[I] != SM1[I])
12349 return false;
12350 if (NewMask[I] == PoisonMaskElem)
12351 NewMask[I] = SM1[I];
12352 }
12353 // Check if the last undefs actually change the final number of used vector
12354 // registers.
12355 return SM1.size() - LastUndefsCnt > 1 &&
12356 TTI->getNumberOfParts(SI1->getType()) ==
12357 TTI->getNumberOfParts(
12358 FixedVectorType::get(SI1->getType()->getElementType(),
12359 SM1.size() - LastUndefsCnt));
12360 };
12361 // Perform O(N^2) search over the gather/shuffle sequences and merge identical
12362 // instructions. TODO: We can further optimize this scan if we split the
12363 // instructions into different buckets based on the insert lane.
12364 SmallVector<Instruction *, 16> Visited;
12365 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
12366 assert(*I &&
12367 (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
12368 "Worklist not sorted properly!");
12369 BasicBlock *BB = (*I)->getBlock();
12370 // For all instructions in blocks containing gather sequences:
12371 for (Instruction &In : llvm::make_early_inc_range(*BB)) {
12372 if (isDeleted(&In))
12373 continue;
12374 if (!isa<InsertElementInst, ExtractElementInst, ShuffleVectorInst>(&In) &&
12375 !GatherShuffleExtractSeq.contains(&In))
12376 continue;
12377
12378 // Check if we can replace this instruction with any of the
12379 // visited instructions.
12380 bool Replaced = false; 12381 for (Instruction *&V : Visited) { 12382 SmallVector<int> NewMask; 12383 if (IsIdenticalOrLessDefined(&In, V, NewMask) && 12384 DT->dominates(V->getParent(), In.getParent())) { 12385 In.replaceAllUsesWith(V); 12386 eraseInstruction(&In); 12387 if (auto *SI = dyn_cast<ShuffleVectorInst>(V)) 12388 if (!NewMask.empty()) 12389 SI->setShuffleMask(NewMask); 12390 Replaced = true; 12391 break; 12392 } 12393 if (isa<ShuffleVectorInst>(In) && isa<ShuffleVectorInst>(V) && 12394 GatherShuffleExtractSeq.contains(V) && 12395 IsIdenticalOrLessDefined(V, &In, NewMask) && 12396 DT->dominates(In.getParent(), V->getParent())) { 12397 In.moveAfter(V); 12398 V->replaceAllUsesWith(&In); 12399 eraseInstruction(V); 12400 if (auto *SI = dyn_cast<ShuffleVectorInst>(&In)) 12401 if (!NewMask.empty()) 12402 SI->setShuffleMask(NewMask); 12403 V = &In; 12404 Replaced = true; 12405 break; 12406 } 12407 } 12408 if (!Replaced) { 12409 assert(!is_contained(Visited, &In)); 12410 Visited.push_back(&In); 12411 } 12412 } 12413 } 12414 CSEBlocks.clear(); 12415 GatherShuffleExtractSeq.clear(); 12416 } 12417 12418 BoUpSLP::ScheduleData * 12419 BoUpSLP::BlockScheduling::buildBundle(ArrayRef<Value *> VL) { 12420 ScheduleData *Bundle = nullptr; 12421 ScheduleData *PrevInBundle = nullptr; 12422 for (Value *V : VL) { 12423 if (doesNotNeedToBeScheduled(V)) 12424 continue; 12425 ScheduleData *BundleMember = getScheduleData(V); 12426 assert(BundleMember && 12427 "no ScheduleData for bundle member " 12428 "(maybe not in same basic block)"); 12429 assert(BundleMember->isSchedulingEntity() && 12430 "bundle member already part of other bundle"); 12431 if (PrevInBundle) { 12432 PrevInBundle->NextInBundle = BundleMember; 12433 } else { 12434 Bundle = BundleMember; 12435 } 12436 12437 // Group the instructions to a bundle. 12438 BundleMember->FirstInBundle = Bundle; 12439 PrevInBundle = BundleMember; 12440 } 12441 assert(Bundle && "Failed to find schedule bundle"); 12442 return Bundle; 12443 } 12444 12445 // Groups the instructions to a bundle (which is then a single scheduling entity) 12446 // and schedules instructions until the bundle gets ready. 12447 std::optional<BoUpSLP::ScheduleData *> 12448 BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 12449 const InstructionsState &S) { 12450 // No need to schedule PHIs, insertelement, extractelement and extractvalue 12451 // instructions. 12452 if (isa<PHINode>(S.OpValue) || isVectorLikeInstWithConstOps(S.OpValue) || 12453 doesNotNeedToSchedule(VL)) 12454 return nullptr; 12455 12456 // Initialize the instruction bundle. 12457 Instruction *OldScheduleEnd = ScheduleEnd; 12458 LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n"); 12459 12460 auto TryScheduleBundleImpl = [this, OldScheduleEnd, SLP](bool ReSchedule, 12461 ScheduleData *Bundle) { 12462 // The scheduling region got new instructions at the lower end (or it is a 12463 // new region for the first bundle). This makes it necessary to 12464 // recalculate all dependencies. 12465 // It is seldom that this needs to be done a second time after adding the 12466 // initial bundle to the region. 
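// (Clearing the dependencies forces calculateDependencies() below to
// recompute them against the enlarged region; dependency counts cached for
// the old, smaller region could otherwise be reused incorrectly.)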
12467 if (ScheduleEnd != OldScheduleEnd) {
12468 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode())
12469 doForAllOpcodes(I, [](ScheduleData *SD) { SD->clearDependencies(); });
12470 ReSchedule = true;
12471 }
12472 if (Bundle) {
12473 LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle
12474 << " in block " << BB->getName() << "\n");
12475 calculateDependencies(Bundle, /*InsertInReadyList=*/true, SLP);
12476 }
12477
12478 if (ReSchedule) {
12479 resetSchedule();
12480 initialFillReadyList(ReadyInsts);
12481 }
12482
12483 // Now try to schedule the new bundle or (if no bundle) just calculate
12484 // dependencies. Once the bundle is "ready", there are no cyclic
12485 // dependencies and we can schedule it. Note that it's important that we
12486 // don't "schedule" the bundle yet (see cancelScheduling).
12487 while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) &&
12488 !ReadyInsts.empty()) {
12489 ScheduleData *Picked = ReadyInsts.pop_back_val();
12490 assert(Picked->isSchedulingEntity() && Picked->isReady() &&
12491 "must be ready to schedule");
12492 schedule(Picked, ReadyInsts);
12493 }
12494 };
12495
12496 // Make sure that the scheduling region contains all
12497 // instructions of the bundle.
12498 for (Value *V : VL) {
12499 if (doesNotNeedToBeScheduled(V))
12500 continue;
12501 if (!extendSchedulingRegion(V, S)) {
12502 // The scheduling region may have gotten new instructions at the lower
12503 // end (or it may be a new region for the first bundle), which makes it
12504 // necessary to recalculate all dependencies.
12505 // Otherwise the compiler may crash trying to calculate dependencies
12506 // incorrectly and emit instructions in the wrong order at the actual
12507 // scheduling.
12508 TryScheduleBundleImpl(/*ReSchedule=*/false, nullptr);
12509 return std::nullopt;
12510 }
12511 }
12512
12513 bool ReSchedule = false;
12514 for (Value *V : VL) {
12515 if (doesNotNeedToBeScheduled(V))
12516 continue;
12517 ScheduleData *BundleMember = getScheduleData(V);
12518 assert(BundleMember &&
12519 "no ScheduleData for bundle member (maybe not in same basic block)");
12520
12521 // Make sure we don't leave the pieces of the bundle in the ready list when
12522 // the whole bundle might not be ready.
12523 ReadyInsts.remove(BundleMember);
12524
12525 if (!BundleMember->IsScheduled)
12526 continue;
12527 // A bundle member was scheduled as a single instruction before and now
12528 // needs to be scheduled as part of the bundle. We just get rid of the
12529 // existing schedule.
12530 LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember 12531 << " was already scheduled\n"); 12532 ReSchedule = true; 12533 } 12534 12535 auto *Bundle = buildBundle(VL); 12536 TryScheduleBundleImpl(ReSchedule, Bundle); 12537 if (!Bundle->isReady()) { 12538 cancelScheduling(VL, S.OpValue); 12539 return std::nullopt; 12540 } 12541 return Bundle; 12542 } 12543 12544 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL, 12545 Value *OpValue) { 12546 if (isa<PHINode>(OpValue) || isVectorLikeInstWithConstOps(OpValue) || 12547 doesNotNeedToSchedule(VL)) 12548 return; 12549 12550 if (doesNotNeedToBeScheduled(OpValue)) 12551 OpValue = *find_if_not(VL, doesNotNeedToBeScheduled); 12552 ScheduleData *Bundle = getScheduleData(OpValue); 12553 LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n"); 12554 assert(!Bundle->IsScheduled && 12555 "Can't cancel bundle which is already scheduled"); 12556 assert(Bundle->isSchedulingEntity() && 12557 (Bundle->isPartOfBundle() || needToScheduleSingleInstruction(VL)) && 12558 "tried to unbundle something which is not a bundle"); 12559 12560 // Remove the bundle from the ready list. 12561 if (Bundle->isReady()) 12562 ReadyInsts.remove(Bundle); 12563 12564 // Un-bundle: make single instructions out of the bundle. 12565 ScheduleData *BundleMember = Bundle; 12566 while (BundleMember) { 12567 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links"); 12568 BundleMember->FirstInBundle = BundleMember; 12569 ScheduleData *Next = BundleMember->NextInBundle; 12570 BundleMember->NextInBundle = nullptr; 12571 BundleMember->TE = nullptr; 12572 if (BundleMember->unscheduledDepsInBundle() == 0) { 12573 ReadyInsts.insert(BundleMember); 12574 } 12575 BundleMember = Next; 12576 } 12577 } 12578 12579 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() { 12580 // Allocate a new ScheduleData for the instruction. 12581 if (ChunkPos >= ChunkSize) { 12582 ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize)); 12583 ChunkPos = 0; 12584 } 12585 return &(ScheduleDataChunks.back()[ChunkPos++]); 12586 } 12587 12588 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V, 12589 const InstructionsState &S) { 12590 if (getScheduleData(V, isOneOf(S, V))) 12591 return true; 12592 Instruction *I = dyn_cast<Instruction>(V); 12593 assert(I && "bundle member must be an instruction"); 12594 assert(!isa<PHINode>(I) && !isVectorLikeInstWithConstOps(I) && 12595 !doesNotNeedToBeScheduled(I) && 12596 "phi nodes/insertelements/extractelements/extractvalues don't need to " 12597 "be scheduled"); 12598 auto &&CheckScheduleForI = [this, &S](Instruction *I) -> bool { 12599 ScheduleData *ISD = getScheduleData(I); 12600 if (!ISD) 12601 return false; 12602 assert(isInSchedulingRegion(ISD) && 12603 "ScheduleData not in scheduling region"); 12604 ScheduleData *SD = allocateScheduleDataChunks(); 12605 SD->Inst = I; 12606 SD->init(SchedulingRegionID, S.OpValue); 12607 ExtraScheduleDataMap[I][S.OpValue] = SD; 12608 return true; 12609 }; 12610 if (CheckScheduleForI(I)) 12611 return true; 12612 if (!ScheduleStart) { 12613 // It's the first instruction in the new region. 
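// The region starts out as the half-open range [I, I->getNextNode()) and
// is grown on demand by the search further down.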
12614 initScheduleData(I, I->getNextNode(), nullptr, nullptr); 12615 ScheduleStart = I; 12616 ScheduleEnd = I->getNextNode(); 12617 if (isOneOf(S, I) != I) 12618 CheckScheduleForI(I); 12619 assert(ScheduleEnd && "tried to vectorize a terminator?"); 12620 LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); 12621 return true; 12622 } 12623 // Search up and down at the same time, because we don't know if the new 12624 // instruction is above or below the existing scheduling region. 12625 // Ignore debug info (and other "AssumeLike" intrinsics) so that's not counted 12626 // against the budget. Otherwise debug info could affect codegen. 12627 BasicBlock::reverse_iterator UpIter = 12628 ++ScheduleStart->getIterator().getReverse(); 12629 BasicBlock::reverse_iterator UpperEnd = BB->rend(); 12630 BasicBlock::iterator DownIter = ScheduleEnd->getIterator(); 12631 BasicBlock::iterator LowerEnd = BB->end(); 12632 auto IsAssumeLikeIntr = [](const Instruction &I) { 12633 if (auto *II = dyn_cast<IntrinsicInst>(&I)) 12634 return II->isAssumeLikeIntrinsic(); 12635 return false; 12636 }; 12637 UpIter = std::find_if_not(UpIter, UpperEnd, IsAssumeLikeIntr); 12638 DownIter = std::find_if_not(DownIter, LowerEnd, IsAssumeLikeIntr); 12639 while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I && 12640 &*DownIter != I) { 12641 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) { 12642 LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); 12643 return false; 12644 } 12645 12646 ++UpIter; 12647 ++DownIter; 12648 12649 UpIter = std::find_if_not(UpIter, UpperEnd, IsAssumeLikeIntr); 12650 DownIter = std::find_if_not(DownIter, LowerEnd, IsAssumeLikeIntr); 12651 } 12652 if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) { 12653 assert(I->getParent() == ScheduleStart->getParent() && 12654 "Instruction is in wrong basic block."); 12655 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); 12656 ScheduleStart = I; 12657 if (isOneOf(S, I) != I) 12658 CheckScheduleForI(I); 12659 LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I 12660 << "\n"); 12661 return true; 12662 } 12663 assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) && 12664 "Expected to reach top of the basic block or instruction down the " 12665 "lower end."); 12666 assert(I->getParent() == ScheduleEnd->getParent() && 12667 "Instruction is in wrong basic block."); 12668 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, 12669 nullptr); 12670 ScheduleEnd = I->getNextNode(); 12671 if (isOneOf(S, I) != I) 12672 CheckScheduleForI(I); 12673 assert(ScheduleEnd && "tried to vectorize a terminator?"); 12674 LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n"); 12675 return true; 12676 } 12677 12678 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, 12679 Instruction *ToI, 12680 ScheduleData *PrevLoadStore, 12681 ScheduleData *NextLoadStore) { 12682 ScheduleData *CurrentLoadStore = PrevLoadStore; 12683 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { 12684 // No need to allocate data for non-schedulable instructions. 
12685 if (doesNotNeedToBeScheduled(I))
12686 continue;
12687 ScheduleData *SD = ScheduleDataMap.lookup(I);
12688 if (!SD) {
12689 SD = allocateScheduleDataChunks();
12690 ScheduleDataMap[I] = SD;
12691 SD->Inst = I;
12692 }
12693 assert(!isInSchedulingRegion(SD) &&
12694 "new ScheduleData already in scheduling region");
12695 SD->init(SchedulingRegionID, I);
12696
12697 if (I->mayReadOrWriteMemory() &&
12698 (!isa<IntrinsicInst>(I) ||
12699 (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect &&
12700 cast<IntrinsicInst>(I)->getIntrinsicID() !=
12701 Intrinsic::pseudoprobe))) {
12702 // Update the linked list of memory accessing instructions.
12703 if (CurrentLoadStore) {
12704 CurrentLoadStore->NextLoadStore = SD;
12705 } else {
12706 FirstLoadStoreInRegion = SD;
12707 }
12708 CurrentLoadStore = SD;
12709 }
12710
12711 if (match(I, m_Intrinsic<Intrinsic::stacksave>()) ||
12712 match(I, m_Intrinsic<Intrinsic::stackrestore>()))
12713 RegionHasStackSave = true;
12714 }
12715 if (NextLoadStore) {
12716 if (CurrentLoadStore)
12717 CurrentLoadStore->NextLoadStore = NextLoadStore;
12718 } else {
12719 LastLoadStoreInRegion = CurrentLoadStore;
12720 }
12721 }
12722
12723 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
12724 bool InsertInReadyList,
12725 BoUpSLP *SLP) {
12726 assert(SD->isSchedulingEntity());
12727
12728 SmallVector<ScheduleData *, 10> WorkList;
12729 WorkList.push_back(SD);
12730
12731 while (!WorkList.empty()) {
12732 ScheduleData *SD = WorkList.pop_back_val();
12733 for (ScheduleData *BundleMember = SD; BundleMember;
12734 BundleMember = BundleMember->NextInBundle) {
12735 assert(isInSchedulingRegion(BundleMember));
12736 if (BundleMember->hasValidDependencies())
12737 continue;
12738
12739 LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember
12740 << "\n");
12741 BundleMember->Dependencies = 0;
12742 BundleMember->resetUnscheduledDeps();
12743
12744 // Handle def-use chain dependencies.
12745 if (BundleMember->OpValue != BundleMember->Inst) {
12746 if (ScheduleData *UseSD = getScheduleData(BundleMember->Inst)) {
12747 BundleMember->Dependencies++;
12748 ScheduleData *DestBundle = UseSD->FirstInBundle;
12749 if (!DestBundle->IsScheduled)
12750 BundleMember->incrementUnscheduledDeps(1);
12751 if (!DestBundle->hasValidDependencies())
12752 WorkList.push_back(DestBundle);
12753 }
12754 } else {
12755 for (User *U : BundleMember->Inst->users()) {
12756 if (ScheduleData *UseSD = getScheduleData(cast<Instruction>(U))) {
12757 BundleMember->Dependencies++;
12758 ScheduleData *DestBundle = UseSD->FirstInBundle;
12759 if (!DestBundle->IsScheduled)
12760 BundleMember->incrementUnscheduledDeps(1);
12761 if (!DestBundle->hasValidDependencies())
12762 WorkList.push_back(DestBundle);
12763 }
12764 }
12765 }
12766
12767 auto MakeControlDependent = [&](Instruction *I) {
12768 auto *DepDest = getScheduleData(I);
12769 assert(DepDest && "must be in schedule window");
12770 DepDest->ControlDependencies.push_back(BundleMember);
12771 BundleMember->Dependencies++;
12772 ScheduleData *DestBundle = DepDest->FirstInBundle;
12773 if (!DestBundle->IsScheduled)
12774 BundleMember->incrementUnscheduledDeps(1);
12775 if (!DestBundle->hasValidDependencies())
12776 WorkList.push_back(DestBundle);
12777 };
12778
12779 // Any instruction which isn't safe to speculate at the beginning of the
12780 // block is control dependent on any early exit or non-willreturn call
12781 // which precedes it.
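// E.g. (illustrative) a load that follows a call which may throw or never
// return must not be scheduled above that call; the control dependencies
// added below prevent such reordering.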
12782 if (!isGuaranteedToTransferExecutionToSuccessor(BundleMember->Inst)) {
12783 for (Instruction *I = BundleMember->Inst->getNextNode();
12784 I != ScheduleEnd; I = I->getNextNode()) {
12785 if (isSafeToSpeculativelyExecute(I, &*BB->begin(), SLP->AC))
12786 continue;
12787
12788 // Add the dependency
12789 MakeControlDependent(I);
12790
12791 if (!isGuaranteedToTransferExecutionToSuccessor(I))
12792 // Everything past here must be control dependent on I.
12793 break;
12794 }
12795 }
12796
12797 if (RegionHasStackSave) {
12798 // If we have an inalloca alloca instruction, it needs to be scheduled
12799 // after any preceding stacksave. We also need to prevent any alloca
12800 // from reordering above a preceding stackrestore.
12801 if (match(BundleMember->Inst, m_Intrinsic<Intrinsic::stacksave>()) ||
12802 match(BundleMember->Inst, m_Intrinsic<Intrinsic::stackrestore>())) {
12803 for (Instruction *I = BundleMember->Inst->getNextNode();
12804 I != ScheduleEnd; I = I->getNextNode()) {
12805 if (match(I, m_Intrinsic<Intrinsic::stacksave>()) ||
12806 match(I, m_Intrinsic<Intrinsic::stackrestore>()))
12807 // Any allocas past here must be control dependent on I, and I
12808 // must be memory dependent on BundleMember->Inst.
12809 break;
12810
12811 if (!isa<AllocaInst>(I))
12812 continue;
12813
12814 // Add the dependency
12815 MakeControlDependent(I);
12816 }
12817 }
12818
12819 // In addition to the cases handled just above, we need to prevent
12820 // allocas and loads/stores from moving below a stacksave or a
12821 // stackrestore. Avoiding moving allocas below a stackrestore is currently
12822 // thought to be conservative. Moving loads/stores below a stackrestore
12823 // can lead to incorrect code.
12824 if (isa<AllocaInst>(BundleMember->Inst) ||
12825 BundleMember->Inst->mayReadOrWriteMemory()) {
12826 for (Instruction *I = BundleMember->Inst->getNextNode();
12827 I != ScheduleEnd; I = I->getNextNode()) {
12828 if (!match(I, m_Intrinsic<Intrinsic::stacksave>()) &&
12829 !match(I, m_Intrinsic<Intrinsic::stackrestore>()))
12830 continue;
12831
12832 // Add the dependency
12833 MakeControlDependent(I);
12834 break;
12835 }
12836 }
12837 }
12838
12839 // Handle the memory dependencies (if any).
12840 ScheduleData *DepDest = BundleMember->NextLoadStore;
12841 if (!DepDest)
12842 continue;
12843 Instruction *SrcInst = BundleMember->Inst;
12844 assert(SrcInst->mayReadOrWriteMemory() &&
12845 "NextLoadStore list for a non-memory-affecting bundle?");
12846 MemoryLocation SrcLoc = getLocation(SrcInst);
12847 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
12848 unsigned NumAliased = 0;
12849 unsigned DistToSrc = 1;
12850
12851 for (; DepDest; DepDest = DepDest->NextLoadStore) {
12852 assert(isInSchedulingRegion(DepDest));
12853
12854 // We have two limits to reduce the complexity:
12855 // 1) AliasedCheckLimit: It's a small limit to reduce calls to
12856 // SLP->isAliased (which is the expensive part in this loop).
12857 // 2) MaxMemDepDistance: It's for very large blocks and it aborts
12858 // the whole loop (even if the loop is fast, it's quadratic).
12859 // It's important for the loop break condition (see below) to
12860 // check this limit even between two read-only instructions.
12861 if (DistToSrc >= MaxMemDepDistance || 12862 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) && 12863 (NumAliased >= AliasedCheckLimit || 12864 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) { 12865 12866 // We increment the counter only if the locations are aliased 12867 // (instead of counting all alias checks). This gives a better 12868 // balance between reduced runtime and accurate dependencies. 12869 NumAliased++; 12870 12871 DepDest->MemoryDependencies.push_back(BundleMember); 12872 BundleMember->Dependencies++; 12873 ScheduleData *DestBundle = DepDest->FirstInBundle; 12874 if (!DestBundle->IsScheduled) { 12875 BundleMember->incrementUnscheduledDeps(1); 12876 } 12877 if (!DestBundle->hasValidDependencies()) { 12878 WorkList.push_back(DestBundle); 12879 } 12880 } 12881 12882 // Example, explaining the loop break condition: Let's assume our 12883 // starting instruction is i0 and MaxMemDepDistance = 3. 12884 // 12885 // +--------v--v--v 12886 // i0,i1,i2,i3,i4,i5,i6,i7,i8 12887 // +--------^--^--^ 12888 // 12889 // MaxMemDepDistance let us stop alias-checking at i3 and we add 12890 // dependencies from i0 to i3,i4,.. (even if they are not aliased). 12891 // Previously we already added dependencies from i3 to i6,i7,i8 12892 // (because of MaxMemDepDistance). As we added a dependency from 12893 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8 12894 // and we can abort this loop at i6. 12895 if (DistToSrc >= 2 * MaxMemDepDistance) 12896 break; 12897 DistToSrc++; 12898 } 12899 } 12900 if (InsertInReadyList && SD->isReady()) { 12901 ReadyInsts.insert(SD); 12902 LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst 12903 << "\n"); 12904 } 12905 } 12906 } 12907 12908 void BoUpSLP::BlockScheduling::resetSchedule() { 12909 assert(ScheduleStart && 12910 "tried to reset schedule on block which has not been scheduled"); 12911 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 12912 doForAllOpcodes(I, [&](ScheduleData *SD) { 12913 assert(isInSchedulingRegion(SD) && 12914 "ScheduleData not in scheduling region"); 12915 SD->IsScheduled = false; 12916 SD->resetUnscheduledDeps(); 12917 }); 12918 } 12919 ReadyInsts.clear(); 12920 } 12921 12922 void BoUpSLP::scheduleBlock(BlockScheduling *BS) { 12923 if (!BS->ScheduleStart) 12924 return; 12925 12926 LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n"); 12927 12928 // A key point - if we got here, pre-scheduling was able to find a valid 12929 // scheduling of the sub-graph of the scheduling window which consists 12930 // of all vector bundles and their transitive users. As such, we do not 12931 // need to reschedule anything *outside of* that subgraph. 12932 12933 BS->resetSchedule(); 12934 12935 // For the real scheduling we use a more sophisticated ready-list: it is 12936 // sorted by the original instruction location. This lets the final schedule 12937 // be as close as possible to the original instruction order. 12938 // WARNING: If changing this order causes a correctness issue, that means 12939 // there is some missing dependence edge in the schedule data graph. 12940 struct ScheduleDataCompare { 12941 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const { 12942 return SD2->SchedulingPriority < SD1->SchedulingPriority; 12943 } 12944 }; 12945 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts; 12946 12947 // Ensure that all dependency data is updated (for nodes in the sub-graph) 12948 // and fill the ready-list with initial instructions. 
12949 int Idx = 0; 12950 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; 12951 I = I->getNextNode()) { 12952 BS->doForAllOpcodes(I, [this, &Idx, BS](ScheduleData *SD) { 12953 TreeEntry *SDTE = getTreeEntry(SD->Inst); 12954 (void)SDTE; 12955 assert((isVectorLikeInstWithConstOps(SD->Inst) || 12956 SD->isPartOfBundle() == 12957 (SDTE && !doesNotNeedToSchedule(SDTE->Scalars))) && 12958 "scheduler and vectorizer bundle mismatch"); 12959 SD->FirstInBundle->SchedulingPriority = Idx++; 12960 12961 if (SD->isSchedulingEntity() && SD->isPartOfBundle()) 12962 BS->calculateDependencies(SD, false, this); 12963 }); 12964 } 12965 BS->initialFillReadyList(ReadyInsts); 12966 12967 Instruction *LastScheduledInst = BS->ScheduleEnd; 12968 12969 // Do the "real" scheduling. 12970 while (!ReadyInsts.empty()) { 12971 ScheduleData *Picked = *ReadyInsts.begin(); 12972 ReadyInsts.erase(ReadyInsts.begin()); 12973 12974 // Move the scheduled instruction(s) to their dedicated places, if not 12975 // there yet. 12976 for (ScheduleData *BundleMember = Picked; BundleMember; 12977 BundleMember = BundleMember->NextInBundle) { 12978 Instruction *PickedInst = BundleMember->Inst; 12979 if (PickedInst->getNextNode() != LastScheduledInst) 12980 PickedInst->moveBefore(LastScheduledInst); 12981 LastScheduledInst = PickedInst; 12982 } 12983 12984 BS->schedule(Picked, ReadyInsts); 12985 } 12986 12987 // Check that we didn't break any of our invariants. 12988 #ifdef EXPENSIVE_CHECKS 12989 BS->verify(); 12990 #endif 12991 12992 #if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS) 12993 // Check that all schedulable entities got scheduled 12994 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; I = I->getNextNode()) { 12995 BS->doForAllOpcodes(I, [&](ScheduleData *SD) { 12996 if (SD->isSchedulingEntity() && SD->hasValidDependencies()) { 12997 assert(SD->IsScheduled && "must be scheduled at this point"); 12998 } 12999 }); 13000 } 13001 #endif 13002 13003 // Avoid duplicate scheduling of the block. 13004 BS->ScheduleStart = nullptr; 13005 } 13006 13007 unsigned BoUpSLP::getVectorElementSize(Value *V) { 13008 // If V is a store, just return the width of the stored value (or value 13009 // truncated just before storing) without traversing the expression tree. 13010 // This is the common case. 13011 if (auto *Store = dyn_cast<StoreInst>(V)) 13012 return DL->getTypeSizeInBits(Store->getValueOperand()->getType()); 13013 13014 if (auto *IEI = dyn_cast<InsertElementInst>(V)) 13015 return getVectorElementSize(IEI->getOperand(1)); 13016 13017 auto E = InstrElementSize.find(V); 13018 if (E != InstrElementSize.end()) 13019 return E->second; 13020 13021 // If V is not a store, we can traverse the expression tree to find loads 13022 // that feed it. The type of the loaded value may indicate a more suitable 13023 // width than V's type. We want to base the vector element size on the width 13024 // of memory operations where possible. 13025 SmallVector<std::pair<Instruction *, BasicBlock *>, 16> Worklist; 13026 SmallPtrSet<Instruction *, 16> Visited; 13027 if (auto *I = dyn_cast<Instruction>(V)) { 13028 Worklist.emplace_back(I, I->getParent()); 13029 Visited.insert(I); 13030 } 13031 13032 // Traverse the expression tree in bottom-up order looking for loads. If we 13033 // encounter an instruction we don't yet handle, we give up. 
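// E.g. (illustrative) for
//   %l = load i16, ptr %p
//   %e = zext i16 %l to i64
//   %a = add i64 %e, 1
// the walk from %a reaches the i16 load, so a 16-bit element size is
// preferred over the i64 type of %a.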
13034 auto Width = 0u; 13035 while (!Worklist.empty()) { 13036 Instruction *I; 13037 BasicBlock *Parent; 13038 std::tie(I, Parent) = Worklist.pop_back_val(); 13039 13040 // We should only be looking at scalar instructions here. If the current 13041 // instruction has a vector type, skip. 13042 auto *Ty = I->getType(); 13043 if (isa<VectorType>(Ty)) 13044 continue; 13045 13046 // If the current instruction is a load, update MaxWidth to reflect the 13047 // width of the loaded value. 13048 if (isa<LoadInst, ExtractElementInst, ExtractValueInst>(I)) 13049 Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty)); 13050 13051 // Otherwise, we need to visit the operands of the instruction. We only 13052 // handle the interesting cases from buildTree here. If an operand is an 13053 // instruction we haven't yet visited and from the same basic block as the 13054 // user or the use is a PHI node, we add it to the worklist. 13055 else if (isa<PHINode, CastInst, GetElementPtrInst, CmpInst, SelectInst, 13056 BinaryOperator, UnaryOperator>(I)) { 13057 for (Use &U : I->operands()) 13058 if (auto *J = dyn_cast<Instruction>(U.get())) 13059 if (Visited.insert(J).second && 13060 (isa<PHINode>(I) || J->getParent() == Parent)) 13061 Worklist.emplace_back(J, J->getParent()); 13062 } else { 13063 break; 13064 } 13065 } 13066 13067 // If we didn't encounter a memory access in the expression tree, or if we 13068 // gave up for some reason, just return the width of V. Otherwise, return the 13069 // maximum width we found. 13070 if (!Width) { 13071 if (auto *CI = dyn_cast<CmpInst>(V)) 13072 V = CI->getOperand(0); 13073 Width = DL->getTypeSizeInBits(V->getType()); 13074 } 13075 13076 for (Instruction *I : Visited) 13077 InstrElementSize[I] = Width; 13078 13079 return Width; 13080 } 13081 13082 // Determine if a value V in a vectorizable expression Expr can be demoted to a 13083 // smaller type with a truncation. We collect the values that will be demoted 13084 // in ToDemote and additional roots that require investigating in Roots. 13085 bool BoUpSLP::collectValuesToDemote( 13086 Value *V, SmallVectorImpl<Value *> &ToDemote, 13087 DenseMap<Instruction *, SmallVector<unsigned>> &DemotedConsts, 13088 SmallVectorImpl<Value *> &Roots, DenseSet<Value *> &Visited) const { 13089 // We can always demote constants. 13090 if (isa<Constant>(V)) 13091 return true; 13092 13093 // If the value is not a vectorized instruction in the expression with only 13094 // one use, it cannot be demoted. 13095 auto *I = dyn_cast<Instruction>(V); 13096 if (!I || !I->hasOneUse() || !getTreeEntry(I) || !Visited.insert(I).second) 13097 return false; 13098 13099 unsigned Start = 0; 13100 unsigned End = I->getNumOperands(); 13101 switch (I->getOpcode()) { 13102 13103 // We can always demote truncations and extensions. Since truncations can 13104 // seed additional demotion, we save the truncated value. 13105 case Instruction::Trunc: 13106 Roots.push_back(I->getOperand(0)); 13107 break; 13108 case Instruction::ZExt: 13109 case Instruction::SExt: 13110 if (isa<ExtractElementInst, InsertElementInst>(I->getOperand(0))) 13111 return false; 13112 break; 13113 13114 // We can demote certain binary operations if we can demote both of their 13115 // operands. 
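// E.g. (illustrative) in (add (zext i8 %a to i32), (zext i8 %b to i32))
// both operands are demotable extensions, so the add itself can be
// performed in a narrower type and the result extended afterwards.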
13116 case Instruction::Add: 13117 case Instruction::Sub: 13118 case Instruction::Mul: 13119 case Instruction::And: 13120 case Instruction::Or: 13121 case Instruction::Xor: 13122 if (!collectValuesToDemote(I->getOperand(0), ToDemote, DemotedConsts, Roots, 13123 Visited) || 13124 !collectValuesToDemote(I->getOperand(1), ToDemote, DemotedConsts, Roots, 13125 Visited)) 13126 return false; 13127 break; 13128 13129 // We can demote selects if we can demote their true and false values. 13130 case Instruction::Select: { 13131 Start = 1; 13132 SelectInst *SI = cast<SelectInst>(I); 13133 if (!collectValuesToDemote(SI->getTrueValue(), ToDemote, DemotedConsts, 13134 Roots, Visited) || 13135 !collectValuesToDemote(SI->getFalseValue(), ToDemote, DemotedConsts, 13136 Roots, Visited)) 13137 return false; 13138 break; 13139 } 13140 13141 // We can demote phis if we can demote all their incoming operands. Note that 13142 // we don't need to worry about cycles since we ensure single use above. 13143 case Instruction::PHI: { 13144 PHINode *PN = cast<PHINode>(I); 13145 for (Value *IncValue : PN->incoming_values()) 13146 if (!collectValuesToDemote(IncValue, ToDemote, DemotedConsts, Roots, 13147 Visited)) 13148 return false; 13149 break; 13150 } 13151 13152 // Otherwise, conservatively give up. 13153 default: 13154 return false; 13155 } 13156 13157 // Gather demoted constant operands. 13158 for (unsigned Idx : seq<unsigned>(Start, End)) 13159 if (isa<Constant>(I->getOperand(Idx))) 13160 DemotedConsts.try_emplace(I).first->getSecond().push_back(Idx); 13161 // Record the value that we can demote. 13162 ToDemote.push_back(V); 13163 return true; 13164 } 13165 13166 void BoUpSLP::computeMinimumValueSizes() { 13167 // If there are no external uses, the expression tree must be rooted by a 13168 // store. We can't demote in-memory values, so there is nothing to do here. 13169 if (ExternalUses.empty()) 13170 return; 13171 13172 // We only attempt to truncate integer expressions. 13173 auto &TreeRoot = VectorizableTree[0]->Scalars; 13174 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType()); 13175 if (!TreeRootIT) 13176 return; 13177 13178 // Ensure the roots of the vectorizable tree don't form a cycle. 13179 if (!VectorizableTree.front()->UserTreeIndices.empty()) 13180 return; 13181 13182 // Conservatively determine if we can actually truncate the roots of the 13183 // expression. Collect the values that can be demoted in ToDemote and 13184 // additional roots that require investigating in Roots. 13185 SmallVector<Value *, 32> ToDemote; 13186 DenseMap<Instruction *, SmallVector<unsigned>> DemotedConsts; 13187 SmallVector<Value *, 4> Roots; 13188 for (auto *Root : TreeRoot) { 13189 DenseSet<Value *> Visited; 13190 if (!collectValuesToDemote(Root, ToDemote, DemotedConsts, Roots, Visited)) 13191 return; 13192 } 13193 13194 // The maximum bit width required to represent all the values that can be 13195 // demoted without loss of precision. It would be safe to truncate the roots 13196 // of the expression to this width. 13197 auto MaxBitWidth = 1u; 13198 13199 // We first check if all the bits of the roots are demanded. If they're not, 13200 // we can truncate the roots to this narrower type. 13201 for (auto *Root : TreeRoot) { 13202 auto Mask = DB->getDemandedBits(cast<Instruction>(Root)); 13203 MaxBitWidth = std::max<unsigned>(Mask.getBitWidth() - Mask.countl_zero(), 13204 MaxBitWidth); 13205 } 13206 13207 // True if the roots can be zero-extended back to their original type, rather 13208 // than sign-extended. 
We know that if the leading bits are not demanded, we 13209 // can safely zero-extend. So we initialize IsKnownPositive to True. 13210 bool IsKnownPositive = true; 13211 13212 // If all the bits of the roots are demanded, we can try a little harder to 13213 // compute a narrower type. This can happen, for example, if the roots are 13214 // getelementptr indices. InstCombine promotes these indices to the pointer 13215 // width. Thus, all their bits are technically demanded even though the 13216 // address computation might be vectorized in a smaller type. 13217 // 13218 // We start by looking at each entry that can be demoted. We compute the 13219 // maximum bit width required to store the scalar by using ValueTracking to 13220 // compute the number of high-order bits we can truncate. 13221 if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) && 13222 all_of(TreeRoot, [](Value *V) { 13223 return all_of(V->users(), 13224 [](User *U) { return isa<GetElementPtrInst>(U); }); 13225 })) { 13226 MaxBitWidth = 8u; 13227 13228 // Determine if the sign bit of all the roots is known to be zero. If not, 13229 // IsKnownPositive is set to False. 13230 IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) { 13231 KnownBits Known = computeKnownBits(R, *DL); 13232 return Known.isNonNegative(); 13233 }); 13234 13235 // Determine the maximum number of bits required to store the scalar 13236 // values. 13237 for (auto *Scalar : ToDemote) { 13238 auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT); 13239 auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType()); 13240 MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth); 13241 } 13242 13243 // If we can't prove that the sign bit is zero, we must add one to the 13244 // maximum bit width to account for the unknown sign bit. This preserves 13245 // the existing sign bit so we can safely sign-extend the root back to the 13246 // original type. Otherwise, if we know the sign bit is zero, we will 13247 // zero-extend the root instead. 13248 // 13249 // FIXME: This is somewhat suboptimal, as there will be cases where adding 13250 // one to the maximum bit width will yield a larger-than-necessary 13251 // type. In general, we need to add an extra bit only if we can't 13252 // prove that the upper bit of the original type is equal to the 13253 // upper bit of the proposed smaller type. If these two bits are the 13254 // same (either zero or one) we know that sign-extending from the 13255 // smaller type will result in the same value. Here, since we can't 13256 // yet prove this, we are just making the proposed smaller type 13257 // larger to ensure correctness. 13258 if (!IsKnownPositive) 13259 ++MaxBitWidth; 13260 } 13261 13262 // Round MaxBitWidth up to the next power-of-two. 13263 MaxBitWidth = llvm::bit_ceil(MaxBitWidth); 13264 13265 // If the maximum bit width we compute is less than the width of the roots' 13266 // type, we can proceed with the narrowing. Otherwise, do nothing. 13267 if (MaxBitWidth >= TreeRootIT->getBitWidth()) 13268 return; 13269 13270 // If we can truncate the root, we must collect additional values that might 13271 // be demoted as a result. That is, those seeded by truncations we will 13272 // modify. 13273 while (!Roots.empty()) { 13274 DenseSet<Value *> Visited; 13275 collectValuesToDemote(Roots.pop_back_val(), ToDemote, DemotedConsts, Roots, 13276 Visited); 13277 } 13278 13279 // Finally, map the values we can demote to the maximum bit width we computed.
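// Illustrative worked example (hypothetical numbers, not part of the
// algorithm above): assume the tree roots are i32 values that feed only
// getelementptr indices and ValueTracking proves 25 sign bits for every
// demotable scalar. Then MaxBitWidth = max(8, 32 - 25) = 8; if the sign bit
// cannot be proven zero it is bumped to 9, and bit_ceil(9) = 16 < 32, so the
// roots can be narrowed to i16. The loop below records that width (plus the
// signedness) in MinBWs for each demoted tree entry.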
13280 for (auto *Scalar : ToDemote) { 13281 auto *TE = getTreeEntry(Scalar); 13282 assert(TE && "Expected vectorized scalar."); 13283 if (MinBWs.contains(TE)) 13284 continue; 13285 bool IsSigned = any_of(TE->Scalars, [&](Value *R) { 13286 KnownBits Known = computeKnownBits(R, *DL); 13287 return !Known.isNonNegative(); 13288 }); 13289 MinBWs.try_emplace(TE, MaxBitWidth, IsSigned); 13290 const auto *I = cast<Instruction>(Scalar); 13291 auto DCIt = DemotedConsts.find(I); 13292 if (DCIt != DemotedConsts.end()) { 13293 for (unsigned Idx : DCIt->getSecond()) { 13294 // Check that all instructions operands are demoted. 13295 if (all_of(TE->Scalars, [&](Value *V) { 13296 auto SIt = DemotedConsts.find(cast<Instruction>(V)); 13297 return SIt != DemotedConsts.end() && 13298 is_contained(SIt->getSecond(), Idx); 13299 })) { 13300 const TreeEntry *CTE = getOperandEntry(TE, Idx); 13301 MinBWs.try_emplace(CTE, MaxBitWidth, IsSigned); 13302 } 13303 } 13304 } 13305 } 13306 } 13307 13308 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) { 13309 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F); 13310 auto *TTI = &AM.getResult<TargetIRAnalysis>(F); 13311 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F); 13312 auto *AA = &AM.getResult<AAManager>(F); 13313 auto *LI = &AM.getResult<LoopAnalysis>(F); 13314 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); 13315 auto *AC = &AM.getResult<AssumptionAnalysis>(F); 13316 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F); 13317 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 13318 13319 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 13320 if (!Changed) 13321 return PreservedAnalyses::all(); 13322 13323 PreservedAnalyses PA; 13324 PA.preserveSet<CFGAnalyses>(); 13325 return PA; 13326 } 13327 13328 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_, 13329 TargetTransformInfo *TTI_, 13330 TargetLibraryInfo *TLI_, AAResults *AA_, 13331 LoopInfo *LI_, DominatorTree *DT_, 13332 AssumptionCache *AC_, DemandedBits *DB_, 13333 OptimizationRemarkEmitter *ORE_) { 13334 if (!RunSLPVectorization) 13335 return false; 13336 SE = SE_; 13337 TTI = TTI_; 13338 TLI = TLI_; 13339 AA = AA_; 13340 LI = LI_; 13341 DT = DT_; 13342 AC = AC_; 13343 DB = DB_; 13344 DL = &F.getParent()->getDataLayout(); 13345 13346 Stores.clear(); 13347 GEPs.clear(); 13348 bool Changed = false; 13349 13350 // If the target claims to have no vector registers don't attempt 13351 // vectorization. 13352 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true))) { 13353 LLVM_DEBUG( 13354 dbgs() << "SLP: Didn't find any vector registers for target, abort.\n"); 13355 return false; 13356 } 13357 13358 // Don't vectorize when the attribute NoImplicitFloat is used. 13359 if (F.hasFnAttribute(Attribute::NoImplicitFloat)) 13360 return false; 13361 13362 LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); 13363 13364 // Use the bottom up slp vectorizer to construct chains that start with 13365 // store instructions. 13366 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_); 13367 13368 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to 13369 // delete instructions. 13370 13371 // Update DFS numbers now so that we can use them for ordering. 13372 DT->updateDFSNumbers(); 13373 13374 // Scan the blocks in the function in post order. 13375 for (auto *BB : post_order(&F.getEntryBlock())) { 13376 // Start new block - clear the list of reduction roots. 
13377 R.clearReductionData(); 13378 collectSeedInstructions(BB); 13379 13380 // Vectorize trees that end at stores. 13381 if (!Stores.empty()) { 13382 LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size() 13383 << " underlying objects.\n"); 13384 Changed |= vectorizeStoreChains(R); 13385 } 13386 13387 // Vectorize trees that end at reductions. 13388 Changed |= vectorizeChainsInBlock(BB, R); 13389 13390 // Vectorize the index computations of getelementptr instructions. This 13391 // is primarily intended to catch gather-like idioms ending at 13392 // non-consecutive loads. 13393 if (!GEPs.empty()) { 13394 LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size() 13395 << " underlying objects.\n"); 13396 Changed |= vectorizeGEPIndices(BB, R); 13397 } 13398 } 13399 13400 if (Changed) { 13401 R.optimizeGatherSequence(); 13402 LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n"); 13403 } 13404 return Changed; 13405 } 13406 13407 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R, 13408 unsigned Idx, unsigned MinVF) { 13409 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size() 13410 << "\n"); 13411 const unsigned Sz = R.getVectorElementSize(Chain[0]); 13412 unsigned VF = Chain.size(); 13413 13414 if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF) 13415 return false; 13416 13417 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx 13418 << "\n"); 13419 13420 R.buildTree(Chain); 13421 if (R.isTreeTinyAndNotFullyVectorizable()) 13422 return false; 13423 if (R.isLoadCombineCandidate()) 13424 return false; 13425 R.reorderTopToBottom(); 13426 R.reorderBottomToTop(); 13427 R.buildExternalUses(); 13428 13429 R.computeMinimumValueSizes(); 13430 13431 InstructionCost Cost = R.getTreeCost(); 13432 13433 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF=" << VF << "\n"); 13434 if (Cost < -SLPCostThreshold) { 13435 LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n"); 13436 13437 using namespace ore; 13438 13439 R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized", 13440 cast<StoreInst>(Chain[0])) 13441 << "Stores SLP vectorized with cost " << NV("Cost", Cost) 13442 << " and with tree size " 13443 << NV("TreeSize", R.getTreeSize())); 13444 13445 R.vectorizeTree(); 13446 return true; 13447 } 13448 13449 return false; 13450 } 13451 13452 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores, 13453 BoUpSLP &R) { 13454 // We may run into multiple chains that merge into a single chain. We mark the 13455 // stores that we vectorized so that we don't visit the same store twice. 13456 BoUpSLP::ValueSet VectorizedStores; 13457 bool Changed = false; 13458 13459 // Stores the pair of stores (first_store, last_store) in a range, that were 13460 // already tried to be vectorized. Allows to skip the store ranges that were 13461 // already tried to be vectorized but the attempts were unsuccessful. 13462 DenseSet<std::pair<Value *, Value *>> TriedSequences; 13463 struct StoreDistCompare { 13464 bool operator()(const std::pair<unsigned, int> &Op1, 13465 const std::pair<unsigned, int> &Op2) const { 13466 return Op1.second < Op2.second; 13467 } 13468 }; 13469 // A set of pairs (index of store in Stores array ref, Distance of the store 13470 // address relative to base store address in units). 
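// Illustrative example (hypothetical addresses, not from a real test case):
// for i32 stores to %p, %p+4 and %p+12 (byte offsets), with the first store
// taken as the base, the set would contain {0, 0}, {1, 1} and {2, 3};
// getPointersDiff() reports the distances in store-element units.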
13471 using StoreIndexToDistSet = 13472 std::set<std::pair<unsigned, int>, StoreDistCompare>; 13473 auto TryToVectorize = [&](const StoreIndexToDistSet &Set) { 13474 int PrevDist = -1; 13475 BoUpSLP::ValueList Operands; 13476 // Collect the chain into a list. 13477 for (auto [Idx, Data] : enumerate(Set)) { 13478 if (Operands.empty() || Data.second - PrevDist == 1) { 13479 Operands.push_back(Stores[Data.first]); 13480 PrevDist = Data.second; 13481 if (Idx != Set.size() - 1) 13482 continue; 13483 } 13484 if (Operands.size() <= 1) { 13485 Operands.clear(); 13486 Operands.push_back(Stores[Data.first]); 13487 PrevDist = Data.second; 13488 continue; 13489 } 13490 13491 unsigned MaxVecRegSize = R.getMaxVecRegSize(); 13492 unsigned EltSize = R.getVectorElementSize(Operands[0]); 13493 unsigned MaxElts = llvm::bit_floor(MaxVecRegSize / EltSize); 13494 13495 unsigned MaxVF = 13496 std::min(R.getMaximumVF(EltSize, Instruction::Store), MaxElts); 13497 auto *Store = cast<StoreInst>(Operands[0]); 13498 Type *StoreTy = Store->getValueOperand()->getType(); 13499 Type *ValueTy = StoreTy; 13500 if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand())) 13501 ValueTy = Trunc->getSrcTy(); 13502 unsigned MinVF = TTI->getStoreMinimumVF( 13503 R.getMinVF(DL->getTypeSizeInBits(ValueTy)), StoreTy, ValueTy); 13504 13505 if (MaxVF <= MinVF) { 13506 LLVM_DEBUG(dbgs() << "SLP: Vectorization infeasible as MaxVF (" << MaxVF 13507 << ") <= " 13508 << "MinVF (" << MinVF << ")\n"); 13509 } 13510 13511 // FIXME: Is division-by-2 the correct step? Should we assert that the 13512 // register size is a power-of-2? 13513 unsigned StartIdx = 0; 13514 for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) { 13515 for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) { 13516 ArrayRef<Value *> Slice = ArrayRef(Operands).slice(Cnt, Size); 13517 assert( 13518 all_of( 13519 Slice, 13520 [&](Value *V) { 13521 return cast<StoreInst>(V)->getValueOperand()->getType() == 13522 cast<StoreInst>(Slice.front()) 13523 ->getValueOperand() 13524 ->getType(); 13525 }) && 13526 "Expected all operands of same type."); 13527 if (!VectorizedStores.count(Slice.front()) && 13528 !VectorizedStores.count(Slice.back()) && 13529 TriedSequences.insert(std::make_pair(Slice.front(), Slice.back())) 13530 .second && 13531 vectorizeStoreChain(Slice, R, Cnt, MinVF)) { 13532 // Mark the vectorized stores so that we don't vectorize them again. 13533 VectorizedStores.insert(Slice.begin(), Slice.end()); 13534 Changed = true; 13535 // If we vectorized initial block, no need to try to vectorize it 13536 // again. 13537 if (Cnt == StartIdx) 13538 StartIdx += Size; 13539 Cnt += Size; 13540 continue; 13541 } 13542 ++Cnt; 13543 } 13544 // Check if the whole array was vectorized already - exit. 13545 if (StartIdx >= Operands.size()) 13546 break; 13547 } 13548 Operands.clear(); 13549 Operands.push_back(Stores[Data.first]); 13550 PrevDist = Data.second; 13551 } 13552 }; 13553 13554 // Stores pair (first: index of the store into Stores array ref, address of 13555 // which taken as base, second: sorted set of pairs {index, dist}, which are 13556 // indices of stores in the set and their store location distances relative to 13557 // the base address). 13558 13559 // Need to store the index of the very first store separately, since the set 13560 // may be reordered after the insertion and the first store may be moved. This 13561 // container allows to reduce number of calls of getPointersDiff() function. 
13562 SmallVector<std::pair<unsigned, StoreIndexToDistSet>> SortedStores; 13563 // Inserts the specified store SI with the given index Idx into the set of 13564 // stores. If a store with the same distance is already present - stop the 13565 // insertion and try to vectorize the stores found so far. If some stores from 13566 // this sequence were not vectorized - try to vectorize them together with the 13567 // new store later. But this logic is applied only to the stores that come 13568 // before the previous store with the same distance. 13569 // Example: 13570 // 1. store x, %p 13571 // 2. store y, %p+1 13572 // 3. store z, %p+2 13573 // 4. store a, %p 13574 // 5. store b, %p+3 13575 // - Scan this from the last to first store. The very first bunch of stores is 13576 // {5, {{4, -3}, {2, -2}, {3, -1}, {5, 0}}} (the element in SortedStores 13577 // vector). 13578 // - The next store in the list - #1 - has the same distance from store #5 as 13579 // the store #4. 13580 // - Try to vectorize the sequence of stores 4,2,3,5. 13581 // - If all these stores are vectorized - just drop them. 13582 // - If some of them are not vectorized (say, #3 and #5), do extra analysis. 13583 // - Start a new stores sequence. 13584 // The new bunch of stores is {1, {1, 0}}. 13585 // - Add the stores from the previous sequence that were not vectorized. 13586 // Here we consider the stores in reverse order rather than in the order they 13587 // appear in the IR (Stores is reversed already, see vectorizeStoreChains()). 13588 // Store #3 can be added -> it comes after store #4 with the same distance as 13589 // store #1. 13590 // Store #5 cannot be added - it comes before store #4. 13591 // This logic helps to improve compile time: we assume that the stores coming 13592 // after the previous store with the same distance most likely have memory 13593 // dependencies, so there is no need to waste compile time trying to vectorize them. 13594 // - Try to vectorize the sequence {1, {1, 0}, {3, 2}}. 13595 auto FillStoresSet = [&](unsigned Idx, StoreInst *SI) { 13596 for (std::pair<unsigned, StoreIndexToDistSet> &Set : SortedStores) { 13597 std::optional<int> Diff = getPointersDiff( 13598 Stores[Set.first]->getValueOperand()->getType(), 13599 Stores[Set.first]->getPointerOperand(), 13600 SI->getValueOperand()->getType(), SI->getPointerOperand(), *DL, *SE, 13601 /*StrictCheck=*/true); 13602 if (!Diff) 13603 continue; 13604 auto It = Set.second.find(std::make_pair(Idx, *Diff)); 13605 if (It == Set.second.end()) { 13606 Set.second.emplace(Idx, *Diff); 13607 return; 13608 } 13609 // Try to vectorize the first found set to avoid duplicate analysis. 13610 TryToVectorize(Set.second); 13611 StoreIndexToDistSet PrevSet; 13612 PrevSet.swap(Set.second); 13613 Set.first = Idx; 13614 Set.second.emplace(Idx, 0); 13615 // Insert stores that followed the previous match to try to vectorize them 13616 // with this store. 13617 unsigned StartIdx = It->first + 1; 13618 SmallBitVector UsedStores(Idx - StartIdx); 13619 // Distances to the previously found dup store (or this store, since they 13620 // store to the same addresses). 13621 SmallVector<int> Dists(Idx - StartIdx, 0); 13622 for (const std::pair<unsigned, int> &Pair : reverse(PrevSet)) { 13623 // Do not try to vectorize sequences we already tried.
13624 if (Pair.first <= It->first || 13625 VectorizedStores.contains(Stores[Pair.first])) 13626 break; 13627 unsigned BI = Pair.first - StartIdx; 13628 UsedStores.set(BI); 13629 Dists[BI] = Pair.second - It->second; 13630 } 13631 for (unsigned I = StartIdx; I < Idx; ++I) { 13632 unsigned BI = I - StartIdx; 13633 if (UsedStores.test(BI)) 13634 Set.second.emplace(I, Dists[BI]); 13635 } 13636 return; 13637 } 13638 auto &Res = SortedStores.emplace_back(); 13639 Res.first = Idx; 13640 Res.second.emplace(Idx, 0); 13641 }; 13642 StoreInst *PrevStore = Stores.front(); 13643 for (auto [I, SI] : enumerate(Stores)) { 13644 // Check that we do not try to vectorize stores of different types. 13645 if (PrevStore->getValueOperand()->getType() != 13646 SI->getValueOperand()->getType()) { 13647 for (auto &Set : SortedStores) 13648 TryToVectorize(Set.second); 13649 SortedStores.clear(); 13650 PrevStore = SI; 13651 } 13652 FillStoresSet(I, SI); 13653 } 13654 13655 // Final vectorization attempt. 13656 for (auto &Set : SortedStores) 13657 TryToVectorize(Set.second); 13658 13659 return Changed; 13660 } 13661 13662 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) { 13663 // Initialize the collections. We will make a single pass over the block. 13664 Stores.clear(); 13665 GEPs.clear(); 13666 13667 // Visit the store and getelementptr instructions in BB and organize them in 13668 // Stores and GEPs according to the underlying objects of their pointer 13669 // operands. 13670 for (Instruction &I : *BB) { 13671 // Ignore store instructions that are volatile or have a pointer operand 13672 // that doesn't point to a scalar type. 13673 if (auto *SI = dyn_cast<StoreInst>(&I)) { 13674 if (!SI->isSimple()) 13675 continue; 13676 if (!isValidElementType(SI->getValueOperand()->getType())) 13677 continue; 13678 Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI); 13679 } 13680 13681 // Ignore getelementptr instructions that have more than one index, a 13682 // constant index, or a pointer operand that doesn't point to a scalar 13683 // type. 13684 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { 13685 if (GEP->getNumIndices() != 1) 13686 continue; 13687 Value *Idx = GEP->idx_begin()->get(); 13688 if (isa<Constant>(Idx)) 13689 continue; 13690 if (!isValidElementType(Idx->getType())) 13691 continue; 13692 if (GEP->getType()->isVectorTy()) 13693 continue; 13694 GEPs[GEP->getPointerOperand()].push_back(GEP); 13695 } 13696 } 13697 } 13698 13699 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, 13700 bool MaxVFOnly) { 13701 if (VL.size() < 2) 13702 return false; 13703 13704 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " 13705 << VL.size() << ".\n"); 13706 13707 // Check that all of the parts are instructions of the same type, 13708 // we permit an alternate opcode via InstructionsState. 13709 InstructionsState S = getSameOpcode(VL, *TLI); 13710 if (!S.getOpcode()) 13711 return false; 13712 13713 Instruction *I0 = cast<Instruction>(S.OpValue); 13714 // Make sure invalid types (including vector type) are rejected before 13715 // determining vectorization factor for scalar instructions. 13716 for (Value *V : VL) { 13717 Type *Ty = V->getType(); 13718 if (!isa<InsertElementInst>(V) && !isValidElementType(Ty)) { 13719 // NOTE: the following will give user internal llvm type name, which may 13720 // not be useful. 
13721 R.getORE()->emit([&]() { 13722 std::string TypeStr; 13723 llvm::raw_string_ostream rso(TypeStr); 13724 Ty->print(rso); 13725 return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0) 13726 << "Cannot SLP vectorize list: type " 13727 << rso.str() + " is unsupported by vectorizer"; 13728 }); 13729 return false; 13730 } 13731 } 13732 13733 unsigned Sz = R.getVectorElementSize(I0); 13734 unsigned MinVF = R.getMinVF(Sz); 13735 unsigned MaxVF = std::max<unsigned>(llvm::bit_floor(VL.size()), MinVF); 13736 MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF); 13737 if (MaxVF < 2) { 13738 R.getORE()->emit([&]() { 13739 return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0) 13740 << "Cannot SLP vectorize list: vectorization factor " 13741 << "less than 2 is not supported"; 13742 }); 13743 return false; 13744 } 13745 13746 bool Changed = false; 13747 bool CandidateFound = false; 13748 InstructionCost MinCost = SLPCostThreshold.getValue(); 13749 Type *ScalarTy = VL[0]->getType(); 13750 if (auto *IE = dyn_cast<InsertElementInst>(VL[0])) 13751 ScalarTy = IE->getOperand(1)->getType(); 13752 13753 unsigned NextInst = 0, MaxInst = VL.size(); 13754 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) { 13755 // No actual vectorization should happen, if number of parts is the same as 13756 // provided vectorization factor (i.e. the scalar type is used for vector 13757 // code during codegen). 13758 auto *VecTy = FixedVectorType::get(ScalarTy, VF); 13759 if (TTI->getNumberOfParts(VecTy) == VF) 13760 continue; 13761 for (unsigned I = NextInst; I < MaxInst; ++I) { 13762 unsigned ActualVF = std::min(MaxInst - I, VF); 13763 13764 if (!isPowerOf2_32(ActualVF)) 13765 continue; 13766 13767 if (MaxVFOnly && ActualVF < MaxVF) 13768 break; 13769 if ((VF > MinVF && ActualVF <= VF / 2) || (VF == MinVF && ActualVF < 2)) 13770 break; 13771 13772 ArrayRef<Value *> Ops = VL.slice(I, ActualVF); 13773 // Check that a previous iteration of this loop did not delete the Value. 13774 if (llvm::any_of(Ops, [&R](Value *V) { 13775 auto *I = dyn_cast<Instruction>(V); 13776 return I && R.isDeleted(I); 13777 })) 13778 continue; 13779 13780 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << ActualVF << " operations " 13781 << "\n"); 13782 13783 R.buildTree(Ops); 13784 if (R.isTreeTinyAndNotFullyVectorizable()) 13785 continue; 13786 R.reorderTopToBottom(); 13787 R.reorderBottomToTop( 13788 /*IgnoreReorder=*/!isa<InsertElementInst>(Ops.front()) && 13789 !R.doesRootHaveInTreeUses()); 13790 R.buildExternalUses(); 13791 13792 R.computeMinimumValueSizes(); 13793 InstructionCost Cost = R.getTreeCost(); 13794 CandidateFound = true; 13795 MinCost = std::min(MinCost, Cost); 13796 13797 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost 13798 << " for VF=" << ActualVF << "\n"); 13799 if (Cost < -SLPCostThreshold) { 13800 LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n"); 13801 R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList", 13802 cast<Instruction>(Ops[0])) 13803 << "SLP vectorized with cost " << ore::NV("Cost", Cost) 13804 << " and with tree size " 13805 << ore::NV("TreeSize", R.getTreeSize())); 13806 13807 R.vectorizeTree(); 13808 // Move to the next bundle. 
13809 I += VF - 1; 13810 NextInst = I + 1; 13811 Changed = true; 13812 } 13813 } 13814 } 13815 13816 if (!Changed && CandidateFound) { 13817 R.getORE()->emit([&]() { 13818 return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0) 13819 << "List vectorization was possible but not beneficial with cost " 13820 << ore::NV("Cost", MinCost) << " >= " 13821 << ore::NV("Treshold", -SLPCostThreshold); 13822 }); 13823 } else if (!Changed) { 13824 R.getORE()->emit([&]() { 13825 return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0) 13826 << "Cannot SLP vectorize list: vectorization was impossible" 13827 << " with available vectorization factors"; 13828 }); 13829 } 13830 return Changed; 13831 } 13832 13833 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) { 13834 if (!I) 13835 return false; 13836 13837 if (!isa<BinaryOperator, CmpInst>(I) || isa<VectorType>(I->getType())) 13838 return false; 13839 13840 Value *P = I->getParent(); 13841 13842 // Vectorize in current basic block only. 13843 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0)); 13844 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1)); 13845 if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P) 13846 return false; 13847 13848 // First collect all possible candidates 13849 SmallVector<std::pair<Value *, Value *>, 4> Candidates; 13850 Candidates.emplace_back(Op0, Op1); 13851 13852 auto *A = dyn_cast<BinaryOperator>(Op0); 13853 auto *B = dyn_cast<BinaryOperator>(Op1); 13854 // Try to skip B. 13855 if (A && B && B->hasOneUse()) { 13856 auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0)); 13857 auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1)); 13858 if (B0 && B0->getParent() == P) 13859 Candidates.emplace_back(A, B0); 13860 if (B1 && B1->getParent() == P) 13861 Candidates.emplace_back(A, B1); 13862 } 13863 // Try to skip A. 13864 if (B && A && A->hasOneUse()) { 13865 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); 13866 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); 13867 if (A0 && A0->getParent() == P) 13868 Candidates.emplace_back(A0, B); 13869 if (A1 && A1->getParent() == P) 13870 Candidates.emplace_back(A1, B); 13871 } 13872 13873 if (Candidates.size() == 1) 13874 return tryToVectorizeList({Op0, Op1}, R); 13875 13876 // We have multiple options. Try to pick the single best. 13877 std::optional<int> BestCandidate = R.findBestRootPair(Candidates); 13878 if (!BestCandidate) 13879 return false; 13880 return tryToVectorizeList( 13881 {Candidates[*BestCandidate].first, Candidates[*BestCandidate].second}, R); 13882 } 13883 13884 namespace { 13885 13886 /// Model horizontal reductions. 13887 /// 13888 /// A horizontal reduction is a tree of reduction instructions that has values 13889 /// that can be put into a vector as its leaves. For example: 13890 /// 13891 /// mul mul mul mul 13892 /// \ / \ / 13893 /// + + 13894 /// \ / 13895 /// + 13896 /// This tree has "mul" as its leaf values and "+" as its reduction 13897 /// instructions. A reduction can feed into a store or a binary operation 13898 /// feeding a phi. 13899 /// ... 13900 /// \ / 13901 /// + 13902 /// | 13903 /// phi += 13904 /// 13905 /// Or: 13906 /// ... 13907 /// \ / 13908 /// + 13909 /// | 13910 /// *p = 13911 /// 13912 class HorizontalReduction { 13913 using ReductionOpsType = SmallVector<Value *, 16>; 13914 using ReductionOpsListType = SmallVector<ReductionOpsType, 2>; 13915 ReductionOpsListType ReductionOps; 13916 /// List of possibly reduced values. 
13917 SmallVector<SmallVector<Value *>> ReducedVals; 13918 /// Maps reduced value to the corresponding reduction operation. 13919 DenseMap<Value *, SmallVector<Instruction *>> ReducedValsToOps; 13920 // Use map vector to make stable output. 13921 MapVector<Instruction *, Value *> ExtraArgs; 13922 WeakTrackingVH ReductionRoot; 13923 /// The type of reduction operation. 13924 RecurKind RdxKind; 13925 /// Checks if the optimization of original scalar identity operations on 13926 /// matched horizontal reductions is enabled and allowed. 13927 bool IsSupportedHorRdxIdentityOp = false; 13928 13929 static bool isCmpSelMinMax(Instruction *I) { 13930 return match(I, m_Select(m_Cmp(), m_Value(), m_Value())) && 13931 RecurrenceDescriptor::isMinMaxRecurrenceKind(getRdxKind(I)); 13932 } 13933 13934 // And/or are potentially poison-safe logical patterns like: 13935 // select x, y, false 13936 // select x, true, y 13937 static bool isBoolLogicOp(Instruction *I) { 13938 return isa<SelectInst>(I) && 13939 (match(I, m_LogicalAnd()) || match(I, m_LogicalOr())); 13940 } 13941 13942 /// Checks if instruction is associative and can be vectorized. 13943 static bool isVectorizable(RecurKind Kind, Instruction *I) { 13944 if (Kind == RecurKind::None) 13945 return false; 13946 13947 // Integer ops that map to select instructions or intrinsics are fine. 13948 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind) || 13949 isBoolLogicOp(I)) 13950 return true; 13951 13952 if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) { 13953 // FP min/max are associative except for NaN and -0.0. We do not 13954 // have to rule out -0.0 here because the intrinsic semantics do not 13955 // specify a fixed result for it. 13956 return I->getFastMathFlags().noNaNs(); 13957 } 13958 13959 if (Kind == RecurKind::FMaximum || Kind == RecurKind::FMinimum) 13960 return true; 13961 13962 return I->isAssociative(); 13963 } 13964 13965 static Value *getRdxOperand(Instruction *I, unsigned Index) { 13966 // Poison-safe 'or' takes the form: select X, true, Y 13967 // To make that work with the normal operand processing, we skip the 13968 // true value operand. 13969 // TODO: Change the code and data structures to handle this without a hack. 13970 if (getRdxKind(I) == RecurKind::Or && isa<SelectInst>(I) && Index == 1) 13971 return I->getOperand(2); 13972 return I->getOperand(Index); 13973 } 13974 13975 /// Creates reduction operation with the current opcode. 
13976 static Value *createOp(IRBuilder<> &Builder, RecurKind Kind, Value *LHS, 13977 Value *RHS, const Twine &Name, bool UseSelect) { 13978 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind); 13979 bool IsConstant = isConstant(LHS) && isConstant(RHS); 13980 switch (Kind) { 13981 case RecurKind::Or: 13982 if (UseSelect && 13983 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType())) 13984 return Builder.CreateSelect(LHS, Builder.getTrue(), RHS, Name); 13985 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 13986 Name); 13987 case RecurKind::And: 13988 if (UseSelect && 13989 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType())) 13990 return Builder.CreateSelect(LHS, RHS, Builder.getFalse(), Name); 13991 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 13992 Name); 13993 case RecurKind::Add: 13994 case RecurKind::Mul: 13995 case RecurKind::Xor: 13996 case RecurKind::FAdd: 13997 case RecurKind::FMul: 13998 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 13999 Name); 14000 case RecurKind::FMax: 14001 if (IsConstant) 14002 return ConstantFP::get(LHS->getType(), 14003 maxnum(cast<ConstantFP>(LHS)->getValueAPF(), 14004 cast<ConstantFP>(RHS)->getValueAPF())); 14005 return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS); 14006 case RecurKind::FMin: 14007 if (IsConstant) 14008 return ConstantFP::get(LHS->getType(), 14009 minnum(cast<ConstantFP>(LHS)->getValueAPF(), 14010 cast<ConstantFP>(RHS)->getValueAPF())); 14011 return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS); 14012 case RecurKind::FMaximum: 14013 if (IsConstant) 14014 return ConstantFP::get(LHS->getType(), 14015 maximum(cast<ConstantFP>(LHS)->getValueAPF(), 14016 cast<ConstantFP>(RHS)->getValueAPF())); 14017 return Builder.CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS); 14018 case RecurKind::FMinimum: 14019 if (IsConstant) 14020 return ConstantFP::get(LHS->getType(), 14021 minimum(cast<ConstantFP>(LHS)->getValueAPF(), 14022 cast<ConstantFP>(RHS)->getValueAPF())); 14023 return Builder.CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS); 14024 case RecurKind::SMax: 14025 if (IsConstant || UseSelect) { 14026 Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name); 14027 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 14028 } 14029 return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS); 14030 case RecurKind::SMin: 14031 if (IsConstant || UseSelect) { 14032 Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name); 14033 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 14034 } 14035 return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS); 14036 case RecurKind::UMax: 14037 if (IsConstant || UseSelect) { 14038 Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name); 14039 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 14040 } 14041 return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS); 14042 case RecurKind::UMin: 14043 if (IsConstant || UseSelect) { 14044 Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name); 14045 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 14046 } 14047 return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS); 14048 default: 14049 llvm_unreachable("Unknown reduction operation."); 14050 } 14051 } 14052 14053 /// Creates reduction operation with the current opcode with the IR flags 14054 /// from \p ReductionOps, dropping nuw/nsw flags. 
14055 static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS, 14056 Value *RHS, const Twine &Name, 14057 const ReductionOpsListType &ReductionOps) { 14058 bool UseSelect = 14059 ReductionOps.size() == 2 || 14060 // Logical or/and. 14061 (ReductionOps.size() == 1 && any_of(ReductionOps.front(), [](Value *V) { 14062 return isa<SelectInst>(V); 14063 })); 14064 assert((!UseSelect || ReductionOps.size() != 2 || 14065 isa<SelectInst>(ReductionOps[1][0])) && 14066 "Expected cmp + select pairs for reduction"); 14067 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect); 14068 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) { 14069 if (auto *Sel = dyn_cast<SelectInst>(Op)) { 14070 propagateIRFlags(Sel->getCondition(), ReductionOps[0], nullptr, 14071 /*IncludeWrapFlags=*/false); 14072 propagateIRFlags(Op, ReductionOps[1], nullptr, 14073 /*IncludeWrapFlags=*/false); 14074 return Op; 14075 } 14076 } 14077 propagateIRFlags(Op, ReductionOps[0], nullptr, /*IncludeWrapFlags=*/false); 14078 return Op; 14079 } 14080 14081 public: 14082 static RecurKind getRdxKind(Value *V) { 14083 auto *I = dyn_cast<Instruction>(V); 14084 if (!I) 14085 return RecurKind::None; 14086 if (match(I, m_Add(m_Value(), m_Value()))) 14087 return RecurKind::Add; 14088 if (match(I, m_Mul(m_Value(), m_Value()))) 14089 return RecurKind::Mul; 14090 if (match(I, m_And(m_Value(), m_Value())) || 14091 match(I, m_LogicalAnd(m_Value(), m_Value()))) 14092 return RecurKind::And; 14093 if (match(I, m_Or(m_Value(), m_Value())) || 14094 match(I, m_LogicalOr(m_Value(), m_Value()))) 14095 return RecurKind::Or; 14096 if (match(I, m_Xor(m_Value(), m_Value()))) 14097 return RecurKind::Xor; 14098 if (match(I, m_FAdd(m_Value(), m_Value()))) 14099 return RecurKind::FAdd; 14100 if (match(I, m_FMul(m_Value(), m_Value()))) 14101 return RecurKind::FMul; 14102 14103 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value()))) 14104 return RecurKind::FMax; 14105 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value()))) 14106 return RecurKind::FMin; 14107 14108 if (match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(), m_Value()))) 14109 return RecurKind::FMaximum; 14110 if (match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(), m_Value()))) 14111 return RecurKind::FMinimum; 14112 // This matches either cmp+select or intrinsics. SLP is expected to handle 14113 // either form. 14114 // TODO: If we are canonicalizing to intrinsics, we can remove several 14115 // special-case paths that deal with selects. 14116 if (match(I, m_SMax(m_Value(), m_Value()))) 14117 return RecurKind::SMax; 14118 if (match(I, m_SMin(m_Value(), m_Value()))) 14119 return RecurKind::SMin; 14120 if (match(I, m_UMax(m_Value(), m_Value()))) 14121 return RecurKind::UMax; 14122 if (match(I, m_UMin(m_Value(), m_Value()))) 14123 return RecurKind::UMin; 14124 14125 if (auto *Select = dyn_cast<SelectInst>(I)) { 14126 // Try harder: look for min/max pattern based on instructions producing 14127 // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2). 
14128 // During the intermediate stages of SLP, it's very common to have 14129 // pattern like this (since optimizeGatherSequence is run only once 14130 // at the end): 14131 // %1 = extractelement <2 x i32> %a, i32 0 14132 // %2 = extractelement <2 x i32> %a, i32 1 14133 // %cond = icmp sgt i32 %1, %2 14134 // %3 = extractelement <2 x i32> %a, i32 0 14135 // %4 = extractelement <2 x i32> %a, i32 1 14136 // %select = select i1 %cond, i32 %3, i32 %4 14137 CmpInst::Predicate Pred; 14138 Instruction *L1; 14139 Instruction *L2; 14140 14141 Value *LHS = Select->getTrueValue(); 14142 Value *RHS = Select->getFalseValue(); 14143 Value *Cond = Select->getCondition(); 14144 14145 // TODO: Support inverse predicates. 14146 if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) { 14147 if (!isa<ExtractElementInst>(RHS) || 14148 !L2->isIdenticalTo(cast<Instruction>(RHS))) 14149 return RecurKind::None; 14150 } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) { 14151 if (!isa<ExtractElementInst>(LHS) || 14152 !L1->isIdenticalTo(cast<Instruction>(LHS))) 14153 return RecurKind::None; 14154 } else { 14155 if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS)) 14156 return RecurKind::None; 14157 if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) || 14158 !L1->isIdenticalTo(cast<Instruction>(LHS)) || 14159 !L2->isIdenticalTo(cast<Instruction>(RHS))) 14160 return RecurKind::None; 14161 } 14162 14163 switch (Pred) { 14164 default: 14165 return RecurKind::None; 14166 case CmpInst::ICMP_SGT: 14167 case CmpInst::ICMP_SGE: 14168 return RecurKind::SMax; 14169 case CmpInst::ICMP_SLT: 14170 case CmpInst::ICMP_SLE: 14171 return RecurKind::SMin; 14172 case CmpInst::ICMP_UGT: 14173 case CmpInst::ICMP_UGE: 14174 return RecurKind::UMax; 14175 case CmpInst::ICMP_ULT: 14176 case CmpInst::ICMP_ULE: 14177 return RecurKind::UMin; 14178 } 14179 } 14180 return RecurKind::None; 14181 } 14182 14183 /// Get the index of the first operand. 14184 static unsigned getFirstOperandIndex(Instruction *I) { 14185 return isCmpSelMinMax(I) ? 1 : 0; 14186 } 14187 14188 private: 14189 /// Total number of operands in the reduction operation. 14190 static unsigned getNumberOfOperands(Instruction *I) { 14191 return isCmpSelMinMax(I) ? 3 : 2; 14192 } 14193 14194 /// Checks if the instruction is in basic block \p BB. 14195 /// For a cmp+sel min/max reduction check that both ops are in \p BB. 14196 static bool hasSameParent(Instruction *I, BasicBlock *BB) { 14197 if (isCmpSelMinMax(I) || isBoolLogicOp(I)) { 14198 auto *Sel = cast<SelectInst>(I); 14199 auto *Cmp = dyn_cast<Instruction>(Sel->getCondition()); 14200 return Sel->getParent() == BB && Cmp && Cmp->getParent() == BB; 14201 } 14202 return I->getParent() == BB; 14203 } 14204 14205 /// Expected number of uses for reduction operations/reduced values. 14206 static bool hasRequiredNumberOfUses(bool IsCmpSelMinMax, Instruction *I) { 14207 if (IsCmpSelMinMax) { 14208 // SelectInst must be used twice while the condition op must have single 14209 // use only. 14210 if (auto *Sel = dyn_cast<SelectInst>(I)) 14211 return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse(); 14212 return I->hasNUses(2); 14213 } 14214 14215 // Arithmetic reduction operation must be used once only. 14216 return I->hasOneUse(); 14217 } 14218 14219 /// Initializes the list of reduction operations. 
14220 void initReductionOps(Instruction *I) { 14221 if (isCmpSelMinMax(I)) 14222 ReductionOps.assign(2, ReductionOpsType()); 14223 else 14224 ReductionOps.assign(1, ReductionOpsType()); 14225 } 14226 14227 /// Add all reduction operations for the reduction instruction \p I. 14228 void addReductionOps(Instruction *I) { 14229 if (isCmpSelMinMax(I)) { 14230 ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition()); 14231 ReductionOps[1].emplace_back(I); 14232 } else { 14233 ReductionOps[0].emplace_back(I); 14234 } 14235 } 14236 14237 static bool isGoodForReduction(ArrayRef<Value *> Data) { 14238 int Sz = Data.size(); 14239 auto *I = dyn_cast<Instruction>(Data.front()); 14240 return Sz > 1 || isConstant(Data.front()) || 14241 (I && !isa<LoadInst>(I) && isValidForAlternation(I->getOpcode())); 14242 } 14243 14244 public: 14245 HorizontalReduction() = default; 14246 14247 /// Try to find a reduction tree. 14248 bool matchAssociativeReduction(BoUpSLP &R, Instruction *Root, 14249 ScalarEvolution &SE, const DataLayout &DL, 14250 const TargetLibraryInfo &TLI) { 14251 RdxKind = HorizontalReduction::getRdxKind(Root); 14252 if (!isVectorizable(RdxKind, Root)) 14253 return false; 14254 14255 // Analyze "regular" integer/FP types for reductions - no target-specific 14256 // types or pointers. 14257 Type *Ty = Root->getType(); 14258 if (!isValidElementType(Ty) || Ty->isPointerTy()) 14259 return false; 14260 14261 // Though the ultimate reduction may have multiple uses, its condition must 14262 // have only single use. 14263 if (auto *Sel = dyn_cast<SelectInst>(Root)) 14264 if (!Sel->getCondition()->hasOneUse()) 14265 return false; 14266 14267 ReductionRoot = Root; 14268 14269 // Iterate through all the operands of the possible reduction tree and 14270 // gather all the reduced values, sorting them by their value id. 14271 BasicBlock *BB = Root->getParent(); 14272 bool IsCmpSelMinMax = isCmpSelMinMax(Root); 14273 SmallVector<Instruction *> Worklist(1, Root); 14274 // Checks if the operands of the \p TreeN instruction are also reduction 14275 // operations or should be treated as reduced values or an extra argument, 14276 // which is not part of the reduction. 14277 auto CheckOperands = [&](Instruction *TreeN, 14278 SmallVectorImpl<Value *> &ExtraArgs, 14279 SmallVectorImpl<Value *> &PossibleReducedVals, 14280 SmallVectorImpl<Instruction *> &ReductionOps) { 14281 for (int I = getFirstOperandIndex(TreeN), 14282 End = getNumberOfOperands(TreeN); 14283 I < End; ++I) { 14284 Value *EdgeVal = getRdxOperand(TreeN, I); 14285 ReducedValsToOps[EdgeVal].push_back(TreeN); 14286 auto *EdgeInst = dyn_cast<Instruction>(EdgeVal); 14287 // Edge has wrong parent - mark as an extra argument. 14288 if (EdgeInst && !isVectorLikeInstWithConstOps(EdgeInst) && 14289 !hasSameParent(EdgeInst, BB)) { 14290 ExtraArgs.push_back(EdgeVal); 14291 continue; 14292 } 14293 // If the edge is not an instruction, or it is different from the main 14294 // reduction opcode or has too many uses - possible reduced value. 14295 // Also, do not try to reduce const values, if the operation is not 14296 // foldable. 
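// Illustrative example (hypothetical IR): for an integer add reduction
//   %m  = mul i32 %b, %c
//   %r0 = add i32 %a0, %a1
//   %r1 = add i32 %r0, %m
// walking from %r1, the inner add %r0 passes the checks below and is queued
// as another reduction operation, while %m (different opcode) becomes a
// possible reduced value; %a0 and %a1 are classified the same way once %r0
// is popped from the worklist.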
14297 if (!EdgeInst || getRdxKind(EdgeInst) != RdxKind || 14298 IsCmpSelMinMax != isCmpSelMinMax(EdgeInst) || 14299 !hasRequiredNumberOfUses(IsCmpSelMinMax, EdgeInst) || 14300 !isVectorizable(RdxKind, EdgeInst) || 14301 (R.isAnalyzedReductionRoot(EdgeInst) && 14302 all_of(EdgeInst->operands(), Constant::classof))) { 14303 PossibleReducedVals.push_back(EdgeVal); 14304 continue; 14305 } 14306 ReductionOps.push_back(EdgeInst); 14307 } 14308 }; 14309 // Try to regroup reduced values so that it gets more profitable to try to 14310 // reduce them. Values are grouped by their value ids, instructions - by 14311 // instruction op id and/or alternate op id, plus do extra analysis for 14312 // loads (grouping them by the distabce between pointers) and cmp 14313 // instructions (grouping them by the predicate). 14314 MapVector<size_t, MapVector<size_t, MapVector<Value *, unsigned>>> 14315 PossibleReducedVals; 14316 initReductionOps(Root); 14317 DenseMap<Value *, SmallVector<LoadInst *>> LoadsMap; 14318 SmallSet<size_t, 2> LoadKeyUsed; 14319 SmallPtrSet<Value *, 4> DoNotReverseVals; 14320 14321 auto GenerateLoadsSubkey = [&](size_t Key, LoadInst *LI) { 14322 Value *Ptr = getUnderlyingObject(LI->getPointerOperand()); 14323 if (LoadKeyUsed.contains(Key)) { 14324 auto LIt = LoadsMap.find(Ptr); 14325 if (LIt != LoadsMap.end()) { 14326 for (LoadInst *RLI : LIt->second) { 14327 if (getPointersDiff(RLI->getType(), RLI->getPointerOperand(), 14328 LI->getType(), LI->getPointerOperand(), DL, SE, 14329 /*StrictCheck=*/true)) 14330 return hash_value(RLI->getPointerOperand()); 14331 } 14332 for (LoadInst *RLI : LIt->second) { 14333 if (arePointersCompatible(RLI->getPointerOperand(), 14334 LI->getPointerOperand(), TLI)) { 14335 hash_code SubKey = hash_value(RLI->getPointerOperand()); 14336 DoNotReverseVals.insert(RLI); 14337 return SubKey; 14338 } 14339 } 14340 if (LIt->second.size() > 2) { 14341 hash_code SubKey = 14342 hash_value(LIt->second.back()->getPointerOperand()); 14343 DoNotReverseVals.insert(LIt->second.back()); 14344 return SubKey; 14345 } 14346 } 14347 } 14348 LoadKeyUsed.insert(Key); 14349 LoadsMap.try_emplace(Ptr).first->second.push_back(LI); 14350 return hash_value(LI->getPointerOperand()); 14351 }; 14352 14353 while (!Worklist.empty()) { 14354 Instruction *TreeN = Worklist.pop_back_val(); 14355 SmallVector<Value *> Args; 14356 SmallVector<Value *> PossibleRedVals; 14357 SmallVector<Instruction *> PossibleReductionOps; 14358 CheckOperands(TreeN, Args, PossibleRedVals, PossibleReductionOps); 14359 // If too many extra args - mark the instruction itself as a reduction 14360 // value, not a reduction operation. 14361 if (Args.size() < 2) { 14362 addReductionOps(TreeN); 14363 // Add extra args. 14364 if (!Args.empty()) { 14365 assert(Args.size() == 1 && "Expected only single argument."); 14366 ExtraArgs[TreeN] = Args.front(); 14367 } 14368 // Add reduction values. The values are sorted for better vectorization 14369 // results. 
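// Illustrative note (hypothetical example): two loads from the same
// underlying object, say a[0] and a[1], get the same key and - via
// GenerateLoadsSubkey above - a subkey derived from the common pointer base,
// so they land in the same bucket and may later be emitted as a single
// contiguous vector load; unrelated values end up in different buckets.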
14370 for (Value *V : PossibleRedVals) { 14371 size_t Key, Idx; 14372 std::tie(Key, Idx) = generateKeySubkey(V, &TLI, GenerateLoadsSubkey, 14373 /*AllowAlternate=*/false); 14374 ++PossibleReducedVals[Key][Idx] 14375 .insert(std::make_pair(V, 0)) 14376 .first->second; 14377 } 14378 Worklist.append(PossibleReductionOps.rbegin(), 14379 PossibleReductionOps.rend()); 14380 } else { 14381 size_t Key, Idx; 14382 std::tie(Key, Idx) = generateKeySubkey(TreeN, &TLI, GenerateLoadsSubkey, 14383 /*AllowAlternate=*/false); 14384 ++PossibleReducedVals[Key][Idx] 14385 .insert(std::make_pair(TreeN, 0)) 14386 .first->second; 14387 } 14388 } 14389 auto PossibleReducedValsVect = PossibleReducedVals.takeVector(); 14390 // Sort values by the total number of values kinds to start the reduction 14391 // from the longest possible reduced values sequences. 14392 for (auto &PossibleReducedVals : PossibleReducedValsVect) { 14393 auto PossibleRedVals = PossibleReducedVals.second.takeVector(); 14394 SmallVector<SmallVector<Value *>> PossibleRedValsVect; 14395 for (auto It = PossibleRedVals.begin(), E = PossibleRedVals.end(); 14396 It != E; ++It) { 14397 PossibleRedValsVect.emplace_back(); 14398 auto RedValsVect = It->second.takeVector(); 14399 stable_sort(RedValsVect, llvm::less_second()); 14400 for (const std::pair<Value *, unsigned> &Data : RedValsVect) 14401 PossibleRedValsVect.back().append(Data.second, Data.first); 14402 } 14403 stable_sort(PossibleRedValsVect, [](const auto &P1, const auto &P2) { 14404 return P1.size() > P2.size(); 14405 }); 14406 int NewIdx = -1; 14407 for (ArrayRef<Value *> Data : PossibleRedValsVect) { 14408 if (isGoodForReduction(Data) || 14409 (isa<LoadInst>(Data.front()) && NewIdx >= 0 && 14410 isa<LoadInst>(ReducedVals[NewIdx].front()) && 14411 getUnderlyingObject( 14412 cast<LoadInst>(Data.front())->getPointerOperand()) == 14413 getUnderlyingObject(cast<LoadInst>(ReducedVals[NewIdx].front()) 14414 ->getPointerOperand()))) { 14415 if (NewIdx < 0) { 14416 NewIdx = ReducedVals.size(); 14417 ReducedVals.emplace_back(); 14418 } 14419 if (DoNotReverseVals.contains(Data.front())) 14420 ReducedVals[NewIdx].append(Data.begin(), Data.end()); 14421 else 14422 ReducedVals[NewIdx].append(Data.rbegin(), Data.rend()); 14423 } else { 14424 ReducedVals.emplace_back().append(Data.rbegin(), Data.rend()); 14425 } 14426 } 14427 } 14428 // Sort the reduced values by number of same/alternate opcode and/or pointer 14429 // operand. 14430 stable_sort(ReducedVals, [](ArrayRef<Value *> P1, ArrayRef<Value *> P2) { 14431 return P1.size() > P2.size(); 14432 }); 14433 return true; 14434 } 14435 14436 /// Attempt to vectorize the tree found by matchAssociativeReduction. 14437 Value *tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI, 14438 const TargetLibraryInfo &TLI) { 14439 constexpr int ReductionLimit = 4; 14440 constexpr unsigned RegMaxNumber = 4; 14441 constexpr unsigned RedValsMaxNumber = 128; 14442 // If there are a sufficient number of reduction values, reduce 14443 // to a nearby power-of-2. We can safely generate oversized 14444 // vectors and rely on the backend to split them to legal sizes. 
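// Illustrative example (hypothetical count): with 13 usable reduced values
// the code below starts from ReduxWidth = bit_floor(13) = 8 and slides an
// 8-wide window over the candidates; if no 8-wide attempt succeeds, the
// width is halved to 4, the smallest width tried (ReductionLimit).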
14445 unsigned NumReducedVals = 14446 std::accumulate(ReducedVals.begin(), ReducedVals.end(), 0, 14447 [](unsigned Num, ArrayRef<Value *> Vals) -> unsigned { 14448 if (!isGoodForReduction(Vals)) 14449 return Num; 14450 return Num + Vals.size(); 14451 }); 14452 if (NumReducedVals < ReductionLimit && 14453 (!AllowHorRdxIdenityOptimization || 14454 all_of(ReducedVals, [](ArrayRef<Value *> RedV) { 14455 return RedV.size() < 2 || !allConstant(RedV) || !isSplat(RedV); 14456 }))) { 14457 for (ReductionOpsType &RdxOps : ReductionOps) 14458 for (Value *RdxOp : RdxOps) 14459 V.analyzedReductionRoot(cast<Instruction>(RdxOp)); 14460 return nullptr; 14461 } 14462 14463 IRBuilder<> Builder(cast<Instruction>(ReductionRoot)); 14464 14465 // Track the reduced values in case if they are replaced by extractelement 14466 // because of the vectorization. 14467 DenseMap<Value *, WeakTrackingVH> TrackedVals( 14468 ReducedVals.size() * ReducedVals.front().size() + ExtraArgs.size()); 14469 BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues; 14470 SmallVector<std::pair<Value *, Value *>> ReplacedExternals; 14471 ExternallyUsedValues.reserve(ExtraArgs.size() + 1); 14472 // The same extra argument may be used several times, so log each attempt 14473 // to use it. 14474 for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) { 14475 assert(Pair.first && "DebugLoc must be set."); 14476 ExternallyUsedValues[Pair.second].push_back(Pair.first); 14477 TrackedVals.try_emplace(Pair.second, Pair.second); 14478 } 14479 14480 // The compare instruction of a min/max is the insertion point for new 14481 // instructions and may be replaced with a new compare instruction. 14482 auto &&GetCmpForMinMaxReduction = [](Instruction *RdxRootInst) { 14483 assert(isa<SelectInst>(RdxRootInst) && 14484 "Expected min/max reduction to have select root instruction"); 14485 Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition(); 14486 assert(isa<Instruction>(ScalarCond) && 14487 "Expected min/max reduction to have compare condition"); 14488 return cast<Instruction>(ScalarCond); 14489 }; 14490 14491 // Return new VectorizedTree, based on previous value. 14492 auto GetNewVectorizedTree = [&](Value *VectorizedTree, Value *Res) { 14493 if (VectorizedTree) { 14494 // Update the final value in the reduction. 14495 Builder.SetCurrentDebugLocation( 14496 cast<Instruction>(ReductionOps.front().front())->getDebugLoc()); 14497 if ((isa<PoisonValue>(VectorizedTree) && !isa<PoisonValue>(Res)) || 14498 (isGuaranteedNotToBePoison(Res) && 14499 !isGuaranteedNotToBePoison(VectorizedTree))) { 14500 auto It = ReducedValsToOps.find(Res); 14501 if (It != ReducedValsToOps.end() && 14502 any_of(It->getSecond(), 14503 [](Instruction *I) { return isBoolLogicOp(I); })) 14504 std::swap(VectorizedTree, Res); 14505 } 14506 14507 return createOp(Builder, RdxKind, VectorizedTree, Res, "op.rdx", 14508 ReductionOps); 14509 } 14510 // Initialize the final value in the reduction. 14511 return Res; 14512 }; 14513 bool AnyBoolLogicOp = 14514 any_of(ReductionOps.back(), [](Value *V) { 14515 return isBoolLogicOp(cast<Instruction>(V)); 14516 }); 14517 // The reduction root is used as the insertion point for new instructions, 14518 // so set it as externally used to prevent it from being deleted. 
14519 ExternallyUsedValues[ReductionRoot]; 14520 SmallDenseSet<Value *> IgnoreList(ReductionOps.size() * 14521 ReductionOps.front().size()); 14522 for (ReductionOpsType &RdxOps : ReductionOps) 14523 for (Value *RdxOp : RdxOps) { 14524 if (!RdxOp) 14525 continue; 14526 IgnoreList.insert(RdxOp); 14527 } 14528 // Intersect the fast-math-flags from all reduction operations. 14529 FastMathFlags RdxFMF; 14530 RdxFMF.set(); 14531 for (Value *U : IgnoreList) 14532 if (auto *FPMO = dyn_cast<FPMathOperator>(U)) 14533 RdxFMF &= FPMO->getFastMathFlags(); 14534 bool IsCmpSelMinMax = isCmpSelMinMax(cast<Instruction>(ReductionRoot)); 14535 14536 // Need to track reduced vals, they may be changed during vectorization of 14537 // subvectors. 14538 for (ArrayRef<Value *> Candidates : ReducedVals) 14539 for (Value *V : Candidates) 14540 TrackedVals.try_emplace(V, V); 14541 14542 DenseMap<Value *, unsigned> VectorizedVals(ReducedVals.size()); 14543 // List of the values that were reduced in other trees as part of gather 14544 // nodes and thus requiring extract if fully vectorized in other trees. 14545 SmallPtrSet<Value *, 4> RequiredExtract; 14546 Value *VectorizedTree = nullptr; 14547 bool CheckForReusedReductionOps = false; 14548 // Try to vectorize elements based on their type. 14549 for (unsigned I = 0, E = ReducedVals.size(); I < E; ++I) { 14550 ArrayRef<Value *> OrigReducedVals = ReducedVals[I]; 14551 InstructionsState S = getSameOpcode(OrigReducedVals, TLI); 14552 SmallVector<Value *> Candidates; 14553 Candidates.reserve(2 * OrigReducedVals.size()); 14554 DenseMap<Value *, Value *> TrackedToOrig(2 * OrigReducedVals.size()); 14555 for (unsigned Cnt = 0, Sz = OrigReducedVals.size(); Cnt < Sz; ++Cnt) { 14556 Value *RdxVal = TrackedVals.find(OrigReducedVals[Cnt])->second; 14557 // Check if the reduction value was not overriden by the extractelement 14558 // instruction because of the vectorization and exclude it, if it is not 14559 // compatible with other values. 14560 // Also check if the instruction was folded to constant/other value. 14561 auto *Inst = dyn_cast<Instruction>(RdxVal); 14562 if ((Inst && isVectorLikeInstWithConstOps(Inst) && 14563 (!S.getOpcode() || !S.isOpcodeOrAlt(Inst))) || 14564 (S.getOpcode() && !Inst)) 14565 continue; 14566 Candidates.push_back(RdxVal); 14567 TrackedToOrig.try_emplace(RdxVal, OrigReducedVals[Cnt]); 14568 } 14569 bool ShuffledExtracts = false; 14570 // Try to handle shuffled extractelements. 14571 if (S.getOpcode() == Instruction::ExtractElement && !S.isAltShuffle() && 14572 I + 1 < E) { 14573 InstructionsState NextS = getSameOpcode(ReducedVals[I + 1], TLI); 14574 if (NextS.getOpcode() == Instruction::ExtractElement && 14575 !NextS.isAltShuffle()) { 14576 SmallVector<Value *> CommonCandidates(Candidates); 14577 for (Value *RV : ReducedVals[I + 1]) { 14578 Value *RdxVal = TrackedVals.find(RV)->second; 14579 // Check if the reduction value was not overriden by the 14580 // extractelement instruction because of the vectorization and 14581 // exclude it, if it is not compatible with other values. 14582 if (auto *Inst = dyn_cast<Instruction>(RdxVal)) 14583 if (!NextS.getOpcode() || !NextS.isOpcodeOrAlt(Inst)) 14584 continue; 14585 CommonCandidates.push_back(RdxVal); 14586 TrackedToOrig.try_emplace(RdxVal, RV); 14587 } 14588 SmallVector<int> Mask; 14589 if (isFixedVectorShuffle(CommonCandidates, Mask)) { 14590 ++I; 14591 Candidates.swap(CommonCandidates); 14592 ShuffledExtracts = true; 14593 } 14594 } 14595 } 14596 14597 // Emit code for constant values. 
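// Illustrative example (hypothetical values): if the whole candidate group
// is constant, e.g. {2, 3, 5} for an integer add reduction, the loop below
// simply folds it via createOp() into the constant 10 instead of building a
// vectorization tree for it.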
14598 if (AllowHorRdxIdenityOptimization && Candidates.size() > 1 && 14599 allConstant(Candidates)) { 14600 Value *Res = Candidates.front(); 14601 ++VectorizedVals.try_emplace(Candidates.front(), 0).first->getSecond(); 14602 for (Value *VC : ArrayRef(Candidates).drop_front()) { 14603 Res = createOp(Builder, RdxKind, Res, VC, "const.rdx", ReductionOps); 14604 ++VectorizedVals.try_emplace(VC, 0).first->getSecond(); 14605 if (auto *ResI = dyn_cast<Instruction>(Res)) 14606 V.analyzedReductionRoot(ResI); 14607 } 14608 VectorizedTree = GetNewVectorizedTree(VectorizedTree, Res); 14609 continue; 14610 } 14611 14612 unsigned NumReducedVals = Candidates.size(); 14613 if (NumReducedVals < ReductionLimit && 14614 (NumReducedVals < 2 || !AllowHorRdxIdenityOptimization || 14615 !isSplat(Candidates))) 14616 continue; 14617 14618 // Check if we support repeated scalar values processing (optimization of 14619 // original scalar identity operations on matched horizontal reductions). 14620 IsSupportedHorRdxIdentityOp = 14621 AllowHorRdxIdenityOptimization && RdxKind != RecurKind::Mul && 14622 RdxKind != RecurKind::FMul && RdxKind != RecurKind::FMulAdd; 14623 // Gather same values. 14624 MapVector<Value *, unsigned> SameValuesCounter; 14625 if (IsSupportedHorRdxIdentityOp) 14626 for (Value *V : Candidates) 14627 ++SameValuesCounter.insert(std::make_pair(V, 0)).first->second; 14628 // Used to check if the reduced values used same number of times. In this 14629 // case the compiler may produce better code. E.g. if reduced values are 14630 // aabbccdd (8 x values), then the first node of the tree will have a node 14631 // for 4 x abcd + shuffle <4 x abcd>, <0, 0, 1, 1, 2, 2, 3, 3>. 14632 // Plus, the final reduction will be performed on <8 x aabbccdd>. 14633 // Instead compiler may build <4 x abcd> tree immediately, + reduction (4 14634 // x abcd) * 2. 14635 // Currently it only handles add/fadd/xor. and/or/min/max do not require 14636 // this analysis, other operations may require an extra estimation of 14637 // the profitability. 14638 bool SameScaleFactor = false; 14639 bool OptReusedScalars = IsSupportedHorRdxIdentityOp && 14640 SameValuesCounter.size() != Candidates.size(); 14641 if (OptReusedScalars) { 14642 SameScaleFactor = 14643 (RdxKind == RecurKind::Add || RdxKind == RecurKind::FAdd || 14644 RdxKind == RecurKind::Xor) && 14645 all_of(drop_begin(SameValuesCounter), 14646 [&SameValuesCounter](const std::pair<Value *, unsigned> &P) { 14647 return P.second == SameValuesCounter.front().second; 14648 }); 14649 Candidates.resize(SameValuesCounter.size()); 14650 transform(SameValuesCounter, Candidates.begin(), 14651 [](const auto &P) { return P.first; }); 14652 NumReducedVals = Candidates.size(); 14653 // Have a reduction of the same element. 
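// Illustrative example (hypothetical): an add reduction of v + v + v + v
// collapses to a single candidate with a reuse count of 4, and
// emitScaleForReusedOps() below is expected to materialize it as a multiply
// of v by 4 rather than emitting a vector reduction.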
14654 if (NumReducedVals == 1) {
14655 Value *OrigV = TrackedToOrig.find(Candidates.front())->second;
14656 unsigned Cnt = SameValuesCounter.lookup(OrigV);
14657 Value *RedVal =
14658 emitScaleForReusedOps(Candidates.front(), Builder, Cnt);
14659 VectorizedTree = GetNewVectorizedTree(VectorizedTree, RedVal);
14660 VectorizedVals.try_emplace(OrigV, Cnt);
14661 continue;
14662 }
14663 }
14664
14665 unsigned MaxVecRegSize = V.getMaxVecRegSize();
14666 unsigned EltSize = V.getVectorElementSize(Candidates[0]);
14667 unsigned MaxElts =
14668 RegMaxNumber * llvm::bit_floor(MaxVecRegSize / EltSize);
14669
14670 unsigned ReduxWidth = std::min<unsigned>(
14671 llvm::bit_floor(NumReducedVals), std::max(RedValsMaxNumber, MaxElts));
14672 unsigned Start = 0;
14673 unsigned Pos = Start;
14674 // Restarts the vectorization attempt with a lower vector factor.
14675 unsigned PrevReduxWidth = ReduxWidth;
14676 bool CheckForReusedReductionOpsLocal = false;
14677 auto &&AdjustReducedVals = [&Pos, &Start, &ReduxWidth, NumReducedVals,
14678 &CheckForReusedReductionOpsLocal,
14679 &PrevReduxWidth, &V,
14680 &IgnoreList](bool IgnoreVL = false) {
14681 bool IsAnyRedOpGathered = !IgnoreVL && V.isAnyGathered(IgnoreList);
14682 if (!CheckForReusedReductionOpsLocal && PrevReduxWidth == ReduxWidth) {
14683 // Check if any of the reduction ops are gathered. If so, it is worth
14684 // trying again with a smaller number of reduction ops.
14685 CheckForReusedReductionOpsLocal |= IsAnyRedOpGathered;
14686 }
14687 ++Pos;
14688 if (Pos < NumReducedVals - ReduxWidth + 1)
14689 return IsAnyRedOpGathered;
14690 Pos = Start;
14691 ReduxWidth /= 2;
14692 return IsAnyRedOpGathered;
14693 };
14694 bool AnyVectorized = false;
14695 while (Pos < NumReducedVals - ReduxWidth + 1 &&
14696 ReduxWidth >= ReductionLimit) {
14697 // Dependency in the tree of the reduction ops - drop this attempt, try
14698 // again later.
14699 if (CheckForReusedReductionOpsLocal && PrevReduxWidth != ReduxWidth &&
14700 Start == 0) {
14701 CheckForReusedReductionOps = true;
14702 break;
14703 }
14704 PrevReduxWidth = ReduxWidth;
14705 ArrayRef<Value *> VL(std::next(Candidates.begin(), Pos), ReduxWidth);
14706 // Being analyzed already - skip.
14707 if (V.areAnalyzedReductionVals(VL)) {
14708 (void)AdjustReducedVals(/*IgnoreVL=*/true);
14709 continue;
14710 }
14711 // Early exit if any of the reduction values were deleted during
14712 // previous vectorization attempts.
14713 if (any_of(VL, [&V](Value *RedVal) {
14714 auto *RedValI = dyn_cast<Instruction>(RedVal);
14715 if (!RedValI)
14716 return false;
14717 return V.isDeleted(RedValI);
14718 }))
14719 break;
14720 V.buildTree(VL, IgnoreList);
14721 if (V.isTreeTinyAndNotFullyVectorizable(/*ForReduction=*/true)) {
14722 if (!AdjustReducedVals())
14723 V.analyzedReductionVals(VL);
14724 continue;
14725 }
14726 if (V.isLoadCombineReductionCandidate(RdxKind)) {
14727 if (!AdjustReducedVals())
14728 V.analyzedReductionVals(VL);
14729 continue;
14730 }
14731 V.reorderTopToBottom();
14732 // No need to reorder the root node at all.
14733 V.reorderBottomToTop(/*IgnoreReorder=*/true);
14734 // Keep other extracted reduction values, if they are used in the
14735 // vectorization trees.
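// E.g., if a value %a from another candidate slice ends up inside the tree
// built for this slice, registering it below as externally used guarantees an
// extractelement is generated for it, so the other slice can still be reduced
// later (hypothetical value name, for illustration).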
14736 BoUpSLP::ExtraValueToDebugLocsMap LocalExternallyUsedValues( 14737 ExternallyUsedValues); 14738 for (unsigned Cnt = 0, Sz = ReducedVals.size(); Cnt < Sz; ++Cnt) { 14739 if (Cnt == I || (ShuffledExtracts && Cnt == I - 1)) 14740 continue; 14741 for (Value *V : ReducedVals[Cnt]) 14742 if (isa<Instruction>(V)) 14743 LocalExternallyUsedValues[TrackedVals[V]]; 14744 } 14745 if (!IsSupportedHorRdxIdentityOp) { 14746 // Number of uses of the candidates in the vector of values. 14747 assert(SameValuesCounter.empty() && 14748 "Reused values counter map is not empty"); 14749 for (unsigned Cnt = 0; Cnt < NumReducedVals; ++Cnt) { 14750 if (Cnt >= Pos && Cnt < Pos + ReduxWidth) 14751 continue; 14752 Value *V = Candidates[Cnt]; 14753 Value *OrigV = TrackedToOrig.find(V)->second; 14754 ++SameValuesCounter[OrigV]; 14755 } 14756 } 14757 SmallPtrSet<Value *, 4> VLScalars(VL.begin(), VL.end()); 14758 // Gather externally used values. 14759 SmallPtrSet<Value *, 4> Visited; 14760 for (unsigned Cnt = 0; Cnt < NumReducedVals; ++Cnt) { 14761 if (Cnt >= Pos && Cnt < Pos + ReduxWidth) 14762 continue; 14763 Value *RdxVal = Candidates[Cnt]; 14764 if (!Visited.insert(RdxVal).second) 14765 continue; 14766 // Check if the scalar was vectorized as part of the vectorization 14767 // tree but not the top node. 14768 if (!VLScalars.contains(RdxVal) && V.isVectorized(RdxVal)) { 14769 LocalExternallyUsedValues[RdxVal]; 14770 continue; 14771 } 14772 Value *OrigV = TrackedToOrig.find(RdxVal)->second; 14773 unsigned NumOps = 14774 VectorizedVals.lookup(RdxVal) + SameValuesCounter[OrigV]; 14775 if (NumOps != ReducedValsToOps.find(OrigV)->second.size()) 14776 LocalExternallyUsedValues[RdxVal]; 14777 } 14778 // Do not need the list of reused scalars in regular mode anymore. 14779 if (!IsSupportedHorRdxIdentityOp) 14780 SameValuesCounter.clear(); 14781 for (Value *RdxVal : VL) 14782 if (RequiredExtract.contains(RdxVal)) 14783 LocalExternallyUsedValues[RdxVal]; 14784 // Update LocalExternallyUsedValues for the scalar, replaced by 14785 // extractelement instructions. 14786 for (const std::pair<Value *, Value *> &Pair : ReplacedExternals) { 14787 auto *It = ExternallyUsedValues.find(Pair.first); 14788 if (It == ExternallyUsedValues.end()) 14789 continue; 14790 LocalExternallyUsedValues[Pair.second].append(It->second); 14791 } 14792 V.buildExternalUses(LocalExternallyUsedValues); 14793 14794 V.computeMinimumValueSizes(); 14795 14796 // Estimate cost. 14797 InstructionCost TreeCost = V.getTreeCost(VL); 14798 InstructionCost ReductionCost = 14799 getReductionCost(TTI, VL, IsCmpSelMinMax, ReduxWidth, RdxFMF); 14800 InstructionCost Cost = TreeCost + ReductionCost; 14801 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost 14802 << " for reduction\n"); 14803 if (!Cost.isValid()) 14804 return nullptr; 14805 if (Cost >= -SLPCostThreshold) { 14806 V.getORE()->emit([&]() { 14807 return OptimizationRemarkMissed( 14808 SV_NAME, "HorSLPNotBeneficial", 14809 ReducedValsToOps.find(VL[0])->second.front()) 14810 << "Vectorizing horizontal reduction is possible " 14811 << "but not beneficial with cost " << ore::NV("Cost", Cost) 14812 << " and threshold " 14813 << ore::NV("Threshold", -SLPCostThreshold); 14814 }); 14815 if (!AdjustReducedVals()) 14816 V.analyzedReductionVals(VL); 14817 continue; 14818 } 14819 14820 LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" 14821 << Cost << ". 
(HorRdx)\n"); 14822 V.getORE()->emit([&]() { 14823 return OptimizationRemark( 14824 SV_NAME, "VectorizedHorizontalReduction", 14825 ReducedValsToOps.find(VL[0])->second.front()) 14826 << "Vectorized horizontal reduction with cost " 14827 << ore::NV("Cost", Cost) << " and with tree size " 14828 << ore::NV("TreeSize", V.getTreeSize()); 14829 }); 14830 14831 Builder.setFastMathFlags(RdxFMF); 14832 14833 // Emit a reduction. If the root is a select (min/max idiom), the insert 14834 // point is the compare condition of that select. 14835 Instruction *RdxRootInst = cast<Instruction>(ReductionRoot); 14836 Instruction *InsertPt = RdxRootInst; 14837 if (IsCmpSelMinMax) 14838 InsertPt = GetCmpForMinMaxReduction(RdxRootInst); 14839 14840 // Vectorize a tree. 14841 Value *VectorizedRoot = V.vectorizeTree(LocalExternallyUsedValues, 14842 ReplacedExternals, InsertPt); 14843 14844 Builder.SetInsertPoint(InsertPt); 14845 14846 // To prevent poison from leaking across what used to be sequential, 14847 // safe, scalar boolean logic operations, the reduction operand must be 14848 // frozen. 14849 if ((isBoolLogicOp(RdxRootInst) || 14850 (AnyBoolLogicOp && VL.size() != TrackedVals.size())) && 14851 !isGuaranteedNotToBePoison(VectorizedRoot)) 14852 VectorizedRoot = Builder.CreateFreeze(VectorizedRoot); 14853 14854 // Emit code to correctly handle reused reduced values, if required. 14855 if (OptReusedScalars && !SameScaleFactor) { 14856 VectorizedRoot = 14857 emitReusedOps(VectorizedRoot, Builder, V.getRootNodeScalars(), 14858 SameValuesCounter, TrackedToOrig); 14859 } 14860 14861 Value *ReducedSubTree = 14862 emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI); 14863 if (ReducedSubTree->getType() != VL.front()->getType()) { 14864 ReducedSubTree = Builder.CreateIntCast( 14865 ReducedSubTree, VL.front()->getType(), any_of(VL, [&](Value *R) { 14866 KnownBits Known = computeKnownBits( 14867 R, cast<Instruction>(ReductionOps.front().front()) 14868 ->getModule() 14869 ->getDataLayout()); 14870 return !Known.isNonNegative(); 14871 })); 14872 } 14873 14874 // Improved analysis for add/fadd/xor reductions with same scale factor 14875 // for all operands of reductions. We can emit scalar ops for them 14876 // instead. 14877 if (OptReusedScalars && SameScaleFactor) 14878 ReducedSubTree = emitScaleForReusedOps( 14879 ReducedSubTree, Builder, SameValuesCounter.front().second); 14880 14881 VectorizedTree = GetNewVectorizedTree(VectorizedTree, ReducedSubTree); 14882 // Count vectorized reduced values to exclude them from final reduction. 
14883 for (Value *RdxVal : VL) { 14884 Value *OrigV = TrackedToOrig.find(RdxVal)->second; 14885 if (IsSupportedHorRdxIdentityOp) { 14886 VectorizedVals.try_emplace(OrigV, SameValuesCounter[RdxVal]); 14887 continue; 14888 } 14889 ++VectorizedVals.try_emplace(OrigV, 0).first->getSecond(); 14890 if (!V.isVectorized(RdxVal)) 14891 RequiredExtract.insert(RdxVal); 14892 } 14893 Pos += ReduxWidth; 14894 Start = Pos; 14895 ReduxWidth = llvm::bit_floor(NumReducedVals - Pos); 14896 AnyVectorized = true; 14897 } 14898 if (OptReusedScalars && !AnyVectorized) { 14899 for (const std::pair<Value *, unsigned> &P : SameValuesCounter) { 14900 Value *RedVal = emitScaleForReusedOps(P.first, Builder, P.second); 14901 VectorizedTree = GetNewVectorizedTree(VectorizedTree, RedVal); 14902 Value *OrigV = TrackedToOrig.find(P.first)->second; 14903 VectorizedVals.try_emplace(OrigV, P.second); 14904 } 14905 continue; 14906 } 14907 } 14908 if (VectorizedTree) { 14909 // Reorder operands of bool logical op in the natural order to avoid 14910 // possible problem with poison propagation. If not possible to reorder 14911 // (both operands are originally RHS), emit an extra freeze instruction 14912 // for the LHS operand. 14913 // I.e., if we have original code like this: 14914 // RedOp1 = select i1 ?, i1 LHS, i1 false 14915 // RedOp2 = select i1 RHS, i1 ?, i1 false 14916 14917 // Then, we swap LHS/RHS to create a new op that matches the poison 14918 // semantics of the original code. 14919 14920 // If we have original code like this and both values could be poison: 14921 // RedOp1 = select i1 ?, i1 LHS, i1 false 14922 // RedOp2 = select i1 ?, i1 RHS, i1 false 14923 14924 // Then, we must freeze LHS in the new op. 14925 auto FixBoolLogicalOps = [&, VectorizedTree](Value *&LHS, Value *&RHS, 14926 Instruction *RedOp1, 14927 Instruction *RedOp2, 14928 bool InitStep) { 14929 if (!AnyBoolLogicOp) 14930 return; 14931 if (isBoolLogicOp(RedOp1) && 14932 ((!InitStep && LHS == VectorizedTree) || 14933 getRdxOperand(RedOp1, 0) == LHS || isGuaranteedNotToBePoison(LHS))) 14934 return; 14935 if (isBoolLogicOp(RedOp2) && ((!InitStep && RHS == VectorizedTree) || 14936 getRdxOperand(RedOp2, 0) == RHS || 14937 isGuaranteedNotToBePoison(RHS))) { 14938 std::swap(LHS, RHS); 14939 return; 14940 } 14941 if (LHS != VectorizedTree) 14942 LHS = Builder.CreateFreeze(LHS); 14943 }; 14944 // Finish the reduction. 14945 // Need to add extra arguments and not vectorized possible reduction 14946 // values. 14947 // Try to avoid dependencies between the scalar remainders after 14948 // reductions. 14949 auto FinalGen = 14950 [&](ArrayRef<std::pair<Instruction *, Value *>> InstVals, 14951 bool InitStep) { 14952 unsigned Sz = InstVals.size(); 14953 SmallVector<std::pair<Instruction *, Value *>> ExtraReds(Sz / 2 + 14954 Sz % 2); 14955 for (unsigned I = 0, E = (Sz / 2) * 2; I < E; I += 2) { 14956 Instruction *RedOp = InstVals[I + 1].first; 14957 Builder.SetCurrentDebugLocation(RedOp->getDebugLoc()); 14958 Value *RdxVal1 = InstVals[I].second; 14959 Value *StableRdxVal1 = RdxVal1; 14960 auto It1 = TrackedVals.find(RdxVal1); 14961 if (It1 != TrackedVals.end()) 14962 StableRdxVal1 = It1->second; 14963 Value *RdxVal2 = InstVals[I + 1].second; 14964 Value *StableRdxVal2 = RdxVal2; 14965 auto It2 = TrackedVals.find(RdxVal2); 14966 if (It2 != TrackedVals.end()) 14967 StableRdxVal2 = It2->second; 14968 // To prevent poison from leaking across what used to be 14969 // sequential, safe, scalar boolean logic operations, the 14970 // reduction operand must be frozen. 
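// E.g., in "select i1 %a, i1 %b, i1 false" a poison %b is never observed while
// %a is false; after reassociation that ordering guarantee is lost, which is
// why the call below may have to freeze the moved operand (illustrative
// values).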
14971 FixBoolLogicalOps(StableRdxVal1, StableRdxVal2, InstVals[I].first, 14972 RedOp, InitStep); 14973 Value *ExtraRed = createOp(Builder, RdxKind, StableRdxVal1, 14974 StableRdxVal2, "op.rdx", ReductionOps); 14975 ExtraReds[I / 2] = std::make_pair(InstVals[I].first, ExtraRed); 14976 } 14977 if (Sz % 2 == 1) 14978 ExtraReds[Sz / 2] = InstVals.back(); 14979 return ExtraReds; 14980 }; 14981 SmallVector<std::pair<Instruction *, Value *>> ExtraReductions; 14982 ExtraReductions.emplace_back(cast<Instruction>(ReductionRoot), 14983 VectorizedTree); 14984 SmallPtrSet<Value *, 8> Visited; 14985 for (ArrayRef<Value *> Candidates : ReducedVals) { 14986 for (Value *RdxVal : Candidates) { 14987 if (!Visited.insert(RdxVal).second) 14988 continue; 14989 unsigned NumOps = VectorizedVals.lookup(RdxVal); 14990 for (Instruction *RedOp : 14991 ArrayRef(ReducedValsToOps.find(RdxVal)->second) 14992 .drop_back(NumOps)) 14993 ExtraReductions.emplace_back(RedOp, RdxVal); 14994 } 14995 } 14996 for (auto &Pair : ExternallyUsedValues) { 14997 // Add each externally used value to the final reduction. 14998 for (auto *I : Pair.second) 14999 ExtraReductions.emplace_back(I, Pair.first); 15000 } 15001 // Iterate through all not-vectorized reduction values/extra arguments. 15002 bool InitStep = true; 15003 while (ExtraReductions.size() > 1) { 15004 VectorizedTree = ExtraReductions.front().second; 15005 SmallVector<std::pair<Instruction *, Value *>> NewReds = 15006 FinalGen(ExtraReductions, InitStep); 15007 ExtraReductions.swap(NewReds); 15008 InitStep = false; 15009 } 15010 VectorizedTree = ExtraReductions.front().second; 15011 15012 ReductionRoot->replaceAllUsesWith(VectorizedTree); 15013 15014 // The original scalar reduction is expected to have no remaining 15015 // uses outside the reduction tree itself. Assert that we got this 15016 // correct, replace internal uses with undef, and mark for eventual 15017 // deletion. 15018 #ifndef NDEBUG 15019 SmallSet<Value *, 4> IgnoreSet; 15020 for (ArrayRef<Value *> RdxOps : ReductionOps) 15021 IgnoreSet.insert(RdxOps.begin(), RdxOps.end()); 15022 #endif 15023 for (ArrayRef<Value *> RdxOps : ReductionOps) { 15024 for (Value *Ignore : RdxOps) { 15025 if (!Ignore) 15026 continue; 15027 #ifndef NDEBUG 15028 for (auto *U : Ignore->users()) { 15029 assert(IgnoreSet.count(U) && 15030 "All users must be either in the reduction ops list."); 15031 } 15032 #endif 15033 if (!Ignore->use_empty()) { 15034 Value *Undef = UndefValue::get(Ignore->getType()); 15035 Ignore->replaceAllUsesWith(Undef); 15036 } 15037 V.eraseInstruction(cast<Instruction>(Ignore)); 15038 } 15039 } 15040 } else if (!CheckForReusedReductionOps) { 15041 for (ReductionOpsType &RdxOps : ReductionOps) 15042 for (Value *RdxOp : RdxOps) 15043 V.analyzedReductionRoot(cast<Instruction>(RdxOp)); 15044 } 15045 return VectorizedTree; 15046 } 15047 15048 private: 15049 /// Calculate the cost of a reduction. 15050 InstructionCost getReductionCost(TargetTransformInfo *TTI, 15051 ArrayRef<Value *> ReducedVals, 15052 bool IsCmpSelMinMax, unsigned ReduxWidth, 15053 FastMathFlags FMF) { 15054 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 15055 Type *ScalarTy = ReducedVals.front()->getType(); 15056 FixedVectorType *VectorTy = FixedVectorType::get(ScalarTy, ReduxWidth); 15057 InstructionCost VectorCost = 0, ScalarCost; 15058 // If all of the reduced values are constant, the vector cost is 0, since 15059 // the reduction value can be calculated at the compile time. 
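// As a rough sketch, for an i32 add reduction of width 8 the returned value is
// approximately the cost of a single vector reduce-add over <8 x i32> minus
// the cost of 7 scalar adds; a negative result therefore means the vector form
// is expected to be cheaper (numbers illustrative, the real query is
// target-dependent).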
15060 bool AllConsts = allConstant(ReducedVals); 15061 auto EvaluateScalarCost = [&](function_ref<InstructionCost()> GenCostFn) { 15062 InstructionCost Cost = 0; 15063 // Scalar cost is repeated for N-1 elements. 15064 int Cnt = ReducedVals.size(); 15065 for (Value *RdxVal : ReducedVals) { 15066 if (Cnt == 1) 15067 break; 15068 --Cnt; 15069 if (RdxVal->hasNUsesOrMore(IsCmpSelMinMax ? 3 : 2)) { 15070 Cost += GenCostFn(); 15071 continue; 15072 } 15073 InstructionCost ScalarCost = 0; 15074 for (User *U : RdxVal->users()) { 15075 auto *RdxOp = cast<Instruction>(U); 15076 if (hasRequiredNumberOfUses(IsCmpSelMinMax, RdxOp)) { 15077 ScalarCost += TTI->getInstructionCost(RdxOp, CostKind); 15078 continue; 15079 } 15080 ScalarCost = InstructionCost::getInvalid(); 15081 break; 15082 } 15083 if (ScalarCost.isValid()) 15084 Cost += ScalarCost; 15085 else 15086 Cost += GenCostFn(); 15087 } 15088 return Cost; 15089 }; 15090 switch (RdxKind) { 15091 case RecurKind::Add: 15092 case RecurKind::Mul: 15093 case RecurKind::Or: 15094 case RecurKind::And: 15095 case RecurKind::Xor: 15096 case RecurKind::FAdd: 15097 case RecurKind::FMul: { 15098 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind); 15099 if (!AllConsts) 15100 VectorCost = 15101 TTI->getArithmeticReductionCost(RdxOpcode, VectorTy, FMF, CostKind); 15102 ScalarCost = EvaluateScalarCost([&]() { 15103 return TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy, CostKind); 15104 }); 15105 break; 15106 } 15107 case RecurKind::FMax: 15108 case RecurKind::FMin: 15109 case RecurKind::FMaximum: 15110 case RecurKind::FMinimum: 15111 case RecurKind::SMax: 15112 case RecurKind::SMin: 15113 case RecurKind::UMax: 15114 case RecurKind::UMin: { 15115 Intrinsic::ID Id = getMinMaxReductionIntrinsicOp(RdxKind); 15116 if (!AllConsts) 15117 VectorCost = TTI->getMinMaxReductionCost(Id, VectorTy, FMF, CostKind); 15118 ScalarCost = EvaluateScalarCost([&]() { 15119 IntrinsicCostAttributes ICA(Id, ScalarTy, {ScalarTy, ScalarTy}, FMF); 15120 return TTI->getIntrinsicInstrCost(ICA, CostKind); 15121 }); 15122 break; 15123 } 15124 default: 15125 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 15126 } 15127 15128 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost 15129 << " for reduction of " << shortBundleName(ReducedVals) 15130 << " (It is a splitting reduction)\n"); 15131 return VectorCost - ScalarCost; 15132 } 15133 15134 /// Emit a horizontal reduction of the vectorized value. 15135 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder, 15136 unsigned ReduxWidth, const TargetTransformInfo *TTI) { 15137 assert(VectorizedValue && "Need to have a vectorized tree node"); 15138 assert(isPowerOf2_32(ReduxWidth) && 15139 "We only handle power-of-two reductions for now"); 15140 assert(RdxKind != RecurKind::FMulAdd && 15141 "A call to the llvm.fmuladd intrinsic is not handled yet"); 15142 15143 ++NumVectorInstructions; 15144 return createSimpleTargetReduction(Builder, VectorizedValue, RdxKind); 15145 } 15146 15147 /// Emits optimized code for unique scalar value reused \p Cnt times. 
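/// For example (values illustrative): an add reduction of %v repeated 4 times
/// becomes "mul %v, 4", an fadd reduction becomes "fmul %v, 4.0", and for xor
/// the result is %v itself when the count is odd and zero when it is even.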
15148 Value *emitScaleForReusedOps(Value *VectorizedValue, IRBuilderBase &Builder, 15149 unsigned Cnt) { 15150 assert(IsSupportedHorRdxIdentityOp && 15151 "The optimization of matched scalar identity horizontal reductions " 15152 "must be supported."); 15153 switch (RdxKind) { 15154 case RecurKind::Add: { 15155 // res = mul vv, n 15156 Value *Scale = ConstantInt::get(VectorizedValue->getType(), Cnt); 15157 LLVM_DEBUG(dbgs() << "SLP: Add (to-mul) " << Cnt << "of " 15158 << VectorizedValue << ". (HorRdx)\n"); 15159 return Builder.CreateMul(VectorizedValue, Scale); 15160 } 15161 case RecurKind::Xor: { 15162 // res = n % 2 ? 0 : vv 15163 LLVM_DEBUG(dbgs() << "SLP: Xor " << Cnt << "of " << VectorizedValue 15164 << ". (HorRdx)\n"); 15165 if (Cnt % 2 == 0) 15166 return Constant::getNullValue(VectorizedValue->getType()); 15167 return VectorizedValue; 15168 } 15169 case RecurKind::FAdd: { 15170 // res = fmul v, n 15171 Value *Scale = ConstantFP::get(VectorizedValue->getType(), Cnt); 15172 LLVM_DEBUG(dbgs() << "SLP: FAdd (to-fmul) " << Cnt << "of " 15173 << VectorizedValue << ". (HorRdx)\n"); 15174 return Builder.CreateFMul(VectorizedValue, Scale); 15175 } 15176 case RecurKind::And: 15177 case RecurKind::Or: 15178 case RecurKind::SMax: 15179 case RecurKind::SMin: 15180 case RecurKind::UMax: 15181 case RecurKind::UMin: 15182 case RecurKind::FMax: 15183 case RecurKind::FMin: 15184 case RecurKind::FMaximum: 15185 case RecurKind::FMinimum: 15186 // res = vv 15187 return VectorizedValue; 15188 case RecurKind::Mul: 15189 case RecurKind::FMul: 15190 case RecurKind::FMulAdd: 15191 case RecurKind::IAnyOf: 15192 case RecurKind::FAnyOf: 15193 case RecurKind::None: 15194 llvm_unreachable("Unexpected reduction kind for repeated scalar."); 15195 } 15196 return nullptr; 15197 } 15198 15199 /// Emits actual operation for the scalar identity values, found during 15200 /// horizontal reduction analysis. 15201 Value *emitReusedOps(Value *VectorizedValue, IRBuilderBase &Builder, 15202 ArrayRef<Value *> VL, 15203 const MapVector<Value *, unsigned> &SameValuesCounter, 15204 const DenseMap<Value *, Value *> &TrackedToOrig) { 15205 assert(IsSupportedHorRdxIdentityOp && 15206 "The optimization of matched scalar identity horizontal reductions " 15207 "must be supported."); 15208 switch (RdxKind) { 15209 case RecurKind::Add: { 15210 // root = mul prev_root, <1, 1, n, 1> 15211 SmallVector<Constant *> Vals; 15212 for (Value *V : VL) { 15213 unsigned Cnt = SameValuesCounter.lookup(TrackedToOrig.find(V)->second); 15214 Vals.push_back(ConstantInt::get(V->getType(), Cnt, /*IsSigned=*/false)); 15215 } 15216 auto *Scale = ConstantVector::get(Vals); 15217 LLVM_DEBUG(dbgs() << "SLP: Add (to-mul) " << Scale << "of " 15218 << VectorizedValue << ". (HorRdx)\n"); 15219 return Builder.CreateMul(VectorizedValue, Scale); 15220 } 15221 case RecurKind::And: 15222 case RecurKind::Or: 15223 // No need for multiple or/and(s). 15224 LLVM_DEBUG(dbgs() << "SLP: And/or of same " << VectorizedValue 15225 << ". (HorRdx)\n"); 15226 return VectorizedValue; 15227 case RecurKind::SMax: 15228 case RecurKind::SMin: 15229 case RecurKind::UMax: 15230 case RecurKind::UMin: 15231 case RecurKind::FMax: 15232 case RecurKind::FMin: 15233 case RecurKind::FMaximum: 15234 case RecurKind::FMinimum: 15235 // No need for multiple min/max(s) of the same value. 15236 LLVM_DEBUG(dbgs() << "SLP: Max/min of same " << VectorizedValue 15237 << ". 
(HorRdx)\n"); 15238 return VectorizedValue; 15239 case RecurKind::Xor: { 15240 // Replace values with even number of repeats with 0, since 15241 // x xor x = 0. 15242 // root = shuffle prev_root, zeroinitalizer, <0, 1, 2, vf, 4, vf, 5, 6, 15243 // 7>, if elements 4th and 6th elements have even number of repeats. 15244 SmallVector<int> Mask( 15245 cast<FixedVectorType>(VectorizedValue->getType())->getNumElements(), 15246 PoisonMaskElem); 15247 std::iota(Mask.begin(), Mask.end(), 0); 15248 bool NeedShuffle = false; 15249 for (unsigned I = 0, VF = VL.size(); I < VF; ++I) { 15250 Value *V = VL[I]; 15251 unsigned Cnt = SameValuesCounter.lookup(TrackedToOrig.find(V)->second); 15252 if (Cnt % 2 == 0) { 15253 Mask[I] = VF; 15254 NeedShuffle = true; 15255 } 15256 } 15257 LLVM_DEBUG(dbgs() << "SLP: Xor <"; for (int I 15258 : Mask) dbgs() 15259 << I << " "; 15260 dbgs() << "> of " << VectorizedValue << ". (HorRdx)\n"); 15261 if (NeedShuffle) 15262 VectorizedValue = Builder.CreateShuffleVector( 15263 VectorizedValue, 15264 ConstantVector::getNullValue(VectorizedValue->getType()), Mask); 15265 return VectorizedValue; 15266 } 15267 case RecurKind::FAdd: { 15268 // root = fmul prev_root, <1.0, 1.0, n.0, 1.0> 15269 SmallVector<Constant *> Vals; 15270 for (Value *V : VL) { 15271 unsigned Cnt = SameValuesCounter.lookup(TrackedToOrig.find(V)->second); 15272 Vals.push_back(ConstantFP::get(V->getType(), Cnt)); 15273 } 15274 auto *Scale = ConstantVector::get(Vals); 15275 return Builder.CreateFMul(VectorizedValue, Scale); 15276 } 15277 case RecurKind::Mul: 15278 case RecurKind::FMul: 15279 case RecurKind::FMulAdd: 15280 case RecurKind::IAnyOf: 15281 case RecurKind::FAnyOf: 15282 case RecurKind::None: 15283 llvm_unreachable("Unexpected reduction kind for reused scalars."); 15284 } 15285 return nullptr; 15286 } 15287 }; 15288 } // end anonymous namespace 15289 15290 static std::optional<unsigned> getAggregateSize(Instruction *InsertInst) { 15291 if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) 15292 return cast<FixedVectorType>(IE->getType())->getNumElements(); 15293 15294 unsigned AggregateSize = 1; 15295 auto *IV = cast<InsertValueInst>(InsertInst); 15296 Type *CurrentType = IV->getType(); 15297 do { 15298 if (auto *ST = dyn_cast<StructType>(CurrentType)) { 15299 for (auto *Elt : ST->elements()) 15300 if (Elt != ST->getElementType(0)) // check homogeneity 15301 return std::nullopt; 15302 AggregateSize *= ST->getNumElements(); 15303 CurrentType = ST->getElementType(0); 15304 } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) { 15305 AggregateSize *= AT->getNumElements(); 15306 CurrentType = AT->getElementType(); 15307 } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) { 15308 AggregateSize *= VT->getNumElements(); 15309 return AggregateSize; 15310 } else if (CurrentType->isSingleValueType()) { 15311 return AggregateSize; 15312 } else { 15313 return std::nullopt; 15314 } 15315 } while (true); 15316 } 15317 15318 static void findBuildAggregate_rec(Instruction *LastInsertInst, 15319 TargetTransformInfo *TTI, 15320 SmallVectorImpl<Value *> &BuildVectorOpds, 15321 SmallVectorImpl<Value *> &InsertElts, 15322 unsigned OperandOffset) { 15323 do { 15324 Value *InsertedOperand = LastInsertInst->getOperand(1); 15325 std::optional<unsigned> OperandIndex = 15326 getInsertIndex(LastInsertInst, OperandOffset); 15327 if (!OperandIndex) 15328 return; 15329 if (isa<InsertElementInst, InsertValueInst>(InsertedOperand)) { 15330 findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI, 15331 
BuildVectorOpds, InsertElts, *OperandIndex); 15332 15333 } else { 15334 BuildVectorOpds[*OperandIndex] = InsertedOperand; 15335 InsertElts[*OperandIndex] = LastInsertInst; 15336 } 15337 LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0)); 15338 } while (LastInsertInst != nullptr && 15339 isa<InsertValueInst, InsertElementInst>(LastInsertInst) && 15340 LastInsertInst->hasOneUse()); 15341 } 15342 15343 /// Recognize construction of vectors like 15344 /// %ra = insertelement <4 x float> poison, float %s0, i32 0 15345 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 15346 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 15347 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 15348 /// starting from the last insertelement or insertvalue instruction. 15349 /// 15350 /// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>}, 15351 /// {{float, float}, {float, float}}, [2 x {float, float}] and so on. 15352 /// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples. 15353 /// 15354 /// Assume LastInsertInst is of InsertElementInst or InsertValueInst type. 15355 /// 15356 /// \return true if it matches. 15357 static bool findBuildAggregate(Instruction *LastInsertInst, 15358 TargetTransformInfo *TTI, 15359 SmallVectorImpl<Value *> &BuildVectorOpds, 15360 SmallVectorImpl<Value *> &InsertElts) { 15361 15362 assert((isa<InsertElementInst>(LastInsertInst) || 15363 isa<InsertValueInst>(LastInsertInst)) && 15364 "Expected insertelement or insertvalue instruction!"); 15365 15366 assert((BuildVectorOpds.empty() && InsertElts.empty()) && 15367 "Expected empty result vectors!"); 15368 15369 std::optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst); 15370 if (!AggregateSize) 15371 return false; 15372 BuildVectorOpds.resize(*AggregateSize); 15373 InsertElts.resize(*AggregateSize); 15374 15375 findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts, 0); 15376 llvm::erase(BuildVectorOpds, nullptr); 15377 llvm::erase(InsertElts, nullptr); 15378 if (BuildVectorOpds.size() >= 2) 15379 return true; 15380 15381 return false; 15382 } 15383 15384 /// Try and get a reduction instruction from a phi node. 15385 /// 15386 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions 15387 /// if they come from either \p ParentBB or a containing loop latch. 15388 /// 15389 /// \returns A candidate reduction value if possible, or \code nullptr \endcode 15390 /// if not possible. 15391 static Instruction *getReductionInstr(const DominatorTree *DT, PHINode *P, 15392 BasicBlock *ParentBB, LoopInfo *LI) { 15393 // There are situations where the reduction value is not dominated by the 15394 // reduction phi. Vectorizing such cases has been reported to cause 15395 // miscompiles. See PR25787. 15396 auto DominatedReduxValue = [&](Value *R) { 15397 return isa<Instruction>(R) && 15398 DT->dominates(P->getParent(), cast<Instruction>(R)->getParent()); 15399 }; 15400 15401 Instruction *Rdx = nullptr; 15402 15403 // Return the incoming value if it comes from the same BB as the phi node. 15404 if (P->getIncomingBlock(0) == ParentBB) { 15405 Rdx = dyn_cast<Instruction>(P->getIncomingValue(0)); 15406 } else if (P->getIncomingBlock(1) == ParentBB) { 15407 Rdx = dyn_cast<Instruction>(P->getIncomingValue(1)); 15408 } 15409 15410 if (Rdx && DominatedReduxValue(Rdx)) 15411 return Rdx; 15412 15413 // Otherwise, check whether we have a loop latch to look at. 
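// E.g., for a reduction phi such as
//   %sum = phi i32 [ 0, %preheader ], [ %sum.next, %latch ]
// the incoming value %sum.next from the loop latch is the candidate returned
// here (illustrative IR).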
15414 Loop *BBL = LI->getLoopFor(ParentBB); 15415 if (!BBL) 15416 return nullptr; 15417 BasicBlock *BBLatch = BBL->getLoopLatch(); 15418 if (!BBLatch) 15419 return nullptr; 15420 15421 // There is a loop latch, return the incoming value if it comes from 15422 // that. This reduction pattern occasionally turns up. 15423 if (P->getIncomingBlock(0) == BBLatch) { 15424 Rdx = dyn_cast<Instruction>(P->getIncomingValue(0)); 15425 } else if (P->getIncomingBlock(1) == BBLatch) { 15426 Rdx = dyn_cast<Instruction>(P->getIncomingValue(1)); 15427 } 15428 15429 if (Rdx && DominatedReduxValue(Rdx)) 15430 return Rdx; 15431 15432 return nullptr; 15433 } 15434 15435 static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) { 15436 if (match(I, m_BinOp(m_Value(V0), m_Value(V1)))) 15437 return true; 15438 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1)))) 15439 return true; 15440 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1)))) 15441 return true; 15442 if (match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(V0), m_Value(V1)))) 15443 return true; 15444 if (match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(V0), m_Value(V1)))) 15445 return true; 15446 if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1)))) 15447 return true; 15448 if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1)))) 15449 return true; 15450 if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1)))) 15451 return true; 15452 if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1)))) 15453 return true; 15454 return false; 15455 } 15456 15457 /// We could have an initial reduction that is not an add. 15458 /// r *= v1 + v2 + v3 + v4 15459 /// In such a case start looking for a tree rooted in the first '+'. 15460 /// \Returns the new root if found, which may be nullptr if not an instruction. 15461 static Instruction *tryGetSecondaryReductionRoot(PHINode *Phi, 15462 Instruction *Root) { 15463 assert((isa<BinaryOperator>(Root) || isa<SelectInst>(Root) || 15464 isa<IntrinsicInst>(Root)) && 15465 "Expected binop, select, or intrinsic for reduction matching"); 15466 Value *LHS = 15467 Root->getOperand(HorizontalReduction::getFirstOperandIndex(Root)); 15468 Value *RHS = 15469 Root->getOperand(HorizontalReduction::getFirstOperandIndex(Root) + 1); 15470 if (LHS == Phi) 15471 return dyn_cast<Instruction>(RHS); 15472 if (RHS == Phi) 15473 return dyn_cast<Instruction>(LHS); 15474 return nullptr; 15475 } 15476 15477 /// \p Returns the first operand of \p I that does not match \p Phi. If 15478 /// operand is not an instruction it returns nullptr. 15479 static Instruction *getNonPhiOperand(Instruction *I, PHINode *Phi) { 15480 Value *Op0 = nullptr; 15481 Value *Op1 = nullptr; 15482 if (!matchRdxBop(I, Op0, Op1)) 15483 return nullptr; 15484 return dyn_cast<Instruction>(Op0 == Phi ? Op1 : Op0); 15485 } 15486 15487 /// \Returns true if \p I is a candidate instruction for reduction vectorization. 
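/// For example, a plain binary operator such as "add i32 %a, %b", a min/max
/// intrinsic like llvm.smax matched by matchRdxBop, or a select-based min/max
/// idiom would all qualify (illustrative examples).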
15488 static bool isReductionCandidate(Instruction *I) {
15489 bool IsSelect = match(I, m_Select(m_Value(), m_Value(), m_Value()));
15490 Value *B0 = nullptr, *B1 = nullptr;
15491 bool IsBinop = matchRdxBop(I, B0, B1);
15492 return IsBinop || IsSelect;
15493 }
15494
15495 bool SLPVectorizerPass::vectorizeHorReduction(
15496 PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R, TargetTransformInfo *TTI,
15497 SmallVectorImpl<WeakTrackingVH> &PostponedInsts) {
15498 if (!ShouldVectorizeHor)
15499 return false;
15500 bool TryOperandsAsNewSeeds = P && isa<BinaryOperator>(Root);
15501
15502 if (Root->getParent() != BB || isa<PHINode>(Root))
15503 return false;
15504
15505 // If we can find a secondary reduction root, use that instead.
15506 auto SelectRoot = [&]() {
15507 if (TryOperandsAsNewSeeds && isReductionCandidate(Root) &&
15508 HorizontalReduction::getRdxKind(Root) != RecurKind::None)
15509 if (Instruction *NewRoot = tryGetSecondaryReductionRoot(P, Root))
15510 return NewRoot;
15511 return Root;
15512 };
15513
15514 // Start the analysis from the Root instruction. If a horizontal reduction is
15515 // found, try to vectorize it. If it is not a horizontal reduction or
15516 // vectorization is not possible or not effective, and the currently analyzed
15517 // instruction is a binary operation, try to vectorize the operands, using
15518 // pre-order DFS traversal order. If the operands were not vectorized, repeat
15519 // the same procedure considering each operand as a possible root of the
15520 // horizontal reduction.
15521 // Interrupt the process if the Root instruction itself was vectorized or all
15522 // sub-trees no deeper than RecursionMaxDepth were analyzed/vectorized.
15523 // If a horizontal reduction was not matched or vectorized, we collect
15524 // instructions for possible later vectorization attempts.
15525 std::queue<std::pair<Instruction *, unsigned>> Stack;
15526 Stack.emplace(SelectRoot(), 0);
15527 SmallPtrSet<Value *, 8> VisitedInstrs;
15528 bool Res = false;
15529 auto &&TryToReduce = [this, TTI, &R](Instruction *Inst) -> Value * {
15530 if (R.isAnalyzedReductionRoot(Inst))
15531 return nullptr;
15532 if (!isReductionCandidate(Inst))
15533 return nullptr;
15534 HorizontalReduction HorRdx;
15535 if (!HorRdx.matchAssociativeReduction(R, Inst, *SE, *DL, *TLI))
15536 return nullptr;
15537 return HorRdx.tryToReduce(R, TTI, *TLI);
15538 };
15539 auto TryAppendToPostponedInsts = [&](Instruction *FutureSeed) {
15540 if (TryOperandsAsNewSeeds && FutureSeed == Root) {
15541 FutureSeed = getNonPhiOperand(Root, P);
15542 if (!FutureSeed)
15543 return false;
15544 }
15545 // Do not collect CmpInst or InsertElementInst/InsertValueInst as their
15546 // analysis is done separately.
15547 if (!isa<CmpInst, InsertElementInst, InsertValueInst>(FutureSeed))
15548 PostponedInsts.push_back(FutureSeed);
15549 return true;
15550 };
15551
15552 while (!Stack.empty()) {
15553 Instruction *Inst;
15554 unsigned Level;
15555 std::tie(Inst, Level) = Stack.front();
15556 Stack.pop();
15557 // Do not try to analyze an instruction that has already been vectorized.
15558 // This may happen when we vectorize instruction operands on a previous
15559 // iteration while the stack was populated before that happened.
15560 if (R.isDeleted(Inst))
15561 continue;
15562 if (Value *VectorizedV = TryToReduce(Inst)) {
15563 Res = true;
15564 if (auto *I = dyn_cast<Instruction>(VectorizedV)) {
15565 // Try to find another reduction.
15566 Stack.emplace(I, Level); 15567 continue; 15568 } 15569 } else { 15570 // We could not vectorize `Inst` so try to use it as a future seed. 15571 if (!TryAppendToPostponedInsts(Inst)) { 15572 assert(Stack.empty() && "Expected empty stack"); 15573 break; 15574 } 15575 } 15576 15577 // Try to vectorize operands. 15578 // Continue analysis for the instruction from the same basic block only to 15579 // save compile time. 15580 if (++Level < RecursionMaxDepth) 15581 for (auto *Op : Inst->operand_values()) 15582 if (VisitedInstrs.insert(Op).second) 15583 if (auto *I = dyn_cast<Instruction>(Op)) 15584 // Do not try to vectorize CmpInst operands, this is done 15585 // separately. 15586 if (!isa<PHINode, CmpInst, InsertElementInst, InsertValueInst>(I) && 15587 !R.isDeleted(I) && I->getParent() == BB) 15588 Stack.emplace(I, Level); 15589 } 15590 return Res; 15591 } 15592 15593 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Instruction *Root, 15594 BasicBlock *BB, BoUpSLP &R, 15595 TargetTransformInfo *TTI) { 15596 SmallVector<WeakTrackingVH> PostponedInsts; 15597 bool Res = vectorizeHorReduction(P, Root, BB, R, TTI, PostponedInsts); 15598 Res |= tryToVectorize(PostponedInsts, R); 15599 return Res; 15600 } 15601 15602 bool SLPVectorizerPass::tryToVectorize(ArrayRef<WeakTrackingVH> Insts, 15603 BoUpSLP &R) { 15604 bool Res = false; 15605 for (Value *V : Insts) 15606 if (auto *Inst = dyn_cast<Instruction>(V); Inst && !R.isDeleted(Inst)) 15607 Res |= tryToVectorize(Inst, R); 15608 return Res; 15609 } 15610 15611 bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI, 15612 BasicBlock *BB, BoUpSLP &R) { 15613 if (!R.canMapToVector(IVI->getType())) 15614 return false; 15615 15616 SmallVector<Value *, 16> BuildVectorOpds; 15617 SmallVector<Value *, 16> BuildVectorInsts; 15618 if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts)) 15619 return false; 15620 15621 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n"); 15622 // Aggregate value is unlikely to be processed in vector register. 15623 return tryToVectorizeList(BuildVectorOpds, R); 15624 } 15625 15626 bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI, 15627 BasicBlock *BB, BoUpSLP &R) { 15628 SmallVector<Value *, 16> BuildVectorInsts; 15629 SmallVector<Value *, 16> BuildVectorOpds; 15630 SmallVector<int> Mask; 15631 if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) || 15632 (llvm::all_of( 15633 BuildVectorOpds, 15634 [](Value *V) { return isa<ExtractElementInst, UndefValue>(V); }) && 15635 isFixedVectorShuffle(BuildVectorOpds, Mask))) 15636 return false; 15637 15638 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n"); 15639 return tryToVectorizeList(BuildVectorInsts, R); 15640 } 15641 15642 template <typename T> 15643 static bool tryToVectorizeSequence( 15644 SmallVectorImpl<T *> &Incoming, function_ref<bool(T *, T *)> Comparator, 15645 function_ref<bool(T *, T *)> AreCompatible, 15646 function_ref<bool(ArrayRef<T *>, bool)> TryToVectorizeHelper, 15647 bool MaxVFOnly, BoUpSLP &R) { 15648 bool Changed = false; 15649 // Sort by type, parent, operands. 15650 stable_sort(Incoming, Comparator); 15651 15652 // Try to vectorize elements base on their type. 15653 SmallVector<T *> Candidates; 15654 for (auto *IncIt = Incoming.begin(), *E = Incoming.end(); IncIt != E;) { 15655 // Look for the next elements with the same type, parent and operand 15656 // kinds. 
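// E.g., after the sort above a run of compatible i32 instructions from the
// same block forms one candidate bundle here, while the following run of a
// different type is tried separately (illustrative grouping).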
15657 auto *SameTypeIt = IncIt;
15658 while (SameTypeIt != E && AreCompatible(*SameTypeIt, *IncIt))
15659 ++SameTypeIt;
15660
15661 // Try to vectorize them.
15662 unsigned NumElts = (SameTypeIt - IncIt);
15663 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at nodes ("
15664 << NumElts << ")\n");
15665 // The vectorization is a 3-state attempt:
15666 // 1. Try to vectorize instructions with the same/alternate opcodes with the
15667 // maximal register size first.
15668 // 2. Try to vectorize remaining instructions with the same type, if
15669 // possible. This may produce better vectorization results than trying to
15670 // vectorize only instructions with the same/alternate opcodes.
15671 // 3. Final attempt to vectorize all instructions with the
15672 // same/alternate ops only; this may result in some extra final
15673 // vectorization.
15674 if (NumElts > 1 &&
15675 TryToVectorizeHelper(ArrayRef(IncIt, NumElts), MaxVFOnly)) {
15676 // Success: start over because instructions might have been changed.
15677 Changed = true;
15678 } else {
15679 /// \Returns the minimum number of elements that we will attempt to
15680 /// vectorize.
15681 auto GetMinNumElements = [&R](Value *V) {
15682 unsigned EltSize = R.getVectorElementSize(V);
15683 return std::max(2U, R.getMaxVecRegSize() / EltSize);
15684 };
15685 if (NumElts < GetMinNumElements(*IncIt) &&
15686 (Candidates.empty() ||
15687 Candidates.front()->getType() == (*IncIt)->getType())) {
15688 Candidates.append(IncIt, std::next(IncIt, NumElts));
15689 }
15690 }
15691 // Final attempt to vectorize instructions with the same types.
15692 if (Candidates.size() > 1 &&
15693 (SameTypeIt == E || (*SameTypeIt)->getType() != (*IncIt)->getType())) {
15694 if (TryToVectorizeHelper(Candidates, /*MaxVFOnly=*/false)) {
15695 // Success: start over because instructions might have been changed.
15696 Changed = true;
15697 } else if (MaxVFOnly) {
15698 // Try to vectorize using small vectors.
15699 for (auto *It = Candidates.begin(), *End = Candidates.end();
15700 It != End;) {
15701 auto *SameTypeIt = It;
15702 while (SameTypeIt != End && AreCompatible(*SameTypeIt, *It))
15703 ++SameTypeIt;
15704 unsigned NumElts = (SameTypeIt - It);
15705 if (NumElts > 1 && TryToVectorizeHelper(ArrayRef(It, NumElts),
15706 /*MaxVFOnly=*/false))
15707 Changed = true;
15708 It = SameTypeIt;
15709 }
15710 }
15711 Candidates.clear();
15712 }
15713
15714 // Start over at the next instruction of a different type (or the end).
15715 IncIt = SameTypeIt;
15716 }
15717 return Changed;
15718 }
15719
15720 /// Compare two cmp instructions. If IsCompatibility is true, the function
15721 /// returns true if the two cmps have the same/swapped predicates and the most
15722 /// compatible corresponding operands. If IsCompatibility is false, the
15723 /// function implements a strict weak ordering relation between the two cmp
15724 /// instructions, returning true if the first instruction is "less" than the
15725 /// second, i.e. its predicate is less than the predicate of the second or the
15726 /// operand IDs are less than the operand IDs of the second cmp instruction.
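/// For example (illustrative), "icmp slt i32 %a, %b" and "icmp sgt i32 %c, %d"
/// share the same base predicate because sgt is the swapped form of slt, so
/// they may be grouped into the same vectorization candidate list.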
15727 template <bool IsCompatibility> 15728 static bool compareCmp(Value *V, Value *V2, TargetLibraryInfo &TLI, 15729 const DominatorTree &DT) { 15730 assert(isValidElementType(V->getType()) && 15731 isValidElementType(V2->getType()) && 15732 "Expected valid element types only."); 15733 auto *CI1 = cast<CmpInst>(V); 15734 auto *CI2 = cast<CmpInst>(V2); 15735 if (CI1->getOperand(0)->getType()->getTypeID() < 15736 CI2->getOperand(0)->getType()->getTypeID()) 15737 return !IsCompatibility; 15738 if (CI1->getOperand(0)->getType()->getTypeID() > 15739 CI2->getOperand(0)->getType()->getTypeID()) 15740 return false; 15741 CmpInst::Predicate Pred1 = CI1->getPredicate(); 15742 CmpInst::Predicate Pred2 = CI2->getPredicate(); 15743 CmpInst::Predicate SwapPred1 = CmpInst::getSwappedPredicate(Pred1); 15744 CmpInst::Predicate SwapPred2 = CmpInst::getSwappedPredicate(Pred2); 15745 CmpInst::Predicate BasePred1 = std::min(Pred1, SwapPred1); 15746 CmpInst::Predicate BasePred2 = std::min(Pred2, SwapPred2); 15747 if (BasePred1 < BasePred2) 15748 return !IsCompatibility; 15749 if (BasePred1 > BasePred2) 15750 return false; 15751 // Compare operands. 15752 bool CI1Preds = Pred1 == BasePred1; 15753 bool CI2Preds = Pred2 == BasePred1; 15754 for (int I = 0, E = CI1->getNumOperands(); I < E; ++I) { 15755 auto *Op1 = CI1->getOperand(CI1Preds ? I : E - I - 1); 15756 auto *Op2 = CI2->getOperand(CI2Preds ? I : E - I - 1); 15757 if (Op1->getValueID() < Op2->getValueID()) 15758 return !IsCompatibility; 15759 if (Op1->getValueID() > Op2->getValueID()) 15760 return false; 15761 if (auto *I1 = dyn_cast<Instruction>(Op1)) 15762 if (auto *I2 = dyn_cast<Instruction>(Op2)) { 15763 if (IsCompatibility) { 15764 if (I1->getParent() != I2->getParent()) 15765 return false; 15766 } else { 15767 // Try to compare nodes with same parent. 15768 DomTreeNodeBase<BasicBlock> *NodeI1 = DT.getNode(I1->getParent()); 15769 DomTreeNodeBase<BasicBlock> *NodeI2 = DT.getNode(I2->getParent()); 15770 if (!NodeI1) 15771 return NodeI2 != nullptr; 15772 if (!NodeI2) 15773 return false; 15774 assert((NodeI1 == NodeI2) == 15775 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) && 15776 "Different nodes should have different DFS numbers"); 15777 if (NodeI1 != NodeI2) 15778 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn(); 15779 } 15780 InstructionsState S = getSameOpcode({I1, I2}, TLI); 15781 if (S.getOpcode() && (IsCompatibility || !S.isAltShuffle())) 15782 continue; 15783 return !IsCompatibility && I1->getOpcode() < I2->getOpcode(); 15784 } 15785 } 15786 return IsCompatibility; 15787 } 15788 15789 template <typename ItT> 15790 bool SLPVectorizerPass::vectorizeCmpInsts(iterator_range<ItT> CmpInsts, 15791 BasicBlock *BB, BoUpSLP &R) { 15792 bool Changed = false; 15793 // Try to find reductions first. 15794 for (CmpInst *I : CmpInsts) { 15795 if (R.isDeleted(I)) 15796 continue; 15797 for (Value *Op : I->operands()) 15798 if (auto *RootOp = dyn_cast<Instruction>(Op)) 15799 Changed |= vectorizeRootInstruction(nullptr, RootOp, BB, R, TTI); 15800 } 15801 // Try to vectorize operands as vector bundles. 15802 for (CmpInst *I : CmpInsts) { 15803 if (R.isDeleted(I)) 15804 continue; 15805 Changed |= tryToVectorize(I, R); 15806 } 15807 // Try to vectorize list of compares. 15808 // Sort by type, compare predicate, etc. 
15809 auto CompareSorter = [&](Value *V, Value *V2) { 15810 if (V == V2) 15811 return false; 15812 return compareCmp<false>(V, V2, *TLI, *DT); 15813 }; 15814 15815 auto AreCompatibleCompares = [&](Value *V1, Value *V2) { 15816 if (V1 == V2) 15817 return true; 15818 return compareCmp<true>(V1, V2, *TLI, *DT); 15819 }; 15820 15821 SmallVector<Value *> Vals; 15822 for (Instruction *V : CmpInsts) 15823 if (!R.isDeleted(V) && isValidElementType(V->getType())) 15824 Vals.push_back(V); 15825 if (Vals.size() <= 1) 15826 return Changed; 15827 Changed |= tryToVectorizeSequence<Value>( 15828 Vals, CompareSorter, AreCompatibleCompares, 15829 [this, &R](ArrayRef<Value *> Candidates, bool MaxVFOnly) { 15830 // Exclude possible reductions from other blocks. 15831 bool ArePossiblyReducedInOtherBlock = any_of(Candidates, [](Value *V) { 15832 return any_of(V->users(), [V](User *U) { 15833 auto *Select = dyn_cast<SelectInst>(U); 15834 return Select && 15835 Select->getParent() != cast<Instruction>(V)->getParent(); 15836 }); 15837 }); 15838 if (ArePossiblyReducedInOtherBlock) 15839 return false; 15840 return tryToVectorizeList(Candidates, R, MaxVFOnly); 15841 }, 15842 /*MaxVFOnly=*/true, R); 15843 return Changed; 15844 } 15845 15846 bool SLPVectorizerPass::vectorizeInserts(InstSetVector &Instructions, 15847 BasicBlock *BB, BoUpSLP &R) { 15848 assert(all_of(Instructions, 15849 [](auto *I) { 15850 return isa<InsertElementInst, InsertValueInst>(I); 15851 }) && 15852 "This function only accepts Insert instructions"); 15853 bool OpsChanged = false; 15854 SmallVector<WeakTrackingVH> PostponedInsts; 15855 // pass1 - try to vectorize reductions only 15856 for (auto *I : reverse(Instructions)) { 15857 if (R.isDeleted(I)) 15858 continue; 15859 OpsChanged |= vectorizeHorReduction(nullptr, I, BB, R, TTI, PostponedInsts); 15860 } 15861 // pass2 - try to match and vectorize a buildvector sequence. 15862 for (auto *I : reverse(Instructions)) { 15863 if (R.isDeleted(I) || isa<CmpInst>(I)) 15864 continue; 15865 if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) { 15866 OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R); 15867 } else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) { 15868 OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R); 15869 } 15870 } 15871 // Now try to vectorize postponed instructions. 15872 OpsChanged |= tryToVectorize(PostponedInsts, R); 15873 15874 Instructions.clear(); 15875 return OpsChanged; 15876 } 15877 15878 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { 15879 bool Changed = false; 15880 SmallVector<Value *, 4> Incoming; 15881 SmallPtrSet<Value *, 16> VisitedInstrs; 15882 // Maps phi nodes to the non-phi nodes found in the use tree for each phi 15883 // node. Allows better to identify the chains that can be vectorized in the 15884 // better way. 15885 DenseMap<Value *, SmallVector<Value *, 4>> PHIToOpcodes; 15886 auto PHICompare = [this, &PHIToOpcodes](Value *V1, Value *V2) { 15887 assert(isValidElementType(V1->getType()) && 15888 isValidElementType(V2->getType()) && 15889 "Expected vectorizable types only."); 15890 // It is fine to compare type IDs here, since we expect only vectorizable 15891 // types, like ints, floats and pointers, we don't care about other type. 
15892 if (V1->getType()->getTypeID() < V2->getType()->getTypeID()) 15893 return true; 15894 if (V1->getType()->getTypeID() > V2->getType()->getTypeID()) 15895 return false; 15896 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1]; 15897 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2]; 15898 if (Opcodes1.size() < Opcodes2.size()) 15899 return true; 15900 if (Opcodes1.size() > Opcodes2.size()) 15901 return false; 15902 for (int I = 0, E = Opcodes1.size(); I < E; ++I) { 15903 // Undefs are compatible with any other value. 15904 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I])) { 15905 if (isa<Instruction>(Opcodes1[I])) 15906 return true; 15907 if (isa<Instruction>(Opcodes2[I])) 15908 return false; 15909 if (isa<Constant>(Opcodes1[I]) && !isa<UndefValue>(Opcodes1[I])) 15910 return true; 15911 if (isa<Constant>(Opcodes2[I]) && !isa<UndefValue>(Opcodes2[I])) 15912 return false; 15913 if (isa<UndefValue>(Opcodes1[I]) && isa<UndefValue>(Opcodes2[I])) 15914 continue; 15915 return isa<UndefValue>(Opcodes2[I]); 15916 } 15917 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I])) 15918 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) { 15919 DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent()); 15920 DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent()); 15921 if (!NodeI1) 15922 return NodeI2 != nullptr; 15923 if (!NodeI2) 15924 return false; 15925 assert((NodeI1 == NodeI2) == 15926 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) && 15927 "Different nodes should have different DFS numbers"); 15928 if (NodeI1 != NodeI2) 15929 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn(); 15930 InstructionsState S = getSameOpcode({I1, I2}, *TLI); 15931 if (S.getOpcode() && !S.isAltShuffle()) 15932 continue; 15933 return I1->getOpcode() < I2->getOpcode(); 15934 } 15935 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I])) 15936 return Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID(); 15937 if (isa<Instruction>(Opcodes1[I])) 15938 return true; 15939 if (isa<Instruction>(Opcodes2[I])) 15940 return false; 15941 if (isa<Constant>(Opcodes1[I])) 15942 return true; 15943 if (isa<Constant>(Opcodes2[I])) 15944 return false; 15945 if (Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID()) 15946 return true; 15947 if (Opcodes1[I]->getValueID() > Opcodes2[I]->getValueID()) 15948 return false; 15949 } 15950 return false; 15951 }; 15952 auto AreCompatiblePHIs = [&PHIToOpcodes, this](Value *V1, Value *V2) { 15953 if (V1 == V2) 15954 return true; 15955 if (V1->getType() != V2->getType()) 15956 return false; 15957 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1]; 15958 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2]; 15959 if (Opcodes1.size() != Opcodes2.size()) 15960 return false; 15961 for (int I = 0, E = Opcodes1.size(); I < E; ++I) { 15962 // Undefs are compatible with any other value. 
15963 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I])) 15964 continue; 15965 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I])) 15966 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) { 15967 if (I1->getParent() != I2->getParent()) 15968 return false; 15969 InstructionsState S = getSameOpcode({I1, I2}, *TLI); 15970 if (S.getOpcode()) 15971 continue; 15972 return false; 15973 } 15974 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I])) 15975 continue; 15976 if (Opcodes1[I]->getValueID() != Opcodes2[I]->getValueID()) 15977 return false; 15978 } 15979 return true; 15980 }; 15981 15982 bool HaveVectorizedPhiNodes = false; 15983 do { 15984 // Collect the incoming values from the PHIs. 15985 Incoming.clear(); 15986 for (Instruction &I : *BB) { 15987 PHINode *P = dyn_cast<PHINode>(&I); 15988 if (!P) 15989 break; 15990 15991 // No need to analyze deleted, vectorized and non-vectorizable 15992 // instructions. 15993 if (!VisitedInstrs.count(P) && !R.isDeleted(P) && 15994 isValidElementType(P->getType())) 15995 Incoming.push_back(P); 15996 } 15997 15998 if (Incoming.size() <= 1) 15999 break; 16000 16001 // Find the corresponding non-phi nodes for better matching when trying to 16002 // build the tree. 16003 for (Value *V : Incoming) { 16004 SmallVectorImpl<Value *> &Opcodes = 16005 PHIToOpcodes.try_emplace(V).first->getSecond(); 16006 if (!Opcodes.empty()) 16007 continue; 16008 SmallVector<Value *, 4> Nodes(1, V); 16009 SmallPtrSet<Value *, 4> Visited; 16010 while (!Nodes.empty()) { 16011 auto *PHI = cast<PHINode>(Nodes.pop_back_val()); 16012 if (!Visited.insert(PHI).second) 16013 continue; 16014 for (Value *V : PHI->incoming_values()) { 16015 if (auto *PHI1 = dyn_cast<PHINode>((V))) { 16016 Nodes.push_back(PHI1); 16017 continue; 16018 } 16019 Opcodes.emplace_back(V); 16020 } 16021 } 16022 } 16023 16024 HaveVectorizedPhiNodes = tryToVectorizeSequence<Value>( 16025 Incoming, PHICompare, AreCompatiblePHIs, 16026 [this, &R](ArrayRef<Value *> Candidates, bool MaxVFOnly) { 16027 return tryToVectorizeList(Candidates, R, MaxVFOnly); 16028 }, 16029 /*MaxVFOnly=*/true, R); 16030 Changed |= HaveVectorizedPhiNodes; 16031 VisitedInstrs.insert(Incoming.begin(), Incoming.end()); 16032 } while (HaveVectorizedPhiNodes); 16033 16034 VisitedInstrs.clear(); 16035 16036 InstSetVector PostProcessInserts; 16037 SmallSetVector<CmpInst *, 8> PostProcessCmps; 16038 // Vectorizes Inserts in `PostProcessInserts` and if `VecctorizeCmps` is true 16039 // also vectorizes `PostProcessCmps`. 16040 auto VectorizeInsertsAndCmps = [&](bool VectorizeCmps) { 16041 bool Changed = vectorizeInserts(PostProcessInserts, BB, R); 16042 if (VectorizeCmps) { 16043 Changed |= vectorizeCmpInsts(reverse(PostProcessCmps), BB, R); 16044 PostProcessCmps.clear(); 16045 } 16046 PostProcessInserts.clear(); 16047 return Changed; 16048 }; 16049 // Returns true if `I` is in `PostProcessInserts` or `PostProcessCmps`. 16050 auto IsInPostProcessInstrs = [&](Instruction *I) { 16051 if (auto *Cmp = dyn_cast<CmpInst>(I)) 16052 return PostProcessCmps.contains(Cmp); 16053 return isa<InsertElementInst, InsertValueInst>(I) && 16054 PostProcessInserts.contains(I); 16055 }; 16056 // Returns true if `I` is an instruction without users, like terminator, or 16057 // function call with ignored return value, store. Ignore unused instructions 16058 // (basing on instruction type, except for CallInst and InvokeInst). 
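// E.g., a "store i32 %x, ptr %p", a "ret void", or a call whose result is
// ignored all match here and serve as seeds for the bottom-up vectorization
// attempts below (illustrative instructions).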
16059 auto HasNoUsers = [](Instruction *I) { 16060 return I->use_empty() && 16061 (I->getType()->isVoidTy() || isa<CallInst, InvokeInst>(I)); 16062 }; 16063 for (BasicBlock::iterator It = BB->begin(), E = BB->end(); It != E; ++It) { 16064 // Skip instructions with scalable type. The num of elements is unknown at 16065 // compile-time for scalable type. 16066 if (isa<ScalableVectorType>(It->getType())) 16067 continue; 16068 16069 // Skip instructions marked for the deletion. 16070 if (R.isDeleted(&*It)) 16071 continue; 16072 // We may go through BB multiple times so skip the one we have checked. 16073 if (!VisitedInstrs.insert(&*It).second) { 16074 if (HasNoUsers(&*It) && 16075 VectorizeInsertsAndCmps(/*VectorizeCmps=*/It->isTerminator())) { 16076 // We would like to start over since some instructions are deleted 16077 // and the iterator may become invalid value. 16078 Changed = true; 16079 It = BB->begin(); 16080 E = BB->end(); 16081 } 16082 continue; 16083 } 16084 16085 if (isa<DbgInfoIntrinsic>(It)) 16086 continue; 16087 16088 // Try to vectorize reductions that use PHINodes. 16089 if (PHINode *P = dyn_cast<PHINode>(It)) { 16090 // Check that the PHI is a reduction PHI. 16091 if (P->getNumIncomingValues() == 2) { 16092 // Try to match and vectorize a horizontal reduction. 16093 Instruction *Root = getReductionInstr(DT, P, BB, LI); 16094 if (Root && vectorizeRootInstruction(P, Root, BB, R, TTI)) { 16095 Changed = true; 16096 It = BB->begin(); 16097 E = BB->end(); 16098 continue; 16099 } 16100 } 16101 // Try to vectorize the incoming values of the PHI, to catch reductions 16102 // that feed into PHIs. 16103 for (unsigned I = 0, E = P->getNumIncomingValues(); I != E; I++) { 16104 // Skip if the incoming block is the current BB for now. Also, bypass 16105 // unreachable IR for efficiency and to avoid crashing. 16106 // TODO: Collect the skipped incoming values and try to vectorize them 16107 // after processing BB. 16108 if (BB == P->getIncomingBlock(I) || 16109 !DT->isReachableFromEntry(P->getIncomingBlock(I))) 16110 continue; 16111 16112 // Postponed instructions should not be vectorized here, delay their 16113 // vectorization. 16114 if (auto *PI = dyn_cast<Instruction>(P->getIncomingValue(I)); 16115 PI && !IsInPostProcessInstrs(PI)) 16116 Changed |= vectorizeRootInstruction(nullptr, PI, 16117 P->getIncomingBlock(I), R, TTI); 16118 } 16119 continue; 16120 } 16121 16122 if (HasNoUsers(&*It)) { 16123 bool OpsChanged = false; 16124 auto *SI = dyn_cast<StoreInst>(It); 16125 bool TryToVectorizeRoot = ShouldStartVectorizeHorAtStore || !SI; 16126 if (SI) { 16127 auto *I = Stores.find(getUnderlyingObject(SI->getPointerOperand())); 16128 // Try to vectorize chain in store, if this is the only store to the 16129 // address in the block. 16130 // TODO: This is just a temporarily solution to save compile time. Need 16131 // to investigate if we can safely turn on slp-vectorize-hor-store 16132 // instead to allow lookup for reduction chains in all non-vectorized 16133 // stores (need to check side effects and compile time). 16134 TryToVectorizeRoot |= (I == Stores.end() || I->second.size() == 1) && 16135 SI->getValueOperand()->hasOneUse(); 16136 } 16137 if (TryToVectorizeRoot) { 16138 for (auto *V : It->operand_values()) { 16139 // Postponed instructions should not be vectorized here, delay their 16140 // vectorization. 16141 if (auto *VI = dyn_cast<Instruction>(V); 16142 VI && !IsInPostProcessInstrs(VI)) 16143 // Try to match and vectorize a horizontal reduction. 
      // Start vectorizing the post-process list of instructions from the
      // top-of-tree instructions, to try to vectorize as many instructions as
      // possible.
      OpsChanged |=
          VectorizeInsertsAndCmps(/*VectorizeCmps=*/It->isTerminator());
      if (OpsChanged) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        It = BB->begin();
        E = BB->end();
        continue;
      }
    }

    if (isa<InsertElementInst, InsertValueInst>(It))
      PostProcessInserts.insert(&*It);
    else if (isa<CmpInst>(It))
      PostProcessCmps.insert(cast<CmpInst>(&*It));
  }

  return Changed;
}

bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
  auto Changed = false;
  for (auto &Entry : GEPs) {
    // If the getelementptr list has fewer than two elements, there's nothing
    // to do.
    if (Entry.second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
                      << Entry.second.size() << ".\n");

    // Process the GEP list in chunks suitable for the target's supported
    // vector size. If a vector register can't hold 1 element, we are done. We
    // are trying to vectorize the index computations, so the maximum number of
    // elements is based on the size of the index expression, rather than the
    // size of the GEP itself (the target's pointer size).
    unsigned MaxVecRegSize = R.getMaxVecRegSize();
    unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin());
    if (MaxVecRegSize < EltSize)
      continue;

    unsigned MaxElts = MaxVecRegSize / EltSize;
    for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) {
      auto Len = std::min<unsigned>(BE - BI, MaxElts);
      ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len);

      // Initialize a set of candidate getelementptrs. Note that we use a
      // SetVector here to preserve program order. If the index computations
      // are vectorizable and begin with loads, we want to minimize the chance
      // of having to reorder them later.
      SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());

      // Some of the candidates may have already been vectorized after we
      // initially collected them. If so, they are marked as deleted, so remove
      // them from the set of candidates.
      Candidates.remove_if(
          [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); });

      // Remove from the set of candidates all pairs of getelementptrs with
      // constant differences. Such getelementptrs are likely not good
      // candidates for vectorization in a bottom-up phase since one can be
      // computed from the other. We also ensure all candidate getelementptr
      // indices are unique.
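      // For example (hypothetical IR, not from a test case), given
      //   %g0 = getelementptr inbounds i32, ptr %a, i64 %i
      //   %i1 = add nsw i64 %i, 1
      //   %g1 = getelementptr inbounds i32, ptr %a, i64 %i1
      // the SCEV difference between %g0 and %g1 is the constant 4, so the
      // pair is dropped below: %g1 is trivially derived from %g0, and
      // vectorizing their index computations would not pay for itself.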
      for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
        auto *GEPI = GEPList[I];
        if (!Candidates.count(GEPI))
          continue;
        auto *SCEVI = SE->getSCEV(GEPList[I]);
        for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
          auto *GEPJ = GEPList[J];
          auto *SCEVJ = SE->getSCEV(GEPList[J]);
          if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
            Candidates.remove(GEPI);
            Candidates.remove(GEPJ);
          } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
            Candidates.remove(GEPJ);
          }
        }
      }

      // We break out of the above computation as soon as we know there are
      // fewer than two candidates remaining.
      if (Candidates.size() < 2)
        continue;

      // Add the single, non-constant index of each candidate to the bundle. We
      // ensured the indices met these constraints when we originally collected
      // the getelementptrs.
      SmallVector<Value *, 16> Bundle(Candidates.size());
      auto BundleIndex = 0u;
      for (auto *V : Candidates) {
        auto *GEP = cast<GetElementPtrInst>(V);
        auto *GEPIdx = GEP->idx_begin()->get();
        assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
        Bundle[BundleIndex++] = GEPIdx;
      }

      // Try and vectorize the indices. We are currently only interested in
      // gather-like cases of the form:
      //
      // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
      //
      // where the loads of "a", the loads of "b", and the subtractions can be
      // performed in parallel. It's likely that detecting this pattern in a
      // bottom-up phase will be simpler and less costly than building a
      // full-blown top-down phase beginning at the consecutive loads.
      Changed |= tryToVectorizeList(Bundle, R);
    }
  }
  return Changed;
}

bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Sort by type, base pointer and value operand. Value operands must be
  // compatible (have the same opcode, same parent), otherwise it is
  // definitely not profitable to try to vectorize them.
  auto &&StoreSorter = [this](StoreInst *V, StoreInst *V2) {
    if (V->getValueOperand()->getType()->getTypeID() <
        V2->getValueOperand()->getType()->getTypeID())
      return true;
    if (V->getValueOperand()->getType()->getTypeID() >
        V2->getValueOperand()->getType()->getTypeID())
      return false;
    if (V->getPointerOperandType()->getTypeID() <
        V2->getPointerOperandType()->getTypeID())
      return true;
    if (V->getPointerOperandType()->getTypeID() >
        V2->getPointerOperandType()->getTypeID())
      return false;
    // UndefValues are compatible with all other values.
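    // When either value operand is an undef, false is returned: neither store
    // is ordered before the other, so the sort treats them as interchangeable
    // and AreCompatibleStores below accepts an undef in any group.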
    if (isa<UndefValue>(V->getValueOperand()) ||
        isa<UndefValue>(V2->getValueOperand()))
      return false;
    if (auto *I1 = dyn_cast<Instruction>(V->getValueOperand()))
      if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
        DomTreeNodeBase<llvm::BasicBlock> *NodeI1 =
            DT->getNode(I1->getParent());
        DomTreeNodeBase<llvm::BasicBlock> *NodeI2 =
            DT->getNode(I2->getParent());
        assert(NodeI1 && "Should only process reachable instructions");
        assert(NodeI2 && "Should only process reachable instructions");
        assert((NodeI1 == NodeI2) ==
                   (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
               "Different nodes should have different DFS numbers");
        if (NodeI1 != NodeI2)
          return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
        InstructionsState S = getSameOpcode({I1, I2}, *TLI);
        if (S.getOpcode())
          return false;
        return I1->getOpcode() < I2->getOpcode();
      }
    if (isa<Constant>(V->getValueOperand()) &&
        isa<Constant>(V2->getValueOperand()))
      return false;
    return V->getValueOperand()->getValueID() <
           V2->getValueOperand()->getValueID();
  };

  auto &&AreCompatibleStores = [this](StoreInst *V1, StoreInst *V2) {
    if (V1 == V2)
      return true;
    if (V1->getValueOperand()->getType() != V2->getValueOperand()->getType())
      return false;
    if (V1->getPointerOperandType() != V2->getPointerOperandType())
      return false;
    // Undefs are compatible with any other value.
    if (isa<UndefValue>(V1->getValueOperand()) ||
        isa<UndefValue>(V2->getValueOperand()))
      return true;
    if (auto *I1 = dyn_cast<Instruction>(V1->getValueOperand()))
      if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
        if (I1->getParent() != I2->getParent())
          return false;
        InstructionsState S = getSameOpcode({I1, I2}, *TLI);
        return S.getOpcode() > 0;
      }
    if (isa<Constant>(V1->getValueOperand()) &&
        isa<Constant>(V2->getValueOperand()))
      return true;
    return V1->getValueOperand()->getValueID() ==
           V2->getValueOperand()->getValueID();
  };

  // Attempt to sort and vectorize each of the store-groups.
  for (auto &Pair : Stores) {
    if (Pair.second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                      << Pair.second.size() << ".\n");

    if (!isValidElementType(Pair.second.front()->getValueOperand()->getType()))
      continue;

    // Reverse the stores to do a bottom-to-top analysis. This is important if
    // values are stored to the same address several times; in that case we
    // need to follow the store order (reversed to meet the memory
    // dependencies).
    SmallVector<StoreInst *> ReversedStores(Pair.second.rbegin(),
                                            Pair.second.rend());
    Changed |= tryToVectorizeSequence<StoreInst>(
        ReversedStores, StoreSorter, AreCompatibleStores,
        [this, &R](ArrayRef<StoreInst *> Candidates, bool) {
          return vectorizeStores(Candidates, R);
        },
        /*MaxVFOnly=*/false, R);
  }
  return Changed;
}
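// Illustrative sketch for vectorizeStoreChains above (hypothetical IR, not
// from a test case): a group of stores to consecutive addresses collected in
// `Stores`, e.g.
//   store i32 %x0, ptr %p
//   store i32 %x1, ptr %p1   ; %p1 == %p + 4
//   store i32 %x2, ptr %p2   ; %p2 == %p + 8
//   store i32 %x3, ptr %p3   ; %p3 == %p + 12
// is handed to vectorizeStores, which may replace it with a single store of a
// <4 x i32> value if the stored values form a profitable vectorizable tree.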