//===----------- VectorUtils.cpp - Vectorizer utility functions ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"

#define DEBUG_TYPE "vectorutils"

using namespace llvm;
using namespace llvm::PatternMatch;

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// Return true if all of the intrinsic's arguments and return type are scalars
/// for the scalar form of the intrinsic, and vectors for the vector form of
/// the intrinsic (except operands that are marked as always being scalar by
/// isVectorIntrinsicWithScalarOpAtArg).
bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::abs:   // Begin integer bit-manipulation.
  case Intrinsic::bswap:
  case Intrinsic::bitreverse:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
  case Intrinsic::sqrt:   // Begin floating-point.
  case Intrinsic::asin:
  case Intrinsic::acos:
  case Intrinsic::atan:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::tan:
  case Intrinsic::sinh:
  case Intrinsic::cosh:
  case Intrinsic::tanh:
  case Intrinsic::exp:
  case Intrinsic::exp10:
  case Intrinsic::exp2:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::pow:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::is_fpclass:
  case Intrinsic::powi:
  case Intrinsic::canonicalize:
  case Intrinsic::fptosi_sat:
  case Intrinsic::fptoui_sat:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
  case Intrinsic::ucmp:
  case Intrinsic::scmp:
    return true;
  default:
    return false;
  }
}

/// Identifies if the vector form of the intrinsic has a scalar operand.
bool llvm::isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
                                              unsigned ScalarOpdIdx) {
  switch (ID) {
  case Intrinsic::abs:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::is_fpclass:
  case Intrinsic::powi:
    return (ScalarOpdIdx == 1);
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
    return (ScalarOpdIdx == 2);
  default:
    return false;
  }
}

bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
                                                  int OpdIdx) {
  assert(ID != Intrinsic::not_intrinsic && "Not an intrinsic!");

  switch (ID) {
  case Intrinsic::fptosi_sat:
  case Intrinsic::fptoui_sat:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
  case Intrinsic::ucmp:
  case Intrinsic::scmp:
    return OpdIdx == -1 || OpdIdx == 0;
  case Intrinsic::is_fpclass:
    return OpdIdx == 0;
  case Intrinsic::powi:
    return OpdIdx == -1 || OpdIdx == 1;
  default:
    return OpdIdx == -1;
  }
}

bool llvm::isVectorIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID,
                                                            int RetIdx) {
  switch (ID) {
  case Intrinsic::frexp:
    return RetIdx == 0 || RetIdx == 1;
  default:
    return RetIdx == 0;
  }
}

/// Returns intrinsic ID for call.
/// For the input call instruction it finds the mapped intrinsic and returns
/// its ID; if no intrinsic is found, it returns not_intrinsic.
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
                                                const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getIntrinsicForCallSite(*CI, TLI);
  if (ID == Intrinsic::not_intrinsic)
    return Intrinsic::not_intrinsic;

  if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
      ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
      ID == Intrinsic::experimental_noalias_scope_decl ||
      ID == Intrinsic::sideeffect || ID == Intrinsic::pseudoprobe)
    return ID;
  return Intrinsic::not_intrinsic;
}

/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then
/// extracted from the vector.
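/// For example, given (with illustrative value names)
///   %vec = insertelement <4 x i32> %base, i32 %x, i32 1
/// findScalarElement(%vec, 1) returns %x without creating new instructions.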
Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
  assert(V->getType()->isVectorTy() && "Not looking at a vector?");
  VectorType *VTy = cast<VectorType>(V->getType());
  // For fixed-length vector, return poison for out of range access.
  if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
    unsigned Width = FVTy->getNumElements();
    if (EltNo >= Width)
      return PoisonValue::get(FVTy->getElementType());
  }

  if (Constant *C = dyn_cast<Constant>(V))
    return C->getAggregateElement(EltNo);

  if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return nullptr;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Guard against infinite loop on malformed, unreachable IR.
    if (III == III->getOperand(0))
      return nullptr;

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return findScalarElement(III->getOperand(0), EltNo);
  }

  ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
  // Restrict the following transformation to fixed-length vector.
  if (SVI && isa<FixedVectorType>(SVI->getType())) {
    unsigned LHSWidth =
        cast<FixedVectorType>(SVI->getOperand(0)->getType())->getNumElements();
    int InEl = SVI->getMaskValue(EltNo);
    if (InEl < 0)
      return PoisonValue::get(VTy->getElementType());
    if (InEl < (int)LHSWidth)
      return findScalarElement(SVI->getOperand(0), InEl);
    return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
  }

  // Extract a value from a vector add operation with a constant zero.
  // TODO: Use getBinOpIdentity() to generalize this.
  Value *Val; Constant *C;
  if (match(V, m_Add(m_Value(Val), m_Constant(C))))
    if (Constant *Elt = C->getAggregateElement(EltNo))
      if (Elt->isNullValue())
        return findScalarElement(Val, EltNo);

  // If the vector is a splat then we can trivially find the scalar element.
  if (isa<ScalableVectorType>(VTy))
    if (Value *Splat = getSplatValue(V))
      if (EltNo < VTy->getElementCount().getKnownMinValue())
        return Splat;

  // Otherwise, we don't know.
  return nullptr;
}

int llvm::getSplatIndex(ArrayRef<int> Mask) {
  int SplatIndex = -1;
  for (int M : Mask) {
    // Ignore invalid (undefined) mask elements.
    if (M < 0)
      continue;

    // There can be only 1 non-negative mask element value if this is a splat.
    if (SplatIndex != -1 && SplatIndex != M)
      return -1;

    // Initialize the splat index to the 1st non-negative mask element.
    SplatIndex = M;
  }
  assert((SplatIndex == -1 || SplatIndex >= 0) && "Negative index?");
  return SplatIndex;
}

/// Get splat value if the input is a splat vector or return nullptr.
/// This function is not fully general. It checks only 2 cases:
/// the input value is (1) a splat constant vector or (2) a sequence
/// of instructions that broadcasts a scalar at element 0.
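/// For example, the broadcast sequence below (value names are illustrative)
/// returns %x:
///   %ins   = insertelement <4 x float> poison, float %x, i64 0
///   %splat = shufflevector <4 x float> %ins, <4 x float> poison,
///                          <4 x i32> zeroinitializer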
Value *llvm::getSplatValue(const Value *V) {
  if (isa<VectorType>(V->getType()))
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue();

  // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
  Value *Splat;
  if (match(V,
            m_Shuffle(m_InsertElt(m_Value(), m_Value(Splat), m_ZeroInt()),
                      m_Value(), m_ZeroMask())))
    return Splat;

  return nullptr;
}

bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");

  if (isa<VectorType>(V->getType())) {
    if (isa<UndefValue>(V))
      return true;
    // FIXME: We can allow undefs, but if Index was specified, we may want to
    // check that the constant is defined at that index.
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue() != nullptr;
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V)) {
    // FIXME: We can safely allow undefs here. If Index was specified, we will
    // check that the mask elt is defined at the required index.
    if (!all_equal(Shuf->getShuffleMask()))
      return false;

    // Match any index.
    if (Index == -1)
      return true;

    // Match a specific element. The mask should be defined at and match the
    // specified index.
    return Shuf->getMaskValue(Index) == Index;
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxAnalysisRecursionDepth)
    return false;

  // If both operands of a binop are splats, the result is a splat.
  Value *X, *Y, *Z;
  if (match(V, m_BinOp(m_Value(X), m_Value(Y))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth);

  // If all operands of a select are splats, the result is a splat.
  if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth) &&
           isSplatValue(Z, Index, Depth);

  // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops).

  return false;
}

bool llvm::getShuffleDemandedElts(int SrcWidth, ArrayRef<int> Mask,
                                  const APInt &DemandedElts, APInt &DemandedLHS,
                                  APInt &DemandedRHS, bool AllowUndefElts) {
  DemandedLHS = DemandedRHS = APInt::getZero(SrcWidth);

  // Early out if we don't demand any elements.
  if (DemandedElts.isZero())
    return true;

  // Simple case of a shuffle with zeroinitializer.
  if (all_of(Mask, [](int Elt) { return Elt == 0; })) {
    DemandedLHS.setBit(0);
    return true;
  }

  for (unsigned I = 0, E = Mask.size(); I != E; ++I) {
    int M = Mask[I];
    assert((-1 <= M) && (M < (SrcWidth * 2)) &&
           "Invalid shuffle mask constant");

    if (!DemandedElts[I] || (AllowUndefElts && (M < 0)))
      continue;

    // For undef elements, we don't know anything about the common state of
    // the shuffle result.
    if (M < 0)
      return false;

    if (M < SrcWidth)
      DemandedLHS.setBit(M);
    else
      DemandedRHS.setBit(M - SrcWidth);
  }

  return true;
}

void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                 SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return;
  }

  ScaledMask.clear();
  for (int MaskElt : Mask) {
    if (MaskElt >= 0) {
      assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <= INT32_MAX &&
             "Overflowed 32-bits");
    }
    for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
      ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt);
  }
}

bool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return true;
  }

  // We must map the original elements down evenly to a type with fewer
  // elements.
  int NumElts = Mask.size();
  if (NumElts % Scale != 0)
    return false;

  ScaledMask.clear();
  ScaledMask.reserve(NumElts / Scale);

  // Step through the input mask by splitting into Scale-sized slices.
  do {
    ArrayRef<int> MaskSlice = Mask.take_front(Scale);
    assert((int)MaskSlice.size() == Scale && "Expected Scale-sized slice.");

    // The first element of the slice determines how we evaluate this slice.
    int SliceFront = MaskSlice.front();
    if (SliceFront < 0) {
      // Negative values (undef or other "sentinel" values) must be equal
      // across the entire slice.
      if (!all_equal(MaskSlice))
        return false;
      ScaledMask.push_back(SliceFront);
    } else {
      // A positive mask element must be cleanly divisible.
      if (SliceFront % Scale != 0)
        return false;
      // Elements of the slice must be consecutive.
      for (int i = 1; i < Scale; ++i)
        if (MaskSlice[i] != SliceFront + i)
          return false;
      ScaledMask.push_back(SliceFront / Scale);
    }
    Mask = Mask.drop_front(Scale);
  } while (!Mask.empty());

  assert((int)ScaledMask.size() * Scale == NumElts && "Unexpected scaled mask");

  // All elements of the original mask can be scaled down to map to the
  // elements of a mask with wider elements.
  return true;
}

bool llvm::scaleShuffleMaskElts(unsigned NumDstElts, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &ScaledMask) {
  unsigned NumSrcElts = Mask.size();
  assert(NumSrcElts > 0 && NumDstElts > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (NumSrcElts == NumDstElts) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return true;
  }

  // Ensure we can find a whole scale factor.
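  // For example, an 8-element mask can be widened to 4 destination elements
  // or narrowed to 16, but it cannot be rescaled to 6 destination elements
  // because neither element count evenly divides the other.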
  assert(((NumSrcElts % NumDstElts) == 0 || (NumDstElts % NumSrcElts) == 0) &&
         "Unexpected scaling factor");

  if (NumSrcElts > NumDstElts) {
    int Scale = NumSrcElts / NumDstElts;
    return widenShuffleMaskElts(Scale, Mask, ScaledMask);
  }

  int Scale = NumDstElts / NumSrcElts;
  narrowShuffleMaskElts(Scale, Mask, ScaledMask);
  return true;
}

void llvm::getShuffleMaskWithWidestElts(ArrayRef<int> Mask,
                                        SmallVectorImpl<int> &ScaledMask) {
  std::array<SmallVector<int, 16>, 2> TmpMasks;
  SmallVectorImpl<int> *Output = &TmpMasks[0], *Tmp = &TmpMasks[1];
  ArrayRef<int> InputMask = Mask;
  for (unsigned Scale = 2; Scale <= InputMask.size(); ++Scale) {
    while (widenShuffleMaskElts(Scale, InputMask, *Output)) {
      InputMask = *Output;
      std::swap(Output, Tmp);
    }
  }
  ScaledMask.assign(InputMask.begin(), InputMask.end());
}

void llvm::processShuffleMasks(
    ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
    unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
    function_ref<void(ArrayRef<int>, unsigned, unsigned)> SingleInputAction,
    function_ref<void(ArrayRef<int>, unsigned, unsigned)> ManyInputsAction) {
  SmallVector<SmallVector<SmallVector<int>>> Res(NumOfDestRegs);
  // Try to perform better estimation of the permutation.
  // 1. Split the source/destination vectors into real registers.
  // 2. Do the mask analysis to identify which real registers are
  //    permuted.
  int Sz = Mask.size();
  unsigned SzDest = Sz / NumOfDestRegs;
  unsigned SzSrc = Sz / NumOfSrcRegs;
  for (unsigned I = 0; I < NumOfDestRegs; ++I) {
    auto &RegMasks = Res[I];
    RegMasks.assign(NumOfSrcRegs, {});
    // Check that the values in dest registers are in the one src
    // register.
    for (unsigned K = 0; K < SzDest; ++K) {
      int Idx = I * SzDest + K;
      if (Idx == Sz)
        break;
      if (Mask[Idx] >= Sz || Mask[Idx] == PoisonMaskElem)
        continue;
      int SrcRegIdx = Mask[Idx] / SzSrc;
      // Add a cost of PermuteTwoSrc for each new source register permute,
      // if we have more than one source registers.
      if (RegMasks[SrcRegIdx].empty())
        RegMasks[SrcRegIdx].assign(SzDest, PoisonMaskElem);
      RegMasks[SrcRegIdx][K] = Mask[Idx] % SzSrc;
    }
  }
  // Process split mask.
  for (unsigned I = 0; I < NumOfUsedRegs; ++I) {
    auto &Dest = Res[I];
    int NumSrcRegs =
        count_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
    switch (NumSrcRegs) {
    case 0:
      // No input vectors were used!
      NoInputAction();
      break;
    case 1: {
      // Find the only mask with at least single undef mask elem.
      auto *It =
          find_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
      unsigned SrcReg = std::distance(Dest.begin(), It);
      SingleInputAction(*It, SrcReg, I);
      break;
    }
    default: {
      // The first mask is a permutation of a single register. Since we have >2
      // input registers to shuffle, we merge the masks for 2 first registers
      // and generate a shuffle of 2 registers rather than the reordering of
      // the first register and then shuffle with the second register. Next,
      // generate the shuffles of the resulting register + the remaining
      // registers from the list.
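      // For example (illustrative values), with VF == 4, merging a
      // first-register mask <0, poison, 1, poison> with a second-register
      // mask <poison, 0, poison, 1> yields the two-register shuffle mask
      // <0, 4, 1, 5>.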
      auto &&CombineMasks = [](MutableArrayRef<int> FirstMask,
                               ArrayRef<int> SecondMask) {
        for (int Idx = 0, VF = FirstMask.size(); Idx < VF; ++Idx) {
          if (SecondMask[Idx] != PoisonMaskElem) {
            assert(FirstMask[Idx] == PoisonMaskElem &&
                   "Expected undefined mask element.");
            FirstMask[Idx] = SecondMask[Idx] + VF;
          }
        }
      };
      auto &&NormalizeMask = [](MutableArrayRef<int> Mask) {
        for (int Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
          if (Mask[Idx] != PoisonMaskElem)
            Mask[Idx] = Idx;
        }
      };
      int SecondIdx;
      do {
        int FirstIdx = -1;
        SecondIdx = -1;
        MutableArrayRef<int> FirstMask, SecondMask;
        for (unsigned I = 0; I < NumOfDestRegs; ++I) {
          SmallVectorImpl<int> &RegMask = Dest[I];
          if (RegMask.empty())
            continue;

          if (FirstIdx == SecondIdx) {
            FirstIdx = I;
            FirstMask = RegMask;
            continue;
          }
          SecondIdx = I;
          SecondMask = RegMask;
          CombineMasks(FirstMask, SecondMask);
          ManyInputsAction(FirstMask, FirstIdx, SecondIdx);
          NormalizeMask(FirstMask);
          RegMask.clear();
          SecondMask = FirstMask;
          SecondIdx = FirstIdx;
        }
        if (FirstIdx != SecondIdx && SecondIdx >= 0) {
          CombineMasks(SecondMask, FirstMask);
          ManyInputsAction(SecondMask, SecondIdx, FirstIdx);
          Dest[FirstIdx].clear();
          NormalizeMask(SecondMask);
        }
      } while (SecondIdx >= 0);
      break;
    }
    }
  }
}

void llvm::getHorizDemandedEltsForFirstOperand(unsigned VectorBitWidth,
                                               const APInt &DemandedElts,
                                               APInt &DemandedLHS,
                                               APInt &DemandedRHS) {
  assert(VectorBitWidth >= 128 && "Vectors smaller than 128 bit not supported");
  int NumLanes = VectorBitWidth / 128;
  int NumElts = DemandedElts.getBitWidth();
  int NumEltsPerLane = NumElts / NumLanes;
  int HalfEltsPerLane = NumEltsPerLane / 2;

  DemandedLHS = APInt::getZero(NumElts);
  DemandedRHS = APInt::getZero(NumElts);

  // Map DemandedElts to the horizontal operands.
  for (int Idx = 0; Idx != NumElts; ++Idx) {
    if (!DemandedElts[Idx])
      continue;
    int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
    int LocalIdx = Idx % NumEltsPerLane;
    if (LocalIdx < HalfEltsPerLane) {
      DemandedLHS.setBit(LaneIdx + 2 * LocalIdx);
    } else {
      LocalIdx -= HalfEltsPerLane;
      DemandedRHS.setBit(LaneIdx + 2 * LocalIdx);
    }
  }
}

MapVector<Instruction *, uint64_t>
llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                               const TargetTransformInfo *TTI) {

  // DemandedBits will give us every value's live-out bits. But we want
  // to ensure no extra casts would need to be inserted, so every DAG
  // of connected values must have the same minimum bitwidth.
  EquivalenceClasses<Value *> ECs;
  SmallVector<Value *, 16> Worklist;
  SmallPtrSet<Value *, 4> Roots;
  SmallPtrSet<Value *, 16> Visited;
  DenseMap<Value *, uint64_t> DBits;
  SmallPtrSet<Instruction *, 4> InstructionSet;
  MapVector<Instruction *, uint64_t> MinBWs;

  // Determine the roots. We work bottom-up, from truncs or icmps.
  bool SeenExtFromIllegalType = false;
  for (auto *BB : Blocks)
    for (auto &I : *BB) {
      InstructionSet.insert(&I);

      if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
          !TTI->isTypeLegal(I.getOperand(0)->getType()))
        SeenExtFromIllegalType = true;

      // Only deal with non-vector integers up to 64-bits wide.
      if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
          !I.getType()->isVectorTy() &&
          I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
        // Don't make work for ourselves. If we know the loaded type is legal,
        // don't add it to the worklist.
        if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
          continue;

        Worklist.push_back(&I);
        Roots.insert(&I);
      }
    }
  // Early exit.
  if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
    return MinBWs;

  // Now proceed breadth-first, unioning values together.
  while (!Worklist.empty()) {
    Value *Val = Worklist.pop_back_val();
    Value *Leader = ECs.getOrInsertLeaderValue(Val);

    if (!Visited.insert(Val).second)
      continue;

    // Non-instructions terminate a chain successfully.
    if (!isa<Instruction>(Val))
      continue;
    Instruction *I = cast<Instruction>(Val);

    // If we encounter a type that is larger than 64 bits, we can't represent
    // it so bail out.
    if (DB.getDemandedBits(I).getBitWidth() > 64)
      return MapVector<Instruction *, uint64_t>();

    uint64_t V = DB.getDemandedBits(I).getZExtValue();
    DBits[Leader] |= V;
    DBits[I] = V;

    // Casts, loads and instructions outside of our range terminate a chain
    // successfully.
    if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
        !InstructionSet.count(I))
      continue;

    // Unsafe casts terminate a chain unsuccessfully. We can't do anything
    // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
    // transform anything that relies on them.
    if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
        !I->getType()->isIntegerTy()) {
      DBits[Leader] |= ~0ULL;
      continue;
    }

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    if (isa<PHINode>(I))
      continue;

    if (DBits[Leader] == ~0ULL)
      // All bits demanded, no point continuing.
      continue;

    for (Value *O : cast<User>(I)->operands()) {
      ECs.unionSets(Leader, O);
      Worklist.push_back(O);
    }
  }

  // Now we've discovered all values, walk them to see if there are
  // any users we didn't see. If there are, we can't optimize that
  // chain.
  for (auto &I : DBits)
    for (auto *U : I.first->users())
      if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
        DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;

  for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
    uint64_t LeaderDemandedBits = 0;
    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
      LeaderDemandedBits |= DBits[M];

    uint64_t MinBW = llvm::bit_width(LeaderDemandedBits);
    // Round up to a power of 2
    MinBW = llvm::bit_ceil(MinBW);

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    // If we are required to shrink a PHI, abandon this entire equivalence
    // class.
    bool Abort = false;
    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
      if (isa<PHINode>(M) && MinBW < M->getType()->getScalarSizeInBits()) {
        Abort = true;
        break;
      }
    if (Abort)
      continue;

    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end())) {
      auto *MI = dyn_cast<Instruction>(M);
      if (!MI)
        continue;
      Type *Ty = M->getType();
      if (Roots.count(M))
        Ty = MI->getOperand(0)->getType();

      if (MinBW >= Ty->getScalarSizeInBits())
        continue;

      // If any of M's operands demand more bits than MinBW then M cannot be
      // performed safely in MinBW.
      if (any_of(MI->operands(), [&DB, MinBW](Use &U) {
            auto *CI = dyn_cast<ConstantInt>(U);
            // For constant shift amounts, check if the shift would result in
            // poison.
            if (CI &&
                isa<ShlOperator, LShrOperator, AShrOperator>(U.getUser()) &&
                U.getOperandNo() == 1)
              return CI->uge(MinBW);
            uint64_t BW = bit_width(DB.getDemandedBits(&U).getZExtValue());
            return bit_ceil(BW) > MinBW;
          }))
        continue;

      MinBWs[MI] = MinBW;
    }
  }

  return MinBWs;
}

/// Add all access groups in @p AccGroups to @p List.
template <typename ListT>
static void addToAccessGroupList(ListT &List, MDNode *AccGroups) {
  // Interpret an access group as a list containing itself.
  if (AccGroups->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group");
    List.insert(AccGroups);
    return;
  }

  for (const auto &AccGroupListOp : AccGroups->operands()) {
    auto *Item = cast<MDNode>(AccGroupListOp.get());
    assert(isValidAsAccessGroup(Item) && "List item must be an access group");
    List.insert(Item);
  }
}

MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) {
  if (!AccGroups1)
    return AccGroups2;
  if (!AccGroups2)
    return AccGroups1;
  if (AccGroups1 == AccGroups2)
    return AccGroups1;

  SmallSetVector<Metadata *, 4> Union;
  addToAccessGroupList(Union, AccGroups1);
  addToAccessGroupList(Union, AccGroups2);

  if (Union.size() == 0)
    return nullptr;
  if (Union.size() == 1)
    return cast<MDNode>(Union.front());

  LLVMContext &Ctx = AccGroups1->getContext();
  return MDNode::get(Ctx, Union.getArrayRef());
}

MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
                                    const Instruction *Inst2) {
  bool MayAccessMem1 = Inst1->mayReadOrWriteMemory();
  bool MayAccessMem2 = Inst2->mayReadOrWriteMemory();

  if (!MayAccessMem1 && !MayAccessMem2)
    return nullptr;
  if (!MayAccessMem1)
    return Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MayAccessMem2)
    return Inst1->getMetadata(LLVMContext::MD_access_group);

  MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group);
  MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MD1 || !MD2)
    return nullptr;
  if (MD1 == MD2)
    return MD1;

  // Use set for scalable 'contains' check.
  SmallPtrSet<Metadata *, 4> AccGroupSet2;
  addToAccessGroupList(AccGroupSet2, MD2);

  SmallVector<Metadata *, 4> Intersection;
  if (MD1->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(MD1) && "Node must be an access group");
    if (AccGroupSet2.count(MD1))
      Intersection.push_back(MD1);
  } else {
    for (const MDOperand &Node : MD1->operands()) {
      auto *Item = cast<MDNode>(Node.get());
      assert(isValidAsAccessGroup(Item) && "List item must be an access group");
      if (AccGroupSet2.count(Item))
        Intersection.push_back(Item);
    }
  }

  if (Intersection.size() == 0)
    return nullptr;
  if (Intersection.size() == 1)
    return cast<MDNode>(Intersection.front());

  LLVMContext &Ctx = Inst1->getContext();
  return MDNode::get(Ctx, Intersection);
}

/// \returns \p I after propagating metadata from \p VL.
Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
  if (VL.empty())
    return Inst;
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                    LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
                    LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load,
                    LLVMContext::MD_access_group, LLVMContext::MD_mmra}) {
    MDNode *MD = I0->getMetadata(Kind);
    for (int J = 1, E = VL.size(); MD && J != E; ++J) {
      const Instruction *IJ = cast<Instruction>(VL[J]);
      MDNode *IMD = IJ->getMetadata(Kind);

      switch (Kind) {
      case LLVMContext::MD_mmra: {
        MD = MMRAMetadata::combine(Inst->getContext(), MD, IMD);
        break;
      }
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_nontemporal:
      case LLVMContext::MD_invariant_load:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_access_group:
        MD = intersectAccessGroups(Inst, IJ);
        break;
      default:
        llvm_unreachable("unhandled metadata");
      }
    }

    Inst->setMetadata(Kind, MD);
  }

  return Inst;
}

Constant *
llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
                           const InterleaveGroup<Instruction> &Group) {
  // All 1's means mask is not needed.
  if (Group.getNumMembers() == Group.getFactor())
    return nullptr;

  // TODO: support reversed access.
  assert(!Group.isReverse() && "Reversed group not supported.");

  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < Group.getFactor(); ++j) {
      unsigned HasMember = Group.getMember(j) ? 1 : 0;
      Mask.push_back(Builder.getInt1(HasMember));
    }

  return ConstantVector::get(Mask);
}

llvm::SmallVector<int, 16>
llvm::createReplicatedMask(unsigned ReplicationFactor, unsigned VF) {
  SmallVector<int, 16> MaskVec;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < ReplicationFactor; j++)
      MaskVec.push_back(i);

  return MaskVec;
}

llvm::SmallVector<int, 16> llvm::createInterleaveMask(unsigned VF,
                                                      unsigned NumVecs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < NumVecs; j++)
      Mask.push_back(j * VF + i);

  return Mask;
}

llvm::SmallVector<int, 16>
llvm::createStrideMask(unsigned Start, unsigned Stride, unsigned VF) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    Mask.push_back(Start + i * Stride);

  return Mask;
}

llvm::SmallVector<int, 16> llvm::createSequentialMask(unsigned Start,
                                                      unsigned NumInts,
                                                      unsigned NumUndefs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < NumInts; i++)
    Mask.push_back(Start + i);

  for (unsigned i = 0; i < NumUndefs; i++)
    Mask.push_back(-1);

  return Mask;
}

llvm::SmallVector<int, 16> llvm::createUnaryMask(ArrayRef<int> Mask,
                                                 unsigned NumElts) {
  // Avoid casts in the loop and make sure we have a reasonable number.
  int NumEltsSigned = NumElts;
  assert(NumEltsSigned > 0 && "Expected smaller or non-zero element count");

  // If the mask chooses an element from operand 1, reduce it to choose from
  // the corresponding element of operand 0. Undef mask elements are unchanged.
  SmallVector<int, 16> UnaryMask;
  for (int MaskElt : Mask) {
    assert((MaskElt < NumEltsSigned * 2) && "Expected valid shuffle mask");
    int UnaryElt = MaskElt >= NumEltsSigned ? MaskElt - NumEltsSigned : MaskElt;
    UnaryMask.push_back(UnaryElt);
  }
  return UnaryMask;
}

/// A helper function for concatenating vectors. This function concatenates two
/// vectors having the same element type. If the second vector has fewer
/// elements than the first, it is padded with undefs.
static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1,
                                    Value *V2) {
  VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
  VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
  assert(VecTy1 && VecTy2 &&
         VecTy1->getScalarType() == VecTy2->getScalarType() &&
         "Expect two vectors with the same element type");

  unsigned NumElts1 = cast<FixedVectorType>(VecTy1)->getNumElements();
  unsigned NumElts2 = cast<FixedVectorType>(VecTy2)->getNumElements();
  assert(NumElts1 >= NumElts2 && "Unexpect the first vector has less elements");

  if (NumElts1 > NumElts2) {
    // Extend with UNDEFs.
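    // For example, if V1 has 4 elements and V2 has 2, V2 is widened here with
    // the shuffle mask <0, 1, undef, undef> so that the final two-operand
    // shuffle below sees operands of equal length.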
    V2 = Builder.CreateShuffleVector(
        V2, createSequentialMask(0, NumElts2, NumElts1 - NumElts2));
  }

  return Builder.CreateShuffleVector(
      V1, V2, createSequentialMask(0, NumElts1 + NumElts2, 0));
}

Value *llvm::concatenateVectors(IRBuilderBase &Builder,
                                ArrayRef<Value *> Vecs) {
  unsigned NumVecs = Vecs.size();
  assert(NumVecs > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(Vecs.begin(), Vecs.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVecs - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVecs % 2 != 0)
      TmpList.push_back(ResList[NumVecs - 1]);

    ResList = TmpList;
    NumVecs = ResList.size();
  } while (NumVecs > 1);

  return ResList[0];
}

bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

bool llvm::maskIsAllOneOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

bool llvm::maskContainsAllOneOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        return true;
  }
  return false;
}

/// TODO: This is a lot like known bits, but for
/// vectors.  Is there something we can common this with?
APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {
  assert(isa<FixedVectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a fixed width vector of i1");

  const unsigned VWidth =
      cast<FixedVectorType>(Mask->getType())->getNumElements();
  APInt DemandedElts = APInt::getAllOnes(VWidth);
  if (auto *CV = dyn_cast<ConstantVector>(Mask))
    for (unsigned i = 0; i < VWidth; i++)
      if (CV->getAggregateElement(i)->isNullValue())
        DemandedElts.clearBit(i);
  return DemandedElts;
}

bool InterleavedAccessInfo::isStrided(int Stride) {
  unsigned Factor = std::abs(Stride);
  return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
}

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const DenseMap<Value *, const SCEV *> &Strides) {
  auto &DL = TheLoop->getHeader()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;
      Type *ElementTy = getLoadStoreType(&I);

      // Currently, codegen doesn't support cases where the type size doesn't
      // match the alloc size. Skip them for now.
      uint64_t Size = DL.getTypeAllocSize(ElementTy);
      if (Size * 8 != DL.getTypeSizeInBits(ElementTy))
        continue;

      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) will be
      // overly conservative. For full groups, wrapping should be ok since if
      // we would wrap around the address space we would do a memory access at
      // nullptr even without the transformation. The wrapping checks are
      // therefore deferred until after we've formed the interleaved groups.
      int64_t Stride =
          getPtrStride(PSE, ElementTy, Ptr, TheLoop, Strides,
                       /*Assume=*/true, /*ShouldCheckWrap=*/false).value_or(0);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size,
                                              getLoadStoreAlignment(&I));
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order.
// When generating code for an interleaved store group, we sink all stores to
// the location of the last store. This code motion can change the order of
// load and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];      // (1)
//                                A[i] = b;      // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
    bool EnablePredicatedInterleavedMemAccesses) {
  LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
  const auto &Strides = LAI->getSymbolicStrides();

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups;
  // Groups added to this set cannot have new members added.
  SmallPtrSet<InterleaveGroup<Instruction> *, 4> CompletedLoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride.
    // Even if we don't create a group for B, we continue with the bottom-up
    // algorithm to ensure we don't break any of B's dependences.
    InterleaveGroup<Instruction> *GroupB = nullptr;
    if (isStrided(DesB.Stride) &&
        (!isPredicated(B->getParent()) ||
         EnablePredicatedInterleavedMemAccesses)) {
      GroupB = getInterleaveGroup(B);
      if (!GroupB) {
        LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
                          << '\n');
        GroupB = createInterleaveGroup(B, DesB.Stride, DesB.Alignment);
        if (B->mayWriteToMemory())
          StoreGroups.insert(GroupB);
        else
          LoadGroups.insert(GroupB);
      }
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a; // (1)
      //                    | A[i-1] = b; // (2) |
      //                      A[i-3] = c; // (3)
      //                      A[i]   = d; // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      auto DependentMember = [&](InterleaveGroup<Instruction> *Group,
                                 StrideEntry *A) -> Instruction * {
        for (uint32_t Index = 0; Index < Group->getFactor(); ++Index) {
          Instruction *MemberOfGroupB = Group->getMember(Index);
          if (MemberOfGroupB && !canReorderMemAccessesForInterleavedGroups(
                                    A, &*AccessStrideInfo.find(MemberOfGroupB)))
            return MemberOfGroupB;
        }
        return nullptr;
      };

      auto GroupA = getInterleaveGroup(A);
      // If A is a load, dependencies are tolerable, there's nothing to do here.
      // If both A and B belong to the same (store) group, they are independent,
      // even if dependencies have not been recorded.
      // If both GroupA and GroupB are null, there's nothing to do here.
      if (A->mayWriteToMemory() && GroupA != GroupB) {
        Instruction *DependentInst = nullptr;
        // If GroupB is a load group, we have to compare AI against all
        // members of GroupB because if any load within GroupB has a dependency
        // on AI, we need to mark GroupB as complete and also release the
        // store GroupA (if A belongs to one). The former prevents incorrect
        // hoisting of load B above store A while the latter prevents incorrect
        // sinking of store A below load B.
        if (GroupB && LoadGroups.contains(GroupB))
          DependentInst = DependentMember(GroupB, &*AI);
        else if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI))
          DependentInst = B;

        if (DependentInst) {
          // A has a store dependence on B (or on some load within GroupB) and
          // is part of a store group. Release A's group to prevent illegal
          // sinking of A below B. A will then be free to form another group
          // with instructions that precede it.
          if (GroupA && StoreGroups.contains(GroupA)) {
            LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to "
                                 "dependence between "
                              << *A << " and " << *DependentInst << '\n');
            StoreGroups.remove(GroupA);
            releaseGroup(GroupA);
          }
          // If B is a load and part of an interleave group, no earlier loads
          // can be added to B's interleave group, because this would mean the
          // DependentInst would move across store A. Mark the interleave group
          // as complete.
          if (GroupB && LoadGroups.contains(GroupB)) {
            LLVM_DEBUG(dbgs() << "LV: Marking interleave group for " << *B
                              << " as complete.\n");
            CompletedLoadGroups.insert(GroupB);
          }
        }
      }
      if (CompletedLoadGroups.contains(GroupB)) {
        // Skip trying to add A to B, continue to look for other conflicting
        // A's in groups to be released.
        continue;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      // Note that mayReadFromMemory() isn't mutually exclusive to
      // mayWriteToMemory in the case of atomic loads. We shouldn't see those
      // here, canVectorizeMemory() should have returned false - except for the
      // case we asked for optimization remarks.
      if (isInterleaved(A) ||
          (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
          (A->mayWriteToMemory() != B->mayWriteToMemory()))
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory object of A and B don't belong to the same
      // address space.
      if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // All members of a predicated interleave-group must have the same
      // predicate, and currently must reside in the same BB.
      BasicBlock *BlockA = A->getParent();
      BasicBlock *BlockB = B->getParent();
      if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
          (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          GroupB->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (GroupB->insertMember(A, IndexA, DesA.Alignment)) {
        LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                          << "    into the interleave group with" << *B
                          << '\n');
        InterleaveGroupMap[A] = GroupB;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          GroupB->setInsertPos(A);
      }
    } // Iteration over A accesses.
  } // Iteration over B accesses.

  auto InvalidateGroupIfMemberMayWrap = [&](InterleaveGroup<Instruction> *Group,
                                            int Index,
                                            const char *FirstOrLast) -> bool {
    Instruction *Member = Group->getMember(Index);
    assert(Member && "Group member does not exist");
    Value *MemberPtr = getLoadStorePointerOperand(Member);
    Type *AccessTy = getLoadStoreType(Member);
    if (getPtrStride(PSE, AccessTy, MemberPtr, TheLoop, Strides,
                     /*Assume=*/false, /*ShouldCheckWrap=*/true).value_or(0))
      return false;
    LLVM_DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                      << FirstOrLast
                      << " group member potentially pointer-wrapping.\n");
    releaseGroup(Group);
    return true;
  };

  // Remove interleaved groups with gaps whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumptions checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer doesn't
  // wrap then we can deduce that all pointers in the group don't wrap.
  // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. When we'll change to use Assume=true
  // we'll only need at most one runtime check per interleaved group.
  for (auto *Group : LoadGroups) {
    // Case 1: A full group. Can skip the checks; For full groups, if the wide
    // load would wrap around the address space we would do a memory access at
    // nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If first and last members of the group don't wrap this implies
    // that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and group member Factor - 1; If the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
    if (InvalidateGroupIfMemberMayWrap(Group, 0, "first"))
      continue;
    if (Group->getMember(Group->getFactor() - 1))
      InvalidateGroupIfMemberMayWrap(Group, Group->getFactor() - 1, "last");
    else {
      // Case 3: A non-reversed interleaved load group with gaps: We need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      LLVM_DEBUG(
          dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }

  for (auto *Group : StoreGroups) {
    // Case 1: A full group. Can skip the checks; For full groups, if the wide
    // store would wrap around the address space we would do a memory access at
    // nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Interleave-store-group with gaps is implemented using masked wide store.
    // Remove interleaved store groups with gaps if
    // masked-interleaved-accesses are not enabled by the target.
    if (!EnablePredicatedInterleavedMemAccesses) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved store group due "
                    "to gaps.\n");
      releaseGroup(Group);
      continue;
    }

    // Case 2: If first and last members of the group don't wrap this implies
    // that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and the last group member. Case 3 (scalar epilog) is not relevant for
    // stores with gaps, which are implemented with masked-store (rather than
    // speculative access, as in loads).
    if (InvalidateGroupIfMemberMayWrap(Group, 0, "first"))
      continue;
    for (int Index = Group->getFactor() - 1; Index > 0; Index--)
      if (Group->getMember(Index)) {
        InvalidateGroupIfMemberMayWrap(Group, Index, "last");
        break;
      }
  }
}

void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
  // If no group had triggered the requirement to create an epilogue loop,
  // there is nothing to do.
  if (!requiresScalarEpilogue())
    return;

  // Release groups requiring scalar epilogues. Note that this also removes
  // them from InterleaveGroups.
  bool ReleasedGroup = InterleaveGroups.remove_if([&](auto *Group) {
    if (!Group->requiresScalarEpilogue())
      return false;
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate candidate interleaved group due to gaps that "
           "require a scalar epilogue (not allowed under optsize) and cannot "
           "be masked (not enabled). \n");
    releaseGroupWithoutRemovingFromSet(Group);
    return true;
  });
  assert(ReleasedGroup && "At least one group must be invalidated, as a "
                          "scalar epilogue was required");
  (void)ReleasedGroup;
  RequiresScalarEpilogue = false;
}

template <typename InstT>
void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const {
  llvm_unreachable("addMetadata can only be used for Instruction");
}

namespace llvm {
template <>
void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const {
  SmallVector<Value *, 4> VL;
  std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
                 [](std::pair<int, Instruction *> p) { return p.second; });
  propagateMetadata(NewInst, VL);
}
} // namespace llvm