//===------- VectorCombine.cpp - Optimize partial vector operations ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass optimizes scalar/vector interactions using target cost models. The
// transforms implemented here may not fit in traditional loop-based or SLP
// vectorization passes.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/VectorCombine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Vectorize.h"

#define DEBUG_TYPE "vector-combine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumVecLoad, "Number of vector loads formed");
STATISTIC(NumVecCmp, "Number of vector compares formed");
STATISTIC(NumVecBO, "Number of vector binops formed");
STATISTIC(NumVecCmpBO, "Number of vector compare + binop formed");
STATISTIC(NumShufOfBitcast, "Number of shuffles moved after bitcast");
STATISTIC(NumScalarBO, "Number of scalar binops formed");
STATISTIC(NumScalarCmp, "Number of scalar compares formed");

static cl::opt<bool> DisableVectorCombine(
    "disable-vector-combine", cl::init(false), cl::Hidden,
    cl::desc("Disable all vector combine transforms"));

static cl::opt<bool> DisableBinopExtractShuffle(
    "disable-binop-extract-shuffle", cl::init(false), cl::Hidden,
    cl::desc("Disable binop extract to shuffle transforms"));

static cl::opt<unsigned> MaxInstrsToScan(
    "vector-combine-max-scan-instrs", cl::init(30), cl::Hidden,
    cl::desc("Max number of instructions to scan for vector combining."));

static const unsigned InvalidIndex = std::numeric_limits<unsigned>::max();

namespace {
class VectorCombine {
public:
  VectorCombine(Function &F, const TargetTransformInfo &TTI,
                const DominatorTree &DT, AAResults &AA, AssumptionCache &AC,
                bool ScalarizationOnly)
      : F(F), Builder(F.getContext()), TTI(TTI), DT(DT), AA(AA), AC(AC),
        ScalarizationOnly(ScalarizationOnly) {}

  bool run();

private:
  Function &F;
  IRBuilder<> Builder;
  const TargetTransformInfo &TTI;
  const DominatorTree &DT;
  AAResults &AA;
  AssumptionCache &AC;

  /// If true, only perform scalarization combines and do not introduce new
  /// vector operations.
  bool ScalarizationOnly;

  InstructionWorklist Worklist;

  bool vectorizeLoadInsert(Instruction &I);
  ExtractElementInst *getShuffleExtract(ExtractElementInst *Ext0,
                                        ExtractElementInst *Ext1,
                                        unsigned PreferredExtractIndex) const;
  bool isExtractExtractCheap(ExtractElementInst *Ext0,
                             ExtractElementInst *Ext1, const Instruction &I,
                             ExtractElementInst *&ConvertToShuffle,
                             unsigned PreferredExtractIndex);
  void foldExtExtCmp(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                     Instruction &I);
  void foldExtExtBinop(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                       Instruction &I);
  bool foldExtractExtract(Instruction &I);
  bool foldBitcastShuf(Instruction &I);
  bool scalarizeBinopOrCmp(Instruction &I);
  bool foldExtractedCmps(Instruction &I);
  bool foldSingleElementStore(Instruction &I);
  bool scalarizeLoadExtract(Instruction &I);
  bool foldShuffleOfBinops(Instruction &I);

  void replaceValue(Value &Old, Value &New) {
    Old.replaceAllUsesWith(&New);
    New.takeName(&Old);
    if (auto *NewI = dyn_cast<Instruction>(&New)) {
      Worklist.pushUsersToWorkList(*NewI);
      Worklist.pushValue(NewI);
    }
    Worklist.pushValue(&Old);
  }

  void eraseInstruction(Instruction &I) {
    for (Value *Op : I.operands())
      Worklist.pushValue(Op);
    Worklist.remove(&I);
    I.eraseFromParent();
  }
};
} // namespace

bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
  // Match insert into fixed vector of scalar value.
  // TODO: Handle non-zero insert index.
  auto *Ty = dyn_cast<FixedVectorType>(I.getType());
  Value *Scalar;
  if (!Ty || !match(&I, m_InsertElt(m_Undef(), m_Value(Scalar), m_ZeroInt())) ||
      !Scalar->hasOneUse())
    return false;

  // Optionally match an extract from another vector.
  Value *X;
  bool HasExtract = match(Scalar, m_ExtractElt(m_Value(X), m_ZeroInt()));
  if (!HasExtract)
    X = Scalar;

  // Match source value as load of scalar or vector.
  // Do not vectorize scalar load (widening) if atomic/volatile or under
  // asan/hwasan/memtag/tsan. The widened load may load data from dirty regions
  // or create data races non-existent in the source.
  auto *Load = dyn_cast<LoadInst>(X);
  if (!Load || !Load->isSimple() || !Load->hasOneUse() ||
      Load->getFunction()->hasFnAttribute(Attribute::SanitizeMemTag) ||
      mustSuppressSpeculation(*Load))
    return false;

  const DataLayout &DL = I.getModule()->getDataLayout();
  Value *SrcPtr = Load->getPointerOperand()->stripPointerCasts();
  assert(isa<PointerType>(SrcPtr->getType()) && "Expected a pointer type");

  unsigned AS = Load->getPointerAddressSpace();

  // We are potentially transforming byte-sized (8-bit) memory accesses, so
  // make sure we have all of our type-based constraints in place for this
  // target.
  Type *ScalarTy = Scalar->getType();
  uint64_t ScalarSize = ScalarTy->getPrimitiveSizeInBits();
  unsigned MinVectorSize = TTI.getMinVectorRegisterBitWidth();
  if (!ScalarSize || !MinVectorSize || MinVectorSize % ScalarSize != 0 ||
      ScalarSize % 8 != 0)
    return false;

  // Check safety of replacing the scalar load with a larger vector load.
  // We use minimal alignment (maximum flexibility) because we only care about
  // the dereferenceable region. When calculating cost and creating a new op,
  // we may use a larger value based on alignment attributes.
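  // For example, on a hypothetical target with a 128-bit minimum vector
  // register width, an i32 scalar load could be widened to a <4 x i32> load
  // (MinVecNumElts = 128 / 32 = 4), provided the full 16 bytes are known to
  // be dereferenceable.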
  unsigned MinVecNumElts = MinVectorSize / ScalarSize;
  auto *MinVecTy = VectorType::get(ScalarTy, MinVecNumElts, false);
  unsigned OffsetEltIndex = 0;
  Align Alignment = Load->getAlign();
  if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &DT)) {
    // It is not safe to load directly from the pointer, but we can still peek
    // through gep offsets and check if it is safe to load from a base address
    // with updated alignment. If it is, we can shuffle the element(s) into
    // place after loading.
    unsigned OffsetBitWidth = DL.getIndexTypeSizeInBits(SrcPtr->getType());
    APInt Offset(OffsetBitWidth, 0);
    SrcPtr = SrcPtr->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);

    // We want to shuffle the result down from a high element of a vector, so
    // the offset must be positive.
    if (Offset.isNegative())
      return false;

    // The offset must be a multiple of the scalar element size to shuffle
    // cleanly in the element's size.
    uint64_t ScalarSizeInBytes = ScalarSize / 8;
    if (Offset.urem(ScalarSizeInBytes) != 0)
      return false;

    // If we load MinVecNumElts, will our target element still be loaded?
    OffsetEltIndex = Offset.udiv(ScalarSizeInBytes).getZExtValue();
    if (OffsetEltIndex >= MinVecNumElts)
      return false;

    if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &DT))
      return false;

    // Update alignment with offset value. Note that the offset could be
    // negated to more accurately represent "(new) SrcPtr - Offset = (old)
    // SrcPtr", but negation does not change the result of the alignment
    // calculation.
    Alignment = commonAlignment(Alignment, Offset.getZExtValue());
  }

  // Original pattern: insertelt undef, load [free casts of] PtrOp, 0
  // Use the greater of the alignment on the load or its source pointer.
  Alignment = std::max(SrcPtr->getPointerAlignment(DL), Alignment);
  Type *LoadTy = Load->getType();
  InstructionCost OldCost =
      TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, AS);
  APInt DemandedElts = APInt::getOneBitSet(MinVecNumElts, 0);
  OldCost += TTI.getScalarizationOverhead(MinVecTy, DemandedElts,
                                          /* Insert */ true, HasExtract);

  // New pattern: load VecPtr
  InstructionCost NewCost =
      TTI.getMemoryOpCost(Instruction::Load, MinVecTy, Alignment, AS);
  // Optionally, we are shuffling the loaded vector element(s) into place.
  // For the mask, set everything but element 0 to undef to prevent poison
  // from propagating from the extra loaded memory. This will also optionally
  // shrink/grow the vector from the loaded size to the output size.
  // We assume this operation has no cost in codegen if there was no offset.
  // Note that we could use freeze to avoid poison problems, but then we might
  // still need a shuffle to change the vector size.
  unsigned OutputNumElts = Ty->getNumElements();
  SmallVector<int, 16> Mask(OutputNumElts, UndefMaskElem);
  assert(OffsetEltIndex < MinVecNumElts && "Address offset too big");
  Mask[0] = OffsetEltIndex;
  if (OffsetEltIndex)
    NewCost += TTI.getShuffleCost(TTI::SK_PermuteSingleSrc, MinVecTy, Mask);
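  // Illustrative end-to-end example of this transform, assuming the offset
  // case with OffsetEltIndex == 1 and a 4-element minimum vector width:
  //   %s = load i32, i32* %gep
  //   %r = insertelement <4 x i32> undef, i32 %s, i32 0
  // -->
  //   %v = load <4 x i32>, <4 x i32>* %base
  //   %r = shufflevector <4 x i32> %v, <4 x i32> poison,
  //                      <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>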
  // We can aggressively convert to the vector form because the backend can
  // invert this transform if it does not result in a performance win.
  if (OldCost < NewCost || !NewCost.isValid())
    return false;

  // It is safe and potentially profitable to load a vector directly:
  // inselt undef, load Scalar, 0 --> load VecPtr
  IRBuilder<> Builder(Load);
  Value *CastedPtr = Builder.CreatePointerBitCastOrAddrSpaceCast(
      SrcPtr, MinVecTy->getPointerTo(AS));
  Value *VecLd = Builder.CreateAlignedLoad(MinVecTy, CastedPtr, Alignment);
  VecLd = Builder.CreateShuffleVector(VecLd, Mask);

  replaceValue(I, *VecLd);
  ++NumVecLoad;
  return true;
}

/// Determine which, if any, of the inputs should be replaced by a shuffle
/// followed by extract from a different index.
ExtractElementInst *VectorCombine::getShuffleExtract(
    ExtractElementInst *Ext0, ExtractElementInst *Ext1,
    unsigned PreferredExtractIndex = InvalidIndex) const {
  assert(isa<ConstantInt>(Ext0->getIndexOperand()) &&
         isa<ConstantInt>(Ext1->getIndexOperand()) &&
         "Expected constant extract indexes");

  unsigned Index0 = cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue();
  unsigned Index1 = cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue();

  // If the extract indexes are identical, no shuffle is needed.
  if (Index0 == Index1)
    return nullptr;

  Type *VecTy = Ext0->getVectorOperand()->getType();
  assert(VecTy == Ext1->getVectorOperand()->getType() && "Need matching types");
  InstructionCost Cost0 =
      TTI.getVectorInstrCost(Ext0->getOpcode(), VecTy, Index0);
  InstructionCost Cost1 =
      TTI.getVectorInstrCost(Ext1->getOpcode(), VecTy, Index1);

  // If both costs are invalid, no shuffle is needed.
  if (!Cost0.isValid() && !Cost1.isValid())
    return nullptr;

  // We are extracting from 2 different indexes, so one operand must be
  // shuffled before performing a vector operation and/or extract. The more
  // expensive extract will be replaced by a shuffle.
  if (Cost0 > Cost1)
    return Ext0;
  if (Cost1 > Cost0)
    return Ext1;

  // If the costs are equal and there is a preferred extract index, shuffle the
  // opposite operand.
  if (PreferredExtractIndex == Index0)
    return Ext1;
  if (PreferredExtractIndex == Index1)
    return Ext0;

  // Otherwise, replace the extract with the higher index.
  return Index0 > Index1 ? Ext0 : Ext1;
}

/// Compare the relative costs of 2 extracts followed by scalar operation vs.
/// vector operation(s) followed by extract. Return true if the existing
/// instructions are cheaper than a vector alternative. Otherwise, return false
/// and if one of the extracts should be transformed to a shufflevector, set
/// \p ConvertToShuffle to that extract instruction.
bool VectorCombine::isExtractExtractCheap(ExtractElementInst *Ext0,
                                          ExtractElementInst *Ext1,
                                          const Instruction &I,
                                          ExtractElementInst *&ConvertToShuffle,
                                          unsigned PreferredExtractIndex) {
  assert(isa<ConstantInt>(Ext0->getOperand(1)) &&
         isa<ConstantInt>(Ext1->getOperand(1)) &&
         "Expected constant extract indexes");
  unsigned Opcode = I.getOpcode();
  Type *ScalarTy = Ext0->getType();
  auto *VecTy = cast<VectorType>(Ext0->getOperand(0)->getType());
  InstructionCost ScalarOpCost, VectorOpCost;

  // Get cost estimates for scalar and vector versions of the operation.
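  // As a worked example (costs are illustrative, not from a real target): for
  // "add (extractelement <4 x i32> %x, 0), (extractelement <4 x i32> %y, 0)"
  // with an extract cost of 2 and an add cost of 1, the scalar sequence costs
  // 2 + 2 + 1 = 5, while the vector form "extractelement (add %x, %y), 0"
  // costs 1 + 2 = 3, so the vector form wins.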
  bool IsBinOp = Instruction::isBinaryOp(Opcode);
  if (IsBinOp) {
    ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  } else {
    assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
           "Expected a compare");
    CmpInst::Predicate Pred = cast<CmpInst>(I).getPredicate();
    ScalarOpCost = TTI.getCmpSelInstrCost(
        Opcode, ScalarTy, CmpInst::makeCmpResultType(ScalarTy), Pred);
    VectorOpCost = TTI.getCmpSelInstrCost(
        Opcode, VecTy, CmpInst::makeCmpResultType(VecTy), Pred);
  }

  // Get cost estimates for the extract elements. These costs will factor into
  // both sequences.
  unsigned Ext0Index = cast<ConstantInt>(Ext0->getOperand(1))->getZExtValue();
  unsigned Ext1Index = cast<ConstantInt>(Ext1->getOperand(1))->getZExtValue();

  InstructionCost Extract0Cost =
      TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext0Index);
  InstructionCost Extract1Cost =
      TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext1Index);

  // A more expensive extract will always be replaced by a splat shuffle.
  // For example, if Ext0 is more expensive:
  // opcode (extelt V0, Ext0), (ext V1, Ext1) -->
  // extelt (opcode (splat V0, Ext0), V1), Ext1
  // TODO: Evaluate whether that always results in lowest cost. Alternatively,
  //       check the cost of creating a broadcast shuffle and shuffling both
  //       operands to element 0.
  InstructionCost CheapExtractCost = std::min(Extract0Cost, Extract1Cost);

  // Extra uses of the extracts mean that we include those costs in the
  // vector total because those instructions will not be eliminated.
  InstructionCost OldCost, NewCost;
  if (Ext0->getOperand(0) == Ext1->getOperand(0) && Ext0Index == Ext1Index) {
    // Handle a special case. If the 2 extracts are identical, adjust the
    // formulas to account for that. The extra use charge allows for either the
    // CSE'd pattern or an unoptimized form with identical values:
    // opcode (extelt V, C), (extelt V, C) --> extelt (opcode V, V), C
    bool HasUseTax = Ext0 == Ext1 ? !Ext0->hasNUses(2)
                                  : !Ext0->hasOneUse() || !Ext1->hasOneUse();
    OldCost = CheapExtractCost + ScalarOpCost;
    NewCost = VectorOpCost + CheapExtractCost + HasUseTax * CheapExtractCost;
  } else {
    // Handle the general case. Each extract is actually a different value:
    // opcode (extelt V0, C0), (extelt V1, C1) --> extelt (opcode V0, V1), C
    OldCost = Extract0Cost + Extract1Cost + ScalarOpCost;
    NewCost = VectorOpCost + CheapExtractCost +
              !Ext0->hasOneUse() * Extract0Cost +
              !Ext1->hasOneUse() * Extract1Cost;
  }

  ConvertToShuffle = getShuffleExtract(Ext0, Ext1, PreferredExtractIndex);
  if (ConvertToShuffle) {
    if (IsBinOp && DisableBinopExtractShuffle)
      return true;

    // If we are extracting from 2 different indexes, then one operand must be
    // shuffled before performing the vector operation. The shuffle mask is
    // undefined except for 1 lane that is being translated to the remaining
    // extraction lane. Therefore, it is a splat shuffle. Ex:
    // ShufMask = { undef, undef, 0, undef }
    // TODO: The cost model has an option for a "broadcast" shuffle
    //       (splat-from-element-0), but no option for a more general splat.
    NewCost +=
        TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
  }

  // Aggressively form a vector op if the cost is equal because the transform
  // may enable further optimization.
  // Codegen can reverse this transform (scalarize) if it was not profitable.
  return OldCost < NewCost;
}

/// Create a shuffle that translates (shifts) 1 element from the input vector
/// to a new element location.
static Value *createShiftShuffle(Value *Vec, unsigned OldIndex,
                                 unsigned NewIndex, IRBuilder<> &Builder) {
  // The shuffle mask is undefined except for 1 lane that is being translated
  // to the new element index. Example for OldIndex == 2 and NewIndex == 0:
  // ShufMask = { 2, undef, undef, undef }
  auto *VecTy = cast<FixedVectorType>(Vec->getType());
  SmallVector<int, 32> ShufMask(VecTy->getNumElements(), UndefMaskElem);
  ShufMask[NewIndex] = OldIndex;
  return Builder.CreateShuffleVector(Vec, ShufMask, "shift");
}

/// Given an extract element instruction with constant index operand, shuffle
/// the source vector (shift the scalar element) to a NewIndex for extraction.
/// Return null if the input can be constant folded, so that we are not
/// creating unnecessary instructions.
static ExtractElementInst *translateExtract(ExtractElementInst *ExtElt,
                                            unsigned NewIndex,
                                            IRBuilder<> &Builder) {
  // If the extract can be constant-folded, this code is unsimplified. Defer
  // to other passes to handle that.
  Value *X = ExtElt->getVectorOperand();
  Value *C = ExtElt->getIndexOperand();
  assert(isa<ConstantInt>(C) && "Expected a constant index operand");
  if (isa<Constant>(X))
    return nullptr;

  Value *Shuf = createShiftShuffle(X, cast<ConstantInt>(C)->getZExtValue(),
                                   NewIndex, Builder);
  return cast<ExtractElementInst>(Builder.CreateExtractElement(Shuf, NewIndex));
}

/// Try to reduce extract element costs by converting scalar compares to vector
/// compares followed by extract.
/// cmp (ext0 V0, C), (ext1 V1, C)
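/// For instance (an illustrative case):
///   %e0 = extractelement <4 x i32> %a, i32 1
///   %e1 = extractelement <4 x i32> %b, i32 1
///   %c = icmp sgt i32 %e0, %e1
/// -->
///   %vc = icmp sgt <4 x i32> %a, %b
///   %c = extractelement <4 x i1> %vc, i32 1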
void VectorCombine::foldExtExtCmp(ExtractElementInst *Ext0,
                                  ExtractElementInst *Ext1, Instruction &I) {
  assert(isa<CmpInst>(&I) && "Expected a compare");
  assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() ==
             cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() &&
         "Expected matching constant extract indexes");

  // cmp Pred (extelt V0, C), (extelt V1, C) --> extelt (cmp Pred V0, V1), C
  ++NumVecCmp;
  CmpInst::Predicate Pred = cast<CmpInst>(&I)->getPredicate();
  Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand();
  Value *VecCmp = Builder.CreateCmp(Pred, V0, V1);
  Value *NewExt = Builder.CreateExtractElement(VecCmp, Ext0->getIndexOperand());
  replaceValue(I, *NewExt);
}

/// Try to reduce extract element costs by converting scalar binops to vector
/// binops followed by extract.
/// bo (ext0 V0, C), (ext1 V1, C)
void VectorCombine::foldExtExtBinop(ExtractElementInst *Ext0,
                                    ExtractElementInst *Ext1, Instruction &I) {
  assert(isa<BinaryOperator>(&I) && "Expected a binary operator");
  assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() ==
             cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() &&
         "Expected matching constant extract indexes");

  // bo (extelt V0, C), (extelt V1, C) --> extelt (bo V0, V1), C
  ++NumVecBO;
  Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand();
  Value *VecBO =
      Builder.CreateBinOp(cast<BinaryOperator>(&I)->getOpcode(), V0, V1);

  // All IR flags are safe to back-propagate because any potential poison
  // created in unused vector elements is discarded by the extract.
  if (auto *VecBOInst = dyn_cast<Instruction>(VecBO))
    VecBOInst->copyIRFlags(&I);

  Value *NewExt = Builder.CreateExtractElement(VecBO, Ext0->getIndexOperand());
  replaceValue(I, *NewExt);
}

/// Match an instruction with extracted vector operands.
bool VectorCombine::foldExtractExtract(Instruction &I) {
  // It is not safe to transform things like div, urem, etc. because we may
  // create undefined behavior when executing those on unknown vector elements.
  if (!isSafeToSpeculativelyExecute(&I))
    return false;

  Instruction *I0, *I1;
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!match(&I, m_Cmp(Pred, m_Instruction(I0), m_Instruction(I1))) &&
      !match(&I, m_BinOp(m_Instruction(I0), m_Instruction(I1))))
    return false;

  Value *V0, *V1;
  uint64_t C0, C1;
  if (!match(I0, m_ExtractElt(m_Value(V0), m_ConstantInt(C0))) ||
      !match(I1, m_ExtractElt(m_Value(V1), m_ConstantInt(C1))) ||
      V0->getType() != V1->getType())
    return false;

  // If the scalar value 'I' is going to be re-inserted into a vector, then try
  // to create an extract to that same element. The extract/insert can be
  // reduced to a "select shuffle".
  // TODO: If we add a larger pattern match that starts from an insert, this
  //       probably becomes unnecessary.
  auto *Ext0 = cast<ExtractElementInst>(I0);
  auto *Ext1 = cast<ExtractElementInst>(I1);
  uint64_t InsertIndex = InvalidIndex;
  if (I.hasOneUse())
    match(I.user_back(),
          m_InsertElt(m_Value(), m_Value(), m_ConstantInt(InsertIndex)));

  ExtractElementInst *ExtractToChange;
  if (isExtractExtractCheap(Ext0, Ext1, I, ExtractToChange, InsertIndex))
    return false;

  if (ExtractToChange) {
    unsigned CheapExtractIdx = ExtractToChange == Ext0 ? C1 : C0;
    ExtractElementInst *NewExtract =
        translateExtract(ExtractToChange, CheapExtractIdx, Builder);
    if (!NewExtract)
      return false;
    if (ExtractToChange == Ext0)
      Ext0 = NewExtract;
    else
      Ext1 = NewExtract;
  }

  if (Pred != CmpInst::BAD_ICMP_PREDICATE)
    foldExtExtCmp(Ext0, Ext1, I);
  else
    foldExtExtBinop(Ext0, Ext1, I);

  Worklist.push(Ext0);
  Worklist.push(Ext1);
  return true;
}

/// If this is a bitcast of a shuffle, try to bitcast the source vector to the
/// destination type followed by shuffle. This can enable further transforms by
/// moving bitcasts or shuffles together.
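/// Example with a widening (narrow-to-wide) mask, purely illustrative:
///   %s = shufflevector <4 x i32> %v, <4 x i32> undef,
///                      <4 x i32> <i32 2, i32 3, i32 0, i32 1>
///   %b = bitcast <4 x i32> %s to <2 x i64>
/// -->
///   %c = bitcast <4 x i32> %v to <2 x i64>
///   %b = shufflevector <2 x i64> %c, <2 x i64> undef, <2 x i32> <i32 1, i32 0>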
bool VectorCombine::foldBitcastShuf(Instruction &I) {
  Value *V;
  ArrayRef<int> Mask;
  if (!match(&I, m_BitCast(
                     m_OneUse(m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))))))
    return false;

  // 1) Do not fold bitcast shuffle for scalable type. First, shuffle cost for
  //    scalable type is unknown; second, we cannot reason whether the narrowed
  //    shuffle mask for scalable type is a splat or not.
  // 2) Disallow non-vector casts and length-changing shuffles.
  // TODO: We could allow any shuffle.
  auto *DestTy = dyn_cast<FixedVectorType>(I.getType());
  auto *SrcTy = dyn_cast<FixedVectorType>(V->getType());
  if (!SrcTy || !DestTy || I.getOperand(0)->getType() != SrcTy)
    return false;

  unsigned DestNumElts = DestTy->getNumElements();
  unsigned SrcNumElts = SrcTy->getNumElements();
  SmallVector<int, 16> NewMask;
  if (SrcNumElts <= DestNumElts) {
    // The bitcast is from wide to narrow/equal elements. The shuffle mask can
    // always be expanded to the equivalent form choosing narrower elements.
    assert(DestNumElts % SrcNumElts == 0 && "Unexpected shuffle mask");
    unsigned ScaleFactor = DestNumElts / SrcNumElts;
    narrowShuffleMaskElts(ScaleFactor, Mask, NewMask);
  } else {
    // The bitcast is from narrow elements to wide elements. The shuffle mask
    // must choose consecutive elements to allow casting first.
    assert(SrcNumElts % DestNumElts == 0 && "Unexpected shuffle mask");
    unsigned ScaleFactor = SrcNumElts / DestNumElts;
    if (!widenShuffleMaskElts(ScaleFactor, Mask, NewMask))
      return false;
  }

  // The new shuffle must not cost more than the old shuffle. The bitcast is
  // moved ahead of the shuffle, so assume that it has the same cost as before.
  InstructionCost DestCost = TTI.getShuffleCost(
      TargetTransformInfo::SK_PermuteSingleSrc, DestTy, NewMask);
  InstructionCost SrcCost =
      TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, SrcTy, Mask);
  if (DestCost > SrcCost || !DestCost.isValid())
    return false;

  // bitcast (shuf V, MaskC) --> shuf (bitcast V), MaskC'
  ++NumShufOfBitcast;
  Value *CastV = Builder.CreateBitCast(V, DestTy);
  Value *Shuf = Builder.CreateShuffleVector(CastV, NewMask);
  replaceValue(I, *Shuf);
  return true;
}

/// Match a vector binop or compare instruction with at least one inserted
/// scalar operand and convert to scalar binop/cmp followed by insertelement.
bool VectorCombine::scalarizeBinopOrCmp(Instruction &I) {
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  Value *Ins0, *Ins1;
  if (!match(&I, m_BinOp(m_Value(Ins0), m_Value(Ins1))) &&
      !match(&I, m_Cmp(Pred, m_Value(Ins0), m_Value(Ins1))))
    return false;
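  // Example of the overall transform (illustrative):
  //   %i = insertelement <2 x i64> <i64 42, i64 7>, i64 %x, i32 0
  //   %r = add <2 x i64> %i, <i64 1, i64 2>
  // -->
  //   %s = add i64 %x, 1
  //   %r = insertelement <2 x i64> <i64 43, i64 9>, i64 %s, i32 0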
  // Do not convert the vector condition of a vector select into a scalar
  // condition. That may cause problems for codegen because of differences in
  // boolean formats and register-file transfers.
  // TODO: Can we account for that in the cost model?
  bool IsCmp = Pred != CmpInst::Predicate::BAD_ICMP_PREDICATE;
  if (IsCmp)
    for (User *U : I.users())
      if (match(U, m_Select(m_Specific(&I), m_Value(), m_Value())))
        return false;

  // Match against one or both scalar values being inserted into constant
  // vectors:
  // vec_op VecC0, (inselt VecC1, V1, Index)
  // vec_op (inselt VecC0, V0, Index), VecC1
  // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index)
  // TODO: Deal with mismatched index constants and variable indexes?
  Constant *VecC0 = nullptr, *VecC1 = nullptr;
  Value *V0 = nullptr, *V1 = nullptr;
  uint64_t Index0 = 0, Index1 = 0;
  if (!match(Ins0, m_InsertElt(m_Constant(VecC0), m_Value(V0),
                               m_ConstantInt(Index0))) &&
      !match(Ins0, m_Constant(VecC0)))
    return false;
  if (!match(Ins1, m_InsertElt(m_Constant(VecC1), m_Value(V1),
                               m_ConstantInt(Index1))) &&
      !match(Ins1, m_Constant(VecC1)))
    return false;

  bool IsConst0 = !V0;
  bool IsConst1 = !V1;
  if (IsConst0 && IsConst1)
    return false;
  if (!IsConst0 && !IsConst1 && Index0 != Index1)
    return false;

  // Bail for single insertion if it is a load.
  // TODO: Handle this once getVectorInstrCost can cost for load/stores.
  auto *I0 = dyn_cast_or_null<Instruction>(V0);
  auto *I1 = dyn_cast_or_null<Instruction>(V1);
  if ((IsConst0 && I1 && I1->mayReadFromMemory()) ||
      (IsConst1 && I0 && I0->mayReadFromMemory()))
    return false;

  uint64_t Index = IsConst0 ? Index1 : Index0;
  Type *ScalarTy = IsConst0 ? V1->getType() : V0->getType();
  Type *VecTy = I.getType();
  assert(VecTy->isVectorTy() &&
         (IsConst0 || IsConst1 || V0->getType() == V1->getType()) &&
         (ScalarTy->isIntegerTy() || ScalarTy->isFloatingPointTy() ||
          ScalarTy->isPointerTy()) &&
         "Unexpected types for insert element into binop or cmp");

  unsigned Opcode = I.getOpcode();
  InstructionCost ScalarOpCost, VectorOpCost;
  if (IsCmp) {
    CmpInst::Predicate Pred = cast<CmpInst>(I).getPredicate();
    ScalarOpCost = TTI.getCmpSelInstrCost(
        Opcode, ScalarTy, CmpInst::makeCmpResultType(ScalarTy), Pred);
    VectorOpCost = TTI.getCmpSelInstrCost(
        Opcode, VecTy, CmpInst::makeCmpResultType(VecTy), Pred);
  } else {
    ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  }

  // Get cost estimate for the insert element. This cost will factor into
  // both sequences.
  InstructionCost InsertCost =
      TTI.getVectorInstrCost(Instruction::InsertElement, VecTy, Index);
  InstructionCost OldCost =
      (IsConst0 ? 0 : InsertCost) + (IsConst1 ? 0 : InsertCost) + VectorOpCost;
  InstructionCost NewCost = ScalarOpCost + InsertCost +
                            (IsConst0 ? 0 : !Ins0->hasOneUse() * InsertCost) +
                            (IsConst1 ? 0 : !Ins1->hasOneUse() * InsertCost);

  // We want to scalarize unless the vector variant actually has lower cost.
  if (OldCost < NewCost || !NewCost.isValid())
    return false;

  // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index) -->
  // inselt NewVecC, (scalar_op V0, V1), Index
  if (IsCmp)
    ++NumScalarCmp;
  else
    ++NumScalarBO;

  // For constant cases, extract the scalar element; this should constant fold.
  if (IsConst0)
    V0 = ConstantExpr::getExtractElement(VecC0, Builder.getInt64(Index));
  if (IsConst1)
    V1 = ConstantExpr::getExtractElement(VecC1, Builder.getInt64(Index));
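  // E.g. extracting element 0 from the constant vector <i64 1, i64 2> simply
  // yields the constant i64 1.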
  Value *Scalar =
      IsCmp ? Builder.CreateCmp(Pred, V0, V1)
            : Builder.CreateBinOp((Instruction::BinaryOps)Opcode, V0, V1);

  Scalar->setName(I.getName() + ".scalar");

  // All IR flags are safe to back-propagate. There is no potential for extra
  // poison to be created by the scalar instruction.
  if (auto *ScalarInst = dyn_cast<Instruction>(Scalar))
    ScalarInst->copyIRFlags(&I);

  // Fold the vector constants in the original vectors into a new base vector.
  Constant *NewVecC = IsCmp ? ConstantExpr::getCompare(Pred, VecC0, VecC1)
                            : ConstantExpr::get(Opcode, VecC0, VecC1);
  Value *Insert = Builder.CreateInsertElement(NewVecC, Scalar, Index);
  replaceValue(I, *Insert);
  return true;
}

/// Try to combine a scalar binop + 2 scalar compares of extracted elements of
/// a vector into vector operations followed by extract. Note: The SLP pass
/// may miss this pattern because of implementation problems.
bool VectorCombine::foldExtractedCmps(Instruction &I) {
  // We are looking for a scalar binop of booleans.
  // binop i1 (cmp Pred I0, C0), (cmp Pred I1, C1)
  if (!I.isBinaryOp() || !I.getType()->isIntegerTy(1))
    return false;

  // The compare predicates should match, and each compare should have a
  // constant operand.
  // TODO: Relax the one-use constraints.
  Value *B0 = I.getOperand(0), *B1 = I.getOperand(1);
  Instruction *I0, *I1;
  Constant *C0, *C1;
  CmpInst::Predicate P0, P1;
  if (!match(B0, m_OneUse(m_Cmp(P0, m_Instruction(I0), m_Constant(C0)))) ||
      !match(B1, m_OneUse(m_Cmp(P1, m_Instruction(I1), m_Constant(C1)))) ||
      P0 != P1)
    return false;

  // The compare operands must be extracts of the same vector with constant
  // extract indexes.
  // TODO: Relax the one-use constraints.
  Value *X;
  uint64_t Index0, Index1;
  if (!match(I0, m_OneUse(m_ExtractElt(m_Value(X), m_ConstantInt(Index0)))) ||
      !match(I1, m_OneUse(m_ExtractElt(m_Specific(X), m_ConstantInt(Index1)))))
    return false;

  auto *Ext0 = cast<ExtractElementInst>(I0);
  auto *Ext1 = cast<ExtractElementInst>(I1);
  ExtractElementInst *ConvertToShuf = getShuffleExtract(Ext0, Ext1);
  if (!ConvertToShuf)
    return false;

  // The original scalar pattern is:
  // binop i1 (cmp Pred (ext X, Index0), C0), (cmp Pred (ext X, Index1), C1)
  CmpInst::Predicate Pred = P0;
  unsigned CmpOpcode = CmpInst::isFPPredicate(Pred) ? Instruction::FCmp
                                                    : Instruction::ICmp;
  auto *VecTy = dyn_cast<FixedVectorType>(X->getType());
  if (!VecTy)
    return false;

  InstructionCost OldCost =
      TTI.getVectorInstrCost(Ext0->getOpcode(), VecTy, Index0);
  OldCost += TTI.getVectorInstrCost(Ext1->getOpcode(), VecTy, Index1);
  OldCost +=
      TTI.getCmpSelInstrCost(CmpOpcode, I0->getType(),
                             CmpInst::makeCmpResultType(I0->getType()), Pred) *
      2;
  OldCost += TTI.getArithmeticInstrCost(I.getOpcode(), I.getType());

  // The proposed vector pattern is:
  // vcmp = cmp Pred X, VecC
  // ext (binop vNi1 vcmp, (shuffle vcmp, Index1)), Index0
  int CheapIndex = ConvertToShuf == Ext0 ? Index1 : Index0;
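  // E.g. if Index0 = 0 is the cheap lane and Index1 = 3 must be shuffled over
  // on a <4 x i1> compare result, the shift-shuffle mask built below becomes
  // { 3, undef, undef, undef }.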
  int ExpensiveIndex = ConvertToShuf == Ext0 ? Index0 : Index1;
  auto *CmpTy = cast<FixedVectorType>(CmpInst::makeCmpResultType(X->getType()));
  InstructionCost NewCost = TTI.getCmpSelInstrCost(
      CmpOpcode, X->getType(), CmpInst::makeCmpResultType(X->getType()), Pred);
  SmallVector<int, 32> ShufMask(VecTy->getNumElements(), UndefMaskElem);
  ShufMask[CheapIndex] = ExpensiveIndex;
  NewCost += TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, CmpTy,
                                ShufMask);
  NewCost += TTI.getArithmeticInstrCost(I.getOpcode(), CmpTy);
  NewCost += TTI.getVectorInstrCost(Ext0->getOpcode(), CmpTy, CheapIndex);

  // Aggressively form vector ops if the cost is equal because the transform
  // may enable further optimization.
  // Codegen can reverse this transform (scalarize) if it was not profitable.
  if (OldCost < NewCost || !NewCost.isValid())
    return false;

  // Create a vector constant from the 2 scalar constants.
  SmallVector<Constant *, 32> CmpC(VecTy->getNumElements(),
                                   UndefValue::get(VecTy->getElementType()));
  CmpC[Index0] = C0;
  CmpC[Index1] = C1;
  Value *VCmp = Builder.CreateCmp(Pred, X, ConstantVector::get(CmpC));

  Value *Shuf = createShiftShuffle(VCmp, ExpensiveIndex, CheapIndex, Builder);
  Value *VecLogic = Builder.CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
                                        VCmp, Shuf);
  Value *NewExt = Builder.CreateExtractElement(VecLogic, CheapIndex);
  replaceValue(I, *NewExt);
  ++NumVecCmpBO;
  return true;
}

// Check if a memory location is modified between two instructions in the same
// basic block.
static bool isMemModifiedBetween(BasicBlock::iterator Begin,
                                 BasicBlock::iterator End,
                                 const MemoryLocation &Loc, AAResults &AA) {
  unsigned NumScanned = 0;
  return std::any_of(Begin, End, [&](const Instruction &Instr) {
    return isModSet(AA.getModRefInfo(&Instr, Loc)) ||
           ++NumScanned > MaxInstrsToScan;
  });
}

/// Helper class to indicate whether a vector index can be safely scalarized
/// and if a freeze needs to be inserted.
class ScalarizationResult {
  enum class StatusTy { Unsafe, Safe, SafeWithFreeze };

  StatusTy Status;
  Value *ToFreeze;

  ScalarizationResult(StatusTy Status, Value *ToFreeze = nullptr)
      : Status(Status), ToFreeze(ToFreeze) {}

public:
  ScalarizationResult(const ScalarizationResult &Other) = default;
  ~ScalarizationResult() {
    assert(!ToFreeze && "freeze() not called with ToFreeze being set");
  }

  static ScalarizationResult unsafe() { return {StatusTy::Unsafe}; }
  static ScalarizationResult safe() { return {StatusTy::Safe}; }
  static ScalarizationResult safeWithFreeze(Value *ToFreeze) {
    return {StatusTy::SafeWithFreeze, ToFreeze};
  }

  /// Returns true if the index can be scalarized without requiring a freeze.
  bool isSafe() const { return Status == StatusTy::Safe; }
  /// Returns true if the index cannot be scalarized.
  bool isUnsafe() const { return Status == StatusTy::Unsafe; }
  /// Returns true if the index can be scalarized, but requires inserting a
  /// freeze.
  bool isSafeWithFreeze() const { return Status == StatusTy::SafeWithFreeze; }

  /// Reset the state to Unsafe and clear ToFreeze if set.
  void discard() {
    ToFreeze = nullptr;
    Status = StatusTy::Unsafe;
  }

  /// Freeze the ToFreeze value and update the use in \p UserI to use it.
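  /// Freezing is needed because a poison index would remain poison even after
  /// a range-restricting operation such as "and %idx, 3"; freezing the base
  /// value first guarantees the masked index is an in-range, non-poison value.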
  void freeze(IRBuilder<> &Builder, Instruction &UserI) {
    assert(isSafeWithFreeze() &&
           "should only be used when freezing is required");
    assert(is_contained(ToFreeze->users(), &UserI) &&
           "UserI must be a user of ToFreeze");
    IRBuilder<>::InsertPointGuard Guard(Builder);
    Builder.SetInsertPoint(cast<Instruction>(&UserI));
    Value *Frozen =
        Builder.CreateFreeze(ToFreeze, ToFreeze->getName() + ".frozen");
    for (Use &U : make_early_inc_range(UserI.operands()))
      if (U.get() == ToFreeze)
        U.set(Frozen);

    ToFreeze = nullptr;
  }
};

/// Check if it is legal to scalarize a memory access to \p VecTy at index \p
/// Idx. \p Idx must access a valid vector element.
static ScalarizationResult canScalarizeAccess(FixedVectorType *VecTy,
                                              Value *Idx, Instruction *CtxI,
                                              AssumptionCache &AC,
                                              const DominatorTree &DT) {
  if (auto *C = dyn_cast<ConstantInt>(Idx)) {
    if (C->getValue().ult(VecTy->getNumElements()))
      return ScalarizationResult::safe();
    return ScalarizationResult::unsafe();
  }

  unsigned IntWidth = Idx->getType()->getScalarSizeInBits();
  APInt Zero(IntWidth, 0);
  APInt MaxElts(IntWidth, VecTy->getNumElements());
  ConstantRange ValidIndices(Zero, MaxElts);
  ConstantRange IdxRange(IntWidth, true);

  if (isGuaranteedNotToBePoison(Idx, &AC)) {
    if (ValidIndices.contains(computeConstantRange(Idx, /* ForSigned */ false,
                                                   true, &AC, CtxI, &DT)))
      return ScalarizationResult::safe();
    return ScalarizationResult::unsafe();
  }

  // If the index may be poison, check if we can insert a freeze before the
  // range of the index is restricted.
  Value *IdxBase;
  ConstantInt *CI;
  if (match(Idx, m_And(m_Value(IdxBase), m_ConstantInt(CI)))) {
    IdxRange = IdxRange.binaryAnd(CI->getValue());
  } else if (match(Idx, m_URem(m_Value(IdxBase), m_ConstantInt(CI)))) {
    IdxRange = IdxRange.urem(CI->getValue());
  }

  if (ValidIndices.contains(IdxRange))
    return ScalarizationResult::safeWithFreeze(IdxBase);
  return ScalarizationResult::unsafe();
}

/// The memory operation on a vector of \p ScalarType had alignment of
/// \p VectorAlignment. Compute the maximal, but conservatively correct,
/// alignment that will be valid for the memory operation on a single scalar
/// element of the same type with index \p Idx.
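/// For example, with a <4 x i32> access of alignment 16: index 0 keeps
/// alignment 16, index 1 (byte offset 4) gets commonAlignment(16, 4) = 4, and
/// an unknown index conservatively gets commonAlignment(16, 4) = 4 as well.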
static Align computeAlignmentAfterScalarization(Align VectorAlignment,
                                                Type *ScalarType, Value *Idx,
                                                const DataLayout &DL) {
  if (auto *C = dyn_cast<ConstantInt>(Idx))
    return commonAlignment(VectorAlignment,
                           C->getZExtValue() * DL.getTypeStoreSize(ScalarType));
  return commonAlignment(VectorAlignment, DL.getTypeStoreSize(ScalarType));
}

// Combine patterns like:
//   %0 = load <4 x i32>, <4 x i32>* %a
//   %1 = insertelement <4 x i32> %0, i32 %b, i32 1
//   store <4 x i32> %1, <4 x i32>* %a
// to:
//   %0 = bitcast <4 x i32>* %a to i32*
//   %1 = getelementptr inbounds i32, i32* %0, i64 0, i64 1
//   store i32 %b, i32* %1
bool VectorCombine::foldSingleElementStore(Instruction &I) {
  StoreInst *SI = dyn_cast<StoreInst>(&I);
  if (!SI || !SI->isSimple() ||
      !isa<FixedVectorType>(SI->getValueOperand()->getType()))
    return false;

  // TODO: Combine more complicated patterns (multiple inserts) by referencing
  //       TargetTransformInfo.
  Instruction *Source;
  Value *NewElement;
  Value *Idx;
  if (!match(SI->getValueOperand(),
             m_InsertElt(m_Instruction(Source), m_Value(NewElement),
                         m_Value(Idx))))
    return false;

  if (auto *Load = dyn_cast<LoadInst>(Source)) {
    auto *VecTy = cast<FixedVectorType>(SI->getValueOperand()->getType());
    const DataLayout &DL = I.getModule()->getDataLayout();
    Value *SrcAddr = Load->getPointerOperand()->stripPointerCasts();
    // Don't optimize for atomic/volatile load or store. Ensure memory is not
    // modified between, vector type matches store size, and index is inbounds.
    if (!Load->isSimple() || Load->getParent() != SI->getParent() ||
        !DL.typeSizeEqualsStoreSize(Load->getType()) ||
        SrcAddr != SI->getPointerOperand()->stripPointerCasts())
      return false;

    auto ScalarizableIdx = canScalarizeAccess(VecTy, Idx, Load, AC, DT);
    if (ScalarizableIdx.isUnsafe() ||
        isMemModifiedBetween(Load->getIterator(), SI->getIterator(),
                             MemoryLocation::get(SI), AA))
      return false;

    if (ScalarizableIdx.isSafeWithFreeze())
      ScalarizableIdx.freeze(Builder, *cast<Instruction>(Idx));
    Value *GEP = Builder.CreateInBoundsGEP(
        SI->getValueOperand()->getType(), SI->getPointerOperand(),
        {ConstantInt::get(Idx->getType(), 0), Idx});
    StoreInst *NSI = Builder.CreateStore(NewElement, GEP);
    NSI->copyMetadata(*SI);
    Align ScalarOpAlignment = computeAlignmentAfterScalarization(
        std::max(SI->getAlign(), Load->getAlign()), NewElement->getType(), Idx,
        DL);
    NSI->setAlignment(ScalarOpAlignment);
    replaceValue(I, *NSI);
    eraseInstruction(I);
    return true;
  }

  return false;
}

/// Try to scalarize vector loads feeding extractelement instructions.
bool VectorCombine::scalarizeLoadExtract(Instruction &I) {
  Value *Ptr;
  if (!match(&I, m_Load(m_Value(Ptr))))
    return false;

  auto *LI = cast<LoadInst>(&I);
  const DataLayout &DL = I.getModule()->getDataLayout();
  if (LI->isVolatile() || !DL.typeSizeEqualsStoreSize(LI->getType()))
    return false;

  auto *FixedVT = dyn_cast<FixedVectorType>(LI->getType());
  if (!FixedVT)
    return false;

  InstructionCost OriginalCost =
      TTI.getMemoryOpCost(Instruction::Load, LI->getType(), LI->getAlign(),
                          LI->getPointerAddressSpace());
  InstructionCost ScalarizedCost = 0;

  Instruction *LastCheckedInst = LI;
  unsigned NumInstChecked = 0;
  // Check if all users of the load are extracts with no memory modifications
  // between the load and the extract. Compute the cost of both the original
  // code and the scalarized version.
  for (User *U : LI->users()) {
    auto *UI = dyn_cast<ExtractElementInst>(U);
    if (!UI || UI->getParent() != LI->getParent())
      return false;

    if (!isGuaranteedNotToBePoison(UI->getOperand(1), &AC, LI, &DT))
      return false;

    // Check if any instruction between the load and the extract may modify
    // memory.
    if (LastCheckedInst->comesBefore(UI)) {
      for (Instruction &I :
           make_range(std::next(LI->getIterator()), UI->getIterator())) {
        // Bail out if we reached the check limit or the instruction may write
        // to memory.
        if (NumInstChecked == MaxInstrsToScan || I.mayWriteToMemory())
          return false;
        NumInstChecked++;
      }
    }

    if (!LastCheckedInst)
      LastCheckedInst = UI;
    else if (LastCheckedInst->comesBefore(UI))
      LastCheckedInst = UI;

    auto ScalarIdx = canScalarizeAccess(FixedVT, UI->getOperand(1), &I, AC, DT);
    if (!ScalarIdx.isSafe()) {
      // TODO: Freeze index if it is safe to do so.
      ScalarIdx.discard();
      return false;
    }

    auto *Index = dyn_cast<ConstantInt>(UI->getOperand(1));
    OriginalCost +=
        TTI.getVectorInstrCost(Instruction::ExtractElement, LI->getType(),
                               Index ? Index->getZExtValue() : -1);
    ScalarizedCost +=
        TTI.getMemoryOpCost(Instruction::Load, FixedVT->getElementType(),
                            Align(1), LI->getPointerAddressSpace());
    ScalarizedCost += TTI.getAddressComputationCost(FixedVT->getElementType());
  }

  if (ScalarizedCost >= OriginalCost)
    return false;

  // Replace extracts with narrow scalar loads.
  for (User *U : LI->users()) {
    auto *EI = cast<ExtractElementInst>(U);
    Builder.SetInsertPoint(EI);

    Value *Idx = EI->getOperand(1);
    Value *GEP =
        Builder.CreateInBoundsGEP(FixedVT, Ptr, {Builder.getInt32(0), Idx});
    auto *NewLoad = cast<LoadInst>(Builder.CreateLoad(
        FixedVT->getElementType(), GEP, EI->getName() + ".scalar"));

    Align ScalarOpAlignment = computeAlignmentAfterScalarization(
        LI->getAlign(), FixedVT->getElementType(), Idx, DL);
    NewLoad->setAlignment(ScalarOpAlignment);

    replaceValue(*EI, *NewLoad);
  }

  return true;
}

/// Try to convert "shuffle (binop), (binop)" with a shared binop operand into
/// "binop (shuffle), (shuffle)".
bool VectorCombine::foldShuffleOfBinops(Instruction &I) {
  auto *VecTy = dyn_cast<FixedVectorType>(I.getType());
  if (!VecTy)
    return false;

  BinaryOperator *B0, *B1;
  ArrayRef<int> Mask;
  if (!match(&I, m_Shuffle(m_OneUse(m_BinOp(B0)), m_OneUse(m_BinOp(B1)),
                           m_Mask(Mask))) ||
      B0->getOpcode() != B1->getOpcode() || B0->getType() != VecTy)
    return false;

  // Try to replace a binop with a shuffle if the shuffle is not costly.
  // The new shuffle will choose from a single, common operand, so it may be
  // cheaper than the existing two-operand shuffle.
  SmallVector<int> UnaryMask = createUnaryMask(Mask, Mask.size());
  Instruction::BinaryOps Opcode = B0->getOpcode();
  InstructionCost BinopCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  InstructionCost ShufCost = TTI.getShuffleCost(
      TargetTransformInfo::SK_PermuteSingleSrc, VecTy, UnaryMask);
  if (ShufCost > BinopCost)
    return false;
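  // Illustrative example where X is the shared operand:
  //   %b0 = add <4 x i32> %x, %y
  //   %b1 = add <4 x i32> %x, %w
  //   %s = shufflevector <4 x i32> %b0, <4 x i32> %b1,
  //                      <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  // -->
  //   %s0 = shufflevector <4 x i32> %x, <4 x i32> poison,
  //                       <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  //   %s1 = shufflevector <4 x i32> %y, <4 x i32> %w,
  //                       <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  //   %s = add <4 x i32> %s0, %s1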
  // If we have something like "add X, Y" and "add Z, X", swap ops to match.
  Value *X = B0->getOperand(0), *Y = B0->getOperand(1);
  Value *Z = B1->getOperand(0), *W = B1->getOperand(1);
  if (BinaryOperator::isCommutative(Opcode) && X != Z && Y != W)
    std::swap(X, Y);

  Value *Shuf0, *Shuf1;
  if (X == Z) {
    // shuf (bo X, Y), (bo X, W) --> bo (shuf X), (shuf Y, W)
    Shuf0 = Builder.CreateShuffleVector(X, UnaryMask);
    Shuf1 = Builder.CreateShuffleVector(Y, W, Mask);
  } else if (Y == W) {
    // shuf (bo X, Y), (bo Z, Y) --> bo (shuf X, Z), (shuf Y)
    Shuf0 = Builder.CreateShuffleVector(X, Z, Mask);
    Shuf1 = Builder.CreateShuffleVector(Y, UnaryMask);
  } else {
    return false;
  }

  Value *NewBO = Builder.CreateBinOp(Opcode, Shuf0, Shuf1);
  // Intersect flags from the old binops.
  if (auto *NewInst = dyn_cast<Instruction>(NewBO)) {
    NewInst->copyIRFlags(B0);
    NewInst->andIRFlags(B1);
  }
  replaceValue(I, *NewBO);
  return true;
}

/// This is the entry point for all transforms. Pass manager differences are
/// handled in the callers of this function.
bool VectorCombine::run() {
  if (DisableVectorCombine)
    return false;

  // Don't attempt vectorization if the target does not support vectors.
  if (!TTI.getNumberOfRegisters(TTI.getRegisterClassForType(/*Vector*/ true)))
    return false;

  bool MadeChange = false;
  auto FoldInst = [this, &MadeChange](Instruction &I) {
    Builder.SetInsertPoint(&I);
    if (!ScalarizationOnly) {
      MadeChange |= vectorizeLoadInsert(I);
      MadeChange |= foldExtractExtract(I);
      MadeChange |= foldBitcastShuf(I);
      MadeChange |= foldExtractedCmps(I);
      MadeChange |= foldShuffleOfBinops(I);
    }
    MadeChange |= scalarizeBinopOrCmp(I);
    MadeChange |= scalarizeLoadExtract(I);
    MadeChange |= foldSingleElementStore(I);
  };
  for (BasicBlock &BB : F) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(&BB))
      continue;
    // Use an early increment range so that we can erase instructions in the
    // loop.
    for (Instruction &I : make_early_inc_range(BB)) {
      if (I.isDebugOrPseudoInst())
        continue;
      FoldInst(I);
    }
  }

  while (!Worklist.isEmpty()) {
    Instruction *I = Worklist.removeOne();
    if (!I)
      continue;

    if (isInstructionTriviallyDead(I)) {
      eraseInstruction(*I);
      continue;
    }

    FoldInst(*I);
  }

  return MadeChange;
}

// Pass manager boilerplate below here.
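// The pass is registered under the name "vector-combine" below, so it can be
// exercised in isolation with, e.g., "opt -passes=vector-combine" under the
// new pass manager or "opt -vector-combine" under the legacy pass manager.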
namespace {
class VectorCombineLegacyPass : public FunctionPass {
public:
  static char ID;
  VectorCombineLegacyPass() : FunctionPass(ID) {
    initializeVectorCombineLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.setPreservesCFG();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    FunctionPass::getAnalysisUsage(AU);
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
    VectorCombine Combiner(F, TTI, DT, AA, AC, /*ScalarizationOnly=*/false);
    return Combiner.run();
  }
};
} // namespace

char VectorCombineLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(VectorCombineLegacyPass, "vector-combine",
                      "Optimize scalar/vector ops", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(VectorCombineLegacyPass, "vector-combine",
                    "Optimize scalar/vector ops", false, false)
Pass *llvm::createVectorCombinePass() {
  return new VectorCombineLegacyPass();
}

PreservedAnalyses VectorCombinePass::run(Function &F,
                                         FunctionAnalysisManager &FAM) {
  auto &AC = FAM.getResult<AssumptionAnalysis>(F);
  TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
  DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  AAResults &AA = FAM.getResult<AAManager>(F);
  VectorCombine Combiner(F, TTI, DT, AA, AC, ScalarizationOnly);
  if (!Combiner.run())
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}