1 //===------- VectorCombine.cpp - Optimize partial vector operations -------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This pass optimizes scalar/vector interactions using target cost models. The 10 // transforms implemented here may not fit in traditional loop-based or SLP 11 // vectorization passes. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/Transforms/Vectorize/VectorCombine.h" 16 #include "llvm/ADT/Statistic.h" 17 #include "llvm/Analysis/AssumptionCache.h" 18 #include "llvm/Analysis/BasicAliasAnalysis.h" 19 #include "llvm/Analysis/GlobalsModRef.h" 20 #include "llvm/Analysis/Loads.h" 21 #include "llvm/Analysis/TargetTransformInfo.h" 22 #include "llvm/Analysis/ValueTracking.h" 23 #include "llvm/Analysis/VectorUtils.h" 24 #include "llvm/IR/Dominators.h" 25 #include "llvm/IR/Function.h" 26 #include "llvm/IR/IRBuilder.h" 27 #include "llvm/IR/PatternMatch.h" 28 #include "llvm/InitializePasses.h" 29 #include "llvm/Pass.h" 30 #include "llvm/Support/CommandLine.h" 31 #include "llvm/Transforms/Utils/Local.h" 32 #include "llvm/Transforms/Vectorize.h" 33 34 #define DEBUG_TYPE "vector-combine" 35 #include "llvm/Transforms/Utils/InstructionWorklist.h" 36 37 using namespace llvm; 38 using namespace llvm::PatternMatch; 39 40 STATISTIC(NumVecLoad, "Number of vector loads formed"); 41 STATISTIC(NumVecCmp, "Number of vector compares formed"); 42 STATISTIC(NumVecBO, "Number of vector binops formed"); 43 STATISTIC(NumVecCmpBO, "Number of vector compare + binop formed"); 44 STATISTIC(NumShufOfBitcast, "Number of shuffles moved after bitcast"); 45 STATISTIC(NumScalarBO, "Number of scalar binops formed"); 46 STATISTIC(NumScalarCmp, "Number of scalar compares formed"); 47 48 static cl::opt<bool> DisableVectorCombine( 49 "disable-vector-combine", cl::init(false), cl::Hidden, 50 cl::desc("Disable all vector combine transforms")); 51 52 static cl::opt<bool> DisableBinopExtractShuffle( 53 "disable-binop-extract-shuffle", cl::init(false), cl::Hidden, 54 cl::desc("Disable binop extract to shuffle transforms")); 55 56 static cl::opt<unsigned> MaxInstrsToScan( 57 "vector-combine-max-scan-instrs", cl::init(30), cl::Hidden, 58 cl::desc("Max number of instructions to scan for vector combining.")); 59 60 static const unsigned InvalidIndex = std::numeric_limits<unsigned>::max(); 61 62 namespace { 63 class VectorCombine { 64 public: 65 VectorCombine(Function &F, const TargetTransformInfo &TTI, 66 const DominatorTree &DT, AAResults &AA, AssumptionCache &AC, 67 bool ScalarizationOnly) 68 : F(F), Builder(F.getContext()), TTI(TTI), DT(DT), AA(AA), AC(AC), 69 ScalarizationOnly(ScalarizationOnly) {} 70 71 bool run(); 72 73 private: 74 Function &F; 75 IRBuilder<> Builder; 76 const TargetTransformInfo &TTI; 77 const DominatorTree &DT; 78 AAResults &AA; 79 AssumptionCache &AC; 80 81 /// If true only perform scalarization combines and do not introduce new 82 /// vector operations. 
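  /// (Explanatory note, not part of the original comment: the legacy pass
  /// wrapper at the bottom of this file always constructs VectorCombine with
  /// ScalarizationOnly == false, while the new-PM VectorCombinePass::run
  /// forwards the ScalarizationOnly flag it was constructed with.)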
83 bool ScalarizationOnly; 84 85 InstructionWorklist Worklist; 86 87 bool vectorizeLoadInsert(Instruction &I); 88 ExtractElementInst *getShuffleExtract(ExtractElementInst *Ext0, 89 ExtractElementInst *Ext1, 90 unsigned PreferredExtractIndex) const; 91 bool isExtractExtractCheap(ExtractElementInst *Ext0, ExtractElementInst *Ext1, 92 const Instruction &I, 93 ExtractElementInst *&ConvertToShuffle, 94 unsigned PreferredExtractIndex); 95 void foldExtExtCmp(ExtractElementInst *Ext0, ExtractElementInst *Ext1, 96 Instruction &I); 97 void foldExtExtBinop(ExtractElementInst *Ext0, ExtractElementInst *Ext1, 98 Instruction &I); 99 bool foldExtractExtract(Instruction &I); 100 bool foldBitcastShuf(Instruction &I); 101 bool scalarizeBinopOrCmp(Instruction &I); 102 bool foldExtractedCmps(Instruction &I); 103 bool foldSingleElementStore(Instruction &I); 104 bool scalarizeLoadExtract(Instruction &I); 105 bool foldShuffleOfBinops(Instruction &I); 106 107 void replaceValue(Value &Old, Value &New) { 108 Old.replaceAllUsesWith(&New); 109 New.takeName(&Old); 110 if (auto *NewI = dyn_cast<Instruction>(&New)) { 111 Worklist.pushUsersToWorkList(*NewI); 112 Worklist.pushValue(NewI); 113 } 114 Worklist.pushValue(&Old); 115 } 116 117 void eraseInstruction(Instruction &I) { 118 for (Value *Op : I.operands()) 119 Worklist.pushValue(Op); 120 Worklist.remove(&I); 121 I.eraseFromParent(); 122 } 123 }; 124 } // namespace 125 126 bool VectorCombine::vectorizeLoadInsert(Instruction &I) { 127 // Match insert into fixed vector of scalar value. 128 // TODO: Handle non-zero insert index. 129 auto *Ty = dyn_cast<FixedVectorType>(I.getType()); 130 Value *Scalar; 131 if (!Ty || !match(&I, m_InsertElt(m_Undef(), m_Value(Scalar), m_ZeroInt())) || 132 !Scalar->hasOneUse()) 133 return false; 134 135 // Optionally match an extract from another vector. 136 Value *X; 137 bool HasExtract = match(Scalar, m_ExtractElt(m_Value(X), m_ZeroInt())); 138 if (!HasExtract) 139 X = Scalar; 140 141 // Match source value as load of scalar or vector. 142 // Do not vectorize scalar load (widening) if atomic/volatile or under 143 // asan/hwasan/memtag/tsan. The widened load may load data from dirty regions 144 // or create data races non-existent in the source. 145 auto *Load = dyn_cast<LoadInst>(X); 146 if (!Load || !Load->isSimple() || !Load->hasOneUse() || 147 Load->getFunction()->hasFnAttribute(Attribute::SanitizeMemTag) || 148 mustSuppressSpeculation(*Load)) 149 return false; 150 151 const DataLayout &DL = I.getModule()->getDataLayout(); 152 Value *SrcPtr = Load->getPointerOperand()->stripPointerCasts(); 153 assert(isa<PointerType>(SrcPtr->getType()) && "Expected a pointer type"); 154 155 // If original AS != Load's AS, we can't bitcast the original pointer and have 156 // to use Load's operand instead. Ideally we would want to strip pointer casts 157 // without changing AS, but there's no API to do that ATM. 158 unsigned AS = Load->getPointerAddressSpace(); 159 if (AS != SrcPtr->getType()->getPointerAddressSpace()) 160 SrcPtr = Load->getPointerOperand(); 161 162 // We are potentially transforming byte-sized (8-bit) memory accesses, so make 163 // sure we have all of our type-based constraints in place for this target. 
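  // Illustrative example (added note, not from the original comments): on a
  // target whose minimum vector register width is 128 bits, widening a scalar
  // i32 load gives MinVecNumElts = 128 / 32 = 4 below, so the single i32 load
  // becomes a <4 x i32> load if the dereferenceability checks succeed.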
164   Type *ScalarTy = Scalar->getType();
165   uint64_t ScalarSize = ScalarTy->getPrimitiveSizeInBits();
166   unsigned MinVectorSize = TTI.getMinVectorRegisterBitWidth();
167   if (!ScalarSize || !MinVectorSize || MinVectorSize % ScalarSize != 0 ||
168       ScalarSize % 8 != 0)
169     return false;
170
171   // Check safety of replacing the scalar load with a larger vector load.
172   // We use minimal alignment (maximum flexibility) because we only care about
173   // the dereferenceable region. When calculating cost and creating a new op,
174   // we may use a larger value based on alignment attributes.
175   unsigned MinVecNumElts = MinVectorSize / ScalarSize;
176   auto *MinVecTy = VectorType::get(ScalarTy, MinVecNumElts, false);
177   unsigned OffsetEltIndex = 0;
178   Align Alignment = Load->getAlign();
179   if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &DT)) {
180     // It is not safe to load directly from the pointer, but we can still peek
181     // through gep offsets and check if it is safe to load from a base address
182     // with updated alignment. If it is, we can shuffle the element(s) into
183     // place after loading.
184     unsigned OffsetBitWidth = DL.getIndexTypeSizeInBits(SrcPtr->getType());
185     APInt Offset(OffsetBitWidth, 0);
186     SrcPtr = SrcPtr->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
187
188     // We want to shuffle the result down from a high element of a vector, so
189     // the offset must be positive.
190     if (Offset.isNegative())
191       return false;
192
193     // The offset must be a multiple of the scalar element size so that we can
194     // shuffle the loaded value cleanly by whole elements.
195     uint64_t ScalarSizeInBytes = ScalarSize / 8;
196     if (Offset.urem(ScalarSizeInBytes) != 0)
197       return false;
198
199     // If we load MinVecNumElts, will our target element still be loaded?
200     OffsetEltIndex = Offset.udiv(ScalarSizeInBytes).getZExtValue();
201     if (OffsetEltIndex >= MinVecNumElts)
202       return false;
203
204     if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &DT))
205       return false;
206
207     // Update alignment with offset value. Note that the offset could be negated
208     // to more accurately represent "(new) SrcPtr - Offset = (old) SrcPtr", but
209     // negation does not change the result of the alignment calculation.
210     Alignment = commonAlignment(Alignment, Offset.getZExtValue());
211   }
212
213   // Original pattern: insertelt undef, load [free casts of] PtrOp, 0
214   // Use the greater of the alignment on the load or its source pointer.
215   Alignment = std::max(SrcPtr->getPointerAlignment(DL), Alignment);
216   Type *LoadTy = Load->getType();
217   InstructionCost OldCost =
218       TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, AS);
219   APInt DemandedElts = APInt::getOneBitSet(MinVecNumElts, 0);
220   OldCost += TTI.getScalarizationOverhead(MinVecTy, DemandedElts,
221                                           /* Insert */ true, HasExtract);
222
223   // New pattern: load VecPtr
224   InstructionCost NewCost =
225       TTI.getMemoryOpCost(Instruction::Load, MinVecTy, Alignment, AS);
226   // Optionally, we are shuffling the loaded vector element(s) into place.
227   // For the mask, set everything but element 0 to undef to prevent poison from
228   // propagating from the extra loaded memory. This will also optionally
229   // shrink/grow the vector from the loaded size to the output size.
230   // We assume this operation has no cost in codegen if there was no offset.
231   // Note that we could use freeze to avoid poison problems, but then we might
232   // still need a shuffle to change the vector size.
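  // Worked example (illustrative, added): with MinVecNumElts == 4, an output
  // type of <4 x i32>, and OffsetEltIndex == 2, the mask built below is
  // {2, undef, undef, undef}: lane 2 of the widened load is moved into lane 0
  // and all other lanes are left undef.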
233 unsigned OutputNumElts = Ty->getNumElements(); 234 SmallVector<int, 16> Mask(OutputNumElts, UndefMaskElem); 235 assert(OffsetEltIndex < MinVecNumElts && "Address offset too big"); 236 Mask[0] = OffsetEltIndex; 237 if (OffsetEltIndex) 238 NewCost += TTI.getShuffleCost(TTI::SK_PermuteSingleSrc, MinVecTy, Mask); 239 240 // We can aggressively convert to the vector form because the backend can 241 // invert this transform if it does not result in a performance win. 242 if (OldCost < NewCost || !NewCost.isValid()) 243 return false; 244 245 // It is safe and potentially profitable to load a vector directly: 246 // inselt undef, load Scalar, 0 --> load VecPtr 247 IRBuilder<> Builder(Load); 248 Value *CastedPtr = Builder.CreateBitCast(SrcPtr, MinVecTy->getPointerTo(AS)); 249 Value *VecLd = Builder.CreateAlignedLoad(MinVecTy, CastedPtr, Alignment); 250 VecLd = Builder.CreateShuffleVector(VecLd, Mask); 251 252 replaceValue(I, *VecLd); 253 ++NumVecLoad; 254 return true; 255 } 256 257 /// Determine which, if any, of the inputs should be replaced by a shuffle 258 /// followed by extract from a different index. 259 ExtractElementInst *VectorCombine::getShuffleExtract( 260 ExtractElementInst *Ext0, ExtractElementInst *Ext1, 261 unsigned PreferredExtractIndex = InvalidIndex) const { 262 assert(isa<ConstantInt>(Ext0->getIndexOperand()) && 263 isa<ConstantInt>(Ext1->getIndexOperand()) && 264 "Expected constant extract indexes"); 265 266 unsigned Index0 = cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue(); 267 unsigned Index1 = cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue(); 268 269 // If the extract indexes are identical, no shuffle is needed. 270 if (Index0 == Index1) 271 return nullptr; 272 273 Type *VecTy = Ext0->getVectorOperand()->getType(); 274 assert(VecTy == Ext1->getVectorOperand()->getType() && "Need matching types"); 275 InstructionCost Cost0 = 276 TTI.getVectorInstrCost(Ext0->getOpcode(), VecTy, Index0); 277 InstructionCost Cost1 = 278 TTI.getVectorInstrCost(Ext1->getOpcode(), VecTy, Index1); 279 280 // If both costs are invalid no shuffle is needed 281 if (!Cost0.isValid() && !Cost1.isValid()) 282 return nullptr; 283 284 // We are extracting from 2 different indexes, so one operand must be shuffled 285 // before performing a vector operation and/or extract. The more expensive 286 // extract will be replaced by a shuffle. 287 if (Cost0 > Cost1) 288 return Ext0; 289 if (Cost1 > Cost0) 290 return Ext1; 291 292 // If the costs are equal and there is a preferred extract index, shuffle the 293 // opposite operand. 294 if (PreferredExtractIndex == Index0) 295 return Ext1; 296 if (PreferredExtractIndex == Index1) 297 return Ext0; 298 299 // Otherwise, replace the extract with the higher index. 300 return Index0 > Index1 ? Ext0 : Ext1; 301 } 302 303 /// Compare the relative costs of 2 extracts followed by scalar operation vs. 304 /// vector operation(s) followed by extract. Return true if the existing 305 /// instructions are cheaper than a vector alternative. Otherwise, return false 306 /// and if one of the extracts should be transformed to a shufflevector, set 307 /// \p ConvertToShuffle to that extract instruction. 
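/// For example (illustrative IR, not from the original comment), given
///   %a = extractelement <4 x float> %x, i32 0
///   %b = extractelement <4 x float> %y, i32 2
///   %f = fadd float %a, %b
/// the existing cost is two extracts plus a scalar fadd, while the vector
/// alternative is one vector fadd plus the cheaper extract, plus a splat-like
/// shuffle of one operand because the extract indexes differ.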
308 bool VectorCombine::isExtractExtractCheap(ExtractElementInst *Ext0, 309 ExtractElementInst *Ext1, 310 const Instruction &I, 311 ExtractElementInst *&ConvertToShuffle, 312 unsigned PreferredExtractIndex) { 313 assert(isa<ConstantInt>(Ext0->getOperand(1)) && 314 isa<ConstantInt>(Ext1->getOperand(1)) && 315 "Expected constant extract indexes"); 316 unsigned Opcode = I.getOpcode(); 317 Type *ScalarTy = Ext0->getType(); 318 auto *VecTy = cast<VectorType>(Ext0->getOperand(0)->getType()); 319 InstructionCost ScalarOpCost, VectorOpCost; 320 321 // Get cost estimates for scalar and vector versions of the operation. 322 bool IsBinOp = Instruction::isBinaryOp(Opcode); 323 if (IsBinOp) { 324 ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy); 325 VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy); 326 } else { 327 assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) && 328 "Expected a compare"); 329 CmpInst::Predicate Pred = cast<CmpInst>(I).getPredicate(); 330 ScalarOpCost = TTI.getCmpSelInstrCost( 331 Opcode, ScalarTy, CmpInst::makeCmpResultType(ScalarTy), Pred); 332 VectorOpCost = TTI.getCmpSelInstrCost( 333 Opcode, VecTy, CmpInst::makeCmpResultType(VecTy), Pred); 334 } 335 336 // Get cost estimates for the extract elements. These costs will factor into 337 // both sequences. 338 unsigned Ext0Index = cast<ConstantInt>(Ext0->getOperand(1))->getZExtValue(); 339 unsigned Ext1Index = cast<ConstantInt>(Ext1->getOperand(1))->getZExtValue(); 340 341 InstructionCost Extract0Cost = 342 TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext0Index); 343 InstructionCost Extract1Cost = 344 TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext1Index); 345 346 // A more expensive extract will always be replaced by a splat shuffle. 347 // For example, if Ext0 is more expensive: 348 // opcode (extelt V0, Ext0), (ext V1, Ext1) --> 349 // extelt (opcode (splat V0, Ext0), V1), Ext1 350 // TODO: Evaluate whether that always results in lowest cost. Alternatively, 351 // check the cost of creating a broadcast shuffle and shuffling both 352 // operands to element 0. 353 InstructionCost CheapExtractCost = std::min(Extract0Cost, Extract1Cost); 354 355 // Extra uses of the extracts mean that we include those costs in the 356 // vector total because those instructions will not be eliminated. 357 InstructionCost OldCost, NewCost; 358 if (Ext0->getOperand(0) == Ext1->getOperand(0) && Ext0Index == Ext1Index) { 359 // Handle a special case. If the 2 extracts are identical, adjust the 360 // formulas to account for that. The extra use charge allows for either the 361 // CSE'd pattern or an unoptimized form with identical values: 362 // opcode (extelt V, C), (extelt V, C) --> extelt (opcode V, V), C 363 bool HasUseTax = Ext0 == Ext1 ? !Ext0->hasNUses(2) 364 : !Ext0->hasOneUse() || !Ext1->hasOneUse(); 365 OldCost = CheapExtractCost + ScalarOpCost; 366 NewCost = VectorOpCost + CheapExtractCost + HasUseTax * CheapExtractCost; 367 } else { 368 // Handle the general case. 
Each extract is actually a different value: 369 // opcode (extelt V0, C0), (extelt V1, C1) --> extelt (opcode V0, V1), C 370 OldCost = Extract0Cost + Extract1Cost + ScalarOpCost; 371 NewCost = VectorOpCost + CheapExtractCost + 372 !Ext0->hasOneUse() * Extract0Cost + 373 !Ext1->hasOneUse() * Extract1Cost; 374 } 375 376 ConvertToShuffle = getShuffleExtract(Ext0, Ext1, PreferredExtractIndex); 377 if (ConvertToShuffle) { 378 if (IsBinOp && DisableBinopExtractShuffle) 379 return true; 380 381 // If we are extracting from 2 different indexes, then one operand must be 382 // shuffled before performing the vector operation. The shuffle mask is 383 // undefined except for 1 lane that is being translated to the remaining 384 // extraction lane. Therefore, it is a splat shuffle. Ex: 385 // ShufMask = { undef, undef, 0, undef } 386 // TODO: The cost model has an option for a "broadcast" shuffle 387 // (splat-from-element-0), but no option for a more general splat. 388 NewCost += 389 TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy); 390 } 391 392 // Aggressively form a vector op if the cost is equal because the transform 393 // may enable further optimization. 394 // Codegen can reverse this transform (scalarize) if it was not profitable. 395 return OldCost < NewCost; 396 } 397 398 /// Create a shuffle that translates (shifts) 1 element from the input vector 399 /// to a new element location. 400 static Value *createShiftShuffle(Value *Vec, unsigned OldIndex, 401 unsigned NewIndex, IRBuilder<> &Builder) { 402 // The shuffle mask is undefined except for 1 lane that is being translated 403 // to the new element index. Example for OldIndex == 2 and NewIndex == 0: 404 // ShufMask = { 2, undef, undef, undef } 405 auto *VecTy = cast<FixedVectorType>(Vec->getType()); 406 SmallVector<int, 32> ShufMask(VecTy->getNumElements(), UndefMaskElem); 407 ShufMask[NewIndex] = OldIndex; 408 return Builder.CreateShuffleVector(Vec, ShufMask, "shift"); 409 } 410 411 /// Given an extract element instruction with constant index operand, shuffle 412 /// the source vector (shift the scalar element) to a NewIndex for extraction. 413 /// Return null if the input can be constant folded, so that we are not creating 414 /// unnecessary instructions. 415 static ExtractElementInst *translateExtract(ExtractElementInst *ExtElt, 416 unsigned NewIndex, 417 IRBuilder<> &Builder) { 418 // If the extract can be constant-folded, this code is unsimplified. Defer 419 // to other passes to handle that. 420 Value *X = ExtElt->getVectorOperand(); 421 Value *C = ExtElt->getIndexOperand(); 422 assert(isa<ConstantInt>(C) && "Expected a constant index operand"); 423 if (isa<Constant>(X)) 424 return nullptr; 425 426 Value *Shuf = createShiftShuffle(X, cast<ConstantInt>(C)->getZExtValue(), 427 NewIndex, Builder); 428 return cast<ExtractElementInst>(Builder.CreateExtractElement(Shuf, NewIndex)); 429 } 430 431 /// Try to reduce extract element costs by converting scalar compares to vector 432 /// compares followed by extract. 
433 /// cmp (ext0 V0, C), (ext1 V1, C) 434 void VectorCombine::foldExtExtCmp(ExtractElementInst *Ext0, 435 ExtractElementInst *Ext1, Instruction &I) { 436 assert(isa<CmpInst>(&I) && "Expected a compare"); 437 assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() == 438 cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() && 439 "Expected matching constant extract indexes"); 440 441 // cmp Pred (extelt V0, C), (extelt V1, C) --> extelt (cmp Pred V0, V1), C 442 ++NumVecCmp; 443 CmpInst::Predicate Pred = cast<CmpInst>(&I)->getPredicate(); 444 Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand(); 445 Value *VecCmp = Builder.CreateCmp(Pred, V0, V1); 446 Value *NewExt = Builder.CreateExtractElement(VecCmp, Ext0->getIndexOperand()); 447 replaceValue(I, *NewExt); 448 } 449 450 /// Try to reduce extract element costs by converting scalar binops to vector 451 /// binops followed by extract. 452 /// bo (ext0 V0, C), (ext1 V1, C) 453 void VectorCombine::foldExtExtBinop(ExtractElementInst *Ext0, 454 ExtractElementInst *Ext1, Instruction &I) { 455 assert(isa<BinaryOperator>(&I) && "Expected a binary operator"); 456 assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() == 457 cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() && 458 "Expected matching constant extract indexes"); 459 460 // bo (extelt V0, C), (extelt V1, C) --> extelt (bo V0, V1), C 461 ++NumVecBO; 462 Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand(); 463 Value *VecBO = 464 Builder.CreateBinOp(cast<BinaryOperator>(&I)->getOpcode(), V0, V1); 465 466 // All IR flags are safe to back-propagate because any potential poison 467 // created in unused vector elements is discarded by the extract. 468 if (auto *VecBOInst = dyn_cast<Instruction>(VecBO)) 469 VecBOInst->copyIRFlags(&I); 470 471 Value *NewExt = Builder.CreateExtractElement(VecBO, Ext0->getIndexOperand()); 472 replaceValue(I, *NewExt); 473 } 474 475 /// Match an instruction with extracted vector operands. 476 bool VectorCombine::foldExtractExtract(Instruction &I) { 477 // It is not safe to transform things like div, urem, etc. because we may 478 // create undefined behavior when executing those on unknown vector elements. 479 if (!isSafeToSpeculativelyExecute(&I)) 480 return false; 481 482 Instruction *I0, *I1; 483 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE; 484 if (!match(&I, m_Cmp(Pred, m_Instruction(I0), m_Instruction(I1))) && 485 !match(&I, m_BinOp(m_Instruction(I0), m_Instruction(I1)))) 486 return false; 487 488 Value *V0, *V1; 489 uint64_t C0, C1; 490 if (!match(I0, m_ExtractElt(m_Value(V0), m_ConstantInt(C0))) || 491 !match(I1, m_ExtractElt(m_Value(V1), m_ConstantInt(C1))) || 492 V0->getType() != V1->getType()) 493 return false; 494 495 // If the scalar value 'I' is going to be re-inserted into a vector, then try 496 // to create an extract to that same element. The extract/insert can be 497 // reduced to a "select shuffle". 498 // TODO: If we add a larger pattern match that starts from an insert, this 499 // probably becomes unnecessary. 
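  // Illustrative note (added): if the only user of this operation is, say,
  //   %r = insertelement <4 x i32> %v, i32 %scalar, i32 2
  // then InsertIndex below becomes 2 and, when the extract costs tie, the
  // cost model keeps the extract at index 2 so the later extract/insert pair
  // can fold into a select shuffle.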
500 auto *Ext0 = cast<ExtractElementInst>(I0); 501 auto *Ext1 = cast<ExtractElementInst>(I1); 502 uint64_t InsertIndex = InvalidIndex; 503 if (I.hasOneUse()) 504 match(I.user_back(), 505 m_InsertElt(m_Value(), m_Value(), m_ConstantInt(InsertIndex))); 506 507 ExtractElementInst *ExtractToChange; 508 if (isExtractExtractCheap(Ext0, Ext1, I, ExtractToChange, InsertIndex)) 509 return false; 510 511 if (ExtractToChange) { 512 unsigned CheapExtractIdx = ExtractToChange == Ext0 ? C1 : C0; 513 ExtractElementInst *NewExtract = 514 translateExtract(ExtractToChange, CheapExtractIdx, Builder); 515 if (!NewExtract) 516 return false; 517 if (ExtractToChange == Ext0) 518 Ext0 = NewExtract; 519 else 520 Ext1 = NewExtract; 521 } 522 523 if (Pred != CmpInst::BAD_ICMP_PREDICATE) 524 foldExtExtCmp(Ext0, Ext1, I); 525 else 526 foldExtExtBinop(Ext0, Ext1, I); 527 528 Worklist.push(Ext0); 529 Worklist.push(Ext1); 530 return true; 531 } 532 533 /// If this is a bitcast of a shuffle, try to bitcast the source vector to the 534 /// destination type followed by shuffle. This can enable further transforms by 535 /// moving bitcasts or shuffles together. 536 bool VectorCombine::foldBitcastShuf(Instruction &I) { 537 Value *V; 538 ArrayRef<int> Mask; 539 if (!match(&I, m_BitCast( 540 m_OneUse(m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask)))))) 541 return false; 542 543 // 1) Do not fold bitcast shuffle for scalable type. First, shuffle cost for 544 // scalable type is unknown; Second, we cannot reason if the narrowed shuffle 545 // mask for scalable type is a splat or not. 546 // 2) Disallow non-vector casts and length-changing shuffles. 547 // TODO: We could allow any shuffle. 548 auto *DestTy = dyn_cast<FixedVectorType>(I.getType()); 549 auto *SrcTy = dyn_cast<FixedVectorType>(V->getType()); 550 if (!SrcTy || !DestTy || I.getOperand(0)->getType() != SrcTy) 551 return false; 552 553 unsigned DestNumElts = DestTy->getNumElements(); 554 unsigned SrcNumElts = SrcTy->getNumElements(); 555 SmallVector<int, 16> NewMask; 556 if (SrcNumElts <= DestNumElts) { 557 // The bitcast is from wide to narrow/equal elements. The shuffle mask can 558 // always be expanded to the equivalent form choosing narrower elements. 559 assert(DestNumElts % SrcNumElts == 0 && "Unexpected shuffle mask"); 560 unsigned ScaleFactor = DestNumElts / SrcNumElts; 561 narrowShuffleMaskElts(ScaleFactor, Mask, NewMask); 562 } else { 563 // The bitcast is from narrow elements to wide elements. The shuffle mask 564 // must choose consecutive elements to allow casting first. 565 assert(SrcNumElts % DestNumElts == 0 && "Unexpected shuffle mask"); 566 unsigned ScaleFactor = SrcNumElts / DestNumElts; 567 if (!widenShuffleMaskElts(ScaleFactor, Mask, NewMask)) 568 return false; 569 } 570 571 // The new shuffle must not cost more than the old shuffle. The bitcast is 572 // moved ahead of the shuffle, so assume that it has the same cost as before. 
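  // Mask-rewrite example (illustrative, added): for
  //   %s = shufflevector <2 x i64> %v, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
  //   %b = bitcast <2 x i64> %s to <4 x i32>
  // SrcNumElts == 2 and DestNumElts == 4, so the mask is narrowed by a factor
  // of 2 to {2, 3, 0, 1} and the bitcast can be performed on %v first.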
573 InstructionCost DestCost = TTI.getShuffleCost( 574 TargetTransformInfo::SK_PermuteSingleSrc, DestTy, NewMask); 575 InstructionCost SrcCost = 576 TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, SrcTy, Mask); 577 if (DestCost > SrcCost || !DestCost.isValid()) 578 return false; 579 580 // bitcast (shuf V, MaskC) --> shuf (bitcast V), MaskC' 581 ++NumShufOfBitcast; 582 Value *CastV = Builder.CreateBitCast(V, DestTy); 583 Value *Shuf = Builder.CreateShuffleVector(CastV, NewMask); 584 replaceValue(I, *Shuf); 585 return true; 586 } 587 588 /// Match a vector binop or compare instruction with at least one inserted 589 /// scalar operand and convert to scalar binop/cmp followed by insertelement. 590 bool VectorCombine::scalarizeBinopOrCmp(Instruction &I) { 591 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE; 592 Value *Ins0, *Ins1; 593 if (!match(&I, m_BinOp(m_Value(Ins0), m_Value(Ins1))) && 594 !match(&I, m_Cmp(Pred, m_Value(Ins0), m_Value(Ins1)))) 595 return false; 596 597 // Do not convert the vector condition of a vector select into a scalar 598 // condition. That may cause problems for codegen because of differences in 599 // boolean formats and register-file transfers. 600 // TODO: Can we account for that in the cost model? 601 bool IsCmp = Pred != CmpInst::Predicate::BAD_ICMP_PREDICATE; 602 if (IsCmp) 603 for (User *U : I.users()) 604 if (match(U, m_Select(m_Specific(&I), m_Value(), m_Value()))) 605 return false; 606 607 // Match against one or both scalar values being inserted into constant 608 // vectors: 609 // vec_op VecC0, (inselt VecC1, V1, Index) 610 // vec_op (inselt VecC0, V0, Index), VecC1 611 // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index) 612 // TODO: Deal with mismatched index constants and variable indexes? 613 Constant *VecC0 = nullptr, *VecC1 = nullptr; 614 Value *V0 = nullptr, *V1 = nullptr; 615 uint64_t Index0 = 0, Index1 = 0; 616 if (!match(Ins0, m_InsertElt(m_Constant(VecC0), m_Value(V0), 617 m_ConstantInt(Index0))) && 618 !match(Ins0, m_Constant(VecC0))) 619 return false; 620 if (!match(Ins1, m_InsertElt(m_Constant(VecC1), m_Value(V1), 621 m_ConstantInt(Index1))) && 622 !match(Ins1, m_Constant(VecC1))) 623 return false; 624 625 bool IsConst0 = !V0; 626 bool IsConst1 = !V1; 627 if (IsConst0 && IsConst1) 628 return false; 629 if (!IsConst0 && !IsConst1 && Index0 != Index1) 630 return false; 631 632 // Bail for single insertion if it is a load. 633 // TODO: Handle this once getVectorInstrCost can cost for load/stores. 634 auto *I0 = dyn_cast_or_null<Instruction>(V0); 635 auto *I1 = dyn_cast_or_null<Instruction>(V1); 636 if ((IsConst0 && I1 && I1->mayReadFromMemory()) || 637 (IsConst1 && I0 && I0->mayReadFromMemory())) 638 return false; 639 640 uint64_t Index = IsConst0 ? Index1 : Index0; 641 Type *ScalarTy = IsConst0 ? 
V1->getType() : V0->getType(); 642 Type *VecTy = I.getType(); 643 assert(VecTy->isVectorTy() && 644 (IsConst0 || IsConst1 || V0->getType() == V1->getType()) && 645 (ScalarTy->isIntegerTy() || ScalarTy->isFloatingPointTy() || 646 ScalarTy->isPointerTy()) && 647 "Unexpected types for insert element into binop or cmp"); 648 649 unsigned Opcode = I.getOpcode(); 650 InstructionCost ScalarOpCost, VectorOpCost; 651 if (IsCmp) { 652 CmpInst::Predicate Pred = cast<CmpInst>(I).getPredicate(); 653 ScalarOpCost = TTI.getCmpSelInstrCost( 654 Opcode, ScalarTy, CmpInst::makeCmpResultType(ScalarTy), Pred); 655 VectorOpCost = TTI.getCmpSelInstrCost( 656 Opcode, VecTy, CmpInst::makeCmpResultType(VecTy), Pred); 657 } else { 658 ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy); 659 VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy); 660 } 661 662 // Get cost estimate for the insert element. This cost will factor into 663 // both sequences. 664 InstructionCost InsertCost = 665 TTI.getVectorInstrCost(Instruction::InsertElement, VecTy, Index); 666 InstructionCost OldCost = 667 (IsConst0 ? 0 : InsertCost) + (IsConst1 ? 0 : InsertCost) + VectorOpCost; 668 InstructionCost NewCost = ScalarOpCost + InsertCost + 669 (IsConst0 ? 0 : !Ins0->hasOneUse() * InsertCost) + 670 (IsConst1 ? 0 : !Ins1->hasOneUse() * InsertCost); 671 672 // We want to scalarize unless the vector variant actually has lower cost. 673 if (OldCost < NewCost || !NewCost.isValid()) 674 return false; 675 676 // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index) --> 677 // inselt NewVecC, (scalar_op V0, V1), Index 678 if (IsCmp) 679 ++NumScalarCmp; 680 else 681 ++NumScalarBO; 682 683 // For constant cases, extract the scalar element, this should constant fold. 684 if (IsConst0) 685 V0 = ConstantExpr::getExtractElement(VecC0, Builder.getInt64(Index)); 686 if (IsConst1) 687 V1 = ConstantExpr::getExtractElement(VecC1, Builder.getInt64(Index)); 688 689 Value *Scalar = 690 IsCmp ? Builder.CreateCmp(Pred, V0, V1) 691 : Builder.CreateBinOp((Instruction::BinaryOps)Opcode, V0, V1); 692 693 Scalar->setName(I.getName() + ".scalar"); 694 695 // All IR flags are safe to back-propagate. There is no potential for extra 696 // poison to be created by the scalar instruction. 697 if (auto *ScalarInst = dyn_cast<Instruction>(Scalar)) 698 ScalarInst->copyIRFlags(&I); 699 700 // Fold the vector constants in the original vectors into a new base vector. 701 Constant *NewVecC = IsCmp ? ConstantExpr::getCompare(Pred, VecC0, VecC1) 702 : ConstantExpr::get(Opcode, VecC0, VecC1); 703 Value *Insert = Builder.CreateInsertElement(NewVecC, Scalar, Index); 704 replaceValue(I, *Insert); 705 return true; 706 } 707 708 /// Try to combine a scalar binop + 2 scalar compares of extracted elements of 709 /// a vector into vector operations followed by extract. Note: The SLP pass 710 /// may miss this pattern because of implementation problems. 711 bool VectorCombine::foldExtractedCmps(Instruction &I) { 712 // We are looking for a scalar binop of booleans. 713 // binop i1 (cmp Pred I0, C0), (cmp Pred I1, C1) 714 if (!I.isBinaryOp() || !I.getType()->isIntegerTy(1)) 715 return false; 716 717 // The compare predicates should match, and each compare should have a 718 // constant operand. 719 // TODO: Relax the one-use constraints. 
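  // The shape being matched here is, illustratively:
  //   %e0 = extractelement <4 x i32> %x, i32 0
  //   %c0 = icmp sgt i32 %e0, 42
  //   %e1 = extractelement <4 x i32> %x, i32 1
  //   %c1 = icmp sgt i32 %e1, -7
  //   %r  = or i1 %c0, %c1
  // i.e. both compares use the same predicate and the same source vector,
  // with constant extract indexes and constant compare operands.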
720 Value *B0 = I.getOperand(0), *B1 = I.getOperand(1); 721 Instruction *I0, *I1; 722 Constant *C0, *C1; 723 CmpInst::Predicate P0, P1; 724 if (!match(B0, m_OneUse(m_Cmp(P0, m_Instruction(I0), m_Constant(C0)))) || 725 !match(B1, m_OneUse(m_Cmp(P1, m_Instruction(I1), m_Constant(C1)))) || 726 P0 != P1) 727 return false; 728 729 // The compare operands must be extracts of the same vector with constant 730 // extract indexes. 731 // TODO: Relax the one-use constraints. 732 Value *X; 733 uint64_t Index0, Index1; 734 if (!match(I0, m_OneUse(m_ExtractElt(m_Value(X), m_ConstantInt(Index0)))) || 735 !match(I1, m_OneUse(m_ExtractElt(m_Specific(X), m_ConstantInt(Index1))))) 736 return false; 737 738 auto *Ext0 = cast<ExtractElementInst>(I0); 739 auto *Ext1 = cast<ExtractElementInst>(I1); 740 ExtractElementInst *ConvertToShuf = getShuffleExtract(Ext0, Ext1); 741 if (!ConvertToShuf) 742 return false; 743 744 // The original scalar pattern is: 745 // binop i1 (cmp Pred (ext X, Index0), C0), (cmp Pred (ext X, Index1), C1) 746 CmpInst::Predicate Pred = P0; 747 unsigned CmpOpcode = CmpInst::isFPPredicate(Pred) ? Instruction::FCmp 748 : Instruction::ICmp; 749 auto *VecTy = dyn_cast<FixedVectorType>(X->getType()); 750 if (!VecTy) 751 return false; 752 753 InstructionCost OldCost = 754 TTI.getVectorInstrCost(Ext0->getOpcode(), VecTy, Index0); 755 OldCost += TTI.getVectorInstrCost(Ext1->getOpcode(), VecTy, Index1); 756 OldCost += 757 TTI.getCmpSelInstrCost(CmpOpcode, I0->getType(), 758 CmpInst::makeCmpResultType(I0->getType()), Pred) * 759 2; 760 OldCost += TTI.getArithmeticInstrCost(I.getOpcode(), I.getType()); 761 762 // The proposed vector pattern is: 763 // vcmp = cmp Pred X, VecC 764 // ext (binop vNi1 vcmp, (shuffle vcmp, Index1)), Index0 765 int CheapIndex = ConvertToShuf == Ext0 ? Index1 : Index0; 766 int ExpensiveIndex = ConvertToShuf == Ext0 ? Index0 : Index1; 767 auto *CmpTy = cast<FixedVectorType>(CmpInst::makeCmpResultType(X->getType())); 768 InstructionCost NewCost = TTI.getCmpSelInstrCost( 769 CmpOpcode, X->getType(), CmpInst::makeCmpResultType(X->getType()), Pred); 770 SmallVector<int, 32> ShufMask(VecTy->getNumElements(), UndefMaskElem); 771 ShufMask[CheapIndex] = ExpensiveIndex; 772 NewCost += TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, CmpTy, 773 ShufMask); 774 NewCost += TTI.getArithmeticInstrCost(I.getOpcode(), CmpTy); 775 NewCost += TTI.getVectorInstrCost(Ext0->getOpcode(), CmpTy, CheapIndex); 776 777 // Aggressively form vector ops if the cost is equal because the transform 778 // may enable further optimization. 779 // Codegen can reverse this transform (scalarize) if it was not profitable. 780 if (OldCost < NewCost || !NewCost.isValid()) 781 return false; 782 783 // Create a vector constant from the 2 scalar constants. 
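  // Illustrative example (added): with a <4 x i32> source, Index0 == 0,
  // C0 == 42, Index1 == 2, and C1 == 7, the constant built below is
  // <i32 42, i32 undef, i32 7, i32 undef>; only the two lanes that feed the
  // compares and the final extract need defined values.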
784   SmallVector<Constant *, 32> CmpC(VecTy->getNumElements(),
785                                    UndefValue::get(VecTy->getElementType()));
786   CmpC[Index0] = C0;
787   CmpC[Index1] = C1;
788   Value *VCmp = Builder.CreateCmp(Pred, X, ConstantVector::get(CmpC));
789
790   Value *Shuf = createShiftShuffle(VCmp, ExpensiveIndex, CheapIndex, Builder);
791   Value *VecLogic = Builder.CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
792                                         VCmp, Shuf);
793   Value *NewExt = Builder.CreateExtractElement(VecLogic, CheapIndex);
794   replaceValue(I, *NewExt);
795   ++NumVecCmpBO;
796   return true;
797 }
798
799 // Check if the memory location is modified between two instructions in the same BB.
800 static bool isMemModifiedBetween(BasicBlock::iterator Begin,
801                                  BasicBlock::iterator End,
802                                  const MemoryLocation &Loc, AAResults &AA) {
803   unsigned NumScanned = 0;
804   return std::any_of(Begin, End, [&](const Instruction &Instr) {
805     return isModSet(AA.getModRefInfo(&Instr, Loc)) ||
806            ++NumScanned > MaxInstrsToScan;
807   });
808 }
809
810 /// Helper class to indicate whether a vector index can be safely scalarized
811 /// and whether a freeze needs to be inserted.
812 class ScalarizationResult {
813   enum class StatusTy { Unsafe, Safe, SafeWithFreeze };
814
815   StatusTy Status;
816   Value *ToFreeze;
817
818   ScalarizationResult(StatusTy Status, Value *ToFreeze = nullptr)
819       : Status(Status), ToFreeze(ToFreeze) {}
820
821 public:
822   ScalarizationResult(const ScalarizationResult &Other) = default;
823   ~ScalarizationResult() {
824     assert(!ToFreeze && "freeze() not called with ToFreeze being set");
825   }
826
827   static ScalarizationResult unsafe() { return {StatusTy::Unsafe}; }
828   static ScalarizationResult safe() { return {StatusTy::Safe}; }
829   static ScalarizationResult safeWithFreeze(Value *ToFreeze) {
830     return {StatusTy::SafeWithFreeze, ToFreeze};
831   }
832
833   /// Returns true if the index can be scalarized without requiring a freeze.
834   bool isSafe() const { return Status == StatusTy::Safe; }
835   /// Returns true if the index cannot be scalarized.
836   bool isUnsafe() const { return Status == StatusTy::Unsafe; }
837   /// Returns true if the index can be scalarized, but requires inserting a
838   /// freeze.
839   bool isSafeWithFreeze() const { return Status == StatusTy::SafeWithFreeze; }
840
841   /// Reset the state to Unsafe and clear ToFreeze if set.
842   void discard() {
843     ToFreeze = nullptr;
844     Status = StatusTy::Unsafe;
845   }
846
847   /// Freeze the ToFreeze value and update the use in \p UserI to use it.
848   void freeze(IRBuilder<> &Builder, Instruction &UserI) {
849     assert(isSafeWithFreeze() &&
850            "should only be used when freezing is required");
851     assert(is_contained(ToFreeze->users(), &UserI) &&
852            "UserI must be a user of ToFreeze");
853     IRBuilder<>::InsertPointGuard Guard(Builder);
854     Builder.SetInsertPoint(cast<Instruction>(&UserI));
855     Value *Frozen =
856         Builder.CreateFreeze(ToFreeze, ToFreeze->getName() + ".frozen");
857     for (Use &U : make_early_inc_range(UserI.operands()))
858       if (U.get() == ToFreeze)
859         U.set(Frozen);
860
861     ToFreeze = nullptr;
862   }
863 };
864
865 /// Check if it is legal to scalarize a memory access to \p VecTy at index \p
866 /// Idx. \p Idx must access a valid vector element.
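/// For example (illustrative, added): with a <4 x i32> vector, an index of the
/// form (and %i, 3) is always in range but %i itself may be poison, so the
/// result is safeWithFreeze(%i) and the caller must freeze %i before using it.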
867 static ScalarizationResult canScalarizeAccess(FixedVectorType *VecTy, 868 Value *Idx, Instruction *CtxI, 869 AssumptionCache &AC, 870 const DominatorTree &DT) { 871 if (auto *C = dyn_cast<ConstantInt>(Idx)) { 872 if (C->getValue().ult(VecTy->getNumElements())) 873 return ScalarizationResult::safe(); 874 return ScalarizationResult::unsafe(); 875 } 876 877 unsigned IntWidth = Idx->getType()->getScalarSizeInBits(); 878 APInt Zero(IntWidth, 0); 879 APInt MaxElts(IntWidth, VecTy->getNumElements()); 880 ConstantRange ValidIndices(Zero, MaxElts); 881 ConstantRange IdxRange(IntWidth, true); 882 883 if (isGuaranteedNotToBePoison(Idx, &AC)) { 884 if (ValidIndices.contains(computeConstantRange(Idx, true, &AC, CtxI, &DT))) 885 return ScalarizationResult::safe(); 886 return ScalarizationResult::unsafe(); 887 } 888 889 // If the index may be poison, check if we can insert a freeze before the 890 // range of the index is restricted. 891 Value *IdxBase; 892 ConstantInt *CI; 893 if (match(Idx, m_And(m_Value(IdxBase), m_ConstantInt(CI)))) { 894 IdxRange = IdxRange.binaryAnd(CI->getValue()); 895 } else if (match(Idx, m_URem(m_Value(IdxBase), m_ConstantInt(CI)))) { 896 IdxRange = IdxRange.urem(CI->getValue()); 897 } 898 899 if (ValidIndices.contains(IdxRange)) 900 return ScalarizationResult::safeWithFreeze(IdxBase); 901 return ScalarizationResult::unsafe(); 902 } 903 904 /// The memory operation on a vector of \p ScalarType had alignment of 905 /// \p VectorAlignment. Compute the maximal, but conservatively correct, 906 /// alignment that will be valid for the memory operation on a single scalar 907 /// element of the same type with index \p Idx. 908 static Align computeAlignmentAfterScalarization(Align VectorAlignment, 909 Type *ScalarType, Value *Idx, 910 const DataLayout &DL) { 911 if (auto *C = dyn_cast<ConstantInt>(Idx)) 912 return commonAlignment(VectorAlignment, 913 C->getZExtValue() * DL.getTypeStoreSize(ScalarType)); 914 return commonAlignment(VectorAlignment, DL.getTypeStoreSize(ScalarType)); 915 } 916 917 // Combine patterns like: 918 // %0 = load <4 x i32>, <4 x i32>* %a 919 // %1 = insertelement <4 x i32> %0, i32 %b, i32 1 920 // store <4 x i32> %1, <4 x i32>* %a 921 // to: 922 // %0 = bitcast <4 x i32>* %a to i32* 923 // %1 = getelementptr inbounds i32, i32* %0, i64 0, i64 1 924 // store i32 %b, i32* %1 925 bool VectorCombine::foldSingleElementStore(Instruction &I) { 926 StoreInst *SI = dyn_cast<StoreInst>(&I); 927 if (!SI || !SI->isSimple() || 928 !isa<FixedVectorType>(SI->getValueOperand()->getType())) 929 return false; 930 931 // TODO: Combine more complicated patterns (multiple insert) by referencing 932 // TargetTransformInfo. 933 Instruction *Source; 934 Value *NewElement; 935 Value *Idx; 936 if (!match(SI->getValueOperand(), 937 m_InsertElt(m_Instruction(Source), m_Value(NewElement), 938 m_Value(Idx)))) 939 return false; 940 941 if (auto *Load = dyn_cast<LoadInst>(Source)) { 942 auto VecTy = cast<FixedVectorType>(SI->getValueOperand()->getType()); 943 const DataLayout &DL = I.getModule()->getDataLayout(); 944 Value *SrcAddr = Load->getPointerOperand()->stripPointerCasts(); 945 // Don't optimize for atomic/volatile load or store. Ensure memory is not 946 // modified between, vector type matches store size, and index is inbounds. 
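    // Illustrative walk-through of the checks below (added): for the pattern
    // in the function-level comment, the index 1 into a <4 x i32> value is a
    // constant in range and memory is unmodified between the load and the
    // store, so the whole-vector store is narrowed to a single i32 store
    // through a GEP to element 1.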
947 if (!Load->isSimple() || Load->getParent() != SI->getParent() || 948 !DL.typeSizeEqualsStoreSize(Load->getType()) || 949 SrcAddr != SI->getPointerOperand()->stripPointerCasts()) 950 return false; 951 952 auto ScalarizableIdx = canScalarizeAccess(VecTy, Idx, Load, AC, DT); 953 if (ScalarizableIdx.isUnsafe() || 954 isMemModifiedBetween(Load->getIterator(), SI->getIterator(), 955 MemoryLocation::get(SI), AA)) 956 return false; 957 958 if (ScalarizableIdx.isSafeWithFreeze()) 959 ScalarizableIdx.freeze(Builder, *cast<Instruction>(Idx)); 960 Value *GEP = Builder.CreateInBoundsGEP( 961 SI->getValueOperand()->getType(), SI->getPointerOperand(), 962 {ConstantInt::get(Idx->getType(), 0), Idx}); 963 StoreInst *NSI = Builder.CreateStore(NewElement, GEP); 964 NSI->copyMetadata(*SI); 965 Align ScalarOpAlignment = computeAlignmentAfterScalarization( 966 std::max(SI->getAlign(), Load->getAlign()), NewElement->getType(), Idx, 967 DL); 968 NSI->setAlignment(ScalarOpAlignment); 969 replaceValue(I, *NSI); 970 eraseInstruction(I); 971 return true; 972 } 973 974 return false; 975 } 976 977 /// Try to scalarize vector loads feeding extractelement instructions. 978 bool VectorCombine::scalarizeLoadExtract(Instruction &I) { 979 Value *Ptr; 980 if (!match(&I, m_Load(m_Value(Ptr)))) 981 return false; 982 983 auto *LI = cast<LoadInst>(&I); 984 const DataLayout &DL = I.getModule()->getDataLayout(); 985 if (LI->isVolatile() || !DL.typeSizeEqualsStoreSize(LI->getType())) 986 return false; 987 988 auto *FixedVT = dyn_cast<FixedVectorType>(LI->getType()); 989 if (!FixedVT) 990 return false; 991 992 InstructionCost OriginalCost = TTI.getMemoryOpCost( 993 Instruction::Load, LI->getType(), Align(LI->getAlignment()), 994 LI->getPointerAddressSpace()); 995 InstructionCost ScalarizedCost = 0; 996 997 Instruction *LastCheckedInst = LI; 998 unsigned NumInstChecked = 0; 999 // Check if all users of the load are extracts with no memory modifications 1000 // between the load and the extract. Compute the cost of both the original 1001 // code and the scalarized version. 1002 for (User *U : LI->users()) { 1003 auto *UI = dyn_cast<ExtractElementInst>(U); 1004 if (!UI || UI->getParent() != LI->getParent()) 1005 return false; 1006 1007 if (!isGuaranteedNotToBePoison(UI->getOperand(1), &AC, LI, &DT)) 1008 return false; 1009 1010 // Check if any instruction between the load and the extract may modify 1011 // memory. 1012 if (LastCheckedInst->comesBefore(UI)) { 1013 for (Instruction &I : 1014 make_range(std::next(LI->getIterator()), UI->getIterator())) { 1015 // Bail out if we reached the check limit or the instruction may write 1016 // to memory. 1017 if (NumInstChecked == MaxInstrsToScan || I.mayWriteToMemory()) 1018 return false; 1019 NumInstChecked++; 1020 } 1021 } 1022 1023 if (!LastCheckedInst) 1024 LastCheckedInst = UI; 1025 else if (LastCheckedInst->comesBefore(UI)) 1026 LastCheckedInst = UI; 1027 1028 auto ScalarIdx = canScalarizeAccess(FixedVT, UI->getOperand(1), &I, AC, DT); 1029 if (!ScalarIdx.isSafe()) { 1030 // TODO: Freeze index if it is safe to do so. 1031 ScalarIdx.discard(); 1032 return false; 1033 } 1034 1035 auto *Index = dyn_cast<ConstantInt>(UI->getOperand(1)); 1036 OriginalCost += 1037 TTI.getVectorInstrCost(Instruction::ExtractElement, LI->getType(), 1038 Index ? 
Index->getZExtValue() : -1); 1039 ScalarizedCost += 1040 TTI.getMemoryOpCost(Instruction::Load, FixedVT->getElementType(), 1041 Align(1), LI->getPointerAddressSpace()); 1042 ScalarizedCost += TTI.getAddressComputationCost(FixedVT->getElementType()); 1043 } 1044 1045 if (ScalarizedCost >= OriginalCost) 1046 return false; 1047 1048 // Replace extracts with narrow scalar loads. 1049 for (User *U : LI->users()) { 1050 auto *EI = cast<ExtractElementInst>(U); 1051 Builder.SetInsertPoint(EI); 1052 1053 Value *Idx = EI->getOperand(1); 1054 Value *GEP = 1055 Builder.CreateInBoundsGEP(FixedVT, Ptr, {Builder.getInt32(0), Idx}); 1056 auto *NewLoad = cast<LoadInst>(Builder.CreateLoad( 1057 FixedVT->getElementType(), GEP, EI->getName() + ".scalar")); 1058 1059 Align ScalarOpAlignment = computeAlignmentAfterScalarization( 1060 LI->getAlign(), FixedVT->getElementType(), Idx, DL); 1061 NewLoad->setAlignment(ScalarOpAlignment); 1062 1063 replaceValue(*EI, *NewLoad); 1064 } 1065 1066 return true; 1067 } 1068 1069 /// Try to convert "shuffle (binop), (binop)" with a shared binop operand into 1070 /// "binop (shuffle), (shuffle)". 1071 bool VectorCombine::foldShuffleOfBinops(Instruction &I) { 1072 auto *VecTy = dyn_cast<FixedVectorType>(I.getType()); 1073 if (!VecTy) 1074 return false; 1075 1076 BinaryOperator *B0, *B1; 1077 ArrayRef<int> Mask; 1078 if (!match(&I, m_Shuffle(m_OneUse(m_BinOp(B0)), m_OneUse(m_BinOp(B1)), 1079 m_Mask(Mask))) || 1080 B0->getOpcode() != B1->getOpcode() || B0->getType() != VecTy) 1081 return false; 1082 1083 // Try to replace a binop with a shuffle if the shuffle is not costly. 1084 // The new shuffle will choose from a single, common operand, so it may be 1085 // cheaper than the existing two-operand shuffle. 1086 SmallVector<int> UnaryMask = createUnaryMask(Mask, Mask.size()); 1087 Instruction::BinaryOps Opcode = B0->getOpcode(); 1088 InstructionCost BinopCost = TTI.getArithmeticInstrCost(Opcode, VecTy); 1089 InstructionCost ShufCost = TTI.getShuffleCost( 1090 TargetTransformInfo::SK_PermuteSingleSrc, VecTy, UnaryMask); 1091 if (ShufCost > BinopCost) 1092 return false; 1093 1094 // If we have something like "add X, Y" and "add Z, X", swap ops to match. 1095 Value *X = B0->getOperand(0), *Y = B0->getOperand(1); 1096 Value *Z = B1->getOperand(0), *W = B1->getOperand(1); 1097 if (BinaryOperator::isCommutative(Opcode) && X != Z && Y != W) 1098 std::swap(X, Y); 1099 1100 Value *Shuf0, *Shuf1; 1101 if (X == Z) { 1102 // shuf (bo X, Y), (bo X, W) --> bo (shuf X), (shuf Y, W) 1103 Shuf0 = Builder.CreateShuffleVector(X, UnaryMask); 1104 Shuf1 = Builder.CreateShuffleVector(Y, W, Mask); 1105 } else if (Y == W) { 1106 // shuf (bo X, Y), (bo Z, Y) --> bo (shuf X, Z), (shuf Y) 1107 Shuf0 = Builder.CreateShuffleVector(X, Z, Mask); 1108 Shuf1 = Builder.CreateShuffleVector(Y, UnaryMask); 1109 } else { 1110 return false; 1111 } 1112 1113 Value *NewBO = Builder.CreateBinOp(Opcode, Shuf0, Shuf1); 1114 // Intersect flags from the old binops. 1115 if (auto *NewInst = dyn_cast<Instruction>(NewBO)) { 1116 NewInst->copyIRFlags(B0); 1117 NewInst->andIRFlags(B1); 1118 } 1119 replaceValue(I, *NewBO); 1120 return true; 1121 } 1122 1123 /// This is the entry point for all transforms. Pass manager differences are 1124 /// handled in the callers of this function. 1125 bool VectorCombine::run() { 1126 if (DisableVectorCombine) 1127 return false; 1128 1129 // Don't attempt vectorization if the target does not support vectors. 
1130 if (!TTI.getNumberOfRegisters(TTI.getRegisterClassForType(/*Vector*/ true))) 1131 return false; 1132 1133 bool MadeChange = false; 1134 auto FoldInst = [this, &MadeChange](Instruction &I) { 1135 Builder.SetInsertPoint(&I); 1136 if (!ScalarizationOnly) { 1137 MadeChange |= vectorizeLoadInsert(I); 1138 MadeChange |= foldExtractExtract(I); 1139 MadeChange |= foldBitcastShuf(I); 1140 MadeChange |= foldExtractedCmps(I); 1141 MadeChange |= foldShuffleOfBinops(I); 1142 } 1143 MadeChange |= scalarizeBinopOrCmp(I); 1144 MadeChange |= scalarizeLoadExtract(I); 1145 MadeChange |= foldSingleElementStore(I); 1146 }; 1147 for (BasicBlock &BB : F) { 1148 // Ignore unreachable basic blocks. 1149 if (!DT.isReachableFromEntry(&BB)) 1150 continue; 1151 // Use early increment range so that we can erase instructions in loop. 1152 for (Instruction &I : make_early_inc_range(BB)) { 1153 if (I.isDebugOrPseudoInst()) 1154 continue; 1155 FoldInst(I); 1156 } 1157 } 1158 1159 while (!Worklist.isEmpty()) { 1160 Instruction *I = Worklist.removeOne(); 1161 if (!I) 1162 continue; 1163 1164 if (isInstructionTriviallyDead(I)) { 1165 eraseInstruction(*I); 1166 continue; 1167 } 1168 1169 FoldInst(*I); 1170 } 1171 1172 return MadeChange; 1173 } 1174 1175 // Pass manager boilerplate below here. 1176 1177 namespace { 1178 class VectorCombineLegacyPass : public FunctionPass { 1179 public: 1180 static char ID; 1181 VectorCombineLegacyPass() : FunctionPass(ID) { 1182 initializeVectorCombineLegacyPassPass(*PassRegistry::getPassRegistry()); 1183 } 1184 1185 void getAnalysisUsage(AnalysisUsage &AU) const override { 1186 AU.addRequired<AssumptionCacheTracker>(); 1187 AU.addRequired<DominatorTreeWrapperPass>(); 1188 AU.addRequired<TargetTransformInfoWrapperPass>(); 1189 AU.addRequired<AAResultsWrapperPass>(); 1190 AU.setPreservesCFG(); 1191 AU.addPreserved<DominatorTreeWrapperPass>(); 1192 AU.addPreserved<GlobalsAAWrapperPass>(); 1193 AU.addPreserved<AAResultsWrapperPass>(); 1194 AU.addPreserved<BasicAAWrapperPass>(); 1195 FunctionPass::getAnalysisUsage(AU); 1196 } 1197 1198 bool runOnFunction(Function &F) override { 1199 if (skipFunction(F)) 1200 return false; 1201 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1202 auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1203 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1204 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults(); 1205 VectorCombine Combiner(F, TTI, DT, AA, AC, false); 1206 return Combiner.run(); 1207 } 1208 }; 1209 } // namespace 1210 1211 char VectorCombineLegacyPass::ID = 0; 1212 INITIALIZE_PASS_BEGIN(VectorCombineLegacyPass, "vector-combine", 1213 "Optimize scalar/vector ops", false, 1214 false) 1215 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 1216 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 1217 INITIALIZE_PASS_END(VectorCombineLegacyPass, "vector-combine", 1218 "Optimize scalar/vector ops", false, false) 1219 Pass *llvm::createVectorCombinePass() { 1220 return new VectorCombineLegacyPass(); 1221 } 1222 1223 PreservedAnalyses VectorCombinePass::run(Function &F, 1224 FunctionAnalysisManager &FAM) { 1225 auto &AC = FAM.getResult<AssumptionAnalysis>(F); 1226 TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F); 1227 DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F); 1228 AAResults &AA = FAM.getResult<AAManager>(F); 1229 VectorCombine Combiner(F, TTI, DT, AA, AC, ScalarizationOnly); 1230 if (!Combiner.run()) 1231 return PreservedAnalyses::all(); 1232 
PreservedAnalyses PA; 1233 PA.preserveSet<CFGAnalyses>(); 1234 return PA; 1235 } 1236
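// Usage note (added, not part of the original source): with the new pass
// manager these transforms can be exercised directly via
// "opt -passes=vector-combine"; the legacy wrapper above exposes the same
// transforms under the legacy pass manager's "vector-combine" pass name.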