1 //===- InstCombineAddSub.cpp ------------------------------------*- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the visit functions for add, fadd, sub, and fsub. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "InstCombineInternal.h" 14 #include "llvm/ADT/APFloat.h" 15 #include "llvm/ADT/APInt.h" 16 #include "llvm/ADT/STLExtras.h" 17 #include "llvm/ADT/SmallVector.h" 18 #include "llvm/Analysis/InstructionSimplify.h" 19 #include "llvm/Analysis/ValueTracking.h" 20 #include "llvm/IR/Constant.h" 21 #include "llvm/IR/Constants.h" 22 #include "llvm/IR/InstrTypes.h" 23 #include "llvm/IR/Instruction.h" 24 #include "llvm/IR/Instructions.h" 25 #include "llvm/IR/Operator.h" 26 #include "llvm/IR/PatternMatch.h" 27 #include "llvm/IR/Type.h" 28 #include "llvm/IR/Value.h" 29 #include "llvm/Support/AlignOf.h" 30 #include "llvm/Support/Casting.h" 31 #include "llvm/Support/KnownBits.h" 32 #include "llvm/Transforms/InstCombine/InstCombiner.h" 33 #include <cassert> 34 #include <utility> 35 36 using namespace llvm; 37 using namespace PatternMatch; 38 39 #define DEBUG_TYPE "instcombine" 40 41 namespace { 42 43 /// Class representing coefficient of floating-point addend. 44 /// This class needs to be highly efficient, which is especially true for 45 /// the constructor. As of I write this comment, the cost of the default 46 /// constructor is merely 4-byte-store-zero (Assuming compiler is able to 47 /// perform write-merging). 48 /// 49 class FAddendCoef { 50 public: 51 // The constructor has to initialize a APFloat, which is unnecessary for 52 // most addends which have coefficient either 1 or -1. So, the constructor 53 // is expensive. In order to avoid the cost of the constructor, we should 54 // reuse some instances whenever possible. The pre-created instances 55 // FAddCombine::Add[0-5] embodies this idea. 56 FAddendCoef() = default; 57 ~FAddendCoef(); 58 59 // If possible, don't define operator+/operator- etc because these 60 // operators inevitably call FAddendCoef's constructor which is not cheap. 61 void operator=(const FAddendCoef &A); 62 void operator+=(const FAddendCoef &A); 63 void operator*=(const FAddendCoef &S); 64 65 void set(short C) { 66 assert(!insaneIntVal(C) && "Insane coefficient"); 67 IsFp = false; IntVal = C; 68 } 69 70 void set(const APFloat& C); 71 72 void negate(); 73 74 bool isZero() const { return isInt() ? !IntVal : getFpVal().isZero(); } 75 Value *getValue(Type *) const; 76 77 bool isOne() const { return isInt() && IntVal == 1; } 78 bool isTwo() const { return isInt() && IntVal == 2; } 79 bool isMinusOne() const { return isInt() && IntVal == -1; } 80 bool isMinusTwo() const { return isInt() && IntVal == -2; } 81 82 private: 83 bool insaneIntVal(int V) { return V > 4 || V < -4; } 84 85 APFloat *getFpValPtr() { return reinterpret_cast<APFloat *>(&FpValBuf); } 86 87 const APFloat *getFpValPtr() const { 88 return reinterpret_cast<const APFloat *>(&FpValBuf); 89 } 90 91 const APFloat &getFpVal() const { 92 assert(IsFp && BufHasFpVal && "Incorret state"); 93 return *getFpValPtr(); 94 } 95 96 APFloat &getFpVal() { 97 assert(IsFp && BufHasFpVal && "Incorret state"); 98 return *getFpValPtr(); 99 } 100 101 bool isInt() const { return !IsFp; } 102 103 // If the coefficient is represented by an integer, promote it to a 104 // floating point. 105 void convertToFpType(const fltSemantics &Sem); 106 107 // Construct an APFloat from a signed integer. 108 // TODO: We should get rid of this function when APFloat can be constructed 109 // from an *SIGNED* integer. 110 APFloat createAPFloatFromInt(const fltSemantics &Sem, int Val); 111 112 bool IsFp = false; 113 114 // True iff FpValBuf contains an instance of APFloat. 115 bool BufHasFpVal = false; 116 117 // The integer coefficient of an individual addend is either 1 or -1, 118 // and we try to simplify at most 4 addends from neighboring at most 119 // two instructions. So the range of <IntVal> falls in [-4, 4]. APInt 120 // is overkill of this end. 121 short IntVal = 0; 122 123 AlignedCharArrayUnion<APFloat> FpValBuf; 124 }; 125 126 /// FAddend is used to represent floating-point addend. An addend is 127 /// represented as <C, V>, where the V is a symbolic value, and C is a 128 /// constant coefficient. A constant addend is represented as <C, 0>. 129 class FAddend { 130 public: 131 FAddend() = default; 132 133 void operator+=(const FAddend &T) { 134 assert((Val == T.Val) && "Symbolic-values disagree"); 135 Coeff += T.Coeff; 136 } 137 138 Value *getSymVal() const { return Val; } 139 const FAddendCoef &getCoef() const { return Coeff; } 140 141 bool isConstant() const { return Val == nullptr; } 142 bool isZero() const { return Coeff.isZero(); } 143 144 void set(short Coefficient, Value *V) { 145 Coeff.set(Coefficient); 146 Val = V; 147 } 148 void set(const APFloat &Coefficient, Value *V) { 149 Coeff.set(Coefficient); 150 Val = V; 151 } 152 void set(const ConstantFP *Coefficient, Value *V) { 153 Coeff.set(Coefficient->getValueAPF()); 154 Val = V; 155 } 156 157 void negate() { Coeff.negate(); } 158 159 /// Drill down the U-D chain one step to find the definition of V, and 160 /// try to break the definition into one or two addends. 161 static unsigned drillValueDownOneStep(Value* V, FAddend &A0, FAddend &A1); 162 163 /// Similar to FAddend::drillDownOneStep() except that the value being 164 /// splitted is the addend itself. 165 unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1) const; 166 167 private: 168 void Scale(const FAddendCoef& ScaleAmt) { Coeff *= ScaleAmt; } 169 170 // This addend has the value of "Coeff * Val". 171 Value *Val = nullptr; 172 FAddendCoef Coeff; 173 }; 174 175 /// FAddCombine is the class for optimizing an unsafe fadd/fsub along 176 /// with its neighboring at most two instructions. 177 /// 178 class FAddCombine { 179 public: 180 FAddCombine(InstCombiner::BuilderTy &B) : Builder(B) {} 181 182 Value *simplify(Instruction *FAdd); 183 184 private: 185 using AddendVect = SmallVector<const FAddend *, 4>; 186 187 Value *simplifyFAdd(AddendVect& V, unsigned InstrQuota); 188 189 /// Convert given addend to a Value 190 Value *createAddendVal(const FAddend &A, bool& NeedNeg); 191 192 /// Return the number of instructions needed to emit the N-ary addition. 193 unsigned calcInstrNumber(const AddendVect& Vect); 194 195 Value *createFSub(Value *Opnd0, Value *Opnd1); 196 Value *createFAdd(Value *Opnd0, Value *Opnd1); 197 Value *createFMul(Value *Opnd0, Value *Opnd1); 198 Value *createFNeg(Value *V); 199 Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota); 200 void createInstPostProc(Instruction *NewInst, bool NoNumber = false); 201 202 // Debugging stuff are clustered here. 203 #ifndef NDEBUG 204 unsigned CreateInstrNum; 205 void initCreateInstNum() { CreateInstrNum = 0; } 206 void incCreateInstNum() { CreateInstrNum++; } 207 #else 208 void initCreateInstNum() {} 209 void incCreateInstNum() {} 210 #endif 211 212 InstCombiner::BuilderTy &Builder; 213 Instruction *Instr = nullptr; 214 }; 215 216 } // end anonymous namespace 217 218 //===----------------------------------------------------------------------===// 219 // 220 // Implementation of 221 // {FAddendCoef, FAddend, FAddition, FAddCombine}. 222 // 223 //===----------------------------------------------------------------------===// 224 FAddendCoef::~FAddendCoef() { 225 if (BufHasFpVal) 226 getFpValPtr()->~APFloat(); 227 } 228 229 void FAddendCoef::set(const APFloat& C) { 230 APFloat *P = getFpValPtr(); 231 232 if (isInt()) { 233 // As the buffer is meanless byte stream, we cannot call 234 // APFloat::operator=(). 235 new(P) APFloat(C); 236 } else 237 *P = C; 238 239 IsFp = BufHasFpVal = true; 240 } 241 242 void FAddendCoef::convertToFpType(const fltSemantics &Sem) { 243 if (!isInt()) 244 return; 245 246 APFloat *P = getFpValPtr(); 247 if (IntVal > 0) 248 new(P) APFloat(Sem, IntVal); 249 else { 250 new(P) APFloat(Sem, 0 - IntVal); 251 P->changeSign(); 252 } 253 IsFp = BufHasFpVal = true; 254 } 255 256 APFloat FAddendCoef::createAPFloatFromInt(const fltSemantics &Sem, int Val) { 257 if (Val >= 0) 258 return APFloat(Sem, Val); 259 260 APFloat T(Sem, 0 - Val); 261 T.changeSign(); 262 263 return T; 264 } 265 266 void FAddendCoef::operator=(const FAddendCoef &That) { 267 if (That.isInt()) 268 set(That.IntVal); 269 else 270 set(That.getFpVal()); 271 } 272 273 void FAddendCoef::operator+=(const FAddendCoef &That) { 274 RoundingMode RndMode = RoundingMode::NearestTiesToEven; 275 if (isInt() == That.isInt()) { 276 if (isInt()) 277 IntVal += That.IntVal; 278 else 279 getFpVal().add(That.getFpVal(), RndMode); 280 return; 281 } 282 283 if (isInt()) { 284 const APFloat &T = That.getFpVal(); 285 convertToFpType(T.getSemantics()); 286 getFpVal().add(T, RndMode); 287 return; 288 } 289 290 APFloat &T = getFpVal(); 291 T.add(createAPFloatFromInt(T.getSemantics(), That.IntVal), RndMode); 292 } 293 294 void FAddendCoef::operator*=(const FAddendCoef &That) { 295 if (That.isOne()) 296 return; 297 298 if (That.isMinusOne()) { 299 negate(); 300 return; 301 } 302 303 if (isInt() && That.isInt()) { 304 int Res = IntVal * (int)That.IntVal; 305 assert(!insaneIntVal(Res) && "Insane int value"); 306 IntVal = Res; 307 return; 308 } 309 310 const fltSemantics &Semantic = 311 isInt() ? That.getFpVal().getSemantics() : getFpVal().getSemantics(); 312 313 if (isInt()) 314 convertToFpType(Semantic); 315 APFloat &F0 = getFpVal(); 316 317 if (That.isInt()) 318 F0.multiply(createAPFloatFromInt(Semantic, That.IntVal), 319 APFloat::rmNearestTiesToEven); 320 else 321 F0.multiply(That.getFpVal(), APFloat::rmNearestTiesToEven); 322 } 323 324 void FAddendCoef::negate() { 325 if (isInt()) 326 IntVal = 0 - IntVal; 327 else 328 getFpVal().changeSign(); 329 } 330 331 Value *FAddendCoef::getValue(Type *Ty) const { 332 return isInt() ? 333 ConstantFP::get(Ty, float(IntVal)) : 334 ConstantFP::get(Ty->getContext(), getFpVal()); 335 } 336 337 // The definition of <Val> Addends 338 // ========================================= 339 // A + B <1, A>, <1,B> 340 // A - B <1, A>, <1,B> 341 // 0 - B <-1, B> 342 // C * A, <C, A> 343 // A + C <1, A> <C, NULL> 344 // 0 +/- 0 <0, NULL> (corner case) 345 // 346 // Legend: A and B are not constant, C is constant 347 unsigned FAddend::drillValueDownOneStep 348 (Value *Val, FAddend &Addend0, FAddend &Addend1) { 349 Instruction *I = nullptr; 350 if (!Val || !(I = dyn_cast<Instruction>(Val))) 351 return 0; 352 353 unsigned Opcode = I->getOpcode(); 354 355 if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub) { 356 ConstantFP *C0, *C1; 357 Value *Opnd0 = I->getOperand(0); 358 Value *Opnd1 = I->getOperand(1); 359 if ((C0 = dyn_cast<ConstantFP>(Opnd0)) && C0->isZero()) 360 Opnd0 = nullptr; 361 362 if ((C1 = dyn_cast<ConstantFP>(Opnd1)) && C1->isZero()) 363 Opnd1 = nullptr; 364 365 if (Opnd0) { 366 if (!C0) 367 Addend0.set(1, Opnd0); 368 else 369 Addend0.set(C0, nullptr); 370 } 371 372 if (Opnd1) { 373 FAddend &Addend = Opnd0 ? Addend1 : Addend0; 374 if (!C1) 375 Addend.set(1, Opnd1); 376 else 377 Addend.set(C1, nullptr); 378 if (Opcode == Instruction::FSub) 379 Addend.negate(); 380 } 381 382 if (Opnd0 || Opnd1) 383 return Opnd0 && Opnd1 ? 2 : 1; 384 385 // Both operands are zero. Weird! 386 Addend0.set(APFloat(C0->getValueAPF().getSemantics()), nullptr); 387 return 1; 388 } 389 390 if (I->getOpcode() == Instruction::FMul) { 391 Value *V0 = I->getOperand(0); 392 Value *V1 = I->getOperand(1); 393 if (ConstantFP *C = dyn_cast<ConstantFP>(V0)) { 394 Addend0.set(C, V1); 395 return 1; 396 } 397 398 if (ConstantFP *C = dyn_cast<ConstantFP>(V1)) { 399 Addend0.set(C, V0); 400 return 1; 401 } 402 } 403 404 return 0; 405 } 406 407 // Try to break *this* addend into two addends. e.g. Suppose this addend is 408 // <2.3, V>, and V = X + Y, by calling this function, we obtain two addends, 409 // i.e. <2.3, X> and <2.3, Y>. 410 unsigned FAddend::drillAddendDownOneStep 411 (FAddend &Addend0, FAddend &Addend1) const { 412 if (isConstant()) 413 return 0; 414 415 unsigned BreakNum = FAddend::drillValueDownOneStep(Val, Addend0, Addend1); 416 if (!BreakNum || Coeff.isOne()) 417 return BreakNum; 418 419 Addend0.Scale(Coeff); 420 421 if (BreakNum == 2) 422 Addend1.Scale(Coeff); 423 424 return BreakNum; 425 } 426 427 Value *FAddCombine::simplify(Instruction *I) { 428 assert(I->hasAllowReassoc() && I->hasNoSignedZeros() && 429 "Expected 'reassoc'+'nsz' instruction"); 430 431 // Currently we are not able to handle vector type. 432 if (I->getType()->isVectorTy()) 433 return nullptr; 434 435 assert((I->getOpcode() == Instruction::FAdd || 436 I->getOpcode() == Instruction::FSub) && "Expect add/sub"); 437 438 // Save the instruction before calling other member-functions. 439 Instr = I; 440 441 FAddend Opnd0, Opnd1, Opnd0_0, Opnd0_1, Opnd1_0, Opnd1_1; 442 443 unsigned OpndNum = FAddend::drillValueDownOneStep(I, Opnd0, Opnd1); 444 445 // Step 1: Expand the 1st addend into Opnd0_0 and Opnd0_1. 446 unsigned Opnd0_ExpNum = 0; 447 unsigned Opnd1_ExpNum = 0; 448 449 if (!Opnd0.isConstant()) 450 Opnd0_ExpNum = Opnd0.drillAddendDownOneStep(Opnd0_0, Opnd0_1); 451 452 // Step 2: Expand the 2nd addend into Opnd1_0 and Opnd1_1. 453 if (OpndNum == 2 && !Opnd1.isConstant()) 454 Opnd1_ExpNum = Opnd1.drillAddendDownOneStep(Opnd1_0, Opnd1_1); 455 456 // Step 3: Try to optimize Opnd0_0 + Opnd0_1 + Opnd1_0 + Opnd1_1 457 if (Opnd0_ExpNum && Opnd1_ExpNum) { 458 AddendVect AllOpnds; 459 AllOpnds.push_back(&Opnd0_0); 460 AllOpnds.push_back(&Opnd1_0); 461 if (Opnd0_ExpNum == 2) 462 AllOpnds.push_back(&Opnd0_1); 463 if (Opnd1_ExpNum == 2) 464 AllOpnds.push_back(&Opnd1_1); 465 466 // Compute instruction quota. We should save at least one instruction. 467 unsigned InstQuota = 0; 468 469 Value *V0 = I->getOperand(0); 470 Value *V1 = I->getOperand(1); 471 InstQuota = ((!isa<Constant>(V0) && V0->hasOneUse()) && 472 (!isa<Constant>(V1) && V1->hasOneUse())) ? 2 : 1; 473 474 if (Value *R = simplifyFAdd(AllOpnds, InstQuota)) 475 return R; 476 } 477 478 if (OpndNum != 2) { 479 // The input instruction is : "I=0.0 +/- V". If the "V" were able to be 480 // splitted into two addends, say "V = X - Y", the instruction would have 481 // been optimized into "I = Y - X" in the previous steps. 482 // 483 const FAddendCoef &CE = Opnd0.getCoef(); 484 return CE.isOne() ? Opnd0.getSymVal() : nullptr; 485 } 486 487 // step 4: Try to optimize Opnd0 + Opnd1_0 [+ Opnd1_1] 488 if (Opnd1_ExpNum) { 489 AddendVect AllOpnds; 490 AllOpnds.push_back(&Opnd0); 491 AllOpnds.push_back(&Opnd1_0); 492 if (Opnd1_ExpNum == 2) 493 AllOpnds.push_back(&Opnd1_1); 494 495 if (Value *R = simplifyFAdd(AllOpnds, 1)) 496 return R; 497 } 498 499 // step 5: Try to optimize Opnd1 + Opnd0_0 [+ Opnd0_1] 500 if (Opnd0_ExpNum) { 501 AddendVect AllOpnds; 502 AllOpnds.push_back(&Opnd1); 503 AllOpnds.push_back(&Opnd0_0); 504 if (Opnd0_ExpNum == 2) 505 AllOpnds.push_back(&Opnd0_1); 506 507 if (Value *R = simplifyFAdd(AllOpnds, 1)) 508 return R; 509 } 510 511 return nullptr; 512 } 513 514 Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) { 515 unsigned AddendNum = Addends.size(); 516 assert(AddendNum <= 4 && "Too many addends"); 517 518 // For saving intermediate results; 519 unsigned NextTmpIdx = 0; 520 FAddend TmpResult[3]; 521 522 // Simplified addends are placed <SimpVect>. 523 AddendVect SimpVect; 524 525 // The outer loop works on one symbolic-value at a time. Suppose the input 526 // addends are : <a1, x>, <b1, y>, <a2, x>, <c1, z>, <b2, y>, ... 527 // The symbolic-values will be processed in this order: x, y, z. 528 for (unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) { 529 530 const FAddend *ThisAddend = Addends[SymIdx]; 531 if (!ThisAddend) { 532 // This addend was processed before. 533 continue; 534 } 535 536 Value *Val = ThisAddend->getSymVal(); 537 538 // If the resulting expr has constant-addend, this constant-addend is 539 // desirable to reside at the top of the resulting expression tree. Placing 540 // constant close to super-expr(s) will potentially reveal some 541 // optimization opportunities in super-expr(s). Here we do not implement 542 // this logic intentionally and rely on SimplifyAssociativeOrCommutative 543 // call later. 544 545 unsigned StartIdx = SimpVect.size(); 546 SimpVect.push_back(ThisAddend); 547 548 // The inner loop collects addends sharing same symbolic-value, and these 549 // addends will be later on folded into a single addend. Following above 550 // example, if the symbolic value "y" is being processed, the inner loop 551 // will collect two addends "<b1,y>" and "<b2,Y>". These two addends will 552 // be later on folded into "<b1+b2, y>". 553 for (unsigned SameSymIdx = SymIdx + 1; 554 SameSymIdx < AddendNum; SameSymIdx++) { 555 const FAddend *T = Addends[SameSymIdx]; 556 if (T && T->getSymVal() == Val) { 557 // Set null such that next iteration of the outer loop will not process 558 // this addend again. 559 Addends[SameSymIdx] = nullptr; 560 SimpVect.push_back(T); 561 } 562 } 563 564 // If multiple addends share same symbolic value, fold them together. 565 if (StartIdx + 1 != SimpVect.size()) { 566 FAddend &R = TmpResult[NextTmpIdx ++]; 567 R = *SimpVect[StartIdx]; 568 for (unsigned Idx = StartIdx + 1; Idx < SimpVect.size(); Idx++) 569 R += *SimpVect[Idx]; 570 571 // Pop all addends being folded and push the resulting folded addend. 572 SimpVect.resize(StartIdx); 573 if (!R.isZero()) { 574 SimpVect.push_back(&R); 575 } 576 } 577 } 578 579 assert((NextTmpIdx <= std::size(TmpResult) + 1) && "out-of-bound access"); 580 581 Value *Result; 582 if (!SimpVect.empty()) 583 Result = createNaryFAdd(SimpVect, InstrQuota); 584 else { 585 // The addition is folded to 0.0. 586 Result = ConstantFP::get(Instr->getType(), 0.0); 587 } 588 589 return Result; 590 } 591 592 Value *FAddCombine::createNaryFAdd 593 (const AddendVect &Opnds, unsigned InstrQuota) { 594 assert(!Opnds.empty() && "Expect at least one addend"); 595 596 // Step 1: Check if the # of instructions needed exceeds the quota. 597 598 unsigned InstrNeeded = calcInstrNumber(Opnds); 599 if (InstrNeeded > InstrQuota) 600 return nullptr; 601 602 initCreateInstNum(); 603 604 // step 2: Emit the N-ary addition. 605 // Note that at most three instructions are involved in Fadd-InstCombine: the 606 // addition in question, and at most two neighboring instructions. 607 // The resulting optimized addition should have at least one less instruction 608 // than the original addition expression tree. This implies that the resulting 609 // N-ary addition has at most two instructions, and we don't need to worry 610 // about tree-height when constructing the N-ary addition. 611 612 Value *LastVal = nullptr; 613 bool LastValNeedNeg = false; 614 615 // Iterate the addends, creating fadd/fsub using adjacent two addends. 616 for (const FAddend *Opnd : Opnds) { 617 bool NeedNeg; 618 Value *V = createAddendVal(*Opnd, NeedNeg); 619 if (!LastVal) { 620 LastVal = V; 621 LastValNeedNeg = NeedNeg; 622 continue; 623 } 624 625 if (LastValNeedNeg == NeedNeg) { 626 LastVal = createFAdd(LastVal, V); 627 continue; 628 } 629 630 if (LastValNeedNeg) 631 LastVal = createFSub(V, LastVal); 632 else 633 LastVal = createFSub(LastVal, V); 634 635 LastValNeedNeg = false; 636 } 637 638 if (LastValNeedNeg) { 639 LastVal = createFNeg(LastVal); 640 } 641 642 #ifndef NDEBUG 643 assert(CreateInstrNum == InstrNeeded && 644 "Inconsistent in instruction numbers"); 645 #endif 646 647 return LastVal; 648 } 649 650 Value *FAddCombine::createFSub(Value *Opnd0, Value *Opnd1) { 651 Value *V = Builder.CreateFSub(Opnd0, Opnd1); 652 if (Instruction *I = dyn_cast<Instruction>(V)) 653 createInstPostProc(I); 654 return V; 655 } 656 657 Value *FAddCombine::createFNeg(Value *V) { 658 Value *NewV = Builder.CreateFNeg(V); 659 if (Instruction *I = dyn_cast<Instruction>(NewV)) 660 createInstPostProc(I, true); // fneg's don't receive instruction numbers. 661 return NewV; 662 } 663 664 Value *FAddCombine::createFAdd(Value *Opnd0, Value *Opnd1) { 665 Value *V = Builder.CreateFAdd(Opnd0, Opnd1); 666 if (Instruction *I = dyn_cast<Instruction>(V)) 667 createInstPostProc(I); 668 return V; 669 } 670 671 Value *FAddCombine::createFMul(Value *Opnd0, Value *Opnd1) { 672 Value *V = Builder.CreateFMul(Opnd0, Opnd1); 673 if (Instruction *I = dyn_cast<Instruction>(V)) 674 createInstPostProc(I); 675 return V; 676 } 677 678 void FAddCombine::createInstPostProc(Instruction *NewInstr, bool NoNumber) { 679 NewInstr->setDebugLoc(Instr->getDebugLoc()); 680 681 // Keep track of the number of instruction created. 682 if (!NoNumber) 683 incCreateInstNum(); 684 685 // Propagate fast-math flags 686 NewInstr->setFastMathFlags(Instr->getFastMathFlags()); 687 } 688 689 // Return the number of instruction needed to emit the N-ary addition. 690 // NOTE: Keep this function in sync with createAddendVal(). 691 unsigned FAddCombine::calcInstrNumber(const AddendVect &Opnds) { 692 unsigned OpndNum = Opnds.size(); 693 unsigned InstrNeeded = OpndNum - 1; 694 695 // Adjust the number of instructions needed to emit the N-ary add. 696 for (const FAddend *Opnd : Opnds) { 697 if (Opnd->isConstant()) 698 continue; 699 700 // The constant check above is really for a few special constant 701 // coefficients. 702 if (isa<UndefValue>(Opnd->getSymVal())) 703 continue; 704 705 const FAddendCoef &CE = Opnd->getCoef(); 706 // Let the addend be "c * x". If "c == +/-1", the value of the addend 707 // is immediately available; otherwise, it needs exactly one instruction 708 // to evaluate the value. 709 if (!CE.isMinusOne() && !CE.isOne()) 710 InstrNeeded++; 711 } 712 return InstrNeeded; 713 } 714 715 // Input Addend Value NeedNeg(output) 716 // ================================================================ 717 // Constant C C false 718 // <+/-1, V> V coefficient is -1 719 // <2/-2, V> "fadd V, V" coefficient is -2 720 // <C, V> "fmul V, C" false 721 // 722 // NOTE: Keep this function in sync with FAddCombine::calcInstrNumber. 723 Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) { 724 const FAddendCoef &Coeff = Opnd.getCoef(); 725 726 if (Opnd.isConstant()) { 727 NeedNeg = false; 728 return Coeff.getValue(Instr->getType()); 729 } 730 731 Value *OpndVal = Opnd.getSymVal(); 732 733 if (Coeff.isMinusOne() || Coeff.isOne()) { 734 NeedNeg = Coeff.isMinusOne(); 735 return OpndVal; 736 } 737 738 if (Coeff.isTwo() || Coeff.isMinusTwo()) { 739 NeedNeg = Coeff.isMinusTwo(); 740 return createFAdd(OpndVal, OpndVal); 741 } 742 743 NeedNeg = false; 744 return createFMul(OpndVal, Coeff.getValue(Instr->getType())); 745 } 746 747 // Checks if any operand is negative and we can convert add to sub. 748 // This function checks for following negative patterns 749 // ADD(XOR(OR(Z, NOT(C)), C)), 1) == NEG(AND(Z, C)) 750 // ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C)) 751 // XOR(AND(Z, C), (C + 1)) == NEG(OR(Z, ~C)) if C is even 752 static Value *checkForNegativeOperand(BinaryOperator &I, 753 InstCombiner::BuilderTy &Builder) { 754 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); 755 756 // This function creates 2 instructions to replace ADD, we need at least one 757 // of LHS or RHS to have one use to ensure benefit in transform. 758 if (!LHS->hasOneUse() && !RHS->hasOneUse()) 759 return nullptr; 760 761 Value *X = nullptr, *Y = nullptr, *Z = nullptr; 762 const APInt *C1 = nullptr, *C2 = nullptr; 763 764 // if ONE is on other side, swap 765 if (match(RHS, m_Add(m_Value(X), m_One()))) 766 std::swap(LHS, RHS); 767 768 if (match(LHS, m_Add(m_Value(X), m_One()))) { 769 // if XOR on other side, swap 770 if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1)))) 771 std::swap(X, RHS); 772 773 if (match(X, m_Xor(m_Value(Y), m_APInt(C1)))) { 774 // X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1)) 775 // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1)) 776 if (match(Y, m_Or(m_Value(Z), m_APInt(C2))) && (*C2 == ~(*C1))) { 777 Value *NewAnd = Builder.CreateAnd(Z, *C1); 778 return Builder.CreateSub(RHS, NewAnd, "sub"); 779 } else if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && (*C1 == *C2)) { 780 // X = XOR(Y, C1), Y = AND(Z, C2), C2 == C1 ==> X == NOT(OR(Z, ~C1)) 781 // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, OR(Z, ~C1)) 782 Value *NewOr = Builder.CreateOr(Z, ~(*C1)); 783 return Builder.CreateSub(RHS, NewOr, "sub"); 784 } 785 } 786 } 787 788 // Restore LHS and RHS 789 LHS = I.getOperand(0); 790 RHS = I.getOperand(1); 791 792 // if XOR is on other side, swap 793 if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1)))) 794 std::swap(LHS, RHS); 795 796 // C2 is ODD 797 // LHS = XOR(Y, C1), Y = AND(Z, C2), C1 == (C2 + 1) => LHS == NEG(OR(Z, ~C2)) 798 // ADD(LHS, RHS) == SUB(RHS, OR(Z, ~C2)) 799 if (match(LHS, m_Xor(m_Value(Y), m_APInt(C1)))) 800 if (C1->countr_zero() == 0) 801 if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && *C1 == (*C2 + 1)) { 802 Value *NewOr = Builder.CreateOr(Z, ~(*C2)); 803 return Builder.CreateSub(RHS, NewOr, "sub"); 804 } 805 return nullptr; 806 } 807 808 /// Wrapping flags may allow combining constants separated by an extend. 809 static Instruction *foldNoWrapAdd(BinaryOperator &Add, 810 InstCombiner::BuilderTy &Builder) { 811 Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1); 812 Type *Ty = Add.getType(); 813 Constant *Op1C; 814 if (!match(Op1, m_Constant(Op1C))) 815 return nullptr; 816 817 // Try this match first because it results in an add in the narrow type. 818 // (zext (X +nuw C2)) + C1 --> zext (X + (C2 + trunc(C1))) 819 Value *X; 820 const APInt *C1, *C2; 821 if (match(Op1, m_APInt(C1)) && 822 match(Op0, m_ZExt(m_NUWAddLike(m_Value(X), m_APInt(C2)))) && 823 C1->isNegative() && C1->sge(-C2->sext(C1->getBitWidth()))) { 824 APInt NewC = *C2 + C1->trunc(C2->getBitWidth()); 825 // If the smaller add will fold to zero, we don't need to check one use. 826 if (NewC.isZero()) 827 return new ZExtInst(X, Ty); 828 // Otherwise only do this if the existing zero extend will be removed. 829 if (Op0->hasOneUse()) 830 return new ZExtInst( 831 Builder.CreateNUWAdd(X, ConstantInt::get(X->getType(), NewC)), Ty); 832 } 833 834 // More general combining of constants in the wide type. 835 // (sext (X +nsw NarrowC)) + C --> (sext X) + (sext(NarrowC) + C) 836 // or (zext nneg (X +nsw NarrowC)) + C --> (sext X) + (sext(NarrowC) + C) 837 Constant *NarrowC; 838 if (match(Op0, m_OneUse(m_SExtLike( 839 m_NSWAddLike(m_Value(X), m_Constant(NarrowC)))))) { 840 Value *WideC = Builder.CreateSExt(NarrowC, Ty); 841 Value *NewC = Builder.CreateAdd(WideC, Op1C); 842 Value *WideX = Builder.CreateSExt(X, Ty); 843 return BinaryOperator::CreateAdd(WideX, NewC); 844 } 845 // (zext (X +nuw NarrowC)) + C --> (zext X) + (zext(NarrowC) + C) 846 if (match(Op0, 847 m_OneUse(m_ZExt(m_NUWAddLike(m_Value(X), m_Constant(NarrowC)))))) { 848 Value *WideC = Builder.CreateZExt(NarrowC, Ty); 849 Value *NewC = Builder.CreateAdd(WideC, Op1C); 850 Value *WideX = Builder.CreateZExt(X, Ty); 851 return BinaryOperator::CreateAdd(WideX, NewC); 852 } 853 return nullptr; 854 } 855 856 Instruction *InstCombinerImpl::foldAddWithConstant(BinaryOperator &Add) { 857 Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1); 858 Type *Ty = Add.getType(); 859 Constant *Op1C; 860 if (!match(Op1, m_ImmConstant(Op1C))) 861 return nullptr; 862 863 if (Instruction *NV = foldBinOpIntoSelectOrPhi(Add)) 864 return NV; 865 866 Value *X; 867 Constant *Op00C; 868 869 // add (sub C1, X), C2 --> sub (add C1, C2), X 870 if (match(Op0, m_Sub(m_Constant(Op00C), m_Value(X)))) 871 return BinaryOperator::CreateSub(ConstantExpr::getAdd(Op00C, Op1C), X); 872 873 Value *Y; 874 875 // add (sub X, Y), -1 --> add (not Y), X 876 if (match(Op0, m_OneUse(m_Sub(m_Value(X), m_Value(Y)))) && 877 match(Op1, m_AllOnes())) 878 return BinaryOperator::CreateAdd(Builder.CreateNot(Y), X); 879 880 // zext(bool) + C -> bool ? C + 1 : C 881 if (match(Op0, m_ZExt(m_Value(X))) && 882 X->getType()->getScalarSizeInBits() == 1) 883 return SelectInst::Create(X, InstCombiner::AddOne(Op1C), Op1); 884 // sext(bool) + C -> bool ? C - 1 : C 885 if (match(Op0, m_SExt(m_Value(X))) && 886 X->getType()->getScalarSizeInBits() == 1) 887 return SelectInst::Create(X, InstCombiner::SubOne(Op1C), Op1); 888 889 // ~X + C --> (C-1) - X 890 if (match(Op0, m_Not(m_Value(X)))) { 891 // ~X + C has NSW and (C-1) won't oveflow => (C-1)-X can have NSW 892 auto *COne = ConstantInt::get(Op1C->getType(), 1); 893 bool WillNotSOV = willNotOverflowSignedSub(Op1C, COne, Add); 894 BinaryOperator *Res = 895 BinaryOperator::CreateSub(ConstantExpr::getSub(Op1C, COne), X); 896 Res->setHasNoSignedWrap(Add.hasNoSignedWrap() && WillNotSOV); 897 return Res; 898 } 899 900 // (iN X s>> (N - 1)) + 1 --> zext (X > -1) 901 const APInt *C; 902 unsigned BitWidth = Ty->getScalarSizeInBits(); 903 if (match(Op0, m_OneUse(m_AShr(m_Value(X), 904 m_SpecificIntAllowPoison(BitWidth - 1)))) && 905 match(Op1, m_One())) 906 return new ZExtInst(Builder.CreateIsNotNeg(X, "isnotneg"), Ty); 907 908 if (!match(Op1, m_APInt(C))) 909 return nullptr; 910 911 // (X | Op01C) + Op1C --> X + (Op01C + Op1C) iff the `or` is actually an `add` 912 Constant *Op01C; 913 if (match(Op0, m_DisjointOr(m_Value(X), m_ImmConstant(Op01C)))) { 914 BinaryOperator *NewAdd = 915 BinaryOperator::CreateAdd(X, ConstantExpr::getAdd(Op01C, Op1C)); 916 NewAdd->setHasNoSignedWrap(Add.hasNoSignedWrap() && 917 willNotOverflowSignedAdd(Op01C, Op1C, Add)); 918 NewAdd->setHasNoUnsignedWrap(Add.hasNoUnsignedWrap()); 919 return NewAdd; 920 } 921 922 // (X | C2) + C --> (X | C2) ^ C2 iff (C2 == -C) 923 const APInt *C2; 924 if (match(Op0, m_Or(m_Value(), m_APInt(C2))) && *C2 == -*C) 925 return BinaryOperator::CreateXor(Op0, ConstantInt::get(Add.getType(), *C2)); 926 927 if (C->isSignMask()) { 928 // If wrapping is not allowed, then the addition must set the sign bit: 929 // X + (signmask) --> X | signmask 930 if (Add.hasNoSignedWrap() || Add.hasNoUnsignedWrap()) 931 return BinaryOperator::CreateOr(Op0, Op1); 932 933 // If wrapping is allowed, then the addition flips the sign bit of LHS: 934 // X + (signmask) --> X ^ signmask 935 return BinaryOperator::CreateXor(Op0, Op1); 936 } 937 938 // Is this add the last step in a convoluted sext? 939 // add(zext(xor i16 X, -32768), -32768) --> sext X 940 if (match(Op0, m_ZExt(m_Xor(m_Value(X), m_APInt(C2)))) && 941 C2->isMinSignedValue() && C2->sext(Ty->getScalarSizeInBits()) == *C) 942 return CastInst::Create(Instruction::SExt, X, Ty); 943 944 if (match(Op0, m_Xor(m_Value(X), m_APInt(C2)))) { 945 // (X ^ signmask) + C --> (X + (signmask ^ C)) 946 if (C2->isSignMask()) 947 return BinaryOperator::CreateAdd(X, ConstantInt::get(Ty, *C2 ^ *C)); 948 949 // If X has no high-bits set above an xor mask: 950 // add (xor X, LowMaskC), C --> sub (LowMaskC + C), X 951 if (C2->isMask()) { 952 KnownBits LHSKnown = computeKnownBits(X, 0, &Add); 953 if ((*C2 | LHSKnown.Zero).isAllOnes()) 954 return BinaryOperator::CreateSub(ConstantInt::get(Ty, *C2 + *C), X); 955 } 956 957 // Look for a math+logic pattern that corresponds to sext-in-register of a 958 // value with cleared high bits. Convert that into a pair of shifts: 959 // add (xor X, 0x80), 0xF..F80 --> (X << ShAmtC) >>s ShAmtC 960 // add (xor X, 0xF..F80), 0x80 --> (X << ShAmtC) >>s ShAmtC 961 if (Op0->hasOneUse() && *C2 == -(*C)) { 962 unsigned BitWidth = Ty->getScalarSizeInBits(); 963 unsigned ShAmt = 0; 964 if (C->isPowerOf2()) 965 ShAmt = BitWidth - C->logBase2() - 1; 966 else if (C2->isPowerOf2()) 967 ShAmt = BitWidth - C2->logBase2() - 1; 968 if (ShAmt && MaskedValueIsZero(X, APInt::getHighBitsSet(BitWidth, ShAmt), 969 0, &Add)) { 970 Constant *ShAmtC = ConstantInt::get(Ty, ShAmt); 971 Value *NewShl = Builder.CreateShl(X, ShAmtC, "sext"); 972 return BinaryOperator::CreateAShr(NewShl, ShAmtC); 973 } 974 } 975 } 976 977 if (C->isOne() && Op0->hasOneUse()) { 978 // add (sext i1 X), 1 --> zext (not X) 979 // TODO: The smallest IR representation is (select X, 0, 1), and that would 980 // not require the one-use check. But we need to remove a transform in 981 // visitSelect and make sure that IR value tracking for select is equal or 982 // better than for these ops. 983 if (match(Op0, m_SExt(m_Value(X))) && 984 X->getType()->getScalarSizeInBits() == 1) 985 return new ZExtInst(Builder.CreateNot(X), Ty); 986 987 // Shifts and add used to flip and mask off the low bit: 988 // add (ashr (shl i32 X, 31), 31), 1 --> and (not X), 1 989 const APInt *C3; 990 if (match(Op0, m_AShr(m_Shl(m_Value(X), m_APInt(C2)), m_APInt(C3))) && 991 C2 == C3 && *C2 == Ty->getScalarSizeInBits() - 1) { 992 Value *NotX = Builder.CreateNot(X); 993 return BinaryOperator::CreateAnd(NotX, ConstantInt::get(Ty, 1)); 994 } 995 } 996 997 // Fold (add (zext (add X, -1)), 1) -> (zext X) if X is non-zero. 998 // TODO: There's a general form for any constant on the outer add. 999 if (C->isOne()) { 1000 if (match(Op0, m_ZExt(m_Add(m_Value(X), m_AllOnes())))) { 1001 const SimplifyQuery Q = SQ.getWithInstruction(&Add); 1002 if (llvm::isKnownNonZero(X, Q)) 1003 return new ZExtInst(X, Ty); 1004 } 1005 } 1006 1007 return nullptr; 1008 } 1009 1010 // match variations of a^2 + 2*a*b + b^2 1011 // 1012 // to reuse the code between the FP and Int versions, the instruction OpCodes 1013 // and constant types have been turned into template parameters. 1014 // 1015 // Mul2Rhs: The constant to perform the multiplicative equivalent of X*2 with; 1016 // should be `m_SpecificFP(2.0)` for FP and `m_SpecificInt(1)` for Int 1017 // (we're matching `X<<1` instead of `X*2` for Int) 1018 template <bool FP, typename Mul2Rhs> 1019 static bool matchesSquareSum(BinaryOperator &I, Mul2Rhs M2Rhs, Value *&A, 1020 Value *&B) { 1021 constexpr unsigned MulOp = FP ? Instruction::FMul : Instruction::Mul; 1022 constexpr unsigned AddOp = FP ? Instruction::FAdd : Instruction::Add; 1023 constexpr unsigned Mul2Op = FP ? Instruction::FMul : Instruction::Shl; 1024 1025 // (a * a) + (((a * 2) + b) * b) 1026 if (match(&I, m_c_BinOp( 1027 AddOp, m_OneUse(m_BinOp(MulOp, m_Value(A), m_Deferred(A))), 1028 m_OneUse(m_c_BinOp( 1029 MulOp, 1030 m_c_BinOp(AddOp, m_BinOp(Mul2Op, m_Deferred(A), M2Rhs), 1031 m_Value(B)), 1032 m_Deferred(B)))))) 1033 return true; 1034 1035 // ((a * b) * 2) or ((a * 2) * b) 1036 // + 1037 // (a * a + b * b) or (b * b + a * a) 1038 return match( 1039 &I, m_c_BinOp( 1040 AddOp, 1041 m_CombineOr( 1042 m_OneUse(m_BinOp( 1043 Mul2Op, m_BinOp(MulOp, m_Value(A), m_Value(B)), M2Rhs)), 1044 m_OneUse(m_c_BinOp(MulOp, m_BinOp(Mul2Op, m_Value(A), M2Rhs), 1045 m_Value(B)))), 1046 m_OneUse( 1047 m_c_BinOp(AddOp, m_BinOp(MulOp, m_Deferred(A), m_Deferred(A)), 1048 m_BinOp(MulOp, m_Deferred(B), m_Deferred(B)))))); 1049 } 1050 1051 // Fold integer variations of a^2 + 2*a*b + b^2 -> (a + b)^2 1052 Instruction *InstCombinerImpl::foldSquareSumInt(BinaryOperator &I) { 1053 Value *A, *B; 1054 if (matchesSquareSum</*FP*/ false>(I, m_SpecificInt(1), A, B)) { 1055 Value *AB = Builder.CreateAdd(A, B); 1056 return BinaryOperator::CreateMul(AB, AB); 1057 } 1058 return nullptr; 1059 } 1060 1061 // Fold floating point variations of a^2 + 2*a*b + b^2 -> (a + b)^2 1062 // Requires `nsz` and `reassoc`. 1063 Instruction *InstCombinerImpl::foldSquareSumFP(BinaryOperator &I) { 1064 assert(I.hasAllowReassoc() && I.hasNoSignedZeros() && "Assumption mismatch"); 1065 Value *A, *B; 1066 if (matchesSquareSum</*FP*/ true>(I, m_SpecificFP(2.0), A, B)) { 1067 Value *AB = Builder.CreateFAddFMF(A, B, &I); 1068 return BinaryOperator::CreateFMulFMF(AB, AB, &I); 1069 } 1070 return nullptr; 1071 } 1072 1073 // Matches multiplication expression Op * C where C is a constant. Returns the 1074 // constant value in C and the other operand in Op. Returns true if such a 1075 // match is found. 1076 static bool MatchMul(Value *E, Value *&Op, APInt &C) { 1077 const APInt *AI; 1078 if (match(E, m_Mul(m_Value(Op), m_APInt(AI)))) { 1079 C = *AI; 1080 return true; 1081 } 1082 if (match(E, m_Shl(m_Value(Op), m_APInt(AI)))) { 1083 C = APInt(AI->getBitWidth(), 1); 1084 C <<= *AI; 1085 return true; 1086 } 1087 return false; 1088 } 1089 1090 // Matches remainder expression Op % C where C is a constant. Returns the 1091 // constant value in C and the other operand in Op. Returns the signedness of 1092 // the remainder operation in IsSigned. Returns true if such a match is 1093 // found. 1094 static bool MatchRem(Value *E, Value *&Op, APInt &C, bool &IsSigned) { 1095 const APInt *AI; 1096 IsSigned = false; 1097 if (match(E, m_SRem(m_Value(Op), m_APInt(AI)))) { 1098 IsSigned = true; 1099 C = *AI; 1100 return true; 1101 } 1102 if (match(E, m_URem(m_Value(Op), m_APInt(AI)))) { 1103 C = *AI; 1104 return true; 1105 } 1106 if (match(E, m_And(m_Value(Op), m_APInt(AI))) && (*AI + 1).isPowerOf2()) { 1107 C = *AI + 1; 1108 return true; 1109 } 1110 return false; 1111 } 1112 1113 // Matches division expression Op / C with the given signedness as indicated 1114 // by IsSigned, where C is a constant. Returns the constant value in C and the 1115 // other operand in Op. Returns true if such a match is found. 1116 static bool MatchDiv(Value *E, Value *&Op, APInt &C, bool IsSigned) { 1117 const APInt *AI; 1118 if (IsSigned && match(E, m_SDiv(m_Value(Op), m_APInt(AI)))) { 1119 C = *AI; 1120 return true; 1121 } 1122 if (!IsSigned) { 1123 if (match(E, m_UDiv(m_Value(Op), m_APInt(AI)))) { 1124 C = *AI; 1125 return true; 1126 } 1127 if (match(E, m_LShr(m_Value(Op), m_APInt(AI)))) { 1128 C = APInt(AI->getBitWidth(), 1); 1129 C <<= *AI; 1130 return true; 1131 } 1132 } 1133 return false; 1134 } 1135 1136 // Returns whether C0 * C1 with the given signedness overflows. 1137 static bool MulWillOverflow(APInt &C0, APInt &C1, bool IsSigned) { 1138 bool overflow; 1139 if (IsSigned) 1140 (void)C0.smul_ov(C1, overflow); 1141 else 1142 (void)C0.umul_ov(C1, overflow); 1143 return overflow; 1144 } 1145 1146 // Simplifies X % C0 + (( X / C0 ) % C1) * C0 to X % (C0 * C1), where (C0 * C1) 1147 // does not overflow. 1148 // Simplifies (X / C0) * C1 + (X % C0) * C2 to 1149 // (X / C0) * (C1 - C2 * C0) + X * C2 1150 Value *InstCombinerImpl::SimplifyAddWithRemainder(BinaryOperator &I) { 1151 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); 1152 Value *X, *MulOpV; 1153 APInt C0, MulOpC; 1154 bool IsSigned; 1155 // Match I = X % C0 + MulOpV * C0 1156 if (((MatchRem(LHS, X, C0, IsSigned) && MatchMul(RHS, MulOpV, MulOpC)) || 1157 (MatchRem(RHS, X, C0, IsSigned) && MatchMul(LHS, MulOpV, MulOpC))) && 1158 C0 == MulOpC) { 1159 Value *RemOpV; 1160 APInt C1; 1161 bool Rem2IsSigned; 1162 // Match MulOpC = RemOpV % C1 1163 if (MatchRem(MulOpV, RemOpV, C1, Rem2IsSigned) && 1164 IsSigned == Rem2IsSigned) { 1165 Value *DivOpV; 1166 APInt DivOpC; 1167 // Match RemOpV = X / C0 1168 if (MatchDiv(RemOpV, DivOpV, DivOpC, IsSigned) && X == DivOpV && 1169 C0 == DivOpC && !MulWillOverflow(C0, C1, IsSigned)) { 1170 Value *NewDivisor = ConstantInt::get(X->getType(), C0 * C1); 1171 return IsSigned ? Builder.CreateSRem(X, NewDivisor, "srem") 1172 : Builder.CreateURem(X, NewDivisor, "urem"); 1173 } 1174 } 1175 } 1176 1177 // Match I = (X / C0) * C1 + (X % C0) * C2 1178 Value *Div, *Rem; 1179 APInt C1, C2; 1180 if (!LHS->hasOneUse() || !MatchMul(LHS, Div, C1)) 1181 Div = LHS, C1 = APInt(I.getType()->getScalarSizeInBits(), 1); 1182 if (!RHS->hasOneUse() || !MatchMul(RHS, Rem, C2)) 1183 Rem = RHS, C2 = APInt(I.getType()->getScalarSizeInBits(), 1); 1184 if (match(Div, m_IRem(m_Value(), m_Value()))) { 1185 std::swap(Div, Rem); 1186 std::swap(C1, C2); 1187 } 1188 Value *DivOpV; 1189 APInt DivOpC; 1190 if (MatchRem(Rem, X, C0, IsSigned) && 1191 MatchDiv(Div, DivOpV, DivOpC, IsSigned) && X == DivOpV && C0 == DivOpC) { 1192 APInt NewC = C1 - C2 * C0; 1193 if (!NewC.isZero() && !Rem->hasOneUse()) 1194 return nullptr; 1195 if (!isGuaranteedNotToBeUndef(X, &AC, &I, &DT)) 1196 return nullptr; 1197 Value *MulXC2 = Builder.CreateMul(X, ConstantInt::get(X->getType(), C2)); 1198 if (NewC.isZero()) 1199 return MulXC2; 1200 return Builder.CreateAdd( 1201 Builder.CreateMul(Div, ConstantInt::get(X->getType(), NewC)), MulXC2); 1202 } 1203 1204 return nullptr; 1205 } 1206 1207 /// Fold 1208 /// (1 << NBits) - 1 1209 /// Into: 1210 /// ~(-(1 << NBits)) 1211 /// Because a 'not' is better for bit-tracking analysis and other transforms 1212 /// than an 'add'. The new shl is always nsw, and is nuw if old `and` was. 1213 static Instruction *canonicalizeLowbitMask(BinaryOperator &I, 1214 InstCombiner::BuilderTy &Builder) { 1215 Value *NBits; 1216 if (!match(&I, m_Add(m_OneUse(m_Shl(m_One(), m_Value(NBits))), m_AllOnes()))) 1217 return nullptr; 1218 1219 Constant *MinusOne = Constant::getAllOnesValue(NBits->getType()); 1220 Value *NotMask = Builder.CreateShl(MinusOne, NBits, "notmask"); 1221 // Be wary of constant folding. 1222 if (auto *BOp = dyn_cast<BinaryOperator>(NotMask)) { 1223 // Always NSW. But NUW propagates from `add`. 1224 BOp->setHasNoSignedWrap(); 1225 BOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap()); 1226 } 1227 1228 return BinaryOperator::CreateNot(NotMask, I.getName()); 1229 } 1230 1231 static Instruction *foldToUnsignedSaturatedAdd(BinaryOperator &I) { 1232 assert(I.getOpcode() == Instruction::Add && "Expecting add instruction"); 1233 Type *Ty = I.getType(); 1234 auto getUAddSat = [&]() { 1235 return Intrinsic::getOrInsertDeclaration(I.getModule(), Intrinsic::uadd_sat, 1236 Ty); 1237 }; 1238 1239 // add (umin X, ~Y), Y --> uaddsat X, Y 1240 Value *X, *Y; 1241 if (match(&I, m_c_Add(m_c_UMin(m_Value(X), m_Not(m_Value(Y))), 1242 m_Deferred(Y)))) 1243 return CallInst::Create(getUAddSat(), { X, Y }); 1244 1245 // add (umin X, ~C), C --> uaddsat X, C 1246 const APInt *C, *NotC; 1247 if (match(&I, m_Add(m_UMin(m_Value(X), m_APInt(NotC)), m_APInt(C))) && 1248 *C == ~*NotC) 1249 return CallInst::Create(getUAddSat(), { X, ConstantInt::get(Ty, *C) }); 1250 1251 return nullptr; 1252 } 1253 1254 // Transform: 1255 // (add A, (shl (neg B), Y)) 1256 // -> (sub A, (shl B, Y)) 1257 static Instruction *combineAddSubWithShlAddSub(InstCombiner::BuilderTy &Builder, 1258 const BinaryOperator &I) { 1259 Value *A, *B, *Cnt; 1260 if (match(&I, 1261 m_c_Add(m_OneUse(m_Shl(m_OneUse(m_Neg(m_Value(B))), m_Value(Cnt))), 1262 m_Value(A)))) { 1263 Value *NewShl = Builder.CreateShl(B, Cnt); 1264 return BinaryOperator::CreateSub(A, NewShl); 1265 } 1266 return nullptr; 1267 } 1268 1269 /// Try to reduce signed division by power-of-2 to an arithmetic shift right. 1270 static Instruction *foldAddToAshr(BinaryOperator &Add) { 1271 // Division must be by power-of-2, but not the minimum signed value. 1272 Value *X; 1273 const APInt *DivC; 1274 if (!match(Add.getOperand(0), m_SDiv(m_Value(X), m_Power2(DivC))) || 1275 DivC->isNegative()) 1276 return nullptr; 1277 1278 // Rounding is done by adding -1 if the dividend (X) is negative and has any 1279 // low bits set. It recognizes two canonical patterns: 1280 // 1. For an 'ugt' cmp with the signed minimum value (SMIN), the 1281 // pattern is: sext (icmp ugt (X & (DivC - 1)), SMIN). 1282 // 2. For an 'eq' cmp, the pattern's: sext (icmp eq X & (SMIN + 1), SMIN + 1). 1283 // Note that, by the time we end up here, if possible, ugt has been 1284 // canonicalized into eq. 1285 const APInt *MaskC, *MaskCCmp; 1286 ICmpInst::Predicate Pred; 1287 if (!match(Add.getOperand(1), 1288 m_SExt(m_ICmp(Pred, m_And(m_Specific(X), m_APInt(MaskC)), 1289 m_APInt(MaskCCmp))))) 1290 return nullptr; 1291 1292 if ((Pred != ICmpInst::ICMP_UGT || !MaskCCmp->isSignMask()) && 1293 (Pred != ICmpInst::ICMP_EQ || *MaskCCmp != *MaskC)) 1294 return nullptr; 1295 1296 APInt SMin = APInt::getSignedMinValue(Add.getType()->getScalarSizeInBits()); 1297 bool IsMaskValid = Pred == ICmpInst::ICMP_UGT 1298 ? (*MaskC == (SMin | (*DivC - 1))) 1299 : (*DivC == 2 && *MaskC == SMin + 1); 1300 if (!IsMaskValid) 1301 return nullptr; 1302 1303 // (X / DivC) + sext ((X & (SMin | (DivC - 1)) >u SMin) --> X >>s log2(DivC) 1304 return BinaryOperator::CreateAShr( 1305 X, ConstantInt::get(Add.getType(), DivC->exactLogBase2())); 1306 } 1307 1308 Instruction *InstCombinerImpl::foldAddLikeCommutative(Value *LHS, Value *RHS, 1309 bool NSW, bool NUW) { 1310 Value *A, *B, *C; 1311 if (match(LHS, m_Sub(m_Value(A), m_Value(B))) && 1312 match(RHS, m_Sub(m_Value(C), m_Specific(A)))) { 1313 Instruction *R = BinaryOperator::CreateSub(C, B); 1314 bool NSWOut = NSW && match(LHS, m_NSWSub(m_Value(), m_Value())) && 1315 match(RHS, m_NSWSub(m_Value(), m_Value())); 1316 1317 bool NUWOut = match(LHS, m_NUWSub(m_Value(), m_Value())) && 1318 match(RHS, m_NUWSub(m_Value(), m_Value())); 1319 R->setHasNoSignedWrap(NSWOut); 1320 R->setHasNoUnsignedWrap(NUWOut); 1321 return R; 1322 } 1323 return nullptr; 1324 } 1325 1326 Instruction *InstCombinerImpl:: 1327 canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract( 1328 BinaryOperator &I) { 1329 assert((I.getOpcode() == Instruction::Add || 1330 I.getOpcode() == Instruction::Or || 1331 I.getOpcode() == Instruction::Sub) && 1332 "Expecting add/or/sub instruction"); 1333 1334 // We have a subtraction/addition between a (potentially truncated) *logical* 1335 // right-shift of X and a "select". 1336 Value *X, *Select; 1337 Instruction *LowBitsToSkip, *Extract; 1338 if (!match(&I, m_c_BinOp(m_TruncOrSelf(m_CombineAnd( 1339 m_LShr(m_Value(X), m_Instruction(LowBitsToSkip)), 1340 m_Instruction(Extract))), 1341 m_Value(Select)))) 1342 return nullptr; 1343 1344 // `add`/`or` is commutative; but for `sub`, "select" *must* be on RHS. 1345 if (I.getOpcode() == Instruction::Sub && I.getOperand(1) != Select) 1346 return nullptr; 1347 1348 Type *XTy = X->getType(); 1349 bool HadTrunc = I.getType() != XTy; 1350 1351 // If there was a truncation of extracted value, then we'll need to produce 1352 // one extra instruction, so we need to ensure one instruction will go away. 1353 if (HadTrunc && !match(&I, m_c_BinOp(m_OneUse(m_Value()), m_Value()))) 1354 return nullptr; 1355 1356 // Extraction should extract high NBits bits, with shift amount calculated as: 1357 // low bits to skip = shift bitwidth - high bits to extract 1358 // The shift amount itself may be extended, and we need to look past zero-ext 1359 // when matching NBits, that will matter for matching later. 1360 Constant *C; 1361 Value *NBits; 1362 if (!match( 1363 LowBitsToSkip, 1364 m_ZExtOrSelf(m_Sub(m_Constant(C), m_ZExtOrSelf(m_Value(NBits))))) || 1365 !match(C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ, 1366 APInt(C->getType()->getScalarSizeInBits(), 1367 X->getType()->getScalarSizeInBits())))) 1368 return nullptr; 1369 1370 // Sign-extending value can be zero-extended if we `sub`tract it, 1371 // or sign-extended otherwise. 1372 auto SkipExtInMagic = [&I](Value *&V) { 1373 if (I.getOpcode() == Instruction::Sub) 1374 match(V, m_ZExtOrSelf(m_Value(V))); 1375 else 1376 match(V, m_SExtOrSelf(m_Value(V))); 1377 }; 1378 1379 // Now, finally validate the sign-extending magic. 1380 // `select` itself may be appropriately extended, look past that. 1381 SkipExtInMagic(Select); 1382 1383 ICmpInst::Predicate Pred; 1384 const APInt *Thr; 1385 Value *SignExtendingValue, *Zero; 1386 bool ShouldSignext; 1387 // It must be a select between two values we will later establish to be a 1388 // sign-extending value and a zero constant. The condition guarding the 1389 // sign-extension must be based on a sign bit of the same X we had in `lshr`. 1390 if (!match(Select, m_Select(m_ICmp(Pred, m_Specific(X), m_APInt(Thr)), 1391 m_Value(SignExtendingValue), m_Value(Zero))) || 1392 !isSignBitCheck(Pred, *Thr, ShouldSignext)) 1393 return nullptr; 1394 1395 // icmp-select pair is commutative. 1396 if (!ShouldSignext) 1397 std::swap(SignExtendingValue, Zero); 1398 1399 // If we should not perform sign-extension then we must add/or/subtract zero. 1400 if (!match(Zero, m_Zero())) 1401 return nullptr; 1402 // Otherwise, it should be some constant, left-shifted by the same NBits we 1403 // had in `lshr`. Said left-shift can also be appropriately extended. 1404 // Again, we must look past zero-ext when looking for NBits. 1405 SkipExtInMagic(SignExtendingValue); 1406 Constant *SignExtendingValueBaseConstant; 1407 if (!match(SignExtendingValue, 1408 m_Shl(m_Constant(SignExtendingValueBaseConstant), 1409 m_ZExtOrSelf(m_Specific(NBits))))) 1410 return nullptr; 1411 // If we `sub`, then the constant should be one, else it should be all-ones. 1412 if (I.getOpcode() == Instruction::Sub 1413 ? !match(SignExtendingValueBaseConstant, m_One()) 1414 : !match(SignExtendingValueBaseConstant, m_AllOnes())) 1415 return nullptr; 1416 1417 auto *NewAShr = BinaryOperator::CreateAShr(X, LowBitsToSkip, 1418 Extract->getName() + ".sext"); 1419 NewAShr->copyIRFlags(Extract); // Preserve `exact`-ness. 1420 if (!HadTrunc) 1421 return NewAShr; 1422 1423 Builder.Insert(NewAShr); 1424 return TruncInst::CreateTruncOrBitCast(NewAShr, I.getType()); 1425 } 1426 1427 /// This is a specialization of a more general transform from 1428 /// foldUsingDistributiveLaws. If that code can be made to work optimally 1429 /// for multi-use cases or propagating nsw/nuw, then we would not need this. 1430 static Instruction *factorizeMathWithShlOps(BinaryOperator &I, 1431 InstCombiner::BuilderTy &Builder) { 1432 // TODO: Also handle mul by doubling the shift amount? 1433 assert((I.getOpcode() == Instruction::Add || 1434 I.getOpcode() == Instruction::Sub) && 1435 "Expected add/sub"); 1436 auto *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0)); 1437 auto *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1)); 1438 if (!Op0 || !Op1 || !(Op0->hasOneUse() || Op1->hasOneUse())) 1439 return nullptr; 1440 1441 Value *X, *Y, *ShAmt; 1442 if (!match(Op0, m_Shl(m_Value(X), m_Value(ShAmt))) || 1443 !match(Op1, m_Shl(m_Value(Y), m_Specific(ShAmt)))) 1444 return nullptr; 1445 1446 // No-wrap propagates only when all ops have no-wrap. 1447 bool HasNSW = I.hasNoSignedWrap() && Op0->hasNoSignedWrap() && 1448 Op1->hasNoSignedWrap(); 1449 bool HasNUW = I.hasNoUnsignedWrap() && Op0->hasNoUnsignedWrap() && 1450 Op1->hasNoUnsignedWrap(); 1451 1452 // add/sub (X << ShAmt), (Y << ShAmt) --> (add/sub X, Y) << ShAmt 1453 Value *NewMath = Builder.CreateBinOp(I.getOpcode(), X, Y); 1454 if (auto *NewI = dyn_cast<BinaryOperator>(NewMath)) { 1455 NewI->setHasNoSignedWrap(HasNSW); 1456 NewI->setHasNoUnsignedWrap(HasNUW); 1457 } 1458 auto *NewShl = BinaryOperator::CreateShl(NewMath, ShAmt); 1459 NewShl->setHasNoSignedWrap(HasNSW); 1460 NewShl->setHasNoUnsignedWrap(HasNUW); 1461 return NewShl; 1462 } 1463 1464 /// Reduce a sequence of masked half-width multiplies to a single multiply. 1465 /// ((XLow * YHigh) + (YLow * XHigh)) << HalfBits) + (XLow * YLow) --> X * Y 1466 static Instruction *foldBoxMultiply(BinaryOperator &I) { 1467 unsigned BitWidth = I.getType()->getScalarSizeInBits(); 1468 // Skip the odd bitwidth types. 1469 if ((BitWidth & 0x1)) 1470 return nullptr; 1471 1472 unsigned HalfBits = BitWidth >> 1; 1473 APInt HalfMask = APInt::getMaxValue(HalfBits); 1474 1475 // ResLo = (CrossSum << HalfBits) + (YLo * XLo) 1476 Value *XLo, *YLo; 1477 Value *CrossSum; 1478 // Require one-use on the multiply to avoid increasing the number of 1479 // multiplications. 1480 if (!match(&I, m_c_Add(m_Shl(m_Value(CrossSum), m_SpecificInt(HalfBits)), 1481 m_OneUse(m_Mul(m_Value(YLo), m_Value(XLo)))))) 1482 return nullptr; 1483 1484 // XLo = X & HalfMask 1485 // YLo = Y & HalfMask 1486 // TODO: Refactor with SimplifyDemandedBits or KnownBits known leading zeros 1487 // to enhance robustness 1488 Value *X, *Y; 1489 if (!match(XLo, m_And(m_Value(X), m_SpecificInt(HalfMask))) || 1490 !match(YLo, m_And(m_Value(Y), m_SpecificInt(HalfMask)))) 1491 return nullptr; 1492 1493 // CrossSum = (X' * (Y >> Halfbits)) + (Y' * (X >> HalfBits)) 1494 // X' can be either X or XLo in the pattern (and the same for Y') 1495 if (match(CrossSum, 1496 m_c_Add(m_c_Mul(m_LShr(m_Specific(Y), m_SpecificInt(HalfBits)), 1497 m_CombineOr(m_Specific(X), m_Specific(XLo))), 1498 m_c_Mul(m_LShr(m_Specific(X), m_SpecificInt(HalfBits)), 1499 m_CombineOr(m_Specific(Y), m_Specific(YLo)))))) 1500 return BinaryOperator::CreateMul(X, Y); 1501 1502 return nullptr; 1503 } 1504 1505 Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) { 1506 if (Value *V = simplifyAddInst(I.getOperand(0), I.getOperand(1), 1507 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(), 1508 SQ.getWithInstruction(&I))) 1509 return replaceInstUsesWith(I, V); 1510 1511 if (SimplifyAssociativeOrCommutative(I)) 1512 return &I; 1513 1514 if (Instruction *X = foldVectorBinop(I)) 1515 return X; 1516 1517 if (Instruction *Phi = foldBinopWithPhiOperands(I)) 1518 return Phi; 1519 1520 // (A*B)+(A*C) -> A*(B+C) etc 1521 if (Value *V = foldUsingDistributiveLaws(I)) 1522 return replaceInstUsesWith(I, V); 1523 1524 if (Instruction *R = foldBoxMultiply(I)) 1525 return R; 1526 1527 if (Instruction *R = factorizeMathWithShlOps(I, Builder)) 1528 return R; 1529 1530 if (Instruction *X = foldAddWithConstant(I)) 1531 return X; 1532 1533 if (Instruction *X = foldNoWrapAdd(I, Builder)) 1534 return X; 1535 1536 if (Instruction *R = foldBinOpShiftWithShift(I)) 1537 return R; 1538 1539 if (Instruction *R = combineAddSubWithShlAddSub(Builder, I)) 1540 return R; 1541 1542 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); 1543 if (Instruction *R = foldAddLikeCommutative(LHS, RHS, I.hasNoSignedWrap(), 1544 I.hasNoUnsignedWrap())) 1545 return R; 1546 if (Instruction *R = foldAddLikeCommutative(RHS, LHS, I.hasNoSignedWrap(), 1547 I.hasNoUnsignedWrap())) 1548 return R; 1549 Type *Ty = I.getType(); 1550 if (Ty->isIntOrIntVectorTy(1)) 1551 return BinaryOperator::CreateXor(LHS, RHS); 1552 1553 // X + X --> X << 1 1554 if (LHS == RHS) { 1555 auto *Shl = BinaryOperator::CreateShl(LHS, ConstantInt::get(Ty, 1)); 1556 Shl->setHasNoSignedWrap(I.hasNoSignedWrap()); 1557 Shl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap()); 1558 return Shl; 1559 } 1560 1561 Value *A, *B; 1562 if (match(LHS, m_Neg(m_Value(A)))) { 1563 // -A + -B --> -(A + B) 1564 if (match(RHS, m_Neg(m_Value(B)))) 1565 return BinaryOperator::CreateNeg(Builder.CreateAdd(A, B)); 1566 1567 // -A + B --> B - A 1568 auto *Sub = BinaryOperator::CreateSub(RHS, A); 1569 auto *OB0 = cast<OverflowingBinaryOperator>(LHS); 1570 Sub->setHasNoSignedWrap(I.hasNoSignedWrap() && OB0->hasNoSignedWrap()); 1571 1572 return Sub; 1573 } 1574 1575 // A + -B --> A - B 1576 if (match(RHS, m_Neg(m_Value(B)))) { 1577 auto *Sub = BinaryOperator::CreateSub(LHS, B); 1578 auto *OBO = cast<OverflowingBinaryOperator>(RHS); 1579 Sub->setHasNoSignedWrap(I.hasNoSignedWrap() && OBO->hasNoSignedWrap()); 1580 return Sub; 1581 } 1582 1583 if (Value *V = checkForNegativeOperand(I, Builder)) 1584 return replaceInstUsesWith(I, V); 1585 1586 // (A + 1) + ~B --> A - B 1587 // ~B + (A + 1) --> A - B 1588 // (~B + A) + 1 --> A - B 1589 // (A + ~B) + 1 --> A - B 1590 if (match(&I, m_c_BinOp(m_Add(m_Value(A), m_One()), m_Not(m_Value(B)))) || 1591 match(&I, m_BinOp(m_c_Add(m_Not(m_Value(B)), m_Value(A)), m_One()))) 1592 return BinaryOperator::CreateSub(A, B); 1593 1594 // (A + RHS) + RHS --> A + (RHS << 1) 1595 if (match(LHS, m_OneUse(m_c_Add(m_Value(A), m_Specific(RHS))))) 1596 return BinaryOperator::CreateAdd(A, Builder.CreateShl(RHS, 1, "reass.add")); 1597 1598 // LHS + (A + LHS) --> A + (LHS << 1) 1599 if (match(RHS, m_OneUse(m_c_Add(m_Value(A), m_Specific(LHS))))) 1600 return BinaryOperator::CreateAdd(A, Builder.CreateShl(LHS, 1, "reass.add")); 1601 1602 { 1603 // (A + C1) + (C2 - B) --> (A - B) + (C1 + C2) 1604 Constant *C1, *C2; 1605 if (match(&I, m_c_Add(m_Add(m_Value(A), m_ImmConstant(C1)), 1606 m_Sub(m_ImmConstant(C2), m_Value(B)))) && 1607 (LHS->hasOneUse() || RHS->hasOneUse())) { 1608 Value *Sub = Builder.CreateSub(A, B); 1609 return BinaryOperator::CreateAdd(Sub, ConstantExpr::getAdd(C1, C2)); 1610 } 1611 1612 // Canonicalize a constant sub operand as an add operand for better folding: 1613 // (C1 - A) + B --> (B - A) + C1 1614 if (match(&I, m_c_Add(m_OneUse(m_Sub(m_ImmConstant(C1), m_Value(A))), 1615 m_Value(B)))) { 1616 Value *Sub = Builder.CreateSub(B, A, "reass.sub"); 1617 return BinaryOperator::CreateAdd(Sub, C1); 1618 } 1619 } 1620 1621 // X % C0 + (( X / C0 ) % C1) * C0 => X % (C0 * C1) 1622 if (Value *V = SimplifyAddWithRemainder(I)) return replaceInstUsesWith(I, V); 1623 1624 // ((X s/ C1) << C2) + X => X s% -C1 where -C1 is 1 << C2 1625 const APInt *C1, *C2; 1626 if (match(LHS, m_Shl(m_SDiv(m_Specific(RHS), m_APInt(C1)), m_APInt(C2)))) { 1627 APInt one(C2->getBitWidth(), 1); 1628 APInt minusC1 = -(*C1); 1629 if (minusC1 == (one << *C2)) { 1630 Constant *NewRHS = ConstantInt::get(RHS->getType(), minusC1); 1631 return BinaryOperator::CreateSRem(RHS, NewRHS); 1632 } 1633 } 1634 1635 // (A & 2^C1) + A => A & (2^C1 - 1) iff bit C1 in A is a sign bit 1636 if (match(&I, m_c_Add(m_And(m_Value(A), m_APInt(C1)), m_Deferred(A))) && 1637 C1->isPowerOf2() && (ComputeNumSignBits(A) > C1->countl_zero())) { 1638 Constant *NewMask = ConstantInt::get(RHS->getType(), *C1 - 1); 1639 return BinaryOperator::CreateAnd(A, NewMask); 1640 } 1641 1642 // ZExt (B - A) + ZExt(A) --> ZExt(B) 1643 if ((match(RHS, m_ZExt(m_Value(A))) && 1644 match(LHS, m_ZExt(m_NUWSub(m_Value(B), m_Specific(A))))) || 1645 (match(LHS, m_ZExt(m_Value(A))) && 1646 match(RHS, m_ZExt(m_NUWSub(m_Value(B), m_Specific(A)))))) 1647 return new ZExtInst(B, LHS->getType()); 1648 1649 // zext(A) + sext(A) --> 0 if A is i1 1650 if (match(&I, m_c_BinOp(m_ZExt(m_Value(A)), m_SExt(m_Deferred(A)))) && 1651 A->getType()->isIntOrIntVectorTy(1)) 1652 return replaceInstUsesWith(I, Constant::getNullValue(I.getType())); 1653 1654 // sext(A < B) + zext(A > B) => ucmp/scmp(A, B) 1655 ICmpInst::Predicate LTPred, GTPred; 1656 if (match(&I, 1657 m_c_Add(m_SExt(m_c_ICmp(LTPred, m_Value(A), m_Value(B))), 1658 m_ZExt(m_c_ICmp(GTPred, m_Deferred(A), m_Deferred(B))))) && 1659 A->getType()->isIntOrIntVectorTy()) { 1660 if (ICmpInst::isGT(LTPred)) { 1661 std::swap(LTPred, GTPred); 1662 std::swap(A, B); 1663 } 1664 1665 if (ICmpInst::isLT(LTPred) && ICmpInst::isGT(GTPred) && 1666 ICmpInst::isSigned(LTPred) == ICmpInst::isSigned(GTPred)) 1667 return replaceInstUsesWith( 1668 I, Builder.CreateIntrinsic( 1669 Ty, 1670 ICmpInst::isSigned(LTPred) ? Intrinsic::scmp : Intrinsic::ucmp, 1671 {A, B})); 1672 } 1673 1674 // A+B --> A|B iff A and B have no bits set in common. 1675 WithCache<const Value *> LHSCache(LHS), RHSCache(RHS); 1676 if (haveNoCommonBitsSet(LHSCache, RHSCache, SQ.getWithInstruction(&I))) 1677 return BinaryOperator::CreateDisjointOr(LHS, RHS); 1678 1679 if (Instruction *Ext = narrowMathIfNoOverflow(I)) 1680 return Ext; 1681 1682 // (add (xor A, B) (and A, B)) --> (or A, B) 1683 // (add (and A, B) (xor A, B)) --> (or A, B) 1684 if (match(&I, m_c_BinOp(m_Xor(m_Value(A), m_Value(B)), 1685 m_c_And(m_Deferred(A), m_Deferred(B))))) 1686 return BinaryOperator::CreateOr(A, B); 1687 1688 // (add (or A, B) (and A, B)) --> (add A, B) 1689 // (add (and A, B) (or A, B)) --> (add A, B) 1690 if (match(&I, m_c_BinOp(m_Or(m_Value(A), m_Value(B)), 1691 m_c_And(m_Deferred(A), m_Deferred(B))))) { 1692 // Replacing operands in-place to preserve nuw/nsw flags. 1693 replaceOperand(I, 0, A); 1694 replaceOperand(I, 1, B); 1695 return &I; 1696 } 1697 1698 // (add A (or A, -A)) --> (and (add A, -1) A) 1699 // (add A (or -A, A)) --> (and (add A, -1) A) 1700 // (add (or A, -A) A) --> (and (add A, -1) A) 1701 // (add (or -A, A) A) --> (and (add A, -1) A) 1702 if (match(&I, m_c_BinOp(m_Value(A), m_OneUse(m_c_Or(m_Neg(m_Deferred(A)), 1703 m_Deferred(A)))))) { 1704 Value *Add = 1705 Builder.CreateAdd(A, Constant::getAllOnesValue(A->getType()), "", 1706 I.hasNoUnsignedWrap(), I.hasNoSignedWrap()); 1707 return BinaryOperator::CreateAnd(Add, A); 1708 } 1709 1710 // Canonicalize ((A & -A) - 1) --> ((A - 1) & ~A) 1711 // Forms all commutable operations, and simplifies ctpop -> cttz folds. 1712 if (match(&I, 1713 m_Add(m_OneUse(m_c_And(m_Value(A), m_OneUse(m_Neg(m_Deferred(A))))), 1714 m_AllOnes()))) { 1715 Constant *AllOnes = ConstantInt::getAllOnesValue(RHS->getType()); 1716 Value *Dec = Builder.CreateAdd(A, AllOnes); 1717 Value *Not = Builder.CreateXor(A, AllOnes); 1718 return BinaryOperator::CreateAnd(Dec, Not); 1719 } 1720 1721 // Disguised reassociation/factorization: 1722 // ~(A * C1) + A 1723 // ((A * -C1) - 1) + A 1724 // ((A * -C1) + A) - 1 1725 // (A * (1 - C1)) - 1 1726 if (match(&I, 1727 m_c_Add(m_OneUse(m_Not(m_OneUse(m_Mul(m_Value(A), m_APInt(C1))))), 1728 m_Deferred(A)))) { 1729 Type *Ty = I.getType(); 1730 Constant *NewMulC = ConstantInt::get(Ty, 1 - *C1); 1731 Value *NewMul = Builder.CreateMul(A, NewMulC); 1732 return BinaryOperator::CreateAdd(NewMul, ConstantInt::getAllOnesValue(Ty)); 1733 } 1734 1735 // (A * -2**C) + B --> B - (A << C) 1736 const APInt *NegPow2C; 1737 if (match(&I, m_c_Add(m_OneUse(m_Mul(m_Value(A), m_NegatedPower2(NegPow2C))), 1738 m_Value(B)))) { 1739 Constant *ShiftAmtC = ConstantInt::get(Ty, NegPow2C->countr_zero()); 1740 Value *Shl = Builder.CreateShl(A, ShiftAmtC); 1741 return BinaryOperator::CreateSub(B, Shl); 1742 } 1743 1744 // Canonicalize signum variant that ends in add: 1745 // (A s>> (BW - 1)) + (zext (A s> 0)) --> (A s>> (BW - 1)) | (zext (A != 0)) 1746 uint64_t BitWidth = Ty->getScalarSizeInBits(); 1747 if (match(LHS, m_AShr(m_Value(A), m_SpecificIntAllowPoison(BitWidth - 1))) && 1748 match(RHS, m_OneUse(m_ZExt(m_OneUse(m_SpecificICmp( 1749 CmpInst::ICMP_SGT, m_Specific(A), m_ZeroInt())))))) { 1750 Value *NotZero = Builder.CreateIsNotNull(A, "isnotnull"); 1751 Value *Zext = Builder.CreateZExt(NotZero, Ty, "isnotnull.zext"); 1752 return BinaryOperator::CreateOr(LHS, Zext); 1753 } 1754 1755 { 1756 Value *Cond, *Ext; 1757 Constant *C; 1758 // (add X, (sext/zext (icmp eq X, C))) 1759 // -> (select (icmp eq X, C), (add C, (sext/zext 1)), X) 1760 auto CondMatcher = m_CombineAnd( 1761 m_Value(Cond), 1762 m_SpecificICmp(ICmpInst::ICMP_EQ, m_Deferred(A), m_ImmConstant(C))); 1763 1764 if (match(&I, 1765 m_c_Add(m_Value(A), 1766 m_CombineAnd(m_Value(Ext), m_ZExtOrSExt(CondMatcher)))) && 1767 Ext->hasOneUse()) { 1768 Value *Add = isa<ZExtInst>(Ext) ? InstCombiner::AddOne(C) 1769 : InstCombiner::SubOne(C); 1770 return replaceInstUsesWith(I, Builder.CreateSelect(Cond, Add, A)); 1771 } 1772 } 1773 1774 if (Instruction *Ashr = foldAddToAshr(I)) 1775 return Ashr; 1776 1777 // (~X) + (~Y) --> -2 - (X + Y) 1778 { 1779 // To ensure we can save instructions we need to ensure that we consume both 1780 // LHS/RHS (i.e they have a `not`). 1781 bool ConsumesLHS, ConsumesRHS; 1782 if (isFreeToInvert(LHS, LHS->hasOneUse(), ConsumesLHS) && ConsumesLHS && 1783 isFreeToInvert(RHS, RHS->hasOneUse(), ConsumesRHS) && ConsumesRHS) { 1784 Value *NotLHS = getFreelyInverted(LHS, LHS->hasOneUse(), &Builder); 1785 Value *NotRHS = getFreelyInverted(RHS, RHS->hasOneUse(), &Builder); 1786 assert(NotLHS != nullptr && NotRHS != nullptr && 1787 "isFreeToInvert desynced with getFreelyInverted"); 1788 Value *LHSPlusRHS = Builder.CreateAdd(NotLHS, NotRHS); 1789 return BinaryOperator::CreateSub( 1790 ConstantInt::getSigned(RHS->getType(), -2), LHSPlusRHS); 1791 } 1792 } 1793 1794 if (Instruction *R = tryFoldInstWithCtpopWithNot(&I)) 1795 return R; 1796 1797 // TODO(jingyue): Consider willNotOverflowSignedAdd and 1798 // willNotOverflowUnsignedAdd to reduce the number of invocations of 1799 // computeKnownBits. 1800 bool Changed = false; 1801 if (!I.hasNoSignedWrap() && willNotOverflowSignedAdd(LHSCache, RHSCache, I)) { 1802 Changed = true; 1803 I.setHasNoSignedWrap(true); 1804 } 1805 if (!I.hasNoUnsignedWrap() && 1806 willNotOverflowUnsignedAdd(LHSCache, RHSCache, I)) { 1807 Changed = true; 1808 I.setHasNoUnsignedWrap(true); 1809 } 1810 1811 if (Instruction *V = canonicalizeLowbitMask(I, Builder)) 1812 return V; 1813 1814 if (Instruction *V = 1815 canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I)) 1816 return V; 1817 1818 if (Instruction *SatAdd = foldToUnsignedSaturatedAdd(I)) 1819 return SatAdd; 1820 1821 // usub.sat(A, B) + B => umax(A, B) 1822 if (match(&I, m_c_BinOp( 1823 m_OneUse(m_Intrinsic<Intrinsic::usub_sat>(m_Value(A), m_Value(B))), 1824 m_Deferred(B)))) { 1825 return replaceInstUsesWith(I, 1826 Builder.CreateIntrinsic(Intrinsic::umax, {I.getType()}, {A, B})); 1827 } 1828 1829 // ctpop(A) + ctpop(B) => ctpop(A | B) if A and B have no bits set in common. 1830 if (match(LHS, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(A)))) && 1831 match(RHS, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(B)))) && 1832 haveNoCommonBitsSet(A, B, SQ.getWithInstruction(&I))) 1833 return replaceInstUsesWith( 1834 I, Builder.CreateIntrinsic(Intrinsic::ctpop, {I.getType()}, 1835 {Builder.CreateOr(A, B)})); 1836 1837 // Fold the log2_ceil idiom: 1838 // zext(ctpop(A) >u/!= 1) + (ctlz(A, true) ^ (BW - 1)) 1839 // --> 1840 // BW - ctlz(A - 1, false) 1841 const APInt *XorC; 1842 ICmpInst::Predicate Pred; 1843 if (match(&I, 1844 m_c_Add( 1845 m_ZExt(m_ICmp(Pred, m_Intrinsic<Intrinsic::ctpop>(m_Value(A)), 1846 m_One())), 1847 m_OneUse(m_ZExtOrSelf(m_OneUse(m_Xor( 1848 m_OneUse(m_TruncOrSelf(m_OneUse( 1849 m_Intrinsic<Intrinsic::ctlz>(m_Deferred(A), m_One())))), 1850 m_APInt(XorC))))))) && 1851 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_NE) && 1852 *XorC == A->getType()->getScalarSizeInBits() - 1) { 1853 Value *Sub = Builder.CreateAdd(A, Constant::getAllOnesValue(A->getType())); 1854 Value *Ctlz = Builder.CreateIntrinsic(Intrinsic::ctlz, {A->getType()}, 1855 {Sub, Builder.getFalse()}); 1856 Value *Ret = Builder.CreateSub( 1857 ConstantInt::get(A->getType(), A->getType()->getScalarSizeInBits()), 1858 Ctlz, "", /*HasNUW*/ true, /*HasNSW*/ true); 1859 return replaceInstUsesWith(I, Builder.CreateZExtOrTrunc(Ret, I.getType())); 1860 } 1861 1862 if (Instruction *Res = foldSquareSumInt(I)) 1863 return Res; 1864 1865 if (Instruction *Res = foldBinOpOfDisplacedShifts(I)) 1866 return Res; 1867 1868 if (Instruction *Res = foldBinOpOfSelectAndCastOfSelectCondition(I)) 1869 return Res; 1870 1871 return Changed ? &I : nullptr; 1872 } 1873 1874 /// Eliminate an op from a linear interpolation (lerp) pattern. 1875 static Instruction *factorizeLerp(BinaryOperator &I, 1876 InstCombiner::BuilderTy &Builder) { 1877 Value *X, *Y, *Z; 1878 if (!match(&I, m_c_FAdd(m_OneUse(m_c_FMul(m_Value(Y), 1879 m_OneUse(m_FSub(m_FPOne(), 1880 m_Value(Z))))), 1881 m_OneUse(m_c_FMul(m_Value(X), m_Deferred(Z)))))) 1882 return nullptr; 1883 1884 // (Y * (1.0 - Z)) + (X * Z) --> Y + Z * (X - Y) [8 commuted variants] 1885 Value *XY = Builder.CreateFSubFMF(X, Y, &I); 1886 Value *MulZ = Builder.CreateFMulFMF(Z, XY, &I); 1887 return BinaryOperator::CreateFAddFMF(Y, MulZ, &I); 1888 } 1889 1890 /// Factor a common operand out of fadd/fsub of fmul/fdiv. 1891 static Instruction *factorizeFAddFSub(BinaryOperator &I, 1892 InstCombiner::BuilderTy &Builder) { 1893 assert((I.getOpcode() == Instruction::FAdd || 1894 I.getOpcode() == Instruction::FSub) && "Expecting fadd/fsub"); 1895 assert(I.hasAllowReassoc() && I.hasNoSignedZeros() && 1896 "FP factorization requires FMF"); 1897 1898 if (Instruction *Lerp = factorizeLerp(I, Builder)) 1899 return Lerp; 1900 1901 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 1902 if (!Op0->hasOneUse() || !Op1->hasOneUse()) 1903 return nullptr; 1904 1905 Value *X, *Y, *Z; 1906 bool IsFMul; 1907 if ((match(Op0, m_FMul(m_Value(X), m_Value(Z))) && 1908 match(Op1, m_c_FMul(m_Value(Y), m_Specific(Z)))) || 1909 (match(Op0, m_FMul(m_Value(Z), m_Value(X))) && 1910 match(Op1, m_c_FMul(m_Value(Y), m_Specific(Z))))) 1911 IsFMul = true; 1912 else if (match(Op0, m_FDiv(m_Value(X), m_Value(Z))) && 1913 match(Op1, m_FDiv(m_Value(Y), m_Specific(Z)))) 1914 IsFMul = false; 1915 else 1916 return nullptr; 1917 1918 // (X * Z) + (Y * Z) --> (X + Y) * Z 1919 // (X * Z) - (Y * Z) --> (X - Y) * Z 1920 // (X / Z) + (Y / Z) --> (X + Y) / Z 1921 // (X / Z) - (Y / Z) --> (X - Y) / Z 1922 bool IsFAdd = I.getOpcode() == Instruction::FAdd; 1923 Value *XY = IsFAdd ? Builder.CreateFAddFMF(X, Y, &I) 1924 : Builder.CreateFSubFMF(X, Y, &I); 1925 1926 // Bail out if we just created a denormal constant. 1927 // TODO: This is copied from a previous implementation. Is it necessary? 1928 const APFloat *C; 1929 if (match(XY, m_APFloat(C)) && !C->isNormal()) 1930 return nullptr; 1931 1932 return IsFMul ? BinaryOperator::CreateFMulFMF(XY, Z, &I) 1933 : BinaryOperator::CreateFDivFMF(XY, Z, &I); 1934 } 1935 1936 Instruction *InstCombinerImpl::visitFAdd(BinaryOperator &I) { 1937 if (Value *V = simplifyFAddInst(I.getOperand(0), I.getOperand(1), 1938 I.getFastMathFlags(), 1939 SQ.getWithInstruction(&I))) 1940 return replaceInstUsesWith(I, V); 1941 1942 if (SimplifyAssociativeOrCommutative(I)) 1943 return &I; 1944 1945 if (Instruction *X = foldVectorBinop(I)) 1946 return X; 1947 1948 if (Instruction *Phi = foldBinopWithPhiOperands(I)) 1949 return Phi; 1950 1951 if (Instruction *FoldedFAdd = foldBinOpIntoSelectOrPhi(I)) 1952 return FoldedFAdd; 1953 1954 // (-X) + Y --> Y - X 1955 Value *X, *Y; 1956 if (match(&I, m_c_FAdd(m_FNeg(m_Value(X)), m_Value(Y)))) 1957 return BinaryOperator::CreateFSubFMF(Y, X, &I); 1958 1959 // Similar to above, but look through fmul/fdiv for the negated term. 1960 // (-X * Y) + Z --> Z - (X * Y) [4 commuted variants] 1961 Value *Z; 1962 if (match(&I, m_c_FAdd(m_OneUse(m_c_FMul(m_FNeg(m_Value(X)), m_Value(Y))), 1963 m_Value(Z)))) { 1964 Value *XY = Builder.CreateFMulFMF(X, Y, &I); 1965 return BinaryOperator::CreateFSubFMF(Z, XY, &I); 1966 } 1967 // (-X / Y) + Z --> Z - (X / Y) [2 commuted variants] 1968 // (X / -Y) + Z --> Z - (X / Y) [2 commuted variants] 1969 if (match(&I, m_c_FAdd(m_OneUse(m_FDiv(m_FNeg(m_Value(X)), m_Value(Y))), 1970 m_Value(Z))) || 1971 match(&I, m_c_FAdd(m_OneUse(m_FDiv(m_Value(X), m_FNeg(m_Value(Y)))), 1972 m_Value(Z)))) { 1973 Value *XY = Builder.CreateFDivFMF(X, Y, &I); 1974 return BinaryOperator::CreateFSubFMF(Z, XY, &I); 1975 } 1976 1977 // Check for (fadd double (sitofp x), y), see if we can merge this into an 1978 // integer add followed by a promotion. 1979 if (Instruction *R = foldFBinOpOfIntCasts(I)) 1980 return R; 1981 1982 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); 1983 // Handle specials cases for FAdd with selects feeding the operation 1984 if (Value *V = SimplifySelectsFeedingBinaryOp(I, LHS, RHS)) 1985 return replaceInstUsesWith(I, V); 1986 1987 if (I.hasAllowReassoc() && I.hasNoSignedZeros()) { 1988 if (Instruction *F = factorizeFAddFSub(I, Builder)) 1989 return F; 1990 1991 if (Instruction *F = foldSquareSumFP(I)) 1992 return F; 1993 1994 // Try to fold fadd into start value of reduction intrinsic. 1995 if (match(&I, m_c_FAdd(m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>( 1996 m_AnyZeroFP(), m_Value(X))), 1997 m_Value(Y)))) { 1998 // fadd (rdx 0.0, X), Y --> rdx Y, X 1999 return replaceInstUsesWith( 2000 I, Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd, 2001 {X->getType()}, {Y, X}, &I)); 2002 } 2003 const APFloat *StartC, *C; 2004 if (match(LHS, m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>( 2005 m_APFloat(StartC), m_Value(X)))) && 2006 match(RHS, m_APFloat(C))) { 2007 // fadd (rdx StartC, X), C --> rdx (C + StartC), X 2008 Constant *NewStartC = ConstantFP::get(I.getType(), *C + *StartC); 2009 return replaceInstUsesWith( 2010 I, Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd, 2011 {X->getType()}, {NewStartC, X}, &I)); 2012 } 2013 2014 // (X * MulC) + X --> X * (MulC + 1.0) 2015 Constant *MulC; 2016 if (match(&I, m_c_FAdd(m_FMul(m_Value(X), m_ImmConstant(MulC)), 2017 m_Deferred(X)))) { 2018 if (Constant *NewMulC = ConstantFoldBinaryOpOperands( 2019 Instruction::FAdd, MulC, ConstantFP::get(I.getType(), 1.0), DL)) 2020 return BinaryOperator::CreateFMulFMF(X, NewMulC, &I); 2021 } 2022 2023 // (-X - Y) + (X + Z) --> Z - Y 2024 if (match(&I, m_c_FAdd(m_FSub(m_FNeg(m_Value(X)), m_Value(Y)), 2025 m_c_FAdd(m_Deferred(X), m_Value(Z))))) 2026 return BinaryOperator::CreateFSubFMF(Z, Y, &I); 2027 2028 if (Value *V = FAddCombine(Builder).simplify(&I)) 2029 return replaceInstUsesWith(I, V); 2030 } 2031 2032 // minumum(X, Y) + maximum(X, Y) => X + Y. 2033 if (match(&I, 2034 m_c_FAdd(m_Intrinsic<Intrinsic::maximum>(m_Value(X), m_Value(Y)), 2035 m_c_Intrinsic<Intrinsic::minimum>(m_Deferred(X), 2036 m_Deferred(Y))))) { 2037 BinaryOperator *Result = BinaryOperator::CreateFAddFMF(X, Y, &I); 2038 // We cannot preserve ninf if nnan flag is not set. 2039 // If X is NaN and Y is Inf then in original program we had NaN + NaN, 2040 // while in optimized version NaN + Inf and this is a poison with ninf flag. 2041 if (!Result->hasNoNaNs()) 2042 Result->setHasNoInfs(false); 2043 return Result; 2044 } 2045 2046 return nullptr; 2047 } 2048 2049 /// Optimize pointer differences into the same array into a size. Consider: 2050 /// &A[10] - &A[0]: we should compile this to "10". LHS/RHS are the pointer 2051 /// operands to the ptrtoint instructions for the LHS/RHS of the subtract. 2052 Value *InstCombinerImpl::OptimizePointerDifference(Value *LHS, Value *RHS, 2053 Type *Ty, bool IsNUW) { 2054 // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize 2055 // this. 2056 bool Swapped = false; 2057 GEPOperator *GEP1 = nullptr, *GEP2 = nullptr; 2058 if (!isa<GEPOperator>(LHS) && isa<GEPOperator>(RHS)) { 2059 std::swap(LHS, RHS); 2060 Swapped = true; 2061 } 2062 2063 // Require at least one GEP with a common base pointer on both sides. 2064 if (auto *LHSGEP = dyn_cast<GEPOperator>(LHS)) { 2065 // (gep X, ...) - X 2066 if (LHSGEP->getOperand(0)->stripPointerCasts() == 2067 RHS->stripPointerCasts()) { 2068 GEP1 = LHSGEP; 2069 } else if (auto *RHSGEP = dyn_cast<GEPOperator>(RHS)) { 2070 // (gep X, ...) - (gep X, ...) 2071 if (LHSGEP->getOperand(0)->stripPointerCasts() == 2072 RHSGEP->getOperand(0)->stripPointerCasts()) { 2073 GEP1 = LHSGEP; 2074 GEP2 = RHSGEP; 2075 } 2076 } 2077 } 2078 2079 if (!GEP1) 2080 return nullptr; 2081 2082 // To avoid duplicating the offset arithmetic, rewrite the GEP to use the 2083 // computed offset. This may erase the original GEP, so be sure to cache the 2084 // inbounds flag before emitting the offset. 2085 // TODO: We should probably do this even if there is only one GEP. 2086 bool RewriteGEPs = GEP2 != nullptr; 2087 2088 // Emit the offset of the GEP and an intptr_t. 2089 bool GEP1IsInBounds = GEP1->isInBounds(); 2090 Value *Result = EmitGEPOffset(GEP1, RewriteGEPs); 2091 2092 // If this is a single inbounds GEP and the original sub was nuw, 2093 // then the final multiplication is also nuw. 2094 if (auto *I = dyn_cast<Instruction>(Result)) 2095 if (IsNUW && !GEP2 && !Swapped && GEP1IsInBounds && 2096 I->getOpcode() == Instruction::Mul) 2097 I->setHasNoUnsignedWrap(); 2098 2099 // If we have a 2nd GEP of the same base pointer, subtract the offsets. 2100 // If both GEPs are inbounds, then the subtract does not have signed overflow. 2101 if (GEP2) { 2102 bool GEP2IsInBounds = GEP2->isInBounds(); 2103 Value *Offset = EmitGEPOffset(GEP2, RewriteGEPs); 2104 Result = Builder.CreateSub(Result, Offset, "gepdiff", /* NUW */ false, 2105 GEP1IsInBounds && GEP2IsInBounds); 2106 } 2107 2108 // If we have p - gep(p, ...) then we have to negate the result. 2109 if (Swapped) 2110 Result = Builder.CreateNeg(Result, "diff.neg"); 2111 2112 return Builder.CreateIntCast(Result, Ty, true); 2113 } 2114 2115 static Instruction *foldSubOfMinMax(BinaryOperator &I, 2116 InstCombiner::BuilderTy &Builder) { 2117 Value *Op0 = I.getOperand(0); 2118 Value *Op1 = I.getOperand(1); 2119 Type *Ty = I.getType(); 2120 auto *MinMax = dyn_cast<MinMaxIntrinsic>(Op1); 2121 if (!MinMax) 2122 return nullptr; 2123 2124 // sub(add(X,Y), s/umin(X,Y)) --> s/umax(X,Y) 2125 // sub(add(X,Y), s/umax(X,Y)) --> s/umin(X,Y) 2126 Value *X = MinMax->getLHS(); 2127 Value *Y = MinMax->getRHS(); 2128 if (match(Op0, m_c_Add(m_Specific(X), m_Specific(Y))) && 2129 (Op0->hasOneUse() || Op1->hasOneUse())) { 2130 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(MinMax->getIntrinsicID()); 2131 Function *F = Intrinsic::getOrInsertDeclaration(I.getModule(), InvID, Ty); 2132 return CallInst::Create(F, {X, Y}); 2133 } 2134 2135 // sub(add(X,Y),umin(Y,Z)) --> add(X,usub.sat(Y,Z)) 2136 // sub(add(X,Z),umin(Y,Z)) --> add(X,usub.sat(Z,Y)) 2137 Value *Z; 2138 if (match(Op1, m_OneUse(m_UMin(m_Value(Y), m_Value(Z))))) { 2139 if (match(Op0, m_OneUse(m_c_Add(m_Specific(Y), m_Value(X))))) { 2140 Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, Ty, {Y, Z}); 2141 return BinaryOperator::CreateAdd(X, USub); 2142 } 2143 if (match(Op0, m_OneUse(m_c_Add(m_Specific(Z), m_Value(X))))) { 2144 Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, Ty, {Z, Y}); 2145 return BinaryOperator::CreateAdd(X, USub); 2146 } 2147 } 2148 2149 // sub Op0, smin((sub nsw Op0, Z), 0) --> smax Op0, Z 2150 // sub Op0, smax((sub nsw Op0, Z), 0) --> smin Op0, Z 2151 if (MinMax->isSigned() && match(Y, m_ZeroInt()) && 2152 match(X, m_NSWSub(m_Specific(Op0), m_Value(Z)))) { 2153 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(MinMax->getIntrinsicID()); 2154 Function *F = Intrinsic::getOrInsertDeclaration(I.getModule(), InvID, Ty); 2155 return CallInst::Create(F, {Op0, Z}); 2156 } 2157 2158 return nullptr; 2159 } 2160 2161 Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) { 2162 if (Value *V = simplifySubInst(I.getOperand(0), I.getOperand(1), 2163 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(), 2164 SQ.getWithInstruction(&I))) 2165 return replaceInstUsesWith(I, V); 2166 2167 if (Instruction *X = foldVectorBinop(I)) 2168 return X; 2169 2170 if (Instruction *Phi = foldBinopWithPhiOperands(I)) 2171 return Phi; 2172 2173 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 2174 2175 // If this is a 'B = x-(-A)', change to B = x+A. 2176 // We deal with this without involving Negator to preserve NSW flag. 2177 if (Value *V = dyn_castNegVal(Op1)) { 2178 BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V); 2179 2180 if (const auto *BO = dyn_cast<BinaryOperator>(Op1)) { 2181 assert(BO->getOpcode() == Instruction::Sub && 2182 "Expected a subtraction operator!"); 2183 if (BO->hasNoSignedWrap() && I.hasNoSignedWrap()) 2184 Res->setHasNoSignedWrap(true); 2185 } else { 2186 if (cast<Constant>(Op1)->isNotMinSignedValue() && I.hasNoSignedWrap()) 2187 Res->setHasNoSignedWrap(true); 2188 } 2189 2190 return Res; 2191 } 2192 2193 // Try this before Negator to preserve NSW flag. 2194 if (Instruction *R = factorizeMathWithShlOps(I, Builder)) 2195 return R; 2196 2197 Constant *C; 2198 if (match(Op0, m_ImmConstant(C))) { 2199 Value *X; 2200 Constant *C2; 2201 2202 // C-(X+C2) --> (C-C2)-X 2203 if (match(Op1, m_Add(m_Value(X), m_ImmConstant(C2)))) { 2204 // C-C2 never overflow, and C-(X+C2), (X+C2) has NSW/NUW 2205 // => (C-C2)-X can have NSW/NUW 2206 bool WillNotSOV = willNotOverflowSignedSub(C, C2, I); 2207 BinaryOperator *Res = 2208 BinaryOperator::CreateSub(ConstantExpr::getSub(C, C2), X); 2209 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1); 2210 Res->setHasNoSignedWrap(I.hasNoSignedWrap() && OBO1->hasNoSignedWrap() && 2211 WillNotSOV); 2212 Res->setHasNoUnsignedWrap(I.hasNoUnsignedWrap() && 2213 OBO1->hasNoUnsignedWrap()); 2214 return Res; 2215 } 2216 } 2217 2218 auto TryToNarrowDeduceFlags = [this, &I, &Op0, &Op1]() -> Instruction * { 2219 if (Instruction *Ext = narrowMathIfNoOverflow(I)) 2220 return Ext; 2221 2222 bool Changed = false; 2223 if (!I.hasNoSignedWrap() && willNotOverflowSignedSub(Op0, Op1, I)) { 2224 Changed = true; 2225 I.setHasNoSignedWrap(true); 2226 } 2227 if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedSub(Op0, Op1, I)) { 2228 Changed = true; 2229 I.setHasNoUnsignedWrap(true); 2230 } 2231 2232 return Changed ? &I : nullptr; 2233 }; 2234 2235 // First, let's try to interpret `sub a, b` as `add a, (sub 0, b)`, 2236 // and let's try to sink `(sub 0, b)` into `b` itself. But only if this isn't 2237 // a pure negation used by a select that looks like abs/nabs. 2238 bool IsNegation = match(Op0, m_ZeroInt()); 2239 if (!IsNegation || none_of(I.users(), [&I, Op1](const User *U) { 2240 const Instruction *UI = dyn_cast<Instruction>(U); 2241 if (!UI) 2242 return false; 2243 return match(UI, 2244 m_Select(m_Value(), m_Specific(Op1), m_Specific(&I))) || 2245 match(UI, m_Select(m_Value(), m_Specific(&I), m_Specific(Op1))); 2246 })) { 2247 if (Value *NegOp1 = Negator::Negate(IsNegation, /* IsNSW */ IsNegation && 2248 I.hasNoSignedWrap(), 2249 Op1, *this)) 2250 return BinaryOperator::CreateAdd(NegOp1, Op0); 2251 } 2252 if (IsNegation) 2253 return TryToNarrowDeduceFlags(); // Should have been handled in Negator! 2254 2255 // (A*B)-(A*C) -> A*(B-C) etc 2256 if (Value *V = foldUsingDistributiveLaws(I)) 2257 return replaceInstUsesWith(I, V); 2258 2259 if (I.getType()->isIntOrIntVectorTy(1)) 2260 return BinaryOperator::CreateXor(Op0, Op1); 2261 2262 // Replace (-1 - A) with (~A). 2263 if (match(Op0, m_AllOnes())) 2264 return BinaryOperator::CreateNot(Op1); 2265 2266 // (X + -1) - Y --> ~Y + X 2267 Value *X, *Y; 2268 if (match(Op0, m_OneUse(m_Add(m_Value(X), m_AllOnes())))) 2269 return BinaryOperator::CreateAdd(Builder.CreateNot(Op1), X); 2270 2271 // Reassociate sub/add sequences to create more add instructions and 2272 // reduce dependency chains: 2273 // ((X - Y) + Z) - Op1 --> (X + Z) - (Y + Op1) 2274 Value *Z; 2275 if (match(Op0, m_OneUse(m_c_Add(m_OneUse(m_Sub(m_Value(X), m_Value(Y))), 2276 m_Value(Z))))) { 2277 Value *XZ = Builder.CreateAdd(X, Z); 2278 Value *YW = Builder.CreateAdd(Y, Op1); 2279 return BinaryOperator::CreateSub(XZ, YW); 2280 } 2281 2282 // ((X - Y) - Op1) --> X - (Y + Op1) 2283 if (match(Op0, m_OneUse(m_Sub(m_Value(X), m_Value(Y))))) { 2284 OverflowingBinaryOperator *LHSSub = cast<OverflowingBinaryOperator>(Op0); 2285 bool HasNUW = I.hasNoUnsignedWrap() && LHSSub->hasNoUnsignedWrap(); 2286 bool HasNSW = HasNUW && I.hasNoSignedWrap() && LHSSub->hasNoSignedWrap(); 2287 Value *Add = Builder.CreateAdd(Y, Op1, "", /* HasNUW */ HasNUW, 2288 /* HasNSW */ HasNSW); 2289 BinaryOperator *Sub = BinaryOperator::CreateSub(X, Add); 2290 Sub->setHasNoUnsignedWrap(HasNUW); 2291 Sub->setHasNoSignedWrap(HasNSW); 2292 return Sub; 2293 } 2294 2295 { 2296 // (X + Z) - (Y + Z) --> (X - Y) 2297 // This is done in other passes, but we want to be able to consume this 2298 // pattern in InstCombine so we can generate it without creating infinite 2299 // loops. 2300 if (match(Op0, m_Add(m_Value(X), m_Value(Z))) && 2301 match(Op1, m_c_Add(m_Value(Y), m_Specific(Z)))) 2302 return BinaryOperator::CreateSub(X, Y); 2303 2304 // (X + C0) - (Y + C1) --> (X - Y) + (C0 - C1) 2305 Constant *CX, *CY; 2306 if (match(Op0, m_OneUse(m_Add(m_Value(X), m_ImmConstant(CX)))) && 2307 match(Op1, m_OneUse(m_Add(m_Value(Y), m_ImmConstant(CY))))) { 2308 Value *OpsSub = Builder.CreateSub(X, Y); 2309 Constant *ConstsSub = ConstantExpr::getSub(CX, CY); 2310 return BinaryOperator::CreateAdd(OpsSub, ConstsSub); 2311 } 2312 } 2313 2314 { 2315 Value *W, *Z; 2316 if (match(Op0, m_AddLike(m_Value(W), m_Value(X))) && 2317 match(Op1, m_AddLike(m_Value(Y), m_Value(Z)))) { 2318 Instruction *R = nullptr; 2319 if (W == Y) 2320 R = BinaryOperator::CreateSub(X, Z); 2321 else if (W == Z) 2322 R = BinaryOperator::CreateSub(X, Y); 2323 else if (X == Y) 2324 R = BinaryOperator::CreateSub(W, Z); 2325 else if (X == Z) 2326 R = BinaryOperator::CreateSub(W, Y); 2327 if (R) { 2328 bool NSW = I.hasNoSignedWrap() && 2329 match(Op0, m_NSWAddLike(m_Value(), m_Value())) && 2330 match(Op1, m_NSWAddLike(m_Value(), m_Value())); 2331 2332 bool NUW = I.hasNoUnsignedWrap() && 2333 match(Op1, m_NUWAddLike(m_Value(), m_Value())); 2334 R->setHasNoSignedWrap(NSW); 2335 R->setHasNoUnsignedWrap(NUW); 2336 return R; 2337 } 2338 } 2339 } 2340 2341 // (~X) - (~Y) --> Y - X 2342 { 2343 // Need to ensure we can consume at least one of the `not` instructions, 2344 // otherwise this can inf loop. 2345 bool ConsumesOp0, ConsumesOp1; 2346 if (isFreeToInvert(Op0, Op0->hasOneUse(), ConsumesOp0) && 2347 isFreeToInvert(Op1, Op1->hasOneUse(), ConsumesOp1) && 2348 (ConsumesOp0 || ConsumesOp1)) { 2349 Value *NotOp0 = getFreelyInverted(Op0, Op0->hasOneUse(), &Builder); 2350 Value *NotOp1 = getFreelyInverted(Op1, Op1->hasOneUse(), &Builder); 2351 assert(NotOp0 != nullptr && NotOp1 != nullptr && 2352 "isFreeToInvert desynced with getFreelyInverted"); 2353 return BinaryOperator::CreateSub(NotOp1, NotOp0); 2354 } 2355 } 2356 2357 auto m_AddRdx = [](Value *&Vec) { 2358 return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_add>(m_Value(Vec))); 2359 }; 2360 Value *V0, *V1; 2361 if (match(Op0, m_AddRdx(V0)) && match(Op1, m_AddRdx(V1)) && 2362 V0->getType() == V1->getType()) { 2363 // Difference of sums is sum of differences: 2364 // add_rdx(V0) - add_rdx(V1) --> add_rdx(V0 - V1) 2365 Value *Sub = Builder.CreateSub(V0, V1); 2366 Value *Rdx = Builder.CreateIntrinsic(Intrinsic::vector_reduce_add, 2367 {Sub->getType()}, {Sub}); 2368 return replaceInstUsesWith(I, Rdx); 2369 } 2370 2371 if (Constant *C = dyn_cast<Constant>(Op0)) { 2372 Value *X; 2373 if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) 2374 // C - (zext bool) --> bool ? C - 1 : C 2375 return SelectInst::Create(X, InstCombiner::SubOne(C), C); 2376 if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) 2377 // C - (sext bool) --> bool ? C + 1 : C 2378 return SelectInst::Create(X, InstCombiner::AddOne(C), C); 2379 2380 // C - ~X == X + (1+C) 2381 if (match(Op1, m_Not(m_Value(X)))) 2382 return BinaryOperator::CreateAdd(X, InstCombiner::AddOne(C)); 2383 2384 // Try to fold constant sub into select arguments. 2385 if (SelectInst *SI = dyn_cast<SelectInst>(Op1)) 2386 if (Instruction *R = FoldOpIntoSelect(I, SI)) 2387 return R; 2388 2389 // Try to fold constant sub into PHI values. 2390 if (PHINode *PN = dyn_cast<PHINode>(Op1)) 2391 if (Instruction *R = foldOpIntoPhi(I, PN)) 2392 return R; 2393 2394 Constant *C2; 2395 2396 // C-(C2-X) --> X+(C-C2) 2397 if (match(Op1, m_Sub(m_ImmConstant(C2), m_Value(X)))) 2398 return BinaryOperator::CreateAdd(X, ConstantExpr::getSub(C, C2)); 2399 } 2400 2401 const APInt *Op0C; 2402 if (match(Op0, m_APInt(Op0C))) { 2403 if (Op0C->isMask()) { 2404 // Turn this into a xor if LHS is 2^n-1 and the remaining bits are known 2405 // zero. We don't use information from dominating conditions so this 2406 // transform is easier to reverse if necessary. 2407 KnownBits RHSKnown = llvm::computeKnownBits( 2408 Op1, 0, SQ.getWithInstruction(&I).getWithoutDomCondCache()); 2409 if ((*Op0C | RHSKnown.Zero).isAllOnes()) 2410 return BinaryOperator::CreateXor(Op1, Op0); 2411 } 2412 2413 // C - ((C3 -nuw X) & C2) --> (C - (C2 & C3)) + (X & C2) when: 2414 // (C3 - ((C2 & C3) - 1)) is pow2 2415 // ((C2 + C3) & ((C2 & C3) - 1)) == ((C2 & C3) - 1) 2416 // C2 is negative pow2 || sub nuw 2417 const APInt *C2, *C3; 2418 BinaryOperator *InnerSub; 2419 if (match(Op1, m_OneUse(m_And(m_BinOp(InnerSub), m_APInt(C2)))) && 2420 match(InnerSub, m_Sub(m_APInt(C3), m_Value(X))) && 2421 (InnerSub->hasNoUnsignedWrap() || C2->isNegatedPowerOf2())) { 2422 APInt C2AndC3 = *C2 & *C3; 2423 APInt C2AndC3Minus1 = C2AndC3 - 1; 2424 APInt C2AddC3 = *C2 + *C3; 2425 if ((*C3 - C2AndC3Minus1).isPowerOf2() && 2426 C2AndC3Minus1.isSubsetOf(C2AddC3)) { 2427 Value *And = Builder.CreateAnd(X, ConstantInt::get(I.getType(), *C2)); 2428 return BinaryOperator::CreateAdd( 2429 And, ConstantInt::get(I.getType(), *Op0C - C2AndC3)); 2430 } 2431 } 2432 } 2433 2434 { 2435 Value *Y; 2436 // X-(X+Y) == -Y X-(Y+X) == -Y 2437 if (match(Op1, m_c_Add(m_Specific(Op0), m_Value(Y)))) 2438 return BinaryOperator::CreateNeg(Y); 2439 2440 // (X-Y)-X == -Y 2441 if (match(Op0, m_Sub(m_Specific(Op1), m_Value(Y)))) 2442 return BinaryOperator::CreateNeg(Y); 2443 } 2444 2445 // (sub (or A, B) (and A, B)) --> (xor A, B) 2446 { 2447 Value *A, *B; 2448 if (match(Op1, m_And(m_Value(A), m_Value(B))) && 2449 match(Op0, m_c_Or(m_Specific(A), m_Specific(B)))) 2450 return BinaryOperator::CreateXor(A, B); 2451 } 2452 2453 // (sub (add A, B) (or A, B)) --> (and A, B) 2454 { 2455 Value *A, *B; 2456 if (match(Op0, m_Add(m_Value(A), m_Value(B))) && 2457 match(Op1, m_c_Or(m_Specific(A), m_Specific(B)))) 2458 return BinaryOperator::CreateAnd(A, B); 2459 } 2460 2461 // (sub (add A, B) (and A, B)) --> (or A, B) 2462 { 2463 Value *A, *B; 2464 if (match(Op0, m_Add(m_Value(A), m_Value(B))) && 2465 match(Op1, m_c_And(m_Specific(A), m_Specific(B)))) 2466 return BinaryOperator::CreateOr(A, B); 2467 } 2468 2469 // (sub (and A, B) (or A, B)) --> neg (xor A, B) 2470 { 2471 Value *A, *B; 2472 if (match(Op0, m_And(m_Value(A), m_Value(B))) && 2473 match(Op1, m_c_Or(m_Specific(A), m_Specific(B))) && 2474 (Op0->hasOneUse() || Op1->hasOneUse())) 2475 return BinaryOperator::CreateNeg(Builder.CreateXor(A, B)); 2476 } 2477 2478 // (sub (or A, B), (xor A, B)) --> (and A, B) 2479 { 2480 Value *A, *B; 2481 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && 2482 match(Op0, m_c_Or(m_Specific(A), m_Specific(B)))) 2483 return BinaryOperator::CreateAnd(A, B); 2484 } 2485 2486 // (sub (xor A, B) (or A, B)) --> neg (and A, B) 2487 { 2488 Value *A, *B; 2489 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) && 2490 match(Op1, m_c_Or(m_Specific(A), m_Specific(B))) && 2491 (Op0->hasOneUse() || Op1->hasOneUse())) 2492 return BinaryOperator::CreateNeg(Builder.CreateAnd(A, B)); 2493 } 2494 2495 { 2496 Value *Y; 2497 // ((X | Y) - X) --> (~X & Y) 2498 if (match(Op0, m_OneUse(m_c_Or(m_Value(Y), m_Specific(Op1))))) 2499 return BinaryOperator::CreateAnd( 2500 Y, Builder.CreateNot(Op1, Op1->getName() + ".not")); 2501 } 2502 2503 { 2504 // (sub (and Op1, (neg X)), Op1) --> neg (and Op1, (add X, -1)) 2505 Value *X; 2506 if (match(Op0, m_OneUse(m_c_And(m_Specific(Op1), 2507 m_OneUse(m_Neg(m_Value(X))))))) { 2508 return BinaryOperator::CreateNeg(Builder.CreateAnd( 2509 Op1, Builder.CreateAdd(X, Constant::getAllOnesValue(I.getType())))); 2510 } 2511 } 2512 2513 { 2514 // (sub (and Op1, C), Op1) --> neg (and Op1, ~C) 2515 Constant *C; 2516 if (match(Op0, m_OneUse(m_And(m_Specific(Op1), m_Constant(C))))) { 2517 return BinaryOperator::CreateNeg( 2518 Builder.CreateAnd(Op1, Builder.CreateNot(C))); 2519 } 2520 } 2521 2522 { 2523 // (sub (xor X, (sext C)), (sext C)) => (select C, (neg X), X) 2524 // (sub (sext C), (xor X, (sext C))) => (select C, X, (neg X)) 2525 Value *C, *X; 2526 auto m_SubXorCmp = [&C, &X](Value *LHS, Value *RHS) { 2527 return match(LHS, m_OneUse(m_c_Xor(m_Value(X), m_Specific(RHS)))) && 2528 match(RHS, m_SExt(m_Value(C))) && 2529 (C->getType()->getScalarSizeInBits() == 1); 2530 }; 2531 if (m_SubXorCmp(Op0, Op1)) 2532 return SelectInst::Create(C, Builder.CreateNeg(X), X); 2533 if (m_SubXorCmp(Op1, Op0)) 2534 return SelectInst::Create(C, X, Builder.CreateNeg(X)); 2535 } 2536 2537 if (Instruction *R = tryFoldInstWithCtpopWithNot(&I)) 2538 return R; 2539 2540 if (Instruction *R = foldSubOfMinMax(I, Builder)) 2541 return R; 2542 2543 { 2544 // If we have a subtraction between some value and a select between 2545 // said value and something else, sink subtraction into select hands, i.e.: 2546 // sub (select %Cond, %TrueVal, %FalseVal), %Op1 2547 // -> 2548 // select %Cond, (sub %TrueVal, %Op1), (sub %FalseVal, %Op1) 2549 // or 2550 // sub %Op0, (select %Cond, %TrueVal, %FalseVal) 2551 // -> 2552 // select %Cond, (sub %Op0, %TrueVal), (sub %Op0, %FalseVal) 2553 // This will result in select between new subtraction and 0. 2554 auto SinkSubIntoSelect = 2555 [Ty = I.getType()](Value *Select, Value *OtherHandOfSub, 2556 auto SubBuilder) -> Instruction * { 2557 Value *Cond, *TrueVal, *FalseVal; 2558 if (!match(Select, m_OneUse(m_Select(m_Value(Cond), m_Value(TrueVal), 2559 m_Value(FalseVal))))) 2560 return nullptr; 2561 if (OtherHandOfSub != TrueVal && OtherHandOfSub != FalseVal) 2562 return nullptr; 2563 // While it is really tempting to just create two subtractions and let 2564 // InstCombine fold one of those to 0, it isn't possible to do so 2565 // because of worklist visitation order. So ugly it is. 2566 bool OtherHandOfSubIsTrueVal = OtherHandOfSub == TrueVal; 2567 Value *NewSub = SubBuilder(OtherHandOfSubIsTrueVal ? FalseVal : TrueVal); 2568 Constant *Zero = Constant::getNullValue(Ty); 2569 SelectInst *NewSel = 2570 SelectInst::Create(Cond, OtherHandOfSubIsTrueVal ? Zero : NewSub, 2571 OtherHandOfSubIsTrueVal ? NewSub : Zero); 2572 // Preserve prof metadata if any. 2573 NewSel->copyMetadata(cast<Instruction>(*Select)); 2574 return NewSel; 2575 }; 2576 if (Instruction *NewSel = SinkSubIntoSelect( 2577 /*Select=*/Op0, /*OtherHandOfSub=*/Op1, 2578 [Builder = &Builder, Op1](Value *OtherHandOfSelect) { 2579 return Builder->CreateSub(OtherHandOfSelect, 2580 /*OtherHandOfSub=*/Op1); 2581 })) 2582 return NewSel; 2583 if (Instruction *NewSel = SinkSubIntoSelect( 2584 /*Select=*/Op1, /*OtherHandOfSub=*/Op0, 2585 [Builder = &Builder, Op0](Value *OtherHandOfSelect) { 2586 return Builder->CreateSub(/*OtherHandOfSub=*/Op0, 2587 OtherHandOfSelect); 2588 })) 2589 return NewSel; 2590 } 2591 2592 // (X - (X & Y)) --> (X & ~Y) 2593 if (match(Op1, m_c_And(m_Specific(Op0), m_Value(Y))) && 2594 (Op1->hasOneUse() || isa<Constant>(Y))) 2595 return BinaryOperator::CreateAnd( 2596 Op0, Builder.CreateNot(Y, Y->getName() + ".not")); 2597 2598 // ~X - Min/Max(~X, Y) -> ~Min/Max(X, ~Y) - X 2599 // ~X - Min/Max(Y, ~X) -> ~Min/Max(X, ~Y) - X 2600 // Min/Max(~X, Y) - ~X -> X - ~Min/Max(X, ~Y) 2601 // Min/Max(Y, ~X) - ~X -> X - ~Min/Max(X, ~Y) 2602 // As long as Y is freely invertible, this will be neutral or a win. 2603 // Note: We don't generate the inverse max/min, just create the 'not' of 2604 // it and let other folds do the rest. 2605 if (match(Op0, m_Not(m_Value(X))) && 2606 match(Op1, m_c_MaxOrMin(m_Specific(Op0), m_Value(Y))) && 2607 !Op0->hasNUsesOrMore(3) && isFreeToInvert(Y, Y->hasOneUse())) { 2608 Value *Not = Builder.CreateNot(Op1); 2609 return BinaryOperator::CreateSub(Not, X); 2610 } 2611 if (match(Op1, m_Not(m_Value(X))) && 2612 match(Op0, m_c_MaxOrMin(m_Specific(Op1), m_Value(Y))) && 2613 !Op1->hasNUsesOrMore(3) && isFreeToInvert(Y, Y->hasOneUse())) { 2614 Value *Not = Builder.CreateNot(Op0); 2615 return BinaryOperator::CreateSub(X, Not); 2616 } 2617 2618 // Optimize pointer differences into the same array into a size. Consider: 2619 // &A[10] - &A[0]: we should compile this to "10". 2620 Value *LHSOp, *RHSOp; 2621 if (match(Op0, m_PtrToInt(m_Value(LHSOp))) && 2622 match(Op1, m_PtrToInt(m_Value(RHSOp)))) 2623 if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(), 2624 I.hasNoUnsignedWrap())) 2625 return replaceInstUsesWith(I, Res); 2626 2627 // trunc(p)-trunc(q) -> trunc(p-q) 2628 if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) && 2629 match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp))))) 2630 if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(), 2631 /* IsNUW */ false)) 2632 return replaceInstUsesWith(I, Res); 2633 2634 if (match(Op0, m_ZExt(m_PtrToIntSameSize(DL, m_Value(LHSOp)))) && 2635 match(Op1, m_ZExtOrSelf(m_PtrToInt(m_Value(RHSOp))))) { 2636 if (auto *GEP = dyn_cast<GEPOperator>(LHSOp)) { 2637 if (GEP->getPointerOperand() == RHSOp) { 2638 if (GEP->hasNoUnsignedWrap() || GEP->hasNoUnsignedSignedWrap()) { 2639 Value *Offset = EmitGEPOffset(GEP); 2640 Value *Res = GEP->hasNoUnsignedWrap() 2641 ? Builder.CreateZExt( 2642 Offset, I.getType(), "", 2643 /*IsNonNeg=*/GEP->hasNoUnsignedSignedWrap()) 2644 : Builder.CreateSExt(Offset, I.getType()); 2645 return replaceInstUsesWith(I, Res); 2646 } 2647 } 2648 } 2649 } 2650 2651 // Canonicalize a shifty way to code absolute value to the common pattern. 2652 // There are 2 potential commuted variants. 2653 // We're relying on the fact that we only do this transform when the shift has 2654 // exactly 2 uses and the xor has exactly 1 use (otherwise, we might increase 2655 // instructions). 2656 Value *A; 2657 const APInt *ShAmt; 2658 Type *Ty = I.getType(); 2659 unsigned BitWidth = Ty->getScalarSizeInBits(); 2660 if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) && 2661 Op1->hasNUses(2) && *ShAmt == BitWidth - 1 && 2662 match(Op0, m_OneUse(m_c_Xor(m_Specific(A), m_Specific(Op1))))) { 2663 // B = ashr i32 A, 31 ; smear the sign bit 2664 // sub (xor A, B), B ; flip bits if negative and subtract -1 (add 1) 2665 // --> (A < 0) ? -A : A 2666 Value *IsNeg = Builder.CreateIsNeg(A); 2667 // Copy the nsw flags from the sub to the negate. 2668 Value *NegA = I.hasNoUnsignedWrap() 2669 ? Constant::getNullValue(A->getType()) 2670 : Builder.CreateNeg(A, "", I.hasNoSignedWrap()); 2671 return SelectInst::Create(IsNeg, NegA, A); 2672 } 2673 2674 // If we are subtracting a low-bit masked subset of some value from an add 2675 // of that same value with no low bits changed, that is clearing some low bits 2676 // of the sum: 2677 // sub (X + AddC), (X & AndC) --> and (X + AddC), ~AndC 2678 const APInt *AddC, *AndC; 2679 if (match(Op0, m_Add(m_Value(X), m_APInt(AddC))) && 2680 match(Op1, m_And(m_Specific(X), m_APInt(AndC)))) { 2681 unsigned Cttz = AddC->countr_zero(); 2682 APInt HighMask(APInt::getHighBitsSet(BitWidth, BitWidth - Cttz)); 2683 if ((HighMask & *AndC).isZero()) 2684 return BinaryOperator::CreateAnd(Op0, ConstantInt::get(Ty, ~(*AndC))); 2685 } 2686 2687 if (Instruction *V = 2688 canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I)) 2689 return V; 2690 2691 // X - usub.sat(X, Y) => umin(X, Y) 2692 if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::usub_sat>(m_Specific(Op0), 2693 m_Value(Y))))) 2694 return replaceInstUsesWith( 2695 I, Builder.CreateIntrinsic(Intrinsic::umin, {I.getType()}, {Op0, Y})); 2696 2697 // umax(X, Op1) - Op1 --> usub.sat(X, Op1) 2698 // TODO: The one-use restriction is not strictly necessary, but it may 2699 // require improving other pattern matching and/or codegen. 2700 if (match(Op0, m_OneUse(m_c_UMax(m_Value(X), m_Specific(Op1))))) 2701 return replaceInstUsesWith( 2702 I, Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {X, Op1})); 2703 2704 // Op0 - umin(X, Op0) --> usub.sat(Op0, X) 2705 if (match(Op1, m_OneUse(m_c_UMin(m_Value(X), m_Specific(Op0))))) 2706 return replaceInstUsesWith( 2707 I, Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {Op0, X})); 2708 2709 // Op0 - umax(X, Op0) --> 0 - usub.sat(X, Op0) 2710 if (match(Op1, m_OneUse(m_c_UMax(m_Value(X), m_Specific(Op0))))) { 2711 Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {X, Op0}); 2712 return BinaryOperator::CreateNeg(USub); 2713 } 2714 2715 // umin(X, Op1) - Op1 --> 0 - usub.sat(Op1, X) 2716 if (match(Op0, m_OneUse(m_c_UMin(m_Value(X), m_Specific(Op1))))) { 2717 Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {Op1, X}); 2718 return BinaryOperator::CreateNeg(USub); 2719 } 2720 2721 // C - ctpop(X) => ctpop(~X) if C is bitwidth 2722 if (match(Op0, m_SpecificInt(BitWidth)) && 2723 match(Op1, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(X))))) 2724 return replaceInstUsesWith( 2725 I, Builder.CreateIntrinsic(Intrinsic::ctpop, {I.getType()}, 2726 {Builder.CreateNot(X)})); 2727 2728 // Reduce multiplies for difference-of-squares by factoring: 2729 // (X * X) - (Y * Y) --> (X + Y) * (X - Y) 2730 if (match(Op0, m_OneUse(m_Mul(m_Value(X), m_Deferred(X)))) && 2731 match(Op1, m_OneUse(m_Mul(m_Value(Y), m_Deferred(Y))))) { 2732 auto *OBO0 = cast<OverflowingBinaryOperator>(Op0); 2733 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1); 2734 bool PropagateNSW = I.hasNoSignedWrap() && OBO0->hasNoSignedWrap() && 2735 OBO1->hasNoSignedWrap() && BitWidth > 2; 2736 bool PropagateNUW = I.hasNoUnsignedWrap() && OBO0->hasNoUnsignedWrap() && 2737 OBO1->hasNoUnsignedWrap() && BitWidth > 1; 2738 Value *Add = Builder.CreateAdd(X, Y, "add", PropagateNUW, PropagateNSW); 2739 Value *Sub = Builder.CreateSub(X, Y, "sub", PropagateNUW, PropagateNSW); 2740 Value *Mul = Builder.CreateMul(Add, Sub, "", PropagateNUW, PropagateNSW); 2741 return replaceInstUsesWith(I, Mul); 2742 } 2743 2744 // max(X,Y) nsw/nuw - min(X,Y) --> abs(X nsw - Y) 2745 if (match(Op0, m_OneUse(m_c_SMax(m_Value(X), m_Value(Y)))) && 2746 match(Op1, m_OneUse(m_c_SMin(m_Specific(X), m_Specific(Y))))) { 2747 if (I.hasNoUnsignedWrap() || I.hasNoSignedWrap()) { 2748 Value *Sub = 2749 Builder.CreateSub(X, Y, "sub", /*HasNUW=*/false, /*HasNSW=*/true); 2750 Value *Call = 2751 Builder.CreateBinaryIntrinsic(Intrinsic::abs, Sub, Builder.getTrue()); 2752 return replaceInstUsesWith(I, Call); 2753 } 2754 } 2755 2756 if (Instruction *Res = foldBinOpOfSelectAndCastOfSelectCondition(I)) 2757 return Res; 2758 2759 return TryToNarrowDeduceFlags(); 2760 } 2761 2762 /// This eliminates floating-point negation in either 'fneg(X)' or 2763 /// 'fsub(-0.0, X)' form by combining into a constant operand. 2764 static Instruction *foldFNegIntoConstant(Instruction &I, const DataLayout &DL) { 2765 // This is limited with one-use because fneg is assumed better for 2766 // reassociation and cheaper in codegen than fmul/fdiv. 2767 // TODO: Should the m_OneUse restriction be removed? 2768 Instruction *FNegOp; 2769 if (!match(&I, m_FNeg(m_OneUse(m_Instruction(FNegOp))))) 2770 return nullptr; 2771 2772 Value *X; 2773 Constant *C; 2774 2775 // Fold negation into constant operand. 2776 // -(X * C) --> X * (-C) 2777 if (match(FNegOp, m_FMul(m_Value(X), m_Constant(C)))) 2778 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL)) 2779 return BinaryOperator::CreateFMulFMF(X, NegC, &I); 2780 // -(X / C) --> X / (-C) 2781 if (match(FNegOp, m_FDiv(m_Value(X), m_Constant(C)))) 2782 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL)) 2783 return BinaryOperator::CreateFDivFMF(X, NegC, &I); 2784 // -(C / X) --> (-C) / X 2785 if (match(FNegOp, m_FDiv(m_Constant(C), m_Value(X)))) 2786 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL)) { 2787 Instruction *FDiv = BinaryOperator::CreateFDivFMF(NegC, X, &I); 2788 2789 // Intersect 'nsz' and 'ninf' because those special value exceptions may 2790 // not apply to the fdiv. Everything else propagates from the fneg. 2791 // TODO: We could propagate nsz/ninf from fdiv alone? 2792 FastMathFlags FMF = I.getFastMathFlags(); 2793 FastMathFlags OpFMF = FNegOp->getFastMathFlags(); 2794 FDiv->setHasNoSignedZeros(FMF.noSignedZeros() && OpFMF.noSignedZeros()); 2795 FDiv->setHasNoInfs(FMF.noInfs() && OpFMF.noInfs()); 2796 return FDiv; 2797 } 2798 // With NSZ [ counter-example with -0.0: -(-0.0 + 0.0) != 0.0 + -0.0 ]: 2799 // -(X + C) --> -X + -C --> -C - X 2800 if (I.hasNoSignedZeros() && match(FNegOp, m_FAdd(m_Value(X), m_Constant(C)))) 2801 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL)) 2802 return BinaryOperator::CreateFSubFMF(NegC, X, &I); 2803 2804 return nullptr; 2805 } 2806 2807 Instruction *InstCombinerImpl::hoistFNegAboveFMulFDiv(Value *FNegOp, 2808 Instruction &FMFSource) { 2809 Value *X, *Y; 2810 if (match(FNegOp, m_FMul(m_Value(X), m_Value(Y)))) { 2811 // Push into RHS which is more likely to simplify (const or another fneg). 2812 // FIXME: It would be better to invert the transform. 2813 return cast<Instruction>(Builder.CreateFMulFMF( 2814 X, Builder.CreateFNegFMF(Y, &FMFSource), &FMFSource)); 2815 } 2816 2817 if (match(FNegOp, m_FDiv(m_Value(X), m_Value(Y)))) { 2818 return cast<Instruction>(Builder.CreateFDivFMF( 2819 Builder.CreateFNegFMF(X, &FMFSource), Y, &FMFSource)); 2820 } 2821 2822 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(FNegOp)) { 2823 // Make sure to preserve flags and metadata on the call. 2824 if (II->getIntrinsicID() == Intrinsic::ldexp) { 2825 FastMathFlags FMF = FMFSource.getFastMathFlags() | II->getFastMathFlags(); 2826 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder); 2827 Builder.setFastMathFlags(FMF); 2828 2829 CallInst *New = Builder.CreateCall( 2830 II->getCalledFunction(), 2831 {Builder.CreateFNeg(II->getArgOperand(0)), II->getArgOperand(1)}); 2832 New->copyMetadata(*II); 2833 return New; 2834 } 2835 } 2836 2837 return nullptr; 2838 } 2839 2840 Instruction *InstCombinerImpl::visitFNeg(UnaryOperator &I) { 2841 Value *Op = I.getOperand(0); 2842 2843 if (Value *V = simplifyFNegInst(Op, I.getFastMathFlags(), 2844 getSimplifyQuery().getWithInstruction(&I))) 2845 return replaceInstUsesWith(I, V); 2846 2847 if (Instruction *X = foldFNegIntoConstant(I, DL)) 2848 return X; 2849 2850 Value *X, *Y; 2851 2852 // If we can ignore the sign of zeros: -(X - Y) --> (Y - X) 2853 if (I.hasNoSignedZeros() && 2854 match(Op, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) 2855 return BinaryOperator::CreateFSubFMF(Y, X, &I); 2856 2857 Value *OneUse; 2858 if (!match(Op, m_OneUse(m_Value(OneUse)))) 2859 return nullptr; 2860 2861 if (Instruction *R = hoistFNegAboveFMulFDiv(OneUse, I)) 2862 return replaceInstUsesWith(I, R); 2863 2864 // Try to eliminate fneg if at least 1 arm of the select is negated. 2865 Value *Cond; 2866 if (match(OneUse, m_Select(m_Value(Cond), m_Value(X), m_Value(Y)))) { 2867 // Unlike most transforms, this one is not safe to propagate nsz unless 2868 // it is present on the original select. We union the flags from the select 2869 // and fneg and then remove nsz if needed. 2870 auto propagateSelectFMF = [&](SelectInst *S, bool CommonOperand) { 2871 S->copyFastMathFlags(&I); 2872 if (auto *OldSel = dyn_cast<SelectInst>(Op)) { 2873 FastMathFlags FMF = I.getFastMathFlags() | OldSel->getFastMathFlags(); 2874 S->setFastMathFlags(FMF); 2875 if (!OldSel->hasNoSignedZeros() && !CommonOperand && 2876 !isGuaranteedNotToBeUndefOrPoison(OldSel->getCondition())) 2877 S->setHasNoSignedZeros(false); 2878 } 2879 }; 2880 // -(Cond ? -P : Y) --> Cond ? P : -Y 2881 Value *P; 2882 if (match(X, m_FNeg(m_Value(P)))) { 2883 Value *NegY = Builder.CreateFNegFMF(Y, &I, Y->getName() + ".neg"); 2884 SelectInst *NewSel = SelectInst::Create(Cond, P, NegY); 2885 propagateSelectFMF(NewSel, P == Y); 2886 return NewSel; 2887 } 2888 // -(Cond ? X : -P) --> Cond ? -X : P 2889 if (match(Y, m_FNeg(m_Value(P)))) { 2890 Value *NegX = Builder.CreateFNegFMF(X, &I, X->getName() + ".neg"); 2891 SelectInst *NewSel = SelectInst::Create(Cond, NegX, P); 2892 propagateSelectFMF(NewSel, P == X); 2893 return NewSel; 2894 } 2895 2896 // -(Cond ? X : C) --> Cond ? -X : -C 2897 // -(Cond ? C : Y) --> Cond ? -C : -Y 2898 if (match(X, m_ImmConstant()) || match(Y, m_ImmConstant())) { 2899 Value *NegX = Builder.CreateFNegFMF(X, &I, X->getName() + ".neg"); 2900 Value *NegY = Builder.CreateFNegFMF(Y, &I, Y->getName() + ".neg"); 2901 SelectInst *NewSel = SelectInst::Create(Cond, NegX, NegY); 2902 propagateSelectFMF(NewSel, /*CommonOperand=*/true); 2903 return NewSel; 2904 } 2905 } 2906 2907 // fneg (copysign x, y) -> copysign x, (fneg y) 2908 if (match(OneUse, m_CopySign(m_Value(X), m_Value(Y)))) { 2909 // The source copysign has an additional value input, so we can't propagate 2910 // flags the copysign doesn't also have. 2911 FastMathFlags FMF = I.getFastMathFlags(); 2912 FMF &= cast<FPMathOperator>(OneUse)->getFastMathFlags(); 2913 2914 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder); 2915 Builder.setFastMathFlags(FMF); 2916 2917 Value *NegY = Builder.CreateFNeg(Y); 2918 Value *NewCopySign = Builder.CreateCopySign(X, NegY); 2919 return replaceInstUsesWith(I, NewCopySign); 2920 } 2921 2922 return nullptr; 2923 } 2924 2925 Instruction *InstCombinerImpl::visitFSub(BinaryOperator &I) { 2926 if (Value *V = simplifyFSubInst(I.getOperand(0), I.getOperand(1), 2927 I.getFastMathFlags(), 2928 getSimplifyQuery().getWithInstruction(&I))) 2929 return replaceInstUsesWith(I, V); 2930 2931 if (Instruction *X = foldVectorBinop(I)) 2932 return X; 2933 2934 if (Instruction *Phi = foldBinopWithPhiOperands(I)) 2935 return Phi; 2936 2937 // Subtraction from -0.0 is the canonical form of fneg. 2938 // fsub -0.0, X ==> fneg X 2939 // fsub nsz 0.0, X ==> fneg nsz X 2940 // 2941 // FIXME This matcher does not respect FTZ or DAZ yet: 2942 // fsub -0.0, Denorm ==> +-0 2943 // fneg Denorm ==> -Denorm 2944 Value *Op; 2945 if (match(&I, m_FNeg(m_Value(Op)))) 2946 return UnaryOperator::CreateFNegFMF(Op, &I); 2947 2948 if (Instruction *X = foldFNegIntoConstant(I, DL)) 2949 return X; 2950 2951 if (Instruction *R = foldFBinOpOfIntCasts(I)) 2952 return R; 2953 2954 Value *X, *Y; 2955 Constant *C; 2956 2957 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 2958 // If Op0 is not -0.0 or we can ignore -0.0: Z - (X - Y) --> Z + (Y - X) 2959 // Canonicalize to fadd to make analysis easier. 2960 // This can also help codegen because fadd is commutative. 2961 // Note that if this fsub was really an fneg, the fadd with -0.0 will get 2962 // killed later. We still limit that particular transform with 'hasOneUse' 2963 // because an fneg is assumed better/cheaper than a generic fsub. 2964 if (I.hasNoSignedZeros() || 2965 cannotBeNegativeZero(Op0, 0, getSimplifyQuery().getWithInstruction(&I))) { 2966 if (match(Op1, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) { 2967 Value *NewSub = Builder.CreateFSubFMF(Y, X, &I); 2968 return BinaryOperator::CreateFAddFMF(Op0, NewSub, &I); 2969 } 2970 } 2971 2972 // (-X) - Op1 --> -(X + Op1) 2973 if (I.hasNoSignedZeros() && !isa<ConstantExpr>(Op0) && 2974 match(Op0, m_OneUse(m_FNeg(m_Value(X))))) { 2975 Value *FAdd = Builder.CreateFAddFMF(X, Op1, &I); 2976 return UnaryOperator::CreateFNegFMF(FAdd, &I); 2977 } 2978 2979 if (isa<Constant>(Op0)) 2980 if (SelectInst *SI = dyn_cast<SelectInst>(Op1)) 2981 if (Instruction *NV = FoldOpIntoSelect(I, SI)) 2982 return NV; 2983 2984 // X - C --> X + (-C) 2985 // But don't transform constant expressions because there's an inverse fold 2986 // for X + (-Y) --> X - Y. 2987 if (match(Op1, m_ImmConstant(C))) 2988 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL)) 2989 return BinaryOperator::CreateFAddFMF(Op0, NegC, &I); 2990 2991 // X - (-Y) --> X + Y 2992 if (match(Op1, m_FNeg(m_Value(Y)))) 2993 return BinaryOperator::CreateFAddFMF(Op0, Y, &I); 2994 2995 // Similar to above, but look through a cast of the negated value: 2996 // X - (fptrunc(-Y)) --> X + fptrunc(Y) 2997 Type *Ty = I.getType(); 2998 if (match(Op1, m_OneUse(m_FPTrunc(m_FNeg(m_Value(Y)))))) 2999 return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPTrunc(Y, Ty), &I); 3000 3001 // X - (fpext(-Y)) --> X + fpext(Y) 3002 if (match(Op1, m_OneUse(m_FPExt(m_FNeg(m_Value(Y)))))) 3003 return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPExt(Y, Ty), &I); 3004 3005 // Similar to above, but look through fmul/fdiv of the negated value: 3006 // Op0 - (-X * Y) --> Op0 + (X * Y) 3007 // Op0 - (Y * -X) --> Op0 + (X * Y) 3008 if (match(Op1, m_OneUse(m_c_FMul(m_FNeg(m_Value(X)), m_Value(Y))))) { 3009 Value *FMul = Builder.CreateFMulFMF(X, Y, &I); 3010 return BinaryOperator::CreateFAddFMF(Op0, FMul, &I); 3011 } 3012 // Op0 - (-X / Y) --> Op0 + (X / Y) 3013 // Op0 - (X / -Y) --> Op0 + (X / Y) 3014 if (match(Op1, m_OneUse(m_FDiv(m_FNeg(m_Value(X)), m_Value(Y)))) || 3015 match(Op1, m_OneUse(m_FDiv(m_Value(X), m_FNeg(m_Value(Y)))))) { 3016 Value *FDiv = Builder.CreateFDivFMF(X, Y, &I); 3017 return BinaryOperator::CreateFAddFMF(Op0, FDiv, &I); 3018 } 3019 3020 // Handle special cases for FSub with selects feeding the operation 3021 if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1)) 3022 return replaceInstUsesWith(I, V); 3023 3024 if (I.hasAllowReassoc() && I.hasNoSignedZeros()) { 3025 // (Y - X) - Y --> -X 3026 if (match(Op0, m_FSub(m_Specific(Op1), m_Value(X)))) 3027 return UnaryOperator::CreateFNegFMF(X, &I); 3028 3029 // Y - (X + Y) --> -X 3030 // Y - (Y + X) --> -X 3031 if (match(Op1, m_c_FAdd(m_Specific(Op0), m_Value(X)))) 3032 return UnaryOperator::CreateFNegFMF(X, &I); 3033 3034 // (X * C) - X --> X * (C - 1.0) 3035 if (match(Op0, m_FMul(m_Specific(Op1), m_Constant(C)))) { 3036 if (Constant *CSubOne = ConstantFoldBinaryOpOperands( 3037 Instruction::FSub, C, ConstantFP::get(Ty, 1.0), DL)) 3038 return BinaryOperator::CreateFMulFMF(Op1, CSubOne, &I); 3039 } 3040 // X - (X * C) --> X * (1.0 - C) 3041 if (match(Op1, m_FMul(m_Specific(Op0), m_Constant(C)))) { 3042 if (Constant *OneSubC = ConstantFoldBinaryOpOperands( 3043 Instruction::FSub, ConstantFP::get(Ty, 1.0), C, DL)) 3044 return BinaryOperator::CreateFMulFMF(Op0, OneSubC, &I); 3045 } 3046 3047 // Reassociate fsub/fadd sequences to create more fadd instructions and 3048 // reduce dependency chains: 3049 // ((X - Y) + Z) - Op1 --> (X + Z) - (Y + Op1) 3050 Value *Z; 3051 if (match(Op0, m_OneUse(m_c_FAdd(m_OneUse(m_FSub(m_Value(X), m_Value(Y))), 3052 m_Value(Z))))) { 3053 Value *XZ = Builder.CreateFAddFMF(X, Z, &I); 3054 Value *YW = Builder.CreateFAddFMF(Y, Op1, &I); 3055 return BinaryOperator::CreateFSubFMF(XZ, YW, &I); 3056 } 3057 3058 auto m_FaddRdx = [](Value *&Sum, Value *&Vec) { 3059 return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(m_Value(Sum), 3060 m_Value(Vec))); 3061 }; 3062 Value *A0, *A1, *V0, *V1; 3063 if (match(Op0, m_FaddRdx(A0, V0)) && match(Op1, m_FaddRdx(A1, V1)) && 3064 V0->getType() == V1->getType()) { 3065 // Difference of sums is sum of differences: 3066 // add_rdx(A0, V0) - add_rdx(A1, V1) --> add_rdx(A0, V0 - V1) - A1 3067 Value *Sub = Builder.CreateFSubFMF(V0, V1, &I); 3068 Value *Rdx = Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd, 3069 {Sub->getType()}, {A0, Sub}, &I); 3070 return BinaryOperator::CreateFSubFMF(Rdx, A1, &I); 3071 } 3072 3073 if (Instruction *F = factorizeFAddFSub(I, Builder)) 3074 return F; 3075 3076 // TODO: This performs reassociative folds for FP ops. Some fraction of the 3077 // functionality has been subsumed by simple pattern matching here and in 3078 // InstSimplify. We should let a dedicated reassociation pass handle more 3079 // complex pattern matching and remove this from InstCombine. 3080 if (Value *V = FAddCombine(Builder).simplify(&I)) 3081 return replaceInstUsesWith(I, V); 3082 3083 // (X - Y) - Op1 --> X - (Y + Op1) 3084 if (match(Op0, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) { 3085 Value *FAdd = Builder.CreateFAddFMF(Y, Op1, &I); 3086 return BinaryOperator::CreateFSubFMF(X, FAdd, &I); 3087 } 3088 } 3089 3090 return nullptr; 3091 } 3092