//===- InstCombineAddSub.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for add, fadd, sub, and fsub.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <cassert>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

namespace {

/// Class representing the coefficient of a floating-point addend.
/// This class needs to be highly efficient; this is especially true of
/// the constructor. As of this writing, the cost of the default
/// constructor is merely a 4-byte zero store (assuming the compiler is
/// able to perform write-merging).
///
class FAddendCoef {
public:
  // The constructor has to initialize an APFloat, which is unnecessary for
  // most addends, whose coefficient is either 1 or -1. So, the constructor
  // is expensive. In order to avoid the cost of the constructor, we should
  // reuse some instances whenever possible. The pre-created instances
  // FAddCombine::Add[0-5] embody this idea.
  FAddendCoef() = default;
  ~FAddendCoef();

  // If possible, don't define operator+/operator- etc., because these
  // operators inevitably call FAddendCoef's constructor, which is not cheap.
  void operator=(const FAddendCoef &A);
  void operator+=(const FAddendCoef &A);
  void operator*=(const FAddendCoef &S);

  void set(short C) {
    assert(!insaneIntVal(C) && "Insane coefficient");
    IsFp = false; IntVal = C;
  }

  void set(const APFloat& C);

  void negate();

  bool isZero() const { return isInt() ? !IntVal : getFpVal().isZero(); }
  Value *getValue(Type *) const;

  bool isOne() const { return isInt() && IntVal == 1; }
  bool isTwo() const { return isInt() && IntVal == 2; }
  bool isMinusOne() const { return isInt() && IntVal == -1; }
  bool isMinusTwo() const { return isInt() && IntVal == -2; }

private:
  bool insaneIntVal(int V) { return V > 4 || V < -4; }

  APFloat *getFpValPtr() { return reinterpret_cast<APFloat *>(&FpValBuf); }

  const APFloat *getFpValPtr() const {
    return reinterpret_cast<const APFloat *>(&FpValBuf);
  }

  const APFloat &getFpVal() const {
    assert(IsFp && BufHasFpVal && "Incorrect state");
    return *getFpValPtr();
  }

  APFloat &getFpVal() {
    assert(IsFp && BufHasFpVal && "Incorrect state");
    return *getFpValPtr();
  }

  bool isInt() const { return !IsFp; }

  // If the coefficient is represented by an integer, promote it to a
  // floating-point value.
  void convertToFpType(const fltSemantics &Sem);

  // Construct an APFloat from a signed integer.
  // TODO: We should get rid of this function when APFloat can be constructed
  // from a *SIGNED* integer.
  APFloat createAPFloatFromInt(const fltSemantics &Sem, int Val);

  bool IsFp = false;

  // True iff FpValBuf contains an instance of APFloat.
  bool BufHasFpVal = false;

  // The integer coefficient of an individual addend is either 1 or -1,
  // and we try to simplify at most 4 addends drawn from at most two
  // neighboring instructions. So the range of <IntVal> falls in [-4, 4].
  // APInt is overkill for this purpose.
  short IntVal = 0;

  AlignedCharArrayUnion<APFloat> FpValBuf;
};

/// FAddend is used to represent a floating-point addend. An addend is
/// represented as <C, V>, where V is a symbolic value, and C is a
/// constant coefficient. A constant addend is represented as <C, 0>.
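/// For example, the expression "2.5 * X" is the addend <2.5, X>, and the
/// lone constant "3.0" is the addend <3.0, 0>.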
class FAddend {
public:
  FAddend() = default;

  void operator+=(const FAddend &T) {
    assert((Val == T.Val) && "Symbolic-values disagree");
    Coeff += T.Coeff;
  }

  Value *getSymVal() const { return Val; }
  const FAddendCoef &getCoef() const { return Coeff; }

  bool isConstant() const { return Val == nullptr; }
  bool isZero() const { return Coeff.isZero(); }

  void set(short Coefficient, Value *V) {
    Coeff.set(Coefficient);
    Val = V;
  }
  void set(const APFloat &Coefficient, Value *V) {
    Coeff.set(Coefficient);
    Val = V;
  }
  void set(const ConstantFP *Coefficient, Value *V) {
    Coeff.set(Coefficient->getValueAPF());
    Val = V;
  }

  void negate() { Coeff.negate(); }

  /// Drill down the U-D chain one step to find the definition of V, and
  /// try to break the definition into one or two addends.
  static unsigned drillValueDownOneStep(Value* V, FAddend &A0, FAddend &A1);

  /// Similar to FAddend::drillValueDownOneStep() except that the value
  /// being split is the addend itself.
  unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1) const;

private:
  void Scale(const FAddendCoef& ScaleAmt) { Coeff *= ScaleAmt; }

  // This addend has the value of "Coeff * Val".
  Value *Val = nullptr;
  FAddendCoef Coeff;
};

/// FAddCombine is the class for optimizing an unsafe fadd/fsub along
/// with up to two of its neighboring instructions.
///
class FAddCombine {
public:
  FAddCombine(InstCombiner::BuilderTy &B) : Builder(B) {}

  Value *simplify(Instruction *FAdd);

private:
  using AddendVect = SmallVector<const FAddend *, 4>;

  Value *simplifyFAdd(AddendVect& V, unsigned InstrQuota);

  /// Convert given addend to a Value
  Value *createAddendVal(const FAddend &A, bool& NeedNeg);

  /// Return the number of instructions needed to emit the N-ary addition.
  unsigned calcInstrNumber(const AddendVect& Vect);

  Value *createFSub(Value *Opnd0, Value *Opnd1);
  Value *createFAdd(Value *Opnd0, Value *Opnd1);
  Value *createFMul(Value *Opnd0, Value *Opnd1);
  Value *createFNeg(Value *V);
  Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota);
  void createInstPostProc(Instruction *NewInst, bool NoNumber = false);

  // Debugging stuff is clustered here.
#ifndef NDEBUG
  unsigned CreateInstrNum;
  void initCreateInstNum() { CreateInstrNum = 0; }
  void incCreateInstNum() { CreateInstrNum++; }
#else
  void initCreateInstNum() {}
  void incCreateInstNum() {}
#endif

  InstCombiner::BuilderTy &Builder;
  Instruction *Instr = nullptr;
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
//
// Implementation of
//    {FAddendCoef, FAddend, FAddCombine}.
//
//===----------------------------------------------------------------------===//
FAddendCoef::~FAddendCoef() {
  if (BufHasFpVal)
    getFpValPtr()->~APFloat();
}

void FAddendCoef::set(const APFloat& C) {
  APFloat *P = getFpValPtr();

  if (isInt()) {
    // As the buffer is a meaningless byte stream, we cannot call
    // APFloat::operator=().
    new(P) APFloat(C);
  } else
    *P = C;

  IsFp = BufHasFpVal = true;
}

void FAddendCoef::convertToFpType(const fltSemantics &Sem) {
  if (!isInt())
    return;

  APFloat *P = getFpValPtr();
  if (IntVal > 0)
    new(P) APFloat(Sem, IntVal);
  else {
    new(P) APFloat(Sem, 0 - IntVal);
    P->changeSign();
  }
  IsFp = BufHasFpVal = true;
}

APFloat FAddendCoef::createAPFloatFromInt(const fltSemantics &Sem, int Val) {
  if (Val >= 0)
    return APFloat(Sem, Val);

  APFloat T(Sem, 0 - Val);
  T.changeSign();

  return T;
}

void FAddendCoef::operator=(const FAddendCoef &That) {
  if (That.isInt())
    set(That.IntVal);
  else
    set(That.getFpVal());
}

void FAddendCoef::operator+=(const FAddendCoef &That) {
  RoundingMode RndMode = RoundingMode::NearestTiesToEven;
  if (isInt() == That.isInt()) {
    if (isInt())
      IntVal += That.IntVal;
    else
      getFpVal().add(That.getFpVal(), RndMode);
    return;
  }

  if (isInt()) {
    const APFloat &T = That.getFpVal();
    convertToFpType(T.getSemantics());
    getFpVal().add(T, RndMode);
    return;
  }

  APFloat &T = getFpVal();
  T.add(createAPFloatFromInt(T.getSemantics(), That.IntVal), RndMode);
}

void FAddendCoef::operator*=(const FAddendCoef &That) {
  if (That.isOne())
    return;

  if (That.isMinusOne()) {
    negate();
    return;
  }

  if (isInt() && That.isInt()) {
    int Res = IntVal * (int)That.IntVal;
    assert(!insaneIntVal(Res) && "Insane int value");
    IntVal = Res;
    return;
  }

  const fltSemantics &Semantic =
      isInt() ? That.getFpVal().getSemantics() : getFpVal().getSemantics();

  if (isInt())
    convertToFpType(Semantic);
  APFloat &F0 = getFpVal();

  if (That.isInt())
    F0.multiply(createAPFloatFromInt(Semantic, That.IntVal),
                APFloat::rmNearestTiesToEven);
  else
    F0.multiply(That.getFpVal(), APFloat::rmNearestTiesToEven);
}

void FAddendCoef::negate() {
  if (isInt())
    IntVal = 0 - IntVal;
  else
    getFpVal().changeSign();
}

Value *FAddendCoef::getValue(Type *Ty) const {
  return isInt() ?
      ConstantFP::get(Ty, float(IntVal)) :
      ConstantFP::get(Ty->getContext(), getFpVal());
}

// The definition of <Val>          Addends
// =========================================
//  A + B                           <1, A>, <1, B>
//  A - B                           <1, A>, <-1, B>
//  0 - B                           <-1, B>
//  C * A                           <C, A>
//  A + C                           <1, A>, <C, NULL>
//  0 +/- 0                         <0, NULL> (corner case)
//
// Legend: A and B are not constant, C is constant
unsigned FAddend::drillValueDownOneStep(Value *Val, FAddend &Addend0,
                                        FAddend &Addend1) {
  Instruction *I = nullptr;
  if (!Val || !(I = dyn_cast<Instruction>(Val)))
    return 0;

  unsigned Opcode = I->getOpcode();

  if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub) {
    ConstantFP *C0, *C1;
    Value *Opnd0 = I->getOperand(0);
    Value *Opnd1 = I->getOperand(1);
    if ((C0 = dyn_cast<ConstantFP>(Opnd0)) && C0->isZero())
      Opnd0 = nullptr;

    if ((C1 = dyn_cast<ConstantFP>(Opnd1)) && C1->isZero())
      Opnd1 = nullptr;

    if (Opnd0) {
      if (!C0)
        Addend0.set(1, Opnd0);
      else
        Addend0.set(C0, nullptr);
    }

    if (Opnd1) {
      FAddend &Addend = Opnd0 ? Addend1 : Addend0;
      if (!C1)
        Addend.set(1, Opnd1);
      else
        Addend.set(C1, nullptr);
      if (Opcode == Instruction::FSub)
        Addend.negate();
    }

    if (Opnd0 || Opnd1)
      return Opnd0 && Opnd1 ? 2 : 1;

    // Both operands are zero. Weird!
    Addend0.set(APFloat(C0->getValueAPF().getSemantics()), nullptr);
    return 1;
  }

  if (I->getOpcode() == Instruction::FMul) {
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    if (ConstantFP *C = dyn_cast<ConstantFP>(V0)) {
      Addend0.set(C, V1);
      return 1;
    }

    if (ConstantFP *C = dyn_cast<ConstantFP>(V1)) {
      Addend0.set(C, V0);
      return 1;
    }
  }

  return 0;
}

// Try to break *this* addend into two addends. For example, suppose this
// addend is <2.3, V> and V = X + Y; by calling this function, we obtain
// two addends, i.e. <2.3, X> and <2.3, Y>.
unsigned FAddend::drillAddendDownOneStep(FAddend &Addend0,
                                         FAddend &Addend1) const {
  if (isConstant())
    return 0;

  unsigned BreakNum = FAddend::drillValueDownOneStep(Val, Addend0, Addend1);
  if (!BreakNum || Coeff.isOne())
    return BreakNum;

  Addend0.Scale(Coeff);

  if (BreakNum == 2)
    Addend1.Scale(Coeff);

  return BreakNum;
}

Value *FAddCombine::simplify(Instruction *I) {
  assert(I->hasAllowReassoc() && I->hasNoSignedZeros() &&
         "Expected 'reassoc'+'nsz' instruction");

  // Currently we are not able to handle vector types.
  if (I->getType()->isVectorTy())
    return nullptr;

  assert((I->getOpcode() == Instruction::FAdd ||
          I->getOpcode() == Instruction::FSub) && "Expect add/sub");

  // Save the instruction before calling other member functions.
  Instr = I;

  FAddend Opnd0, Opnd1, Opnd0_0, Opnd0_1, Opnd1_0, Opnd1_1;

  unsigned OpndNum = FAddend::drillValueDownOneStep(I, Opnd0, Opnd1);

  // Step 1: Expand the 1st addend into Opnd0_0 and Opnd0_1.
  unsigned Opnd0_ExpNum = 0;
  unsigned Opnd1_ExpNum = 0;

  if (!Opnd0.isConstant())
    Opnd0_ExpNum = Opnd0.drillAddendDownOneStep(Opnd0_0, Opnd0_1);

  // Step 2: Expand the 2nd addend into Opnd1_0 and Opnd1_1.
  if (OpndNum == 2 && !Opnd1.isConstant())
    Opnd1_ExpNum = Opnd1.drillAddendDownOneStep(Opnd1_0, Opnd1_1);

  // Step 3: Try to optimize Opnd0_0 + Opnd0_1 + Opnd1_0 + Opnd1_1
  if (Opnd0_ExpNum && Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0_0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    // Compute the instruction quota. We should save at least one instruction.
    unsigned InstQuota = 0;

    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    InstQuota = ((!isa<Constant>(V0) && V0->hasOneUse()) &&
                 (!isa<Constant>(V1) && V1->hasOneUse())) ? 2 : 1;

    if (Value *R = simplifyFAdd(AllOpnds, InstQuota))
      return R;
  }

  if (OpndNum != 2) {
    // The input instruction is: "I = 0.0 +/- V". If "V" could be split into
    // two addends, say "V = X - Y", the instruction would have been
    // optimized into "I = Y - X" in the previous steps.
    //
    const FAddendCoef &CE = Opnd0.getCoef();
    return CE.isOne() ? Opnd0.getSymVal() : nullptr;
  }

  // Step 4: Try to optimize Opnd0 + Opnd1_0 [+ Opnd1_1]
  if (Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  // Step 5: Try to optimize Opnd1 + Opnd0_0 [+ Opnd0_1]
  if (Opnd0_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd1);
    AllOpnds.push_back(&Opnd0_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  return nullptr;
}

Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
  unsigned AddendNum = Addends.size();
  assert(AddendNum <= 4 && "Too many addends");

  // For saving intermediate results.
  unsigned NextTmpIdx = 0;
  FAddend TmpResult[3];

  // Simplified addends are placed in <SimpVect>.
  AddendVect SimpVect;

  // The outer loop works on one symbolic value at a time. Suppose the input
  // addends are : <a1, x>, <b1, y>, <a2, x>, <c1, z>, <b2, y>, ...
  // The symbolic values will be processed in this order: x, y, z.
  for (unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) {

    const FAddend *ThisAddend = Addends[SymIdx];
    if (!ThisAddend) {
      // This addend was processed before.
      continue;
    }

    Value *Val = ThisAddend->getSymVal();

    // If the resulting expression has a constant addend, it is desirable for
    // that constant to reside at the top of the resulting expression tree.
    // Placing constants close to super-expr(s) will potentially reveal some
    // optimization opportunities in super-expr(s). We intentionally do not
    // implement that logic here; instead, we rely on the later call to
    // SimplifyAssociativeOrCommutative.

    unsigned StartIdx = SimpVect.size();
    SimpVect.push_back(ThisAddend);

    // The inner loop collects addends sharing the same symbolic value; these
    // addends will later be folded into a single addend. Following the above
    // example, if the symbolic value "y" is being processed, the inner loop
    // will collect the two addends "<b1, y>" and "<b2, y>". They will later
    // be folded into "<b1+b2, y>".
    for (unsigned SameSymIdx = SymIdx + 1;
         SameSymIdx < AddendNum; SameSymIdx++) {
      const FAddend *T = Addends[SameSymIdx];
      if (T && T->getSymVal() == Val) {
        // Set null such that the next iteration of the outer loop will not
        // process this addend again.
        Addends[SameSymIdx] = nullptr;
        SimpVect.push_back(T);
      }
    }

    // If multiple addends share the same symbolic value, fold them together.
    if (StartIdx + 1 != SimpVect.size()) {
      FAddend &R = TmpResult[NextTmpIdx ++];
      R = *SimpVect[StartIdx];
      for (unsigned Idx = StartIdx + 1; Idx < SimpVect.size(); Idx++)
        R += *SimpVect[Idx];

      // Pop all addends being folded and push the resulting folded addend.
      SimpVect.resize(StartIdx);
      if (!R.isZero()) {
        SimpVect.push_back(&R);
      }
    }
  }

  assert((NextTmpIdx <= std::size(TmpResult) + 1) && "out-of-bound access");

  Value *Result;
  if (!SimpVect.empty())
    Result = createNaryFAdd(SimpVect, InstrQuota);
  else {
    // The addition is folded to 0.0.
    Result = ConstantFP::get(Instr->getType(), 0.0);
  }

  return Result;
}

Value *FAddCombine::createNaryFAdd(const AddendVect &Opnds,
                                   unsigned InstrQuota) {
  assert(!Opnds.empty() && "Expect at least one addend");

  // Step 1: Check if the # of instructions needed exceeds the quota.

  unsigned InstrNeeded = calcInstrNumber(Opnds);
  if (InstrNeeded > InstrQuota)
    return nullptr;

  initCreateInstNum();

  // Step 2: Emit the N-ary addition.
  // Note that at most three instructions are involved in Fadd-InstCombine:
  // the addition in question and at most two neighboring instructions.
  // The resulting optimized addition should have at least one less
  // instruction than the original addition expression tree. This implies
  // that the resulting N-ary addition has at most two instructions, and we
  // don't need to worry about tree-height when constructing the N-ary
  // addition.

  Value *LastVal = nullptr;
  bool LastValNeedNeg = false;

  // Iterate the addends, creating fadd/fsub using two adjacent addends.
  for (const FAddend *Opnd : Opnds) {
    bool NeedNeg;
    Value *V = createAddendVal(*Opnd, NeedNeg);
    if (!LastVal) {
      LastVal = V;
      LastValNeedNeg = NeedNeg;
      continue;
    }

    if (LastValNeedNeg == NeedNeg) {
      LastVal = createFAdd(LastVal, V);
      continue;
    }

    if (LastValNeedNeg)
      LastVal = createFSub(V, LastVal);
    else
      LastVal = createFSub(LastVal, V);

    LastValNeedNeg = false;
  }

  if (LastValNeedNeg) {
    LastVal = createFNeg(LastVal);
  }

#ifndef NDEBUG
  assert(CreateInstrNum == InstrNeeded &&
         "Inconsistent instruction numbers");
#endif

  return LastVal;
}

Value *FAddCombine::createFSub(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFSub(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFNeg(Value *V) {
  Value *NewV = Builder.CreateFNeg(V);
  if (Instruction *I = dyn_cast<Instruction>(NewV))
    createInstPostProc(I, true); // fnegs don't receive instruction numbers.
  return NewV;
}

Value *FAddCombine::createFAdd(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFAdd(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFMul(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFMul(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

void FAddCombine::createInstPostProc(Instruction *NewInstr, bool NoNumber) {
  NewInstr->setDebugLoc(Instr->getDebugLoc());

  // Keep track of the number of instructions created.
  if (!NoNumber)
    incCreateInstNum();

  // Propagate fast-math flags.
  NewInstr->setFastMathFlags(Instr->getFastMathFlags());
}

// Return the number of instructions needed to emit the N-ary addition.
// NOTE: Keep this function in sync with createAddendVal().
unsigned FAddCombine::calcInstrNumber(const AddendVect &Opnds) {
  unsigned OpndNum = Opnds.size();
  unsigned InstrNeeded = OpndNum - 1;

  // Adjust the number of instructions needed to emit the N-ary add.
  for (const FAddend *Opnd : Opnds) {
    if (Opnd->isConstant())
      continue;

    // The constant check above is really for a few special constant
    // coefficients.
    if (isa<UndefValue>(Opnd->getSymVal()))
      continue;

    const FAddendCoef &CE = Opnd->getCoef();
    // Let the addend be "c * x". If "c == +/-1", the value of the addend
    // is immediately available; otherwise, it needs exactly one instruction
    // to evaluate the value.
    if (!CE.isMinusOne() && !CE.isOne())
      InstrNeeded++;
  }
  return InstrNeeded;
}

// Input Addend        Value            NeedNeg (output)
// ================================================================
//  Constant C         C                false
//  <+/-1, V>          V                coefficient is -1
//  <+/-2, V>          "fadd V, V"      coefficient is -2
//  <C, V>             "fmul V, C"      false
//
// NOTE: Keep this function in sync with FAddCombine::calcInstrNumber.
Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) {
  const FAddendCoef &Coeff = Opnd.getCoef();

  if (Opnd.isConstant()) {
    NeedNeg = false;
    return Coeff.getValue(Instr->getType());
  }

  Value *OpndVal = Opnd.getSymVal();

  if (Coeff.isMinusOne() || Coeff.isOne()) {
    NeedNeg = Coeff.isMinusOne();
    return OpndVal;
  }

  if (Coeff.isTwo() || Coeff.isMinusTwo()) {
    NeedNeg = Coeff.isMinusTwo();
    return createFAdd(OpndVal, OpndVal);
  }

  NeedNeg = false;
  return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
}

// Checks if any operand is negative and we can convert add to sub.
// This function checks for the following negative patterns:
//   ADD(XOR(OR(Z, NOT(C)), C), 1) == NEG(AND(Z, C))
//   ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C))
//   XOR(AND(Z, C), (C + 1)) == NEG(OR(Z, ~C)) if C is even
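//
// For example, taking the second pattern with i8 values Z = 0b10101010 and
// C = 0b00001111:
//   AND(Z, C)         = 0b00001010
//   XOR(AND(Z, C), C) = 0b00000101
//   ADD(..., 1)       = 0b00000110 = 6
// and OR(Z, ~C) = 0b11111010 = -6, so NEG(OR(Z, ~C)) = 6 as well.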
static Value *checkForNegativeOperand(BinaryOperator &I,
                                      InstCombiner::BuilderTy &Builder) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  // This function creates 2 instructions to replace ADD; we need at least one
  // of LHS or RHS to have one use to ensure benefit in the transform.
  if (!LHS->hasOneUse() && !RHS->hasOneUse())
    return nullptr;

  Value *X = nullptr, *Y = nullptr, *Z = nullptr;
  const APInt *C1 = nullptr, *C2 = nullptr;

  // If ONE is on the other side, swap.
  if (match(RHS, m_Add(m_Value(X), m_One())))
    std::swap(LHS, RHS);

  if (match(LHS, m_Add(m_Value(X), m_One()))) {
    // If XOR is on the other side, swap.
    if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
      std::swap(X, RHS);

    if (match(X, m_Xor(m_Value(Y), m_APInt(C1)))) {
      // X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1))
      // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1))
      if (match(Y, m_Or(m_Value(Z), m_APInt(C2))) && (*C2 == ~(*C1))) {
        Value *NewAnd = Builder.CreateAnd(Z, *C1);
        return Builder.CreateSub(RHS, NewAnd, "sub");
      } else if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && (*C1 == *C2)) {
        // X = XOR(Y, C1), Y = AND(Z, C2), C2 == C1 ==> X == NOT(OR(Z, ~C1))
        // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, OR(Z, ~C1))
        Value *NewOr = Builder.CreateOr(Z, ~(*C1));
        return Builder.CreateSub(RHS, NewOr, "sub");
      }
    }
  }

  // Restore LHS and RHS.
  LHS = I.getOperand(0);
  RHS = I.getOperand(1);

  // If XOR is on the other side, swap.
  if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
    std::swap(LHS, RHS);

  // C1 is ODD (so C2 == C1 - 1 is even).
  // LHS = XOR(Y, C1), Y = AND(Z, C2), C1 == (C2 + 1) => LHS == NEG(OR(Z, ~C2))
  // ADD(LHS, RHS) == SUB(RHS, OR(Z, ~C2))
  if (match(LHS, m_Xor(m_Value(Y), m_APInt(C1))))
    if (C1->countTrailingZeros() == 0)
      if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && *C1 == (*C2 + 1)) {
        Value *NewOr = Builder.CreateOr(Z, ~(*C2));
        return Builder.CreateSub(RHS, NewOr, "sub");
      }
  return nullptr;
}

/// Wrapping flags may allow combining constants separated by an extend.
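/// For instance, with i8 X zero-extended to i32:
///   (zext (X +nuw 100)) + (-60)  -->  zext (X +nuw 40)
/// This is valid because -60 is negative and -60 s>= -100, so the constant
/// folds into the narrow add without that add wrapping.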
static Instruction *foldNoWrapAdd(BinaryOperator &Add,
                                  InstCombiner::BuilderTy &Builder) {
  Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
  Type *Ty = Add.getType();
  Constant *Op1C;
  if (!match(Op1, m_Constant(Op1C)))
    return nullptr;

  // Try this match first because it results in an add in the narrow type.
  // (zext (X +nuw C2)) + C1 --> zext (X + (C2 + trunc(C1)))
  Value *X;
  const APInt *C1, *C2;
  if (match(Op1, m_APInt(C1)) &&
      match(Op0, m_OneUse(m_ZExt(m_NUWAdd(m_Value(X), m_APInt(C2))))) &&
      C1->isNegative() && C1->sge(-C2->sext(C1->getBitWidth()))) {
    Constant *NewC =
        ConstantInt::get(X->getType(), *C2 + C1->trunc(C2->getBitWidth()));
    return new ZExtInst(Builder.CreateNUWAdd(X, NewC), Ty);
  }

  // More general combining of constants in the wide type.
  // (sext (X +nsw NarrowC)) + C --> (sext X) + (sext(NarrowC) + C)
  Constant *NarrowC;
  if (match(Op0, m_OneUse(m_SExt(m_NSWAdd(m_Value(X), m_Constant(NarrowC)))))) {
    Constant *WideC = ConstantExpr::getSExt(NarrowC, Ty);
    Constant *NewC = ConstantExpr::getAdd(WideC, Op1C);
    Value *WideX = Builder.CreateSExt(X, Ty);
    return BinaryOperator::CreateAdd(WideX, NewC);
  }
  // (zext (X +nuw NarrowC)) + C --> (zext X) + (zext(NarrowC) + C)
  if (match(Op0, m_OneUse(m_ZExt(m_NUWAdd(m_Value(X), m_Constant(NarrowC)))))) {
    Constant *WideC = ConstantExpr::getZExt(NarrowC, Ty);
    Constant *NewC = ConstantExpr::getAdd(WideC, Op1C);
    Value *WideX = Builder.CreateZExt(X, Ty);
    return BinaryOperator::CreateAdd(WideX, NewC);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::foldAddWithConstant(BinaryOperator &Add) {
  Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
  Type *Ty = Add.getType();
  Constant *Op1C;
  if (!match(Op1, m_ImmConstant(Op1C)))
    return nullptr;

  if (Instruction *NV = foldBinOpIntoSelectOrPhi(Add))
    return NV;

  Value *X;
  Constant *Op00C;

  // add (sub C1, X), C2 --> sub (add C1, C2), X
  if (match(Op0, m_Sub(m_Constant(Op00C), m_Value(X))))
    return BinaryOperator::CreateSub(ConstantExpr::getAdd(Op00C, Op1C), X);

  Value *Y;

  // add (sub X, Y), -1 --> add (not Y), X
  if (match(Op0, m_OneUse(m_Sub(m_Value(X), m_Value(Y)))) &&
      match(Op1, m_AllOnes()))
    return BinaryOperator::CreateAdd(Builder.CreateNot(Y), X);

  // zext(bool) + C -> bool ? C + 1 : C
  if (match(Op0, m_ZExt(m_Value(X))) &&
      X->getType()->getScalarSizeInBits() == 1)
    return SelectInst::Create(X, InstCombiner::AddOne(Op1C), Op1);
  // sext(bool) + C -> bool ? C - 1 : C
  if (match(Op0, m_SExt(m_Value(X))) &&
      X->getType()->getScalarSizeInBits() == 1)
    return SelectInst::Create(X, InstCombiner::SubOne(Op1C), Op1);

  // ~X + C --> (C-1) - X
  if (match(Op0, m_Not(m_Value(X))))
    return BinaryOperator::CreateSub(InstCombiner::SubOne(Op1C), X);

  // (iN X s>> (N - 1)) + 1 --> zext (X > -1)
  const APInt *C;
  unsigned BitWidth = Ty->getScalarSizeInBits();
  if (match(Op0, m_OneUse(m_AShr(m_Value(X),
                                 m_SpecificIntAllowUndef(BitWidth - 1)))) &&
      match(Op1, m_One()))
    return new ZExtInst(Builder.CreateIsNotNeg(X, "isnotneg"), Ty);

  if (!match(Op1, m_APInt(C)))
    return nullptr;

  // (X | Op01C) + Op1C --> X + (Op01C + Op1C) iff the `or` is actually an `add`
  Constant *Op01C;
  if (match(Op0, m_Or(m_Value(X), m_ImmConstant(Op01C))) &&
      haveNoCommonBitsSet(X, Op01C, DL, &AC, &Add, &DT))
    return BinaryOperator::CreateAdd(X, ConstantExpr::getAdd(Op01C, Op1C));

  // (X | C2) + C --> (X | C2) ^ C2 iff (C2 == -C)
  const APInt *C2;
  if (match(Op0, m_Or(m_Value(), m_APInt(C2))) && *C2 == -*C)
    return BinaryOperator::CreateXor(Op0, ConstantInt::get(Add.getType(), *C2));

  if (C->isSignMask()) {
    // If wrapping is not allowed, then the addition must set the sign bit:
    // X + (signmask) --> X | signmask
    if (Add.hasNoSignedWrap() || Add.hasNoUnsignedWrap())
      return BinaryOperator::CreateOr(Op0, Op1);

    // If wrapping is allowed, then the addition flips the sign bit of LHS:
    // X + (signmask) --> X ^ signmask
    return BinaryOperator::CreateXor(Op0, Op1);
  }

  // Is this add the last step in a convoluted sext?
  // add(zext(xor i16 X, -32768), -32768) --> sext X
  if (match(Op0, m_ZExt(m_Xor(m_Value(X), m_APInt(C2)))) &&
      C2->isMinSignedValue() && C2->sext(Ty->getScalarSizeInBits()) == *C)
    return CastInst::Create(Instruction::SExt, X, Ty);

  if (match(Op0, m_Xor(m_Value(X), m_APInt(C2)))) {
    // (X ^ signmask) + C --> (X + (signmask ^ C))
    if (C2->isSignMask())
      return BinaryOperator::CreateAdd(X, ConstantInt::get(Ty, *C2 ^ *C));

    // If X has no high-bits set above an xor mask:
    // add (xor X, LowMaskC), C --> sub (LowMaskC + C), X
    if (C2->isMask()) {
      KnownBits LHSKnown = computeKnownBits(X, 0, &Add);
      if ((*C2 | LHSKnown.Zero).isAllOnes())
        return BinaryOperator::CreateSub(ConstantInt::get(Ty, *C2 + *C), X);
    }

    // Look for a math+logic pattern that corresponds to sext-in-register of a
    // value with cleared high bits. Convert that into a pair of shifts:
    // add (xor X, 0x80), 0xF..F80 --> (X << ShAmtC) >>s ShAmtC
    // add (xor X, 0xF..F80), 0x80 --> (X << ShAmtC) >>s ShAmtC
    if (Op0->hasOneUse() && *C2 == -(*C)) {
      unsigned BitWidth = Ty->getScalarSizeInBits();
      unsigned ShAmt = 0;
      if (C->isPowerOf2())
        ShAmt = BitWidth - C->logBase2() - 1;
      else if (C2->isPowerOf2())
        ShAmt = BitWidth - C2->logBase2() - 1;
      if (ShAmt && MaskedValueIsZero(X, APInt::getHighBitsSet(BitWidth, ShAmt),
                                     0, &Add)) {
        Constant *ShAmtC = ConstantInt::get(Ty, ShAmt);
        Value *NewShl = Builder.CreateShl(X, ShAmtC, "sext");
        return BinaryOperator::CreateAShr(NewShl, ShAmtC);
      }
    }
  }

  if (C->isOne() && Op0->hasOneUse()) {
    // add (sext i1 X), 1 --> zext (not X)
    // TODO: The smallest IR representation is (select X, 0, 1), and that would
    // not require the one-use check. But we need to remove a transform in
    // visitSelect and make sure that IR value tracking for select is equal or
    // better than for these ops.
    if (match(Op0, m_SExt(m_Value(X))) &&
        X->getType()->getScalarSizeInBits() == 1)
      return new ZExtInst(Builder.CreateNot(X), Ty);

    // Shifts and add used to flip and mask off the low bit:
    // add (ashr (shl i32 X, 31), 31), 1 --> and (not X), 1
    const APInt *C3;
    if (match(Op0, m_AShr(m_Shl(m_Value(X), m_APInt(C2)), m_APInt(C3))) &&
        C2 == C3 && *C2 == Ty->getScalarSizeInBits() - 1) {
      Value *NotX = Builder.CreateNot(X);
      return BinaryOperator::CreateAnd(NotX, ConstantInt::get(Ty, 1));
    }
  }

  return nullptr;
}

// Matches multiplication expression Op * C where C is a constant. Returns the
// constant value in C and the other operand in Op. Returns true if such a
// match is found.
static bool MatchMul(Value *E, Value *&Op, APInt &C) {
  const APInt *AI;
  if (match(E, m_Mul(m_Value(Op), m_APInt(AI)))) {
    C = *AI;
    return true;
  }
  if (match(E, m_Shl(m_Value(Op), m_APInt(AI)))) {
    C = APInt(AI->getBitWidth(), 1);
    C <<= *AI;
    return true;
  }
  return false;
}

// Matches remainder expression Op % C where C is a constant. Returns the
// constant value in C and the other operand in Op. Returns the signedness of
// the remainder operation in IsSigned. Returns true if such a match is
// found.
static bool MatchRem(Value *E, Value *&Op, APInt &C, bool &IsSigned) {
  const APInt *AI;
  IsSigned = false;
  if (match(E, m_SRem(m_Value(Op), m_APInt(AI)))) {
    IsSigned = true;
    C = *AI;
    return true;
  }
  if (match(E, m_URem(m_Value(Op), m_APInt(AI)))) {
    C = *AI;
    return true;
  }
  if (match(E, m_And(m_Value(Op), m_APInt(AI))) && (*AI + 1).isPowerOf2()) {
    C = *AI + 1;
    return true;
  }
  return false;
}

// Matches division expression Op / C with the given signedness as indicated
// by IsSigned, where C is a constant. Returns the constant value in C and the
// other operand in Op. Returns true if such a match is found.
static bool MatchDiv(Value *E, Value *&Op, APInt &C, bool IsSigned) {
  const APInt *AI;
  if (IsSigned && match(E, m_SDiv(m_Value(Op), m_APInt(AI)))) {
    C = *AI;
    return true;
  }
  if (!IsSigned) {
    if (match(E, m_UDiv(m_Value(Op), m_APInt(AI)))) {
      C = *AI;
      return true;
    }
    if (match(E, m_LShr(m_Value(Op), m_APInt(AI)))) {
      C = APInt(AI->getBitWidth(), 1);
      C <<= *AI;
      return true;
    }
  }
  return false;
}

// Returns whether C0 * C1 with the given signedness overflows.
static bool MulWillOverflow(APInt &C0, APInt &C1, bool IsSigned) {
  bool overflow;
  if (IsSigned)
    (void)C0.smul_ov(C1, overflow);
  else
    (void)C0.umul_ov(C1, overflow);
  return overflow;
}

// Simplifies X % C0 + ((X / C0) % C1) * C0 to X % (C0 * C1), where (C0 * C1)
// does not overflow.
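// For example, for unsigned X:
//   X % 3 + ((X / 3) % 5) * 3  -->  X % 15
// Writing X = 15*a + b with 0 <= b < 15 gives X % 3 == b % 3 and
// (X / 3) % 5 == b / 3, so the left-hand side reassembles b == X % 15.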
Value *InstCombinerImpl::SimplifyAddWithRemainder(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *X, *MulOpV;
  APInt C0, MulOpC;
  bool IsSigned;
  // Match I = X % C0 + MulOpV * C0
  if (((MatchRem(LHS, X, C0, IsSigned) && MatchMul(RHS, MulOpV, MulOpC)) ||
       (MatchRem(RHS, X, C0, IsSigned) && MatchMul(LHS, MulOpV, MulOpC))) &&
      C0 == MulOpC) {
    Value *RemOpV;
    APInt C1;
    bool Rem2IsSigned;
    // Match MulOpV = RemOpV % C1
    if (MatchRem(MulOpV, RemOpV, C1, Rem2IsSigned) &&
        IsSigned == Rem2IsSigned) {
      Value *DivOpV;
      APInt DivOpC;
      // Match RemOpV = X / C0
      if (MatchDiv(RemOpV, DivOpV, DivOpC, IsSigned) && X == DivOpV &&
          C0 == DivOpC && !MulWillOverflow(C0, C1, IsSigned)) {
        Value *NewDivisor = ConstantInt::get(X->getType(), C0 * C1);
        return IsSigned ? Builder.CreateSRem(X, NewDivisor, "srem")
                        : Builder.CreateURem(X, NewDivisor, "urem");
      }
    }
  }

  return nullptr;
}

/// Fold
///   (1 << NBits) - 1
/// Into:
///   ~(-(1 << NBits))
/// Because a 'not' is better for bit-tracking analysis and other transforms
/// than an 'add'. The new shl is always nsw, and is nuw if the old `add` was.
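/// For example, with NBits == 3 on i8:
///   (1 << 3) - 1  ==  0b00000111  ==  7
///   -(1 << 3)     ==  0b11111000  == -8
///   ~(-(1 << 3))  ==  0b00000111  ==  7
/// so the mask value is unchanged, but it is now produced via a 'not'.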
static Instruction *canonicalizeLowbitMask(BinaryOperator &I,
                                           InstCombiner::BuilderTy &Builder) {
  Value *NBits;
  if (!match(&I, m_Add(m_OneUse(m_Shl(m_One(), m_Value(NBits))), m_AllOnes())))
    return nullptr;

  Constant *MinusOne = Constant::getAllOnesValue(NBits->getType());
  Value *NotMask = Builder.CreateShl(MinusOne, NBits, "notmask");
  // Be wary of constant folding.
  if (auto *BOp = dyn_cast<BinaryOperator>(NotMask)) {
    // Always NSW. But NUW propagates from `add`.
    BOp->setHasNoSignedWrap();
    BOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
  }

  return BinaryOperator::CreateNot(NotMask, I.getName());
}

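// Note why "add (umin X, ~Y), Y" is exactly a saturated add: the umin caps
// X at ~Y == UMAX - Y, so the add cannot wrap, and when the cap is hit the
// sum is (UMAX - Y) + Y == UMAX, which is the saturation value.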
static Instruction *foldToUnsignedSaturatedAdd(BinaryOperator &I) {
  assert(I.getOpcode() == Instruction::Add && "Expecting add instruction");
  Type *Ty = I.getType();
  auto getUAddSat = [&]() {
    return Intrinsic::getDeclaration(I.getModule(), Intrinsic::uadd_sat, Ty);
  };

  // add (umin X, ~Y), Y --> uaddsat X, Y
  Value *X, *Y;
  if (match(&I, m_c_Add(m_c_UMin(m_Value(X), m_Not(m_Value(Y))),
                        m_Deferred(Y))))
    return CallInst::Create(getUAddSat(), { X, Y });

  // add (umin X, ~C), C --> uaddsat X, C
  const APInt *C, *NotC;
  if (match(&I, m_Add(m_UMin(m_Value(X), m_APInt(NotC)), m_APInt(C))) &&
      *C == ~*NotC)
    return CallInst::Create(getUAddSat(), { X, ConstantInt::get(Ty, *C) });

  return nullptr;
}

/// Try to reduce signed division by power-of-2 to an arithmetic shift right.
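/// For example, for i8:
///   (X sdiv 4) + sext ((X & 0x83) u> 0x80)  -->  X ashr 2
/// With X == -5 (0xFB): X sdiv 4 == -1, the masked compare is true, so the
/// sext contributes -1, and -1 + -1 == -2 == X ashr 2.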
static Instruction *foldAddToAshr(BinaryOperator &Add) {
  // Division must be by power-of-2, but not the minimum signed value.
  Value *X;
  const APInt *DivC;
  if (!match(Add.getOperand(0), m_SDiv(m_Value(X), m_Power2(DivC))) ||
      DivC->isNegative())
    return nullptr;

  // Rounding is done by adding -1 if the dividend (X) is negative and has any
  // low bits set. The canonical pattern for that is an "ugt" compare with SMIN:
  // sext (icmp ugt (X & (DivC - 1)), SMIN)
  const APInt *MaskC;
  ICmpInst::Predicate Pred;
  if (!match(Add.getOperand(1),
             m_SExt(m_ICmp(Pred, m_And(m_Specific(X), m_APInt(MaskC)),
                           m_SignMask()))) ||
      Pred != ICmpInst::ICMP_UGT)
    return nullptr;

  APInt SMin = APInt::getSignedMinValue(Add.getType()->getScalarSizeInBits());
  if (*MaskC != (SMin | (*DivC - 1)))
    return nullptr;

  // (X / DivC) + sext ((X & (SMin | (DivC - 1))) >u SMin) --> X >>s log2(DivC)
  return BinaryOperator::CreateAShr(
      X, ConstantInt::get(Add.getType(), DivC->exactLogBase2()));
}

Instruction *InstCombinerImpl::
    canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(
        BinaryOperator &I) {
  assert((I.getOpcode() == Instruction::Add ||
          I.getOpcode() == Instruction::Or ||
          I.getOpcode() == Instruction::Sub) &&
         "Expecting add/or/sub instruction");

  // We have a subtraction/addition between a (potentially truncated) *logical*
  // right-shift of X and a "select".
  Value *X, *Select;
  Instruction *LowBitsToSkip, *Extract;
  if (!match(&I, m_c_BinOp(m_TruncOrSelf(m_CombineAnd(
                               m_LShr(m_Value(X), m_Instruction(LowBitsToSkip)),
                               m_Instruction(Extract))),
                           m_Value(Select))))
    return nullptr;

  // `add`/`or` is commutative; but for `sub`, "select" *must* be on RHS.
  if (I.getOpcode() == Instruction::Sub && I.getOperand(1) != Select)
    return nullptr;

  Type *XTy = X->getType();
  bool HadTrunc = I.getType() != XTy;

  // If there was a truncation of extracted value, then we'll need to produce
  // one extra instruction, so we need to ensure one instruction will go away.
  if (HadTrunc && !match(&I, m_c_BinOp(m_OneUse(m_Value()), m_Value())))
    return nullptr;

  // Extraction should extract high NBits bits, with shift amount calculated as:
  //   low bits to skip = shift bitwidth - high bits to extract
  // The shift amount itself may be extended, and we need to look past zero-ext
  // when matching NBits; that will matter for matching later.
  Constant *C;
  Value *NBits;
  if (!match(
          LowBitsToSkip,
          m_ZExtOrSelf(m_Sub(m_Constant(C), m_ZExtOrSelf(m_Value(NBits))))) ||
      !match(C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
                                   APInt(C->getType()->getScalarSizeInBits(),
                                         X->getType()->getScalarSizeInBits()))))
    return nullptr;

  // Sign-extending value can be zero-extended if we `sub`tract it,
  // or sign-extended otherwise.
  auto SkipExtInMagic = [&I](Value *&V) {
    if (I.getOpcode() == Instruction::Sub)
      match(V, m_ZExtOrSelf(m_Value(V)));
    else
      match(V, m_SExtOrSelf(m_Value(V)));
  };

  // Now, finally validate the sign-extending magic.
  // `select` itself may be appropriately extended, look past that.
  SkipExtInMagic(Select);

  ICmpInst::Predicate Pred;
  const APInt *Thr;
  Value *SignExtendingValue, *Zero;
  bool ShouldSignext;
  // It must be a select between two values we will later establish to be a
  // sign-extending value and a zero constant. The condition guarding the
  // sign-extension must be based on a sign bit of the same X we had in `lshr`.
  if (!match(Select, m_Select(m_ICmp(Pred, m_Specific(X), m_APInt(Thr)),
                              m_Value(SignExtendingValue), m_Value(Zero))) ||
      !isSignBitCheck(Pred, *Thr, ShouldSignext))
    return nullptr;

  // icmp-select pair is commutative.
  if (!ShouldSignext)
    std::swap(SignExtendingValue, Zero);

  // If we should not perform sign-extension then we must add/or/subtract zero.
  if (!match(Zero, m_Zero()))
    return nullptr;
  // Otherwise, it should be some constant, left-shifted by the same NBits we
  // had in `lshr`. Said left-shift can also be appropriately extended.
  // Again, we must look past zero-ext when looking for NBits.
  SkipExtInMagic(SignExtendingValue);
  Constant *SignExtendingValueBaseConstant;
  if (!match(SignExtendingValue,
             m_Shl(m_Constant(SignExtendingValueBaseConstant),
                   m_ZExtOrSelf(m_Specific(NBits)))))
    return nullptr;
  // If we `sub`, then the constant should be one, else it should be all-ones.
  if (I.getOpcode() == Instruction::Sub
          ? !match(SignExtendingValueBaseConstant, m_One())
          : !match(SignExtendingValueBaseConstant, m_AllOnes()))
    return nullptr;

  auto *NewAShr = BinaryOperator::CreateAShr(X, LowBitsToSkip,
                                             Extract->getName() + ".sext");
  NewAShr->copyIRFlags(Extract); // Preserve `exact`-ness.
  if (!HadTrunc)
    return NewAShr;

  Builder.Insert(NewAShr);
  return TruncInst::CreateTruncOrBitCast(NewAShr, I.getType());
}

/// This is a specialization of a more general transform from
/// foldUsingDistributiveLaws. If that code can be made to work optimally
/// for multi-use cases or propagating nsw/nuw, then we would not need this.
static Instruction *factorizeMathWithShlOps(BinaryOperator &I,
                                            InstCombiner::BuilderTy &Builder) {
  // TODO: Also handle mul by doubling the shift amount?
  assert((I.getOpcode() == Instruction::Add ||
          I.getOpcode() == Instruction::Sub) &&
         "Expected add/sub");
  auto *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
  auto *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
  if (!Op0 || !Op1 || !(Op0->hasOneUse() || Op1->hasOneUse()))
    return nullptr;

  Value *X, *Y, *ShAmt;
  if (!match(Op0, m_Shl(m_Value(X), m_Value(ShAmt))) ||
      !match(Op1, m_Shl(m_Value(Y), m_Specific(ShAmt))))
    return nullptr;

  // No-wrap propagates only when all ops have no-wrap.
  bool HasNSW = I.hasNoSignedWrap() && Op0->hasNoSignedWrap() &&
                Op1->hasNoSignedWrap();
  bool HasNUW = I.hasNoUnsignedWrap() && Op0->hasNoUnsignedWrap() &&
                Op1->hasNoUnsignedWrap();

  // add/sub (X << ShAmt), (Y << ShAmt) --> (add/sub X, Y) << ShAmt
  Value *NewMath = Builder.CreateBinOp(I.getOpcode(), X, Y);
  if (auto *NewI = dyn_cast<BinaryOperator>(NewMath)) {
    NewI->setHasNoSignedWrap(HasNSW);
    NewI->setHasNoUnsignedWrap(HasNUW);
  }
  auto *NewShl = BinaryOperator::CreateShl(NewMath, ShAmt);
  NewShl->setHasNoSignedWrap(HasNSW);
  NewShl->setHasNoUnsignedWrap(HasNUW);
  return NewShl;
}

/// Reduce a sequence of masked half-width multiplies to a single multiply.
/// (((XLow * YHigh) + (YLow * XHigh)) << HalfBits) + (XLow * YLow) --> X * Y
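/// This is the schoolbook decomposition: writing X == XHigh * 2^HalfBits +
/// XLow (and Y likewise), X * Y modulo 2^BitWidth equals
///   (XLow * YLow) + ((XLow * YHigh + YLow * XHigh) << HalfBits)
/// because the (XHigh * YHigh) term is shifted out of the result entirely.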
static Instruction *foldBoxMultiply(BinaryOperator &I) {
  unsigned BitWidth = I.getType()->getScalarSizeInBits();
  // Skip the odd bitwidth types.
  if ((BitWidth & 0x1))
    return nullptr;

  unsigned HalfBits = BitWidth >> 1;
  APInt HalfMask = APInt::getMaxValue(HalfBits);

  // ResLo = (CrossSum << HalfBits) + (YLo * XLo)
  Value *XLo, *YLo;
  Value *CrossSum;
  if (!match(&I, m_c_Add(m_Shl(m_Value(CrossSum), m_SpecificInt(HalfBits)),
                         m_Mul(m_Value(YLo), m_Value(XLo)))))
    return nullptr;

  // XLo = X & HalfMask
  // YLo = Y & HalfMask
  // TODO: Refactor with SimplifyDemandedBits or KnownBits known leading zeros
  // to enhance robustness.
  Value *X, *Y;
  if (!match(XLo, m_And(m_Value(X), m_SpecificInt(HalfMask))) ||
      !match(YLo, m_And(m_Value(Y), m_SpecificInt(HalfMask))))
    return nullptr;

  // CrossSum = (X' * (Y >> HalfBits)) + (Y' * (X >> HalfBits))
  // X' can be either X or XLo in the pattern (and the same for Y').
  if (match(CrossSum,
            m_c_Add(m_c_Mul(m_LShr(m_Specific(Y), m_SpecificInt(HalfBits)),
                            m_CombineOr(m_Specific(X), m_Specific(XLo))),
                    m_c_Mul(m_LShr(m_Specific(X), m_SpecificInt(HalfBits)),
                            m_CombineOr(m_Specific(Y), m_Specific(YLo))))))
    return BinaryOperator::CreateMul(X, Y);

  return nullptr;
}

Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
  if (Value *V = simplifyAddInst(I.getOperand(0), I.getOperand(1),
                                 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  // (A*B)+(A*C) -> A*(B+C) etc
  if (Value *V = foldUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  if (Instruction *R = foldBoxMultiply(I))
    return R;

  if (Instruction *R = factorizeMathWithShlOps(I, Builder))
    return R;

  if (Instruction *X = foldAddWithConstant(I))
    return X;

  if (Instruction *X = foldNoWrapAdd(I, Builder))
    return X;

  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Type *Ty = I.getType();
  if (Ty->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateXor(LHS, RHS);

  // X + X --> X << 1
  if (LHS == RHS) {
    auto *Shl = BinaryOperator::CreateShl(LHS, ConstantInt::get(Ty, 1));
    Shl->setHasNoSignedWrap(I.hasNoSignedWrap());
    Shl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
    return Shl;
  }

  Value *A, *B;
  if (match(LHS, m_Neg(m_Value(A)))) {
    // -A + -B --> -(A + B)
    if (match(RHS, m_Neg(m_Value(B))))
      return BinaryOperator::CreateNeg(Builder.CreateAdd(A, B));

    // -A + B --> B - A
    return BinaryOperator::CreateSub(RHS, A);
  }

  // A + -B --> A - B
  if (match(RHS, m_Neg(m_Value(B))))
    return BinaryOperator::CreateSub(LHS, B);

  if (Value *V = checkForNegativeOperand(I, Builder))
    return replaceInstUsesWith(I, V);

  // (A + 1) + ~B --> A - B
  // ~B + (A + 1) --> A - B
  // (~B + A) + 1 --> A - B
  // (A + ~B) + 1 --> A - B
  if (match(&I, m_c_BinOp(m_Add(m_Value(A), m_One()), m_Not(m_Value(B)))) ||
      match(&I, m_BinOp(m_c_Add(m_Not(m_Value(B)), m_Value(A)), m_One())))
    return BinaryOperator::CreateSub(A, B);

  // (A + RHS) + RHS --> A + (RHS << 1)
  if (match(LHS, m_OneUse(m_c_Add(m_Value(A), m_Specific(RHS)))))
    return BinaryOperator::CreateAdd(A, Builder.CreateShl(RHS, 1, "reass.add"));

  // LHS + (A + LHS) --> A + (LHS << 1)
  if (match(RHS, m_OneUse(m_c_Add(m_Value(A), m_Specific(LHS)))))
    return BinaryOperator::CreateAdd(A, Builder.CreateShl(LHS, 1, "reass.add"));

  {
    // (A + C1) + (C2 - B) --> (A - B) + (C1 + C2)
    Constant *C1, *C2;
    if (match(&I, m_c_Add(m_Add(m_Value(A), m_ImmConstant(C1)),
                          m_Sub(m_ImmConstant(C2), m_Value(B)))) &&
        (LHS->hasOneUse() || RHS->hasOneUse())) {
      Value *Sub = Builder.CreateSub(A, B);
      return BinaryOperator::CreateAdd(Sub, ConstantExpr::getAdd(C1, C2));
    }
  }

  // X % C0 + (( X / C0 ) % C1) * C0 => X % (C0 * C1)
  if (Value *V = SimplifyAddWithRemainder(I)) return replaceInstUsesWith(I, V);

  // ((X s/ C1) << C2) + X => X s% -C1 where -C1 is 1 << C2
  const APInt *C1, *C2;
  if (match(LHS, m_Shl(m_SDiv(m_Specific(RHS), m_APInt(C1)), m_APInt(C2)))) {
    APInt one(C2->getBitWidth(), 1);
    APInt minusC1 = -(*C1);
    if (minusC1 == (one << *C2)) {
      Constant *NewRHS = ConstantInt::get(RHS->getType(), minusC1);
      return BinaryOperator::CreateSRem(RHS, NewRHS);
    }
  }

  // (A & 2^C1) + A => A & (2^C1 - 1) iff bit C1 in A is a sign bit
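  // E.g., for i8 with C1 == 0x40, the guard requires bit 7 to match bit 6,
  // so A either has both bits set (the add carries them out of the result)
  // or both clear (the add adds 0); either way the high bits vanish and the
  // result equals A & 0x3F.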
  if (match(&I, m_c_Add(m_And(m_Value(A), m_APInt(C1)), m_Deferred(A))) &&
      C1->isPowerOf2() && (ComputeNumSignBits(A) > C1->countLeadingZeros())) {
    Constant *NewMask = ConstantInt::get(RHS->getType(), *C1 - 1);
    return BinaryOperator::CreateAnd(A, NewMask);
  }

  // ZExt (B - A) + ZExt(A) --> ZExt(B)
  if ((match(RHS, m_ZExt(m_Value(A))) &&
       match(LHS, m_ZExt(m_NUWSub(m_Value(B), m_Specific(A))))) ||
      (match(LHS, m_ZExt(m_Value(A))) &&
       match(RHS, m_ZExt(m_NUWSub(m_Value(B), m_Specific(A))))))
    return new ZExtInst(B, LHS->getType());

  // A+B --> A|B iff A and B have no bits set in common.
  if (haveNoCommonBitsSet(LHS, RHS, DL, &AC, &I, &DT))
    return BinaryOperator::CreateOr(LHS, RHS);

  if (Instruction *Ext = narrowMathIfNoOverflow(I))
    return Ext;

  // (add (xor A, B) (and A, B)) --> (or A, B)
  // (add (and A, B) (xor A, B)) --> (or A, B)
  if (match(&I, m_c_BinOp(m_Xor(m_Value(A), m_Value(B)),
                          m_c_And(m_Deferred(A), m_Deferred(B)))))
    return BinaryOperator::CreateOr(A, B);

  // (add (or A, B) (and A, B)) --> (add A, B)
  // (add (and A, B) (or A, B)) --> (add A, B)
  if (match(&I, m_c_BinOp(m_Or(m_Value(A), m_Value(B)),
                          m_c_And(m_Deferred(A), m_Deferred(B))))) {
    // Replacing operands in-place to preserve nuw/nsw flags.
    replaceOperand(I, 0, A);
    replaceOperand(I, 1, B);
    return &I;
  }

  // (add A (or A, -A)) --> (and (add A, -1) A)
  // (add A (or -A, A)) --> (and (add A, -1) A)
  // (add (or A, -A) A) --> (and (add A, -1) A)
  // (add (or -A, A) A) --> (and (add A, -1) A)
  if (match(&I, m_c_BinOp(m_Value(A), m_OneUse(m_c_Or(m_Neg(m_Deferred(A)),
                                                      m_Deferred(A)))))) {
    Value *Add =
        Builder.CreateAdd(A, Constant::getAllOnesValue(A->getType()), "",
                          I.hasNoUnsignedWrap(), I.hasNoSignedWrap());
    return BinaryOperator::CreateAnd(Add, A);
  }

  // Canonicalize ((A & -A) - 1) --> ((A - 1) & ~A)
  // Forms all commutable operations, and simplifies ctpop -> cttz folds.
  if (match(&I,
            m_Add(m_OneUse(m_c_And(m_Value(A), m_OneUse(m_Neg(m_Deferred(A))))),
                  m_AllOnes()))) {
    Constant *AllOnes = ConstantInt::getAllOnesValue(RHS->getType());
    Value *Dec = Builder.CreateAdd(A, AllOnes);
    Value *Not = Builder.CreateXor(A, AllOnes);
    return BinaryOperator::CreateAnd(Dec, Not);
  }

  // Disguised reassociation/factorization:
  //   ~(A * C1) + A
  //   ((A * -C1) - 1) + A
  //   ((A * -C1) + A) - 1
  //   (A * (1 - C1)) - 1
  if (match(&I,
            m_c_Add(m_OneUse(m_Not(m_OneUse(m_Mul(m_Value(A), m_APInt(C1))))),
                    m_Deferred(A)))) {
    Type *Ty = I.getType();
    Constant *NewMulC = ConstantInt::get(Ty, 1 - *C1);
    Value *NewMul = Builder.CreateMul(A, NewMulC);
    return BinaryOperator::CreateAdd(NewMul, ConstantInt::getAllOnesValue(Ty));
  }

  // (A * -2**C) + B --> B - (A << C)
  const APInt *NegPow2C;
  if (match(&I, m_c_Add(m_OneUse(m_Mul(m_Value(A), m_NegatedPower2(NegPow2C))),
                        m_Value(B)))) {
    Constant *ShiftAmtC = ConstantInt::get(Ty, NegPow2C->countTrailingZeros());
    Value *Shl = Builder.CreateShl(A, ShiftAmtC);
    return BinaryOperator::CreateSub(B, Shl);
  }

  // Canonicalize signum variant that ends in add:
  // (A s>> (BW - 1)) + (zext (A s> 0)) --> (A s>> (BW - 1)) | (zext (A != 0))
  ICmpInst::Predicate Pred;
  uint64_t BitWidth = Ty->getScalarSizeInBits();
  if (match(LHS, m_AShr(m_Value(A), m_SpecificIntAllowUndef(BitWidth - 1))) &&
      match(RHS, m_OneUse(m_ZExt(
                     m_OneUse(m_ICmp(Pred, m_Specific(A), m_ZeroInt()))))) &&
      Pred == CmpInst::ICMP_SGT) {
    Value *NotZero = Builder.CreateIsNotNull(A, "isnotnull");
    Value *Zext = Builder.CreateZExt(NotZero, Ty, "isnotnull.zext");
    return BinaryOperator::CreateOr(LHS, Zext);
  }

  if (Instruction *Ashr = foldAddToAshr(I))
    return Ashr;

  // TODO(jingyue): Consider willNotOverflowSignedAdd and
  // willNotOverflowUnsignedAdd to reduce the number of invocations of
  // computeKnownBits.
  bool Changed = false;
  if (!I.hasNoSignedWrap() && willNotOverflowSignedAdd(LHS, RHS, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }
  if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedAdd(LHS, RHS, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  if (Instruction *V = canonicalizeLowbitMask(I, Builder))
    return V;

  if (Instruction *V =
          canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
    return V;

  if (Instruction *SatAdd = foldToUnsignedSaturatedAdd(I))
    return SatAdd;

  // usub.sat(A, B) + B => umax(A, B)
  if (match(&I, m_c_BinOp(
          m_OneUse(m_Intrinsic<Intrinsic::usub_sat>(m_Value(A), m_Value(B))),
          m_Deferred(B)))) {
    return replaceInstUsesWith(I,
        Builder.CreateIntrinsic(Intrinsic::umax, {I.getType()}, {A, B}));
  }

  // ctpop(A) + ctpop(B) => ctpop(A | B) if A and B have no bits set in common.
  if (match(LHS, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(A)))) &&
      match(RHS, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(B)))) &&
      haveNoCommonBitsSet(A, B, DL, &AC, &I, &DT))
    return replaceInstUsesWith(
        I, Builder.CreateIntrinsic(Intrinsic::ctpop, {I.getType()},
                                   {Builder.CreateOr(A, B)}));

  return Changed ? &I : nullptr;
}
1580
/// Eliminate an op from a linear interpolation (lerp) pattern.
static Instruction *factorizeLerp(BinaryOperator &I,
                                  InstCombiner::BuilderTy &Builder) {
  Value *X, *Y, *Z;
  if (!match(&I, m_c_FAdd(m_OneUse(m_c_FMul(m_Value(Y),
                                            m_OneUse(m_FSub(m_FPOne(),
                                                            m_Value(Z))))),
                          m_OneUse(m_c_FMul(m_Value(X), m_Deferred(Z))))))
    return nullptr;

  // (Y * (1.0 - Z)) + (X * Z) --> Y + Z * (X - Y) [8 commuted variants]
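  // With reassociation, Y * (1.0 - Z) + X * Z = Y - Y*Z + X*Z = Y + Z*(X - Y),
  // which trades two multiplies for one multiply and one subtract.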
  Value *XY = Builder.CreateFSubFMF(X, Y, &I);
  Value *MulZ = Builder.CreateFMulFMF(Z, XY, &I);
  return BinaryOperator::CreateFAddFMF(Y, MulZ, &I);
}

/// Factor a common operand out of fadd/fsub of fmul/fdiv.
static Instruction *factorizeFAddFSub(BinaryOperator &I,
                                      InstCombiner::BuilderTy &Builder) {
  assert((I.getOpcode() == Instruction::FAdd ||
          I.getOpcode() == Instruction::FSub) && "Expecting fadd/fsub");
  assert(I.hasAllowReassoc() && I.hasNoSignedZeros() &&
         "FP factorization requires FMF");

  if (Instruction *Lerp = factorizeLerp(I, Builder))
    return Lerp;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (!Op0->hasOneUse() || !Op1->hasOneUse())
    return nullptr;

  Value *X, *Y, *Z;
  bool IsFMul;
  if ((match(Op0, m_FMul(m_Value(X), m_Value(Z))) &&
       match(Op1, m_c_FMul(m_Value(Y), m_Specific(Z)))) ||
      (match(Op0, m_FMul(m_Value(Z), m_Value(X))) &&
       match(Op1, m_c_FMul(m_Value(Y), m_Specific(Z)))))
    IsFMul = true;
  else if (match(Op0, m_FDiv(m_Value(X), m_Value(Z))) &&
           match(Op1, m_FDiv(m_Value(Y), m_Specific(Z))))
    IsFMul = false;
  else
    return nullptr;

  // (X * Z) + (Y * Z) --> (X + Y) * Z
  // (X * Z) - (Y * Z) --> (X - Y) * Z
  // (X / Z) + (Y / Z) --> (X + Y) / Z
  // (X / Z) - (Y / Z) --> (X - Y) / Z
  bool IsFAdd = I.getOpcode() == Instruction::FAdd;
  Value *XY = IsFAdd ? Builder.CreateFAddFMF(X, Y, &I)
                     : Builder.CreateFSubFMF(X, Y, &I);

  // Bail out if we just created a denormal constant.
  // TODO: This is copied from a previous implementation. Is it necessary?
  const APFloat *C;
  if (match(XY, m_APFloat(C)) && !C->isNormal())
    return nullptr;

  return IsFMul ? BinaryOperator::CreateFMulFMF(XY, Z, &I)
                : BinaryOperator::CreateFDivFMF(XY, Z, &I);
}

Instruction *InstCombinerImpl::visitFAdd(BinaryOperator &I) {
  if (Value *V = simplifyFAddInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  if (Instruction *FoldedFAdd = foldBinOpIntoSelectOrPhi(I))
    return FoldedFAdd;

  // (-X) + Y --> Y - X
  Value *X, *Y;
  if (match(&I, m_c_FAdd(m_FNeg(m_Value(X)), m_Value(Y))))
    return BinaryOperator::CreateFSubFMF(Y, X, &I);

  // Similar to above, but look through fmul/fdiv for the negated term.
  // (-X * Y) + Z --> Z - (X * Y) [4 commuted variants]
  Value *Z;
  if (match(&I, m_c_FAdd(m_OneUse(m_c_FMul(m_FNeg(m_Value(X)), m_Value(Y))),
                         m_Value(Z)))) {
    Value *XY = Builder.CreateFMulFMF(X, Y, &I);
    return BinaryOperator::CreateFSubFMF(Z, XY, &I);
  }
  // (-X / Y) + Z --> Z - (X / Y) [2 commuted variants]
  // (X / -Y) + Z --> Z - (X / Y) [2 commuted variants]
  if (match(&I, m_c_FAdd(m_OneUse(m_FDiv(m_FNeg(m_Value(X)), m_Value(Y))),
                         m_Value(Z))) ||
      match(&I, m_c_FAdd(m_OneUse(m_FDiv(m_Value(X), m_FNeg(m_Value(Y)))),
                         m_Value(Z)))) {
    Value *XY = Builder.CreateFDivFMF(X, Y, &I);
    return BinaryOperator::CreateFSubFMF(Z, XY, &I);
  }

  // Check for (fadd double (sitofp x), y), and see if we can merge this into
  // an integer add followed by a promotion.
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) {
    Value *LHSIntVal = LHSConv->getOperand(0);
    Type *FPType = LHSConv->getType();

    // TODO: This check is overly conservative. In many cases known bits
    // analysis can tell us that the result of the addition has fewer
    // significant bits than the integer type can hold.
    auto IsValidPromotion = [](Type *FTy, Type *ITy) {
      Type *FScalarTy = FTy->getScalarType();
      Type *IScalarTy = ITy->getScalarType();

      // Do we have enough bits in the significand to represent the result of
      // the integer addition?
      unsigned MaxRepresentableBits =
          APFloat::semanticsPrecision(FScalarTy->getFltSemantics());
      return IScalarTy->getIntegerBitWidth() <= MaxRepresentableBits;
    };
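    // For example, an IEEE double has a 53-bit significand, so any i32 add is
    // exactly representable, while an i64 add generally is not.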

    // (fadd double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
    // ... if the constant fits in the integer value. This is useful for things
    // like (double)(x & 1234) + 4.0 -> (double)((x & 1234) + 4), which no
    // longer requires a constant pool load and generally allows the add to be
    // combined further by instcombine.
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS))
      if (IsValidPromotion(FPType, LHSIntVal->getType())) {
        Constant *CI =
            ConstantExpr::getFPToSI(CFP, LHSIntVal->getType());
        if (LHSConv->hasOneUse() &&
            ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
            willNotOverflowSignedAdd(LHSIntVal, CI, I)) {
          // Insert the new integer add.
          Value *NewAdd = Builder.CreateNSWAdd(LHSIntVal, CI, "addconv");
          return new SIToFPInst(NewAdd, I.getType());
        }
      }

    // (fadd double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
    if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) {
      Value *RHSIntVal = RHSConv->getOperand(0);
      // It's enough to check LHS types only because we require int types to
      // be the same for this transform.
      if (IsValidPromotion(FPType, LHSIntVal->getType())) {
        // Only do this if x/y have the same type, if at least one of them has a
        // single use (so we don't increase the number of int->fp conversions),
        // and if the integer add will not overflow.
        if (LHSIntVal->getType() == RHSIntVal->getType() &&
            (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
            willNotOverflowSignedAdd(LHSIntVal, RHSIntVal, I)) {
          // Insert the new integer add.
          Value *NewAdd = Builder.CreateNSWAdd(LHSIntVal, RHSIntVal, "addconv");
          return new SIToFPInst(NewAdd, I.getType());
        }
      }
    }
  }

  // Handle special cases for FAdd with selects feeding the operation
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, LHS, RHS))
    return replaceInstUsesWith(I, V);

  if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
    if (Instruction *F = factorizeFAddFSub(I, Builder))
      return F;

    // Try to fold fadd into start value of reduction intrinsic.
    if (match(&I, m_c_FAdd(m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(
                               m_AnyZeroFP(), m_Value(X))),
                           m_Value(Y)))) {
      // fadd (rdx 0.0, X), Y --> rdx Y, X
      return replaceInstUsesWith(
          I, Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
                                     {X->getType()}, {Y, X}, &I));
    }
    const APFloat *StartC, *C;
    if (match(LHS, m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(
                       m_APFloat(StartC), m_Value(X)))) &&
        match(RHS, m_APFloat(C))) {
      // fadd (rdx StartC, X), C --> rdx (C + StartC), X
      Constant *NewStartC = ConstantFP::get(I.getType(), *C + *StartC);
      return replaceInstUsesWith(
          I, Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
                                     {X->getType()}, {NewStartC, X}, &I));
    }

    // (X * MulC) + X --> X * (MulC + 1.0)
    Constant *MulC;
    if (match(&I, m_c_FAdd(m_FMul(m_Value(X), m_ImmConstant(MulC)),
                           m_Deferred(X)))) {
      if (Constant *NewMulC = ConstantFoldBinaryOpOperands(
              Instruction::FAdd, MulC, ConstantFP::get(I.getType(), 1.0), DL))
        return BinaryOperator::CreateFMulFMF(X, NewMulC, &I);
    }

    // (-X - Y) + (X + Z) --> Z - Y
    if (match(&I, m_c_FAdd(m_FSub(m_FNeg(m_Value(X)), m_Value(Y)),
                           m_c_FAdd(m_Deferred(X), m_Value(Z)))))
      return BinaryOperator::CreateFSubFMF(Z, Y, &I);

    if (Value *V = FAddCombine(Builder).simplify(&I))
      return replaceInstUsesWith(I, V);
  }

  return nullptr;
}

/// Optimize pointer differences in the same array into a size. Consider:
/// &A[10] - &A[0]: we should compile this to "10". LHS/RHS are the pointer
/// operands to the ptrtoint instructions for the LHS/RHS of the subtract.
Value *InstCombinerImpl::OptimizePointerDifference(Value *LHS, Value *RHS,
                                                   Type *Ty, bool IsNUW) {
  // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
  // this.
  bool Swapped = false;
  GEPOperator *GEP1 = nullptr, *GEP2 = nullptr;
  if (!isa<GEPOperator>(LHS) && isa<GEPOperator>(RHS)) {
    std::swap(LHS, RHS);
    Swapped = true;
  }

  // Require at least one GEP with a common base pointer on both sides.
  if (auto *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
    // (gep X, ...) - X
    if (LHSGEP->getOperand(0)->stripPointerCasts() ==
        RHS->stripPointerCasts()) {
      GEP1 = LHSGEP;
    } else if (auto *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
      // (gep X, ...) - (gep X, ...)
      if (LHSGEP->getOperand(0)->stripPointerCasts() ==
          RHSGEP->getOperand(0)->stripPointerCasts()) {
        GEP1 = LHSGEP;
        GEP2 = RHSGEP;
      }
    }
  }

  if (!GEP1)
    return nullptr;

  if (GEP2) {
    // (gep X, ...) - (gep X, ...)
    //
    // Avoid duplicating the arithmetic if more than one non-constant index is
    // involved between the two GEPs and either GEP with a non-constant index
    // has multiple users. With zero non-constant indices, the result is a
    // constant and there is no duplication. With one non-constant index, the
    // result is an add or sub with a constant, which is no larger than the
    // original code, so there is no duplicated arithmetic even if either GEP
    // has multiple users. With more than one non-constant index combined,
    // there is no duplication as long as each GEP with a non-constant index
    // has only one user.
    unsigned NumNonConstantIndices1 = GEP1->countNonConstantIndices();
    unsigned NumNonConstantIndices2 = GEP2->countNonConstantIndices();
    if (NumNonConstantIndices1 + NumNonConstantIndices2 > 1 &&
        ((NumNonConstantIndices1 > 0 && !GEP1->hasOneUse()) ||
         (NumNonConstantIndices2 > 0 && !GEP2->hasOneUse()))) {
      return nullptr;
    }
  }

  // Emit the offset of the GEP as an intptr_t.
  Value *Result = EmitGEPOffset(GEP1);

  // If this is a single inbounds GEP and the original sub was nuw,
  // then the final multiplication is also nuw.
  if (auto *I = dyn_cast<Instruction>(Result))
    if (IsNUW && !GEP2 && !Swapped && GEP1->isInBounds() &&
        I->getOpcode() == Instruction::Mul)
      I->setHasNoUnsignedWrap();

  // If we have a 2nd GEP of the same base pointer, subtract the offsets.
  // If both GEPs are inbounds, then the subtract does not have signed overflow.
  if (GEP2) {
    Value *Offset = EmitGEPOffset(GEP2);
    Result = Builder.CreateSub(Result, Offset, "gepdiff", /* NUW */ false,
                               GEP1->isInBounds() && GEP2->isInBounds());
  }

  // If we have p - gep(p, ...) then we have to negate the result.
  if (Swapped)
    Result = Builder.CreateNeg(Result, "diff.neg");

  return Builder.CreateIntCast(Result, Ty, true);
}

static Instruction *foldSubOfMinMax(BinaryOperator &I,
                                    InstCombiner::BuilderTy &Builder) {
  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  auto *MinMax = dyn_cast<MinMaxIntrinsic>(Op1);
  if (!MinMax)
    return nullptr;

  // sub(add(X,Y), s/umin(X,Y)) --> s/umax(X,Y)
  // sub(add(X,Y), s/umax(X,Y)) --> s/umin(X,Y)
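  // This relies on the identity min(X,Y) + max(X,Y) == X + Y: subtracting one
  // of the pair from the sum leaves the other.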
  Value *X = MinMax->getLHS();
  Value *Y = MinMax->getRHS();
  if (match(Op0, m_c_Add(m_Specific(X), m_Specific(Y))) &&
      (Op0->hasOneUse() || Op1->hasOneUse())) {
    Intrinsic::ID InvID = getInverseMinMaxIntrinsic(MinMax->getIntrinsicID());
    Function *F = Intrinsic::getDeclaration(I.getModule(), InvID, Ty);
    return CallInst::Create(F, {X, Y});
  }

  // sub(add(X,Y),umin(Y,Z)) --> add(X,usub.sat(Y,Z))
  // sub(add(X,Z),umin(Y,Z)) --> add(X,usub.sat(Z,Y))
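  // Both cases of the first form check out: if Y u<= Z, the result is
  // X + Y - Y = X and usub.sat(Y,Z) = 0; otherwise it is X + Y - Z and
  // usub.sat(Y,Z) = Y - Z.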
  Value *Z;
  if (match(Op1, m_OneUse(m_UMin(m_Value(Y), m_Value(Z))))) {
    if (match(Op0, m_OneUse(m_c_Add(m_Specific(Y), m_Value(X))))) {
      Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, Ty, {Y, Z});
      return BinaryOperator::CreateAdd(X, USub);
    }
    if (match(Op0, m_OneUse(m_c_Add(m_Specific(Z), m_Value(X))))) {
      Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, Ty, {Z, Y});
      return BinaryOperator::CreateAdd(X, USub);
    }
  }

  // sub Op0, smin((sub nsw Op0, Z), 0) --> smax Op0, Z
  // sub Op0, smax((sub nsw Op0, Z), 0) --> smin Op0, Z
  if (MinMax->isSigned() && match(Y, m_ZeroInt()) &&
      match(X, m_NSWSub(m_Specific(Op0), m_Value(Z)))) {
    Intrinsic::ID InvID = getInverseMinMaxIntrinsic(MinMax->getIntrinsicID());
    Function *F = Intrinsic::getDeclaration(I.getModule(), InvID, Ty);
    return CallInst::Create(F, {Op0, Z});
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
  if (Value *V = simplifySubInst(I.getOperand(0), I.getOperand(1),
                                 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // If this is a 'B = x-(-A)', change to B = x+A.
  // We deal with this without involving Negator to preserve NSW flag.
  if (Value *V = dyn_castNegVal(Op1)) {
    BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V);

    if (const auto *BO = dyn_cast<BinaryOperator>(Op1)) {
      assert(BO->getOpcode() == Instruction::Sub &&
             "Expected a subtraction operator!");
      if (BO->hasNoSignedWrap() && I.hasNoSignedWrap())
        Res->setHasNoSignedWrap(true);
    } else {
      if (cast<Constant>(Op1)->isNotMinSignedValue() && I.hasNoSignedWrap())
        Res->setHasNoSignedWrap(true);
    }

    return Res;
  }

  // Try this before Negator to preserve NSW flag.
  if (Instruction *R = factorizeMathWithShlOps(I, Builder))
    return R;

  Constant *C;
  if (match(Op0, m_ImmConstant(C))) {
    Value *X;
    Constant *C2;

    // C-(X+C2) --> (C-C2)-X
    if (match(Op1, m_Add(m_Value(X), m_ImmConstant(C2))))
      return BinaryOperator::CreateSub(ConstantExpr::getSub(C, C2), X);
  }

  auto TryToNarrowDeduceFlags = [this, &I, &Op0, &Op1]() -> Instruction * {
    if (Instruction *Ext = narrowMathIfNoOverflow(I))
      return Ext;

    bool Changed = false;
    if (!I.hasNoSignedWrap() && willNotOverflowSignedSub(Op0, Op1, I)) {
      Changed = true;
      I.setHasNoSignedWrap(true);
    }
    if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedSub(Op0, Op1, I)) {
      Changed = true;
      I.setHasNoUnsignedWrap(true);
    }

    return Changed ? &I : nullptr;
  };

  // First, let's try to interpret `sub a, b` as `add a, (sub 0, b)`,
  // and let's try to sink `(sub 0, b)` into `b` itself. But only if this isn't
  // a pure negation used by a select that looks like abs/nabs.
  bool IsNegation = match(Op0, m_ZeroInt());
  if (!IsNegation || none_of(I.users(), [&I, Op1](const User *U) {
        const Instruction *UI = dyn_cast<Instruction>(U);
        if (!UI)
          return false;
        return match(UI,
                     m_Select(m_Value(), m_Specific(Op1), m_Specific(&I))) ||
               match(UI, m_Select(m_Value(), m_Specific(&I), m_Specific(Op1)));
      })) {
    if (Value *NegOp1 = Negator::Negate(IsNegation, Op1, *this))
      return BinaryOperator::CreateAdd(NegOp1, Op0);
  }
  if (IsNegation)
    return TryToNarrowDeduceFlags(); // Should have been handled in Negator!

  // (A*B)-(A*C) -> A*(B-C) etc
  if (Value *V = foldUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  if (I.getType()->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateXor(Op0, Op1);

  // Replace (-1 - A) with (~A).
  if (match(Op0, m_AllOnes()))
    return BinaryOperator::CreateNot(Op1);

  // (X + -1) - Y --> ~Y + X
  Value *X, *Y;
  if (match(Op0, m_OneUse(m_Add(m_Value(X), m_AllOnes()))))
    return BinaryOperator::CreateAdd(Builder.CreateNot(Op1), X);

  // Reassociate sub/add sequences to create more add instructions and
  // reduce dependency chains:
  // ((X - Y) + Z) - Op1 --> (X + Z) - (Y + Op1)
  Value *Z;
  if (match(Op0, m_OneUse(m_c_Add(m_OneUse(m_Sub(m_Value(X), m_Value(Y))),
                                  m_Value(Z))))) {
    Value *XZ = Builder.CreateAdd(X, Z);
    Value *YW = Builder.CreateAdd(Y, Op1);
    return BinaryOperator::CreateSub(XZ, YW);
  }

  // ((X - Y) - Op1) --> X - (Y + Op1)
  if (match(Op0, m_OneUse(m_Sub(m_Value(X), m_Value(Y))))) {
    Value *Add = Builder.CreateAdd(Y, Op1);
    return BinaryOperator::CreateSub(X, Add);
  }

  // (~X) - (~Y) --> Y - X
  // This is placed after the other reassociations and explicitly excludes a
  // sub-of-sub pattern to avoid infinite looping.
  if (isFreeToInvert(Op0, Op0->hasOneUse()) &&
      isFreeToInvert(Op1, Op1->hasOneUse()) &&
      !match(Op0, m_Sub(m_ImmConstant(), m_Value()))) {
    Value *NotOp0 = Builder.CreateNot(Op0);
    Value *NotOp1 = Builder.CreateNot(Op1);
    return BinaryOperator::CreateSub(NotOp1, NotOp0);
  }

  auto m_AddRdx = [](Value *&Vec) {
    return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_add>(m_Value(Vec)));
  };
  Value *V0, *V1;
  if (match(Op0, m_AddRdx(V0)) && match(Op1, m_AddRdx(V1)) &&
      V0->getType() == V1->getType()) {
    // Difference of sums is sum of differences:
    // add_rdx(V0) - add_rdx(V1) --> add_rdx(V0 - V1)
    Value *Sub = Builder.CreateSub(V0, V1);
    Value *Rdx = Builder.CreateIntrinsic(Intrinsic::vector_reduce_add,
                                         {Sub->getType()}, {Sub});
    return replaceInstUsesWith(I, Rdx);
  }

  if (Constant *C = dyn_cast<Constant>(Op0)) {
    Value *X;
    if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
      // C - (zext bool) --> bool ? C - 1 : C
      return SelectInst::Create(X, InstCombiner::SubOne(C), C);
    if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
      // C - (sext bool) --> bool ? C + 1 : C
      return SelectInst::Create(X, InstCombiner::AddOne(C), C);

    // C - ~X == X + (1+C)
    if (match(Op1, m_Not(m_Value(X))))
      return BinaryOperator::CreateAdd(X, InstCombiner::AddOne(C));

    // Try to fold constant sub into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

    // Try to fold constant sub into PHI values.
    if (PHINode *PN = dyn_cast<PHINode>(Op1))
      if (Instruction *R = foldOpIntoPhi(I, PN))
        return R;

    Constant *C2;

    // C-(C2-X) --> X+(C-C2)
    if (match(Op1, m_Sub(m_ImmConstant(C2), m_Value(X))))
      return BinaryOperator::CreateAdd(X, ConstantExpr::getSub(C, C2));
  }

  const APInt *Op0C;
  if (match(Op0, m_APInt(Op0C))) {
    if (Op0C->isMask()) {
      // Turn this into an xor if LHS is 2^n-1 and the remaining bits are
      // known zero.
      KnownBits RHSKnown = computeKnownBits(Op1, 0, &I);
      if ((*Op0C | RHSKnown.Zero).isAllOnes())
        return BinaryOperator::CreateXor(Op1, Op0);
    }

    // C - ((C3 -nuw X) & C2) --> (C - (C2 & C3)) + (X & C2) when:
    //   (C3 - ((C2 & C3) - 1)) is pow2
    //   ((C2 + C3) & ((C2 & C3) - 1)) == ((C2 & C3) - 1)
    //   C2 is negative pow2 || sub nuw
    const APInt *C2, *C3;
    BinaryOperator *InnerSub;
    if (match(Op1, m_OneUse(m_And(m_BinOp(InnerSub), m_APInt(C2)))) &&
        match(InnerSub, m_Sub(m_APInt(C3), m_Value(X))) &&
        (InnerSub->hasNoUnsignedWrap() || C2->isNegatedPowerOf2())) {
      APInt C2AndC3 = *C2 & *C3;
      APInt C2AndC3Minus1 = C2AndC3 - 1;
      APInt C2AddC3 = *C2 + *C3;
      if ((*C3 - C2AndC3Minus1).isPowerOf2() &&
          C2AndC3Minus1.isSubsetOf(C2AddC3)) {
        Value *And = Builder.CreateAnd(X, ConstantInt::get(I.getType(), *C2));
        return BinaryOperator::CreateAdd(
            And, ConstantInt::get(I.getType(), *Op0C - C2AndC3));
      }
    }
  }


  {
    Value *Y;
    // X - (X + Y) == -Y and X - (Y + X) == -Y
    if (match(Op1, m_c_Add(m_Specific(Op0), m_Value(Y))))
      return BinaryOperator::CreateNeg(Y);

    // (X - Y) - X == -Y
    if (match(Op0, m_Sub(m_Specific(Op1), m_Value(Y))))
      return BinaryOperator::CreateNeg(Y);
  }

  // (sub (or A, B) (and A, B)) --> (xor A, B)
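  // e.g. A = 0b1100, B = 0b1010: (A | B) - (A & B) = 14 - 8 = 6 = A ^ B.
  // The same bit-counting argument underlies the related folds below.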
  {
    Value *A, *B;
    if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
        match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateXor(A, B);
  }

  // (sub (add A, B) (or A, B)) --> (and A, B)
  {
    Value *A, *B;
    if (match(Op0, m_Add(m_Value(A), m_Value(B))) &&
        match(Op1, m_c_Or(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateAnd(A, B);
  }

  // (sub (add A, B) (and A, B)) --> (or A, B)
  {
    Value *A, *B;
    if (match(Op0, m_Add(m_Value(A), m_Value(B))) &&
        match(Op1, m_c_And(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateOr(A, B);
  }

  // (sub (and A, B) (or A, B)) --> neg (xor A, B)
  {
    Value *A, *B;
    if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
        match(Op1, m_c_Or(m_Specific(A), m_Specific(B))) &&
        (Op0->hasOneUse() || Op1->hasOneUse()))
      return BinaryOperator::CreateNeg(Builder.CreateXor(A, B));
  }

  // (sub (or A, B), (xor A, B)) --> (and A, B)
  {
    Value *A, *B;
    if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
        match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateAnd(A, B);
  }

  // (sub (xor A, B) (or A, B)) --> neg (and A, B)
  {
    Value *A, *B;
    if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
        match(Op1, m_c_Or(m_Specific(A), m_Specific(B))) &&
        (Op0->hasOneUse() || Op1->hasOneUse()))
      return BinaryOperator::CreateNeg(Builder.CreateAnd(A, B));
  }

  {
    Value *Y;
    // ((X | Y) - X) --> (~X & Y)
    if (match(Op0, m_OneUse(m_c_Or(m_Value(Y), m_Specific(Op1)))))
      return BinaryOperator::CreateAnd(
          Y, Builder.CreateNot(Op1, Op1->getName() + ".not"));
  }

  {
    // (sub (and Op1, (neg X)), Op1) --> neg (and Op1, (add X, -1))
    Value *X;
    if (match(Op0, m_OneUse(m_c_And(m_Specific(Op1),
                                    m_OneUse(m_Neg(m_Value(X))))))) {
      return BinaryOperator::CreateNeg(Builder.CreateAnd(
          Op1, Builder.CreateAdd(X, Constant::getAllOnesValue(I.getType()))));
    }
  }

  {
    // (sub (and Op1, C), Op1) --> neg (and Op1, ~C)
    Constant *C;
    if (match(Op0, m_OneUse(m_And(m_Specific(Op1), m_Constant(C))))) {
      return BinaryOperator::CreateNeg(
          Builder.CreateAnd(Op1, Builder.CreateNot(C)));
    }
  }

  if (Instruction *R = foldSubOfMinMax(I, Builder))
    return R;

  {
    // If we have a subtraction between some value and a select between
    // said value and something else, sink the subtraction into the select
    // hands, i.e.:
    //   sub (select %Cond, %TrueVal, %FalseVal), %Op1
    //     ->
    //   select %Cond, (sub %TrueVal, %Op1), (sub %FalseVal, %Op1)
    // or
    //   sub %Op0, (select %Cond, %TrueVal, %FalseVal)
    //     ->
    //   select %Cond, (sub %Op0, %TrueVal), (sub %Op0, %FalseVal)
    // This will result in a select between the new subtraction and 0.
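    // For example:
    //   sub (select %c, %x, %y), %x --> select %c, 0, (sub %y, %x)
    // because the select hand that matches the subtrahend cancels to 0.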
    auto SinkSubIntoSelect =
        [Ty = I.getType()](Value *Select, Value *OtherHandOfSub,
                           auto SubBuilder) -> Instruction * {
      Value *Cond, *TrueVal, *FalseVal;
      if (!match(Select, m_OneUse(m_Select(m_Value(Cond), m_Value(TrueVal),
                                           m_Value(FalseVal)))))
        return nullptr;
      if (OtherHandOfSub != TrueVal && OtherHandOfSub != FalseVal)
        return nullptr;
      // While it is really tempting to just create two subtractions and let
      // InstCombine fold one of those to 0, it isn't possible to do so
      // because of worklist visitation order. So ugly it is.
      bool OtherHandOfSubIsTrueVal = OtherHandOfSub == TrueVal;
      Value *NewSub = SubBuilder(OtherHandOfSubIsTrueVal ? FalseVal : TrueVal);
      Constant *Zero = Constant::getNullValue(Ty);
      SelectInst *NewSel =
          SelectInst::Create(Cond, OtherHandOfSubIsTrueVal ? Zero : NewSub,
                             OtherHandOfSubIsTrueVal ? NewSub : Zero);
      // Preserve prof metadata if any.
      NewSel->copyMetadata(cast<Instruction>(*Select));
      return NewSel;
    };
    if (Instruction *NewSel = SinkSubIntoSelect(
            /*Select=*/Op0, /*OtherHandOfSub=*/Op1,
            [Builder = &Builder, Op1](Value *OtherHandOfSelect) {
              return Builder->CreateSub(OtherHandOfSelect,
                                        /*OtherHandOfSub=*/Op1);
            }))
      return NewSel;
    if (Instruction *NewSel = SinkSubIntoSelect(
            /*Select=*/Op1, /*OtherHandOfSub=*/Op0,
            [Builder = &Builder, Op0](Value *OtherHandOfSelect) {
              return Builder->CreateSub(/*OtherHandOfSub=*/Op0,
                                        OtherHandOfSelect);
            }))
      return NewSel;
  }

  // (X - (X & Y)) --> (X & ~Y)
  if (match(Op1, m_c_And(m_Specific(Op0), m_Value(Y))) &&
      (Op1->hasOneUse() || isa<Constant>(Y)))
    return BinaryOperator::CreateAnd(
        Op0, Builder.CreateNot(Y, Y->getName() + ".not"));

  // ~X - Min/Max(~X, Y) -> ~Min/Max(X, ~Y) - X
  // ~X - Min/Max(Y, ~X) -> ~Min/Max(X, ~Y) - X
  // Min/Max(~X, Y) - ~X -> X - ~Min/Max(X, ~Y)
  // Min/Max(Y, ~X) - ~X -> X - ~Min/Max(X, ~Y)
  // As long as Y is freely invertible, this will be neutral or a win.
  // Note: We don't generate the inverse max/min, just create the 'not' of
  // it and let other folds do the rest.
  if (match(Op0, m_Not(m_Value(X))) &&
      match(Op1, m_c_MaxOrMin(m_Specific(Op0), m_Value(Y))) &&
      !Op0->hasNUsesOrMore(3) && isFreeToInvert(Y, Y->hasOneUse())) {
    Value *Not = Builder.CreateNot(Op1);
    return BinaryOperator::CreateSub(Not, X);
  }
  if (match(Op1, m_Not(m_Value(X))) &&
      match(Op0, m_c_MaxOrMin(m_Specific(Op1), m_Value(Y))) &&
      !Op1->hasNUsesOrMore(3) && isFreeToInvert(Y, Y->hasOneUse())) {
    Value *Not = Builder.CreateNot(Op0);
    return BinaryOperator::CreateSub(X, Not);
  }

  // Optimize pointer differences in the same array into a size. Consider:
  //   &A[10] - &A[0]: we should compile this to "10".
  Value *LHSOp, *RHSOp;
  if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
      match(Op1, m_PtrToInt(m_Value(RHSOp))))
    if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(),
                                               I.hasNoUnsignedWrap()))
      return replaceInstUsesWith(I, Res);

  // trunc(p)-trunc(q) -> trunc(p-q)
  if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
      match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
    if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(),
                                               /* IsNUW */ false))
      return replaceInstUsesWith(I, Res);

  // Canonicalize a shifty way to code absolute value to the common pattern.
  // There are 2 potential commuted variants.
  // We're relying on the fact that we only do this transform when the shift has
  // exactly 2 uses and the xor has exactly 1 use (otherwise, we might increase
  // instructions).
  Value *A;
  const APInt *ShAmt;
  Type *Ty = I.getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) &&
      Op1->hasNUses(2) && *ShAmt == BitWidth - 1 &&
      match(Op0, m_OneUse(m_c_Xor(m_Specific(A), m_Specific(Op1))))) {
    // B = ashr i32 A, 31  ; smear the sign bit
    // sub (xor A, B), B   ; flip bits if negative and subtract -1 (add 1)
    // --> (A < 0) ? -A : A
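    // e.g. A = -5: B = -1, xor(A, B) = 4, and 4 - (-1) = 5 = |A|.
    // For A >= 0, B = 0 and the expression is A - 0 = A.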
    Value *IsNeg = Builder.CreateIsNeg(A);
    // Copy the nuw/nsw flags from the sub to the negate.
    Value *NegA = Builder.CreateNeg(A, "", I.hasNoUnsignedWrap(),
                                    I.hasNoSignedWrap());
    return SelectInst::Create(IsNeg, NegA, A);
  }

  // If we are subtracting a low-bit masked subset of some value from an add
  // of that same value with no low bits changed, that is clearing some low bits
  // of the sum:
  // sub (X + AddC), (X & AndC) --> and (X + AddC), ~AndC
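  // e.g. X = 10, AddC = 16, AndC = 7: (X + 16) - (X & 7) = 26 - 2 = 24, which
  // equals (X + 16) & ~7 because the add cannot change the low 3 bits.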
  const APInt *AddC, *AndC;
  if (match(Op0, m_Add(m_Value(X), m_APInt(AddC))) &&
      match(Op1, m_And(m_Specific(X), m_APInt(AndC)))) {
    unsigned Cttz = AddC->countTrailingZeros();
    APInt HighMask(APInt::getHighBitsSet(BitWidth, BitWidth - Cttz));
    if ((HighMask & *AndC).isZero())
      return BinaryOperator::CreateAnd(Op0, ConstantInt::get(Ty, ~(*AndC)));
  }

  if (Instruction *V =
          canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
    return V;

  // X - usub.sat(X, Y) => umin(X, Y)
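  // usub.sat(X, Y) is X - Y when X u>= Y and 0 otherwise, so the difference
  // is Y in the first case and X in the second, i.e. umin(X, Y).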
  if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::usub_sat>(m_Specific(Op0),
                                                           m_Value(Y)))))
    return replaceInstUsesWith(
        I, Builder.CreateIntrinsic(Intrinsic::umin, {I.getType()}, {Op0, Y}));

  // umax(X, Op1) - Op1 --> usub.sat(X, Op1)
  // TODO: The one-use restriction is not strictly necessary, but it may
  //       require improving other pattern matching and/or codegen.
  if (match(Op0, m_OneUse(m_c_UMax(m_Value(X), m_Specific(Op1)))))
    return replaceInstUsesWith(
        I, Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {X, Op1}));

  // Op0 - umin(X, Op0) --> usub.sat(Op0, X)
  if (match(Op1, m_OneUse(m_c_UMin(m_Value(X), m_Specific(Op0)))))
    return replaceInstUsesWith(
        I, Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {Op0, X}));

  // Op0 - umax(X, Op0) --> 0 - usub.sat(X, Op0)
  if (match(Op1, m_OneUse(m_c_UMax(m_Value(X), m_Specific(Op0))))) {
    Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {X, Op0});
    return BinaryOperator::CreateNeg(USub);
  }

  // umin(X, Op1) - Op1 --> 0 - usub.sat(Op1, X)
  if (match(Op0, m_OneUse(m_c_UMin(m_Value(X), m_Specific(Op1))))) {
    Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {Op1, X});
    return BinaryOperator::CreateNeg(USub);
  }

  // C - ctpop(X) => ctpop(~X) if C is bitwidth
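  // Every bit cleared in X is set in ~X, so ctpop(X) + ctpop(~X) == BitWidth.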
  if (match(Op0, m_SpecificInt(BitWidth)) &&
      match(Op1, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(X)))))
    return replaceInstUsesWith(
        I, Builder.CreateIntrinsic(Intrinsic::ctpop, {I.getType()},
                                   {Builder.CreateNot(X)}));

  // Reduce multiplies for difference-of-squares by factoring:
  // (X * X) - (Y * Y) --> (X + Y) * (X - Y)
  if (match(Op0, m_OneUse(m_Mul(m_Value(X), m_Deferred(X)))) &&
      match(Op1, m_OneUse(m_Mul(m_Value(Y), m_Deferred(Y))))) {
    auto *OBO0 = cast<OverflowingBinaryOperator>(Op0);
    auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
    bool PropagateNSW = I.hasNoSignedWrap() && OBO0->hasNoSignedWrap() &&
                        OBO1->hasNoSignedWrap() && BitWidth > 2;
    bool PropagateNUW = I.hasNoUnsignedWrap() && OBO0->hasNoUnsignedWrap() &&
                        OBO1->hasNoUnsignedWrap() && BitWidth > 1;
    Value *Add = Builder.CreateAdd(X, Y, "add", PropagateNUW, PropagateNSW);
    Value *Sub = Builder.CreateSub(X, Y, "sub", PropagateNUW, PropagateNSW);
    Value *Mul = Builder.CreateMul(Add, Sub, "", PropagateNUW, PropagateNSW);
    return replaceInstUsesWith(I, Mul);
  }

  return TryToNarrowDeduceFlags();
}

/// This eliminates floating-point negation in either 'fneg(X)' or
/// 'fsub(-0.0, X)' form by combining into a constant operand.
static Instruction *foldFNegIntoConstant(Instruction &I, const DataLayout &DL) {
  // This is limited with one-use because fneg is assumed better for
  // reassociation and cheaper in codegen than fmul/fdiv.
  // TODO: Should the m_OneUse restriction be removed?
  Instruction *FNegOp;
  if (!match(&I, m_FNeg(m_OneUse(m_Instruction(FNegOp)))))
    return nullptr;

  Value *X;
  Constant *C;

  // Fold negation into constant operand.
  // -(X * C) --> X * (-C)
  if (match(FNegOp, m_FMul(m_Value(X), m_Constant(C))))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
      return BinaryOperator::CreateFMulFMF(X, NegC, &I);
  // -(X / C) --> X / (-C)
  if (match(FNegOp, m_FDiv(m_Value(X), m_Constant(C))))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
      return BinaryOperator::CreateFDivFMF(X, NegC, &I);
  // -(C / X) --> (-C) / X
  if (match(FNegOp, m_FDiv(m_Constant(C), m_Value(X))))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL)) {
      Instruction *FDiv = BinaryOperator::CreateFDivFMF(NegC, X, &I);

      // Intersect 'nsz' and 'ninf' because those special value exceptions may
      // not apply to the fdiv. Everything else propagates from the fneg.
      // TODO: We could propagate nsz/ninf from fdiv alone?
      FastMathFlags FMF = I.getFastMathFlags();
      FastMathFlags OpFMF = FNegOp->getFastMathFlags();
      FDiv->setHasNoSignedZeros(FMF.noSignedZeros() && OpFMF.noSignedZeros());
      FDiv->setHasNoInfs(FMF.noInfs() && OpFMF.noInfs());
      return FDiv;
    }
  // With NSZ [ counter-example with -0.0: -(-0.0 + 0.0) != 0.0 + -0.0 ]:
  // -(X + C) --> -X + -C --> -C - X
  if (I.hasNoSignedZeros() && match(FNegOp, m_FAdd(m_Value(X), m_Constant(C))))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
      return BinaryOperator::CreateFSubFMF(NegC, X, &I);

  return nullptr;
}

static Instruction *hoistFNegAboveFMulFDiv(Instruction &I,
                                           InstCombiner::BuilderTy &Builder) {
  Value *FNeg;
  if (!match(&I, m_FNeg(m_Value(FNeg))))
    return nullptr;

  Value *X, *Y;
  if (match(FNeg, m_OneUse(m_FMul(m_Value(X), m_Value(Y)))))
    return BinaryOperator::CreateFMulFMF(Builder.CreateFNegFMF(X, &I), Y, &I);

  if (match(FNeg, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))))
    return BinaryOperator::CreateFDivFMF(Builder.CreateFNegFMF(X, &I), Y, &I);

  return nullptr;
}

Instruction *InstCombinerImpl::visitFNeg(UnaryOperator &I) {
  Value *Op = I.getOperand(0);

  if (Value *V = simplifyFNegInst(Op, I.getFastMathFlags(),
                                  getSimplifyQuery().getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldFNegIntoConstant(I, DL))
    return X;

  Value *X, *Y;

  // If we can ignore the sign of zeros: -(X - Y) --> (Y - X)
  if (I.hasNoSignedZeros() &&
      match(Op, m_OneUse(m_FSub(m_Value(X), m_Value(Y)))))
    return BinaryOperator::CreateFSubFMF(Y, X, &I);

  if (Instruction *R = hoistFNegAboveFMulFDiv(I, Builder))
    return R;

  Value *OneUse;
  if (!match(Op, m_OneUse(m_Value(OneUse))))
    return nullptr;

  // Try to eliminate fneg if at least 1 arm of the select is negated.
  Value *Cond;
  if (match(OneUse, m_Select(m_Value(Cond), m_Value(X), m_Value(Y)))) {
    // Unlike most transforms, this one is not safe to propagate nsz unless
    // it is present on the original select. We union the flags from the select
    // and fneg and then remove nsz if needed.
    auto propagateSelectFMF = [&](SelectInst *S, bool CommonOperand) {
      S->copyFastMathFlags(&I);
      if (auto *OldSel = dyn_cast<SelectInst>(Op)) {
        FastMathFlags FMF = I.getFastMathFlags();
        FMF |= OldSel->getFastMathFlags();
        S->setFastMathFlags(FMF);
        if (!OldSel->hasNoSignedZeros() && !CommonOperand &&
            !isGuaranteedNotToBeUndefOrPoison(OldSel->getCondition()))
          S->setHasNoSignedZeros(false);
      }
    };
    // -(Cond ? -P : Y) --> Cond ? P : -Y
    Value *P;
    if (match(X, m_FNeg(m_Value(P)))) {
      Value *NegY = Builder.CreateFNegFMF(Y, &I, Y->getName() + ".neg");
      SelectInst *NewSel = SelectInst::Create(Cond, P, NegY);
      propagateSelectFMF(NewSel, P == Y);
      return NewSel;
    }
    // -(Cond ? X : -P) --> Cond ? -X : P
    if (match(Y, m_FNeg(m_Value(P)))) {
      Value *NegX = Builder.CreateFNegFMF(X, &I, X->getName() + ".neg");
      SelectInst *NewSel = SelectInst::Create(Cond, NegX, P);
      propagateSelectFMF(NewSel, P == X);
      return NewSel;
    }
  }

  // fneg (copysign x, y) -> copysign x, (fneg y)
  if (match(OneUse, m_CopySign(m_Value(X), m_Value(Y)))) {
    // The source copysign has an additional value input, so we can't propagate
    // flags the copysign doesn't also have.
    FastMathFlags FMF = I.getFastMathFlags();
    FMF &= cast<FPMathOperator>(OneUse)->getFastMathFlags();

    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.setFastMathFlags(FMF);

    Value *NegY = Builder.CreateFNeg(Y);
    Value *NewCopySign = Builder.CreateCopySign(X, NegY);
    return replaceInstUsesWith(I, NewCopySign);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitFSub(BinaryOperator &I) {
  if (Value *V = simplifyFSubInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  getSimplifyQuery().getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  // Subtraction from -0.0 is the canonical form of fneg.
  // fsub -0.0, X ==> fneg X
  // fsub nsz 0.0, X ==> fneg nsz X
  //
  // FIXME: This matcher does not respect FTZ or DAZ yet:
  // fsub -0.0, Denorm ==> +-0
  // fneg Denorm ==> -Denorm
  Value *Op;
  if (match(&I, m_FNeg(m_Value(Op))))
    return UnaryOperator::CreateFNegFMF(Op, &I);

  if (Instruction *X = foldFNegIntoConstant(I, DL))
    return X;

  if (Instruction *R = hoistFNegAboveFMulFDiv(I, Builder))
    return R;

  Value *X, *Y;
  Constant *C;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  // If Op0 is not -0.0 or we can ignore -0.0: Z - (X - Y) --> Z + (Y - X)
  // Canonicalize to fadd to make analysis easier.
  // This can also help codegen because fadd is commutative.
  // Note that if this fsub was really an fneg, the fadd with -0.0 will get
  // killed later. We still limit that particular transform with 'hasOneUse'
  // because an fneg is assumed better/cheaper than a generic fsub.
  if (I.hasNoSignedZeros() || CannotBeNegativeZero(Op0, SQ.TLI)) {
    if (match(Op1, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) {
      Value *NewSub = Builder.CreateFSubFMF(Y, X, &I);
      return BinaryOperator::CreateFAddFMF(Op0, NewSub, &I);
    }
  }

  // (-X) - Op1 --> -(X + Op1)
  if (I.hasNoSignedZeros() && !isa<ConstantExpr>(Op0) &&
      match(Op0, m_OneUse(m_FNeg(m_Value(X))))) {
    Value *FAdd = Builder.CreateFAddFMF(X, Op1, &I);
    return UnaryOperator::CreateFNegFMF(FAdd, &I);
  }

  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *NV = FoldOpIntoSelect(I, SI))
        return NV;

  // X - C --> X + (-C)
  // But don't transform constant expressions because there's an inverse fold
  // for X + (-Y) --> X - Y.
  if (match(Op1, m_ImmConstant(C)))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
      return BinaryOperator::CreateFAddFMF(Op0, NegC, &I);

  // X - (-Y) --> X + Y
  if (match(Op1, m_FNeg(m_Value(Y))))
    return BinaryOperator::CreateFAddFMF(Op0, Y, &I);

  // Similar to above, but look through a cast of the negated value:
  // X - (fptrunc(-Y)) --> X + fptrunc(Y)
  Type *Ty = I.getType();
  if (match(Op1, m_OneUse(m_FPTrunc(m_FNeg(m_Value(Y))))))
    return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPTrunc(Y, Ty), &I);

  // X - (fpext(-Y)) --> X + fpext(Y)
  if (match(Op1, m_OneUse(m_FPExt(m_FNeg(m_Value(Y))))))
    return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPExt(Y, Ty), &I);

  // Similar to above, but look through fmul/fdiv of the negated value:
  // Op0 - (-X * Y) --> Op0 + (X * Y)
  // Op0 - (Y * -X) --> Op0 + (X * Y)
  if (match(Op1, m_OneUse(m_c_FMul(m_FNeg(m_Value(X)), m_Value(Y))))) {
    Value *FMul = Builder.CreateFMulFMF(X, Y, &I);
    return BinaryOperator::CreateFAddFMF(Op0, FMul, &I);
  }
  // Op0 - (-X / Y) --> Op0 + (X / Y)
  // Op0 - (X / -Y) --> Op0 + (X / Y)
  if (match(Op1, m_OneUse(m_FDiv(m_FNeg(m_Value(X)), m_Value(Y)))) ||
      match(Op1, m_OneUse(m_FDiv(m_Value(X), m_FNeg(m_Value(Y)))))) {
    Value *FDiv = Builder.CreateFDivFMF(X, Y, &I);
    return BinaryOperator::CreateFAddFMF(Op0, FDiv, &I);
  }

  // Handle special cases for FSub with selects feeding the operation
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
    return replaceInstUsesWith(I, V);

  if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
    // (Y - X) - Y --> -X
    if (match(Op0, m_FSub(m_Specific(Op1), m_Value(X))))
      return UnaryOperator::CreateFNegFMF(X, &I);

    // Y - (X + Y) --> -X
    // Y - (Y + X) --> -X
    if (match(Op1, m_c_FAdd(m_Specific(Op0), m_Value(X))))
      return UnaryOperator::CreateFNegFMF(X, &I);

    // (X * C) - X --> X * (C - 1.0)
    if (match(Op0, m_FMul(m_Specific(Op1), m_Constant(C)))) {
      if (Constant *CSubOne = ConstantFoldBinaryOpOperands(
              Instruction::FSub, C, ConstantFP::get(Ty, 1.0), DL))
        return BinaryOperator::CreateFMulFMF(Op1, CSubOne, &I);
    }
    // X - (X * C) --> X * (1.0 - C)
    if (match(Op1, m_FMul(m_Specific(Op0), m_Constant(C)))) {
      if (Constant *OneSubC = ConstantFoldBinaryOpOperands(
              Instruction::FSub, ConstantFP::get(Ty, 1.0), C, DL))
        return BinaryOperator::CreateFMulFMF(Op0, OneSubC, &I);
    }

    // Reassociate fsub/fadd sequences to create more fadd instructions and
    // reduce dependency chains:
    // ((X - Y) + Z) - Op1 --> (X + Z) - (Y + Op1)
    Value *Z;
    if (match(Op0, m_OneUse(m_c_FAdd(m_OneUse(m_FSub(m_Value(X), m_Value(Y))),
                                     m_Value(Z))))) {
      Value *XZ = Builder.CreateFAddFMF(X, Z, &I);
      Value *YW = Builder.CreateFAddFMF(Y, Op1, &I);
      return BinaryOperator::CreateFSubFMF(XZ, YW, &I);
    }

    auto m_FaddRdx = [](Value *&Sum, Value *&Vec) {
      return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(m_Value(Sum),
                                                                 m_Value(Vec)));
    };
    Value *A0, *A1, *V0, *V1;
    if (match(Op0, m_FaddRdx(A0, V0)) && match(Op1, m_FaddRdx(A1, V1)) &&
        V0->getType() == V1->getType()) {
      // Difference of sums is sum of differences:
      // add_rdx(A0, V0) - add_rdx(A1, V1) --> add_rdx(A0, V0 - V1) - A1
      Value *Sub = Builder.CreateFSubFMF(V0, V1, &I);
      Value *Rdx = Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
                                           {Sub->getType()}, {A0, Sub}, &I);
      return BinaryOperator::CreateFSubFMF(Rdx, A1, &I);
    }

    if (Instruction *F = factorizeFAddFSub(I, Builder))
      return F;

    // TODO: This performs reassociative folds for FP ops. Some fraction of the
    // functionality has been subsumed by simple pattern matching here and in
    // InstSimplify. We should let a dedicated reassociation pass handle more
    // complex pattern matching and remove this from InstCombine.
    if (Value *V = FAddCombine(Builder).simplify(&I))
      return replaceInstUsesWith(I, V);

    // (X - Y) - Op1 --> X - (Y + Op1)
    if (match(Op0, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) {
      Value *FAdd = Builder.CreateFAddFMF(Y, Op1, &I);
      return BinaryOperator::CreateFSubFMF(X, FAdd, &I);
    }
  }

  return nullptr;
}