//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simpler
// instructions.  This pass does not modify the CFG.  This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist-driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumWorklistIterations,
          "Number of instruction combining iterations performed");
STATISTIC(NumOneIteration, "Number of functions with one iteration");
STATISTIC(NumTwoIterations, "Number of functions with two iterations");
STATISTIC(NumThreeIterations, "Number of functions with three iterations");
STATISTIC(NumFourOrMoreIterations,
          "Number of functions with four or more iterations");

STATISTIC(NumCombined, "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst, "Number of dead inst eliminated");
STATISTIC(NumSunkInst, "Number of instructions sunk");
STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumFactor, "Number of factorizations");
STATISTIC(NumReassoc, "Number of reassociations");
DEBUG_COUNTER(VisitCounter, "instcombine-visit",
              "Controls which instructions are visited");

static cl::opt<bool>
EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"),
                                              cl::init(true));

static cl::opt<unsigned> MaxSinkNumUsers(
    "instcombine-max-sink-users", cl::init(32),
    cl::desc("Maximum number of undroppable users for instruction sinking"));

static cl::opt<unsigned>
MaxArraySize("instcombine-maxarray-size", cl::init(1024),
             cl::desc("Maximum array size considered when doing a combine"));

// FIXME: Remove this flag when it is no longer necessary to convert
// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
// increases variable availability at the cost of accuracy. Variables that
// cannot be promoted by mem2reg or SROA will be described as living in memory
// for their entire lifetime. However, passes like DSE and instcombine can
// delete stores to the alloca, leading to misleading and inaccurate debug
// information. This flag can be removed when those passes are fixed.
static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
                                               cl::Hidden, cl::init(true));

std::optional<Instruction *>
InstCombiner::targetInstCombineIntrinsic(IntrinsicInst &II) {
  // Handle target-specific intrinsics.
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTI.instCombineIntrinsic(*this, II);
  }
  return std::nullopt;
}

std::optional<Value *> InstCombiner::targetSimplifyDemandedUseBitsIntrinsic(
    IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) {
  // Handle target-specific intrinsics.
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTI.simplifyDemandedUseBitsIntrinsic(*this, II, DemandedMask, Known,
                                                KnownBitsComputed);
  }
  return std::nullopt;
}

std::optional<Value *> InstCombiner::targetSimplifyDemandedVectorEltsIntrinsic(
    IntrinsicInst &II, APInt DemandedElts, APInt &PoisonElts,
    APInt &PoisonElts2, APInt &PoisonElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) {
  // Handle target-specific intrinsics.
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTI.simplifyDemandedVectorEltsIntrinsic(
        *this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
        SimplifyAndSetOp);
  }
  return std::nullopt;
}

bool InstCombiner::isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
  return TTI.isValidAddrSpaceCast(FromAS, ToAS);
}

Value *InstCombinerImpl::EmitGEPOffset(User *GEP) {
  return llvm::emitGEPOffset(&Builder, DL, GEP);
}

/// Legal integers and common types are considered desirable. This is used to
/// avoid creating instructions with types that may not be supported well by
/// the backend.
/// NOTE: This treats i8, i16 and i32 specially because they are common
///       types in frontend languages.
bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
  switch (BitWidth) {
  case 8:
  case 16:
  case 32:
    return true;
  default:
    return DL.isLegalInteger(BitWidth);
  }
}

/// Return true if it is desirable to convert an integer computation from a
/// given bit width to a new bit width.
/// We don't want to convert from a legal or desirable type (like i8) to an
/// illegal type or from a smaller to a larger illegal type. A width of '1'
/// is always treated as a desirable type because i1 is a fundamental type in
/// IR, and there are many specialized optimizations for i1 types.
/// Common/desirable widths are equally treated as legal to convert to, in
/// order to open up more combining opportunities.
bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
                                        unsigned ToWidth) const {
  bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
  bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);

  // Convert to desirable widths even if they are not legal types.
  // Only shrink types, to prevent infinite loops.
  if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
    return true;

  // If the source type is a legal or desirable integer and the result would be
  // an illegal type, don't do the transformation.
  if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
    return false;

  // Otherwise, if both are illegal, do not increase the size of the result. We
  // do allow things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}

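// An illustrative sketch of the rules above (assuming a typical 64-bit
// datalayout where i8/i16/i32/i64 are legal): shouldChangeType(33, 32)
// returns true (shrinking to a desirable width), shouldChangeType(32, 13)
// returns false (legal source, illegal result), and shouldChangeType(160, 64)
// returns true while shouldChangeType(64, 160) returns false.
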
/// Return true if it is desirable to convert a computation from 'From' to 'To'.
/// We don't want to convert from a legal to an illegal type or from a smaller
/// to a larger illegal type. i1 is always treated as a legal type because it is
/// a fundamental type in IR, and there are many specialized optimizations for
/// i1 types.
bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
  // TODO: This could be extended to allow vectors. Datalayout changes might be
  // needed to properly support that.
  if (!From->isIntegerTy() || !To->isIntegerTy())
    return false;

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  return shouldChangeType(FromWidth, ToWidth);
}

// Return true if No Signed Wrap should be maintained for I.
// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
// where both B and C should be ConstantInts, results in a constant that does
// not overflow. This function only handles the Add and Sub opcodes. For
// all other opcodes, the function conservatively returns false.
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OBO || !OBO->hasNoSignedWrap())
    return false;

  // We reason only about Add and Sub.
  Instruction::BinaryOps Opcode = I.getOpcode();
  if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
    return false;

  const APInt *BVal, *CVal;
  if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
    return false;

  bool Overflow = false;
  if (Opcode == Instruction::Add)
    (void)BVal->sadd_ov(*CVal, Overflow);
  else
    (void)BVal->ssub_ov(*CVal, Overflow);

  return !Overflow;
}

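// For example (a hypothetical reassociation of nsw adds): folding
//   %t = add nsw i8 %x, 100
//   %r = add nsw i8 %t, 100
// must drop nsw because 100 + 100 overflows i8, whereas constants such as
// 10 and 20 would fold to 30 without overflow and nsw could be kept.
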
static bool hasNoUnsignedWrap(BinaryOperator &I) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoUnsignedWrap();
}

static bool hasNoSignedWrap(BinaryOperator &I) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoSignedWrap();
}

/// Conservatively clears subclassOptionalData after a reassociation or
/// commutation. Fast-math flags are the exception: when applicable, they are
/// preserved.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
  FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
  if (!FPMO) {
    I.clearSubclassOptionalData();
    return;
  }

  FastMathFlags FMF = I.getFastMathFlags();
  I.clearSubclassOptionalData();
  I.setFastMathFlags(FMF);
}

/// Combine constant operands of associative operations either before or after
/// a cast to eliminate one of the associative operations:
/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1,
                                   InstCombinerImpl &IC) {
  auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
  if (!Cast || !Cast->hasOneUse())
    return false;

  // TODO: Enhance logic for other casts and remove this check.
  auto CastOpcode = Cast->getOpcode();
  if (CastOpcode != Instruction::ZExt)
    return false;

  // TODO: Enhance logic for other BinOps and remove this check.
  if (!BinOp1->isBitwiseLogicOp())
    return false;

  auto AssocOpcode = BinOp1->getOpcode();
  auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
  if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
    return false;

  Constant *C1, *C2;
  if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
      !match(BinOp2->getOperand(1), m_Constant(C2)))
    return false;

  // TODO: This assumes a zext cast.
  // E.g., if it were a trunc, we'd cast C1 to the source type because casting
  // C2 to the destination type might lose bits.

  // Fold the constants together in the destination type:
  // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
  const DataLayout &DL = IC.getDataLayout();
  Type *DestTy = C1->getType();
  Constant *CastC2 = ConstantFoldCastOperand(CastOpcode, C2, DestTy, DL);
  if (!CastC2)
    return false;
  Constant *FoldedC = ConstantFoldBinaryOpOperands(AssocOpcode, C1, CastC2, DL);
  if (!FoldedC)
    return false;

  IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
  IC.replaceOperand(*BinOp1, 1, FoldedC);
  BinOp1->dropPoisonGeneratingFlags();
  Cast->dropPoisonGeneratingFlags();
  return true;
}

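// A small example of the fold above (zext + 'and', with the constants folded
// in the destination type):
//   %t = and i8 %x, 15
//   %z = zext i8 %t to i32
//   %r = and i32 %z, 255
// becomes
//   %z = zext i8 %x to i32
//   %r = and i32 %z, 15      ; FoldedC = 255 & zext(15) = 15
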
// Simplifies IntToPtr/PtrToInt round-trip casts:
// inttoptr ( ptrtoint (x) ) --> x
Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
  auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
  if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
                      DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
    auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
    Type *CastTy = IntToPtr->getDestTy();
    if (PtrToInt &&
        CastTy->getPointerAddressSpace() ==
            PtrToInt->getSrcTy()->getPointerAddressSpace() &&
        DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
            DL.getTypeSizeInBits(PtrToInt->getDestTy()))
      return PtrToInt->getOperand(0);
  }
  return nullptr;
}

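// For example, with 64-bit pointers and a matching address space:
//   %i = ptrtoint ptr %p to i64
//   %q = inttoptr i64 %i to ptr
// simplifies %q to %p, since no bits are lost in the round trip.
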
/// This performs a few simplifications for operators that are associative or
/// commutative:
///
///  Commutative operators:
///
///  1. Order operands such that they are listed from right (least complex) to
///     left (most complex).  This puts constants before unary operators before
///     binary operators.
///
///  Associative operators:
///
///  2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
///  3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
///
///  Associative and commutative operators:
///
///  4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
///  5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
///  6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
///     if C1 and C2 are constants.
bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  bool Changed = false;

  do {
    // Order operands such that they are listed from right (least complex) to
    // left (most complex).  This puts constants before unary operators before
    // binary operators.
    if (I.isCommutative() && getComplexity(I.getOperand(0)) <
        getComplexity(I.getOperand(1)))
      Changed = !I.swapOperands();

    BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
    BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));

    if (I.isAssociative()) {
      // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "B op C" simplify?
        if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
          // It simplifies to V.  Form "A op V".
          replaceOperand(I, 0, A);
          replaceOperand(I, 1, V);
          bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
          bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);

          // Conservatively clear all optional flags since they may not be
          // preserved by the reassociation. Reset nsw/nuw based on the above
          // analysis.
          ClearSubclassDataAfterReassociation(I);

          // Note: this is only valid because simplifyBinOp doesn't look at
          // the operands to Op0.
          if (IsNUW)
            I.setHasNoUnsignedWrap(true);

          if (IsNSW)
            I.setHasNoSignedWrap(true);

          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "A op B" simplify?
        if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
          // It simplifies to V.  Form "V op C".
          replaceOperand(I, 0, V);
          replaceOperand(I, 1, C);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }
    }

    if (I.isAssociative() && I.isCommutative()) {
      if (simplifyAssocCastAssoc(&I, *this)) {
        Changed = true;
        ++NumReassoc;
        continue;
      }

      // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
          // It simplifies to V.  Form "V op B".
          replaceOperand(I, 0, V);
          replaceOperand(I, 1, B);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
          // It simplifies to V.  Form "B op V".
          replaceOperand(I, 0, B);
          replaceOperand(I, 1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
      // if C1 and C2 are constants.
      Value *A, *B;
      Constant *C1, *C2, *CRes;
      if (Op0 && Op1 &&
          Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
          match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
          match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2)))) &&
          (CRes = ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL))) {
        bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0) &&
                     hasNoUnsignedWrap(*Op1);
        BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add)
                                    ? BinaryOperator::CreateNUW(Opcode, A, B)
                                    : BinaryOperator::Create(Opcode, A, B);

        if (isa<FPMathOperator>(NewBO)) {
          FastMathFlags Flags = I.getFastMathFlags() &
                                Op0->getFastMathFlags() &
                                Op1->getFastMathFlags();
          NewBO->setFastMathFlags(Flags);
        }
        InsertNewInstWith(NewBO, I.getIterator());
        NewBO->takeName(Op1);
        replaceOperand(I, 0, NewBO);
        replaceOperand(I, 1, CRes);
        // Conservatively clear the optional flags, since they may not be
        // preserved by the reassociation.
        ClearSubclassDataAfterReassociation(I);
        if (IsNUW)
          I.setHasNoUnsignedWrap(true);

        Changed = true;
        continue;
      }
    }

    // No further simplifications.
    return Changed;
  } while (true);
}

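// A minimal example of transform 2 above:
//   %y = add i32 %x, 5
//   %z = add i32 %y, -5
// "B op C" simplifies (5 + -5 == 0), so %z is rewritten to "add i32 %x, 0",
// which the worklist algorithm then folds to %x.
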
/// Return whether "X LOp (Y ROp Z)" is always equal to
/// "(X LOp Y) ROp (X LOp Z)".
static bool leftDistributesOverRight(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  // X & (Y | Z) <--> (X & Y) | (X & Z)
  // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
  if (LOp == Instruction::And)
    return ROp == Instruction::Or || ROp == Instruction::Xor;

  // X | (Y & Z) <--> (X | Y) & (X | Z)
  if (LOp == Instruction::Or)
    return ROp == Instruction::And;

  // X * (Y + Z) <--> (X * Y) + (X * Z)
  // X * (Y - Z) <--> (X * Y) - (X * Z)
  if (LOp == Instruction::Mul)
    return ROp == Instruction::Add || ROp == Instruction::Sub;

  return false;
}

/// Return whether "(X LOp Y) ROp Z" is always equal to
/// "(X ROp Z) LOp (Y ROp Z)".
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  if (Instruction::isCommutative(ROp))
    return leftDistributesOverRight(ROp, LOp);

  // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
  return Instruction::isBitwiseLogicOp(LOp) && Instruction::isShift(ROp);

  // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
  // but this requires knowing that the addition does not overflow and other
  // such subtleties.
}

/// This function returns the identity value for the given opcode, which can
/// be used to factor patterns like (X * 2) + X ==> (X * 2) + (X * 1)
/// ==> X * (2 + 1).
static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {
  if (isa<Constant>(V))
    return nullptr;

  return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
}

/// This function predicates factorization using distributive laws. By default,
/// it just returns the 'Op' inputs. But for special-cases like
/// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
/// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
/// allow more factorization opportunities.
static Instruction::BinaryOps
getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op,
                          Value *&LHS, Value *&RHS, BinaryOperator *OtherOp) {
  assert(Op && "Expected a binary operator");
  LHS = Op->getOperand(0);
  RHS = Op->getOperand(1);
  if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
    Constant *C;
    if (match(Op, m_Shl(m_Value(), m_Constant(C)))) {
      // X << C --> X * (1 << C)
      RHS = ConstantExpr::getShl(ConstantInt::get(Op->getType(), 1), C);
      return Instruction::Mul;
    }
    // TODO: We can add other conversions e.g. shr => div etc.
  }
  if (Instruction::isBitwiseLogicOp(TopOpcode)) {
    if (OtherOp && OtherOp->getOpcode() == Instruction::AShr &&
        match(Op, m_LShr(m_NonNegative(), m_Value()))) {
      // lshr nneg C, X --> ashr nneg C, X
      return Instruction::AShr;
    }
  }
  return Op->getOpcode();
}

/// This tries to simplify binary operations by factorizing out common terms
/// (e.g., "(A*B)+(A*C)" -> "A*(B+C)").
static Value *tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ,
                               InstCombiner::BuilderTy &Builder,
                               Instruction::BinaryOps InnerOpcode, Value *A,
                               Value *B, Value *C, Value *D) {
  assert(A && B && C && D && "All values must be provided");

  Value *V = nullptr;
  Value *RetVal = nullptr;
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();

  // Does "X op' Y" always equal "Y op' X"?
  bool InnerCommutative = Instruction::isCommutative(InnerOpcode);

  // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
  if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
    // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
    // commutative case, "(A op' B) op (C op' A)"?
    if (A == C || (InnerCommutative && A == D)) {
      if (A != C)
        std::swap(C, D);
      // Consider forming "A op' (B op D)".
      // If "B op D" simplifies then it can be formed with no cost.
      V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));

      // If "B op D" doesn't simplify then only go on if one of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
        V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
      if (V)
        RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
    }
  }

  // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
  if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
    // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
    // commutative case, "(A op' B) op (B op' D)"?
    if (B == D || (InnerCommutative && B == C)) {
      if (B != D)
        std::swap(C, D);
      // Consider forming "(A op C) op' B".
      // If "A op C" simplifies then it can be formed with no cost.
      V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));

      // If "A op C" doesn't simplify then only go on if one of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
        V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
      if (V)
        RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
    }
  }

  if (!RetVal)
    return nullptr;

  ++NumFactor;
  RetVal->takeName(&I);

  // Try to add no-overflow flags to the final value.
  if (isa<OverflowingBinaryOperator>(RetVal)) {
    bool HasNSW = false;
    bool HasNUW = false;
    if (isa<OverflowingBinaryOperator>(&I)) {
      HasNSW = I.hasNoSignedWrap();
      HasNUW = I.hasNoUnsignedWrap();
    }
    if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
      HasNSW &= LOBO->hasNoSignedWrap();
      HasNUW &= LOBO->hasNoUnsignedWrap();
    }

    if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
      HasNSW &= ROBO->hasNoSignedWrap();
      HasNUW &= ROBO->hasNoUnsignedWrap();
    }

    if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
      // We can propagate 'nsw' if we know that
      //  %Y = mul nsw i16 %X, C
      //  %Z = add nsw i16 %Y, %X
      // =>
      //  %Z = mul nsw i16 %X, C+1
      //
      // iff C+1 isn't INT_MIN
      const APInt *CInt;
      if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
        cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);

      // nuw can be propagated with any constant or nuw value.
      cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
    }
  }
  return RetVal;
}

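// For instance, with InnerOpcode == Mul the common operand %x is factored out:
//   %a = mul i32 %x, %y
//   %b = mul i32 %x, %z
//   %r = add i32 %a, %b
// --> %t = add i32 %y, %z
//     %r = mul i32 %x, %t
// (done only when "add %y, %z" simplifies or one of the muls becomes dead).
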
// (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
//   IFF
//    1) the logic_shifts match
//    2) either BinOp1 is `and`, or the binop/shift pair is completely
//       distributable (anything but `add` + `lshr`) and either BinOp2 is
//       `and` or (logic_shift (inv_logic_shift C1, C), C) == C1
//
//    -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
//
// (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
//   IFF
//    1) the logic_shifts match
//    2) BinOp1 == BinOp2 (if BinOp == `add`, then the shift must be `shl`).
//
//    -> (BinOp (logic_shift (BinOp X, Y)), Mask)
//
// (Binop1 (Binop2 (arithmetic_shift X, Amt), Mask), (arithmetic_shift Y, Amt))
//   IFF
//   1) Binop1 is a bitwise logical operator (`and`, `or` or `xor`)
//   2) Binop2 is `not`
//
//   -> (arithmetic_shift Binop1((not X), Y), Amt)

Instruction *InstCombinerImpl::foldBinOpShiftWithShift(BinaryOperator &I) {
  const DataLayout &DL = I.getModule()->getDataLayout();
  auto IsValidBinOpc = [](unsigned Opc) {
    switch (Opc) {
    default:
      return false;
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Add:
      // Skip Sub as we only match constant masks which will canonicalize to
      // use add.
      return true;
    }
  };

  // Check if we can distribute binop arbitrarily. `add` + `lshr` has extra
  // constraints.
  auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
                                      unsigned ShOpc) {
    assert(ShOpc != Instruction::AShr);
    return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
           ShOpc == Instruction::Shl;
  };

  auto GetInvShift = [](unsigned ShOpc) {
    assert(ShOpc != Instruction::AShr);
    return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
  };

  auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,
                                 unsigned ShOpc, Constant *CMask,
                                 Constant *CShift) {
    // If BinOp1 is `and` we don't need to check the mask.
    if (BinOpc1 == Instruction::And)
      return true;

    // For all other possible transfers we need a completely distributable
    // binop/shift (anything but `add` + `lshr`).
    if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
      return false;

    // If BinOp2 is `and`, any mask works (this only really helps for non-splat
    // vecs, otherwise the mask will be simplified and the following check will
    // handle it).
    if (BinOpc2 == Instruction::And)
      return true;

    // Otherwise, we need a mask that meets the below requirement.
    // (logic_shift (inv_logic_shift Mask, ShAmt), ShAmt) == Mask
    Constant *MaskInvShift =
        ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
    return ConstantFoldBinaryOpOperands(ShOpc, MaskInvShift, CShift, DL) ==
           CMask;
  };

  auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {
    Constant *CMask, *CShift;
    Value *X, *Y, *ShiftedX, *Mask, *Shift;
    if (!match(I.getOperand(ShOpnum),
               m_OneUse(m_Shift(m_Value(Y), m_Value(Shift)))))
      return nullptr;
    if (!match(I.getOperand(1 - ShOpnum),
               m_BinOp(m_Value(ShiftedX), m_Value(Mask))))
      return nullptr;

    if (!match(ShiftedX, m_OneUse(m_Shift(m_Value(X), m_Specific(Shift)))))
      return nullptr;

    // Make sure we are matching instruction shifts and not a ConstantExpr.
    auto *IY = dyn_cast<Instruction>(I.getOperand(ShOpnum));
    auto *IX = dyn_cast<Instruction>(ShiftedX);
    if (!IY || !IX)
      return nullptr;

    // LHS and RHS need the same shift opcode.
    unsigned ShOpc = IY->getOpcode();
    if (ShOpc != IX->getOpcode())
      return nullptr;

    // Make sure the binop is a real instruction and not a ConstantExpr.
    auto *BO2 = dyn_cast<Instruction>(I.getOperand(1 - ShOpnum));
    if (!BO2)
      return nullptr;

    unsigned BinOpc = BO2->getOpcode();
    // Make sure we have valid binops.
    if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))
      return nullptr;

    if (ShOpc == Instruction::AShr) {
      if (Instruction::isBitwiseLogicOp(I.getOpcode()) &&
          BinOpc == Instruction::Xor && match(Mask, m_AllOnes())) {
        Value *NotX = Builder.CreateNot(X);
        Value *NewBinOp = Builder.CreateBinOp(I.getOpcode(), Y, NotX);
        return BinaryOperator::Create(
            static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp, Shift);
      }

      return nullptr;
    }

    // If BinOp1 == BinOp2 and it's bitwise or shl with add, then just
    // distribute to drop the shift irrelevant of constants.
    if (BinOpc == I.getOpcode() &&
        IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {
      Value *NewBinOp2 = Builder.CreateBinOp(I.getOpcode(), X, Y);
      Value *NewBinOp1 = Builder.CreateBinOp(
          static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp2, Shift);
      return BinaryOperator::Create(I.getOpcode(), NewBinOp1, Mask);
    }

    // Otherwise we can only distribute by constant shifting the mask, so
    // ensure we have constants.
    if (!match(Shift, m_ImmConstant(CShift)))
      return nullptr;
    if (!match(Mask, m_ImmConstant(CMask)))
      return nullptr;

    // Check if we can distribute the binops.
    if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
      return nullptr;

    Constant *NewCMask =
        ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
    Value *NewBinOp2 = Builder.CreateBinOp(
        static_cast<Instruction::BinaryOps>(BinOpc), X, NewCMask);
    Value *NewBinOp1 = Builder.CreateBinOp(I.getOpcode(), Y, NewBinOp2);
    return BinaryOperator::Create(static_cast<Instruction::BinaryOps>(ShOpc),
                                  NewBinOp1, CShift);
  };

  if (Instruction *R = MatchBinOp(0))
    return R;
  return MatchBinOp(1);
}

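// As a concrete instance of the second pattern above (BinOp1 == BinOp2):
//   %sx = shl i32 %x, 4
//   %t  = and i32 %sx, 240
//   %sy = shl i32 %y, 4
//   %r  = and i32 %t, %sy
// -->
//   %xy = and i32 %x, %y
//   %s  = shl i32 %xy, 4
//   %r  = and i32 %s, 240
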
// (Binop (zext C), (select C, T, F))
//    -> (select C, (binop 1, T), (binop 0, F))
//
// (Binop (sext C), (select C, T, F))
//    -> (select C, (binop -1, T), (binop 0, F))
//
// Attempt to simplify binary operations into a select with folded arguments,
// when one operand of the binop is a select instruction and the other operand
// is a zext/sext extension whose value is the select condition.
Instruction *
InstCombinerImpl::foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I) {
  // TODO: This simplification may be extended to any speculatable instruction,
  // not just binops, and would possibly be handled better in FoldOpIntoSelect.
  Instruction::BinaryOps Opc = I.getOpcode();
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *A, *CondVal, *TrueVal, *FalseVal;
  Value *CastOp;

  auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {
    return match(CastOp, m_ZExtOrSExt(m_Value(A))) &&
           A->getType()->getScalarSizeInBits() == 1 &&
           match(SelectOp, m_Select(m_Value(CondVal), m_Value(TrueVal),
                                    m_Value(FalseVal)));
  };

  // Make sure one side of the binop is a select instruction, and the other is
  // a zero/sign extension operating on an i1.
  if (MatchSelectAndCast(LHS, RHS))
    CastOp = LHS;
  else if (MatchSelectAndCast(RHS, LHS))
    CastOp = RHS;
  else
    return nullptr;

  auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {
    bool IsCastOpRHS = (CastOp == RHS);
    bool IsZExt = isa<ZExtInst>(CastOp);
    Constant *C;

    if (IsTrueArm) {
      C = Constant::getNullValue(V->getType());
    } else if (IsZExt) {
      unsigned BitWidth = V->getType()->getScalarSizeInBits();
      C = Constant::getIntegerValue(V->getType(), APInt(BitWidth, 1));
    } else {
      C = Constant::getAllOnesValue(V->getType());
    }

    return IsCastOpRHS ? Builder.CreateBinOp(Opc, V, C)
                       : Builder.CreateBinOp(Opc, C, V);
  };

  // If the value used in the zext/sext is the select condition, or the
  // negation of the select condition, the binop can be simplified.
  if (CondVal == A) {
    Value *NewTrueVal = NewFoldedConst(false, TrueVal);
    return SelectInst::Create(CondVal, NewTrueVal,
                              NewFoldedConst(true, FalseVal));
  }

  if (match(A, m_Not(m_Specific(CondVal)))) {
    Value *NewTrueVal = NewFoldedConst(true, TrueVal);
    return SelectInst::Create(CondVal, NewTrueVal,
                              NewFoldedConst(false, FalseVal));
  }

  return nullptr;
}

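// For example (zext of the select condition feeding an add):
//   %e = zext i1 %c to i32
//   %s = select i1 %c, i32 %t, i32 %f
//   %r = add i32 %e, %s
// --> %r = select i1 %c, i32 (add i32 1, %t), i32 (add i32 0, %f)
// where both arms typically fold further (to %t + 1 and %f).
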
Value *InstCombinerImpl::tryFactorizationFolds(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
  Value *A, *B, *C, *D;
  Instruction::BinaryOps LHSOpcode, RHSOpcode;

  if (Op0)
    LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B, Op1);
  if (Op1)
    RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D, Op0);

  // The instruction has the form "(A op' B) op (C op' D)".  Try to factorize
  // a common term.
  if (Op0 && Op1 && LHSOpcode == RHSOpcode)
    if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
      return V;

  // The instruction has the form "(A op' B) op (C)".  Try to factorize common
  // term.
  if (Op0)
    if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
      if (Value *V =
              tryFactorization(I, SQ, Builder, LHSOpcode, A, B, RHS, Ident))
        return V;

  // The instruction has the form "(B) op (C op' D)".  Try to factorize common
  // term.
  if (Op1)
    if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
      if (Value *V =
              tryFactorization(I, SQ, Builder, RHSOpcode, LHS, Ident, C, D))
        return V;

  return nullptr;
}

/// This tries to simplify binary operations which some other binary operation
/// distributes over either by factorizing out common terms
/// (e.g., "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
/// simplifications (e.g., "A & (B | C) -> (A&B) | (A&C)" if this is a win).
/// Returns the simplified value, or null if it didn't simplify.
Value *InstCombinerImpl::foldUsingDistributiveLaws(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();

  // Factorization.
  if (Value *R = tryFactorizationFolds(I))
    return R;

  // Expansion.
  if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
    // The instruction has the form "(A op' B) op C".  See if expanding it out
    // to "(A op C) op' (B op C)" results in simplifications.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Disable the use of undef because it's not safe to distribute undef.
    auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
    Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
    Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);

    // Do "A op C" and "B op C" both simplify?
    if (L && R) {
      // They do! Return "L op' R".
      ++NumExpand;
      C = Builder.CreateBinOp(InnerOpcode, L, R);
      C->takeName(&I);
      return C;
    }

    // Does "A op C" simplify to the identity value for the inner opcode?
    if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
      // It does! Return "B op C".
      ++NumExpand;
      C = Builder.CreateBinOp(TopLevelOpcode, B, C);
      C->takeName(&I);
      return C;
    }

    // Does "B op C" simplify to the identity value for the inner opcode?
    if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
      // It does! Return "A op C".
      ++NumExpand;
      C = Builder.CreateBinOp(TopLevelOpcode, A, C);
      C->takeName(&I);
      return C;
    }
  }

  if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
    // The instruction has the form "A op (B op' C)".  See if expanding it out
    // to "(A op B) op' (A op C)" results in simplifications.
    Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'

    // Disable the use of undef because it's not safe to distribute undef.
    auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
    Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
    Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);

    // Do "A op B" and "A op C" both simplify?
    if (L && R) {
      // They do! Return "L op' R".
      ++NumExpand;
      A = Builder.CreateBinOp(InnerOpcode, L, R);
      A->takeName(&I);
      return A;
    }

    // Does "A op B" simplify to the identity value for the inner opcode?
    if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
      // It does! Return "A op C".
      ++NumExpand;
      A = Builder.CreateBinOp(TopLevelOpcode, A, C);
      A->takeName(&I);
      return A;
    }

    // Does "A op C" simplify to the identity value for the inner opcode?
    if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
      // It does! Return "A op B".
      ++NumExpand;
      A = Builder.CreateBinOp(TopLevelOpcode, A, B);
      A->takeName(&I);
      return A;
    }
  }

  return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
}

std::optional<std::pair<Value *, Value *>>
InstCombinerImpl::matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS) {
  if (LHS->getParent() != RHS->getParent())
    return std::nullopt;

  if (LHS->getNumIncomingValues() < 2)
    return std::nullopt;

  if (!equal(LHS->blocks(), RHS->blocks()))
    return std::nullopt;

  Value *L0 = LHS->getIncomingValue(0);
  Value *R0 = RHS->getIncomingValue(0);

  for (unsigned I = 1, E = LHS->getNumIncomingValues(); I != E; ++I) {
    Value *L1 = LHS->getIncomingValue(I);
    Value *R1 = RHS->getIncomingValue(I);

    if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
      continue;

    return std::nullopt;
  }

  return std::optional(std::pair(L0, R0));
}

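// Example of a symmetric pair accepted by the matcher above:
//   %lhs = phi i32 [ %a, %bb0 ], [ %b, %bb1 ]
//   %rhs = phi i32 [ %b, %bb0 ], [ %a, %bb1 ]
// Every incoming pair is {%a, %b} in some order, so {%a, %b} is returned and
// a commutative "op %lhs, %rhs" can be computed once as "op %a, %b".
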
Value *InstCombinerImpl::SimplifyPhiCommutativeBinaryOp(BinaryOperator &I,
                                                        Value *Op0,
                                                        Value *Op1) {
  assert(I.isCommutative() && "Instruction should be commutative");

  PHINode *LHS = dyn_cast<PHINode>(Op0);
  PHINode *RHS = dyn_cast<PHINode>(Op1);

  if (!LHS || !RHS)
    return nullptr;

  if (auto P = matchSymmetricPhiNodesPair(LHS, RHS)) {
    Value *BI = Builder.CreateBinOp(I.getOpcode(), P->first, P->second);
    if (auto *BO = dyn_cast<BinaryOperator>(BI))
      BO->copyIRFlags(&I);
    return BI;
  }

  return nullptr;
}

Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
                                                        Value *LHS,
                                                        Value *RHS) {
  Value *A, *B, *C, *D, *E, *F;
  bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
  bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
  if (!LHSIsSelect && !RHSIsSelect)
    return nullptr;

  FastMathFlags FMF;
  BuilderTy::FastMathFlagGuard Guard(Builder);
  if (isa<FPMathOperator>(&I)) {
    FMF = I.getFastMathFlags();
    Builder.setFastMathFlags(FMF);
  }

  Instruction::BinaryOps Opcode = I.getOpcode();
  SimplifyQuery Q = SQ.getWithInstruction(&I);

  Value *Cond, *True = nullptr, *False = nullptr;

  // Special-case for add/negate combination. Replace the zero in the negation
  // with the trailing add operand:
  // (Cond ? TVal : -N) + Z --> Cond ? True : (Z - N)
  // (Cond ? -N : FVal) + Z --> Cond ? (Z - N) : False
  auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
    // We need an 'add' and exactly one arm of the select to have been
    // simplified.
    if (Opcode != Instruction::Add || (!True && !False) || (True && False))
      return nullptr;

    Value *N;
    if (True && match(FVal, m_Neg(m_Value(N)))) {
      Value *Sub = Builder.CreateSub(Z, N);
      return Builder.CreateSelect(Cond, True, Sub, I.getName());
    }
    if (False && match(TVal, m_Neg(m_Value(N)))) {
      Value *Sub = Builder.CreateSub(Z, N);
      return Builder.CreateSelect(Cond, Sub, False, I.getName());
    }
    return nullptr;
  };

  if (LHSIsSelect && RHSIsSelect && A == D) {
    // op(select(%v, %x, %y), select(%v, %y, %x)) --> op(%x, %y)
    if (I.isCommutative() && B == F && C == E) {
      Value *BI = Builder.CreateBinOp(I.getOpcode(), B, E);
      if (auto *BO = dyn_cast<BinaryOperator>(BI))
        BO->copyIRFlags(&I);
      return BI;
    }

    // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
    Cond = A;
    True = simplifyBinOp(Opcode, B, E, FMF, Q);
    False = simplifyBinOp(Opcode, C, F, FMF, Q);

    if (LHS->hasOneUse() && RHS->hasOneUse()) {
      if (False && !True)
        True = Builder.CreateBinOp(Opcode, B, E);
      else if (True && !False)
        False = Builder.CreateBinOp(Opcode, C, F);
    }
  } else if (LHSIsSelect && LHS->hasOneUse()) {
    // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
    Cond = A;
    True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
    False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
    if (Value *NewSel = foldAddNegate(B, C, RHS))
      return NewSel;
  } else if (RHSIsSelect && RHS->hasOneUse()) {
    // X op (D ? E : F) -> D ? (X op E) : (X op F)
    Cond = D;
    True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
    False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
    if (Value *NewSel = foldAddNegate(E, F, LHS))
      return NewSel;
  }

  if (!True || !False)
    return nullptr;

  Value *SI = Builder.CreateSelect(Cond, True, False);
  SI->takeName(&I);
  return SI;
}

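// A small example where both arms simplify:
//   %s = select i1 %c, i32 0, i32 -1
//   %r = and i32 %s, %x
// "and 0, %x" folds to 0 and "and -1, %x" folds to %x, so the result is
//   %r = select i1 %c, i32 0, i32 %x
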
/// Freely adapt every user of V as if V were changed to !V.
/// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
void InstCombinerImpl::freelyInvertAllUsersOf(Value *I, Value *IgnoredUser) {
  assert(!isa<Constant>(I) && "Shouldn't invert users of constant");
  for (User *U : make_early_inc_range(I->users())) {
    if (U == IgnoredUser)
      continue; // Don't consider this user.
    switch (cast<Instruction>(U)->getOpcode()) {
    case Instruction::Select: {
      auto *SI = cast<SelectInst>(U);
      SI->swapValues();
      SI->swapProfMetadata();
      break;
    }
    case Instruction::Br:
      cast<BranchInst>(U)->swapSuccessors(); // swaps prof metadata too
      break;
    case Instruction::Xor:
      replaceInstUsesWith(cast<Instruction>(*U), I);
      // Add to worklist for DCE.
      addToWorklist(cast<Instruction>(U));
      break;
    default:
      llvm_unreachable("Got unexpected user - out of sync with "
                       "canFreelyInvertAllUsersOf() ?");
    }
  }
}

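// For instance, a user "select i1 %v, %a, %b" has its arms (and profile
// metadata) swapped, so that once %v is replaced by its inversion the select
// still produces the same result.
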
/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is
/// a constant zero (which is the 'negate' form).
Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
  Value *NegV;
  if (match(V, m_Neg(m_Value(NegV))))
    return NegV;

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
    if (C->getType()->getElementType()->isIntegerTy())
      return ConstantExpr::getNeg(C);

  if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      Constant *Elt = CV->getAggregateElement(i);
      if (!Elt)
        return nullptr;

      if (isa<UndefValue>(Elt))
        continue;

      if (!isa<ConstantInt>(Elt))
        return nullptr;
    }
    return ConstantExpr::getNeg(CV);
  }

  // Negate integer vector splats.
  if (auto *CV = dyn_cast<Constant>(V))
    if (CV->getType()->isVectorTy() &&
        CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
      return ConstantExpr::getNeg(CV);

  return nullptr;
}

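// E.g., "%n = sub i32 0, %x" returns %x, and a constant such as i32 5 is
// folded to its negation, i32 -5.
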
/// A binop with a constant operand and a sign-extended boolean operand may be
/// converted into a select of constants by applying the binary operation to
/// the constant with the two possible values of the extended boolean (0 or -1).
Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
  // TODO: Handle non-commutative binop (constant is operand 0).
  // TODO: Handle zext.
  // TODO: Peek through 'not' of cast.
  Value *BO0 = BO.getOperand(0);
  Value *BO1 = BO.getOperand(1);
  Value *X;
  Constant *C;
  if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
      !X->getType()->isIntOrIntVectorTy(1))
    return nullptr;

  // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
  Constant *Ones = ConstantInt::getAllOnesValue(BO.getType());
  Constant *Zero = ConstantInt::getNullValue(BO.getType());
  Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C);
  Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C);
  return SelectInst::Create(X, TVal, FVal);
}

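// For example:
//   %s = sext i1 %x to i32
//   %r = add i32 %s, 42
// --> %r = select i1 %x, i32 41, i32 42    ; (-1 + 42) and (0 + 42)
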
static Constant *constantFoldOperationIntoSelectOperand(Instruction &I,
                                                        SelectInst *SI,
                                                        bool IsTrueArm) {
  SmallVector<Constant *> ConstOps;
  for (Value *Op : I.operands()) {
    CmpInst::Predicate Pred;
    Constant *C = nullptr;
    if (Op == SI) {
      C = dyn_cast<Constant>(IsTrueArm ? SI->getTrueValue()
                                       : SI->getFalseValue());
    } else if (match(SI->getCondition(),
                     m_ICmp(Pred, m_Specific(Op), m_Constant(C))) &&
               Pred == (IsTrueArm ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
               isGuaranteedNotToBeUndefOrPoison(C)) {
      // Pass
    } else {
      C = dyn_cast<Constant>(Op);
    }
    if (C == nullptr)
      return nullptr;

    ConstOps.push_back(C);
  }

  return ConstantFoldInstOperands(&I, ConstOps, I.getModule()->getDataLayout());
}

static Value *foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
                                             Value *NewOp, InstCombiner &IC) {
  Instruction *Clone = I.clone();
  Clone->replaceUsesOfWith(SI, NewOp);
  IC.InsertNewInstBefore(Clone, SI->getIterator());
  return Clone;
}

1359 Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
1360                                                 bool FoldWithMultiUse) {
1361   // Don't modify shared select instructions unless set FoldWithMultiUse
1362   if (!SI->hasOneUse() && !FoldWithMultiUse)
1363     return nullptr;
1364 
1365   Value *TV = SI->getTrueValue();
1366   Value *FV = SI->getFalseValue();
1367   if (!(isa<Constant>(TV) || isa<Constant>(FV)))
1368     return nullptr;
1369 
1370   // Bool selects with constant operands can be folded to logical ops.
1371   if (SI->getType()->isIntOrIntVectorTy(1))
1372     return nullptr;
1373 
1374   // If it's a bitcast involving vectors, make sure it has the same number of
1375   // elements on both sides.
1376   if (auto *BC = dyn_cast<BitCastInst>(&Op)) {
1377     VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
1378     VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());
1379 
1380     // Verify that either both or neither are vectors.
1381     if ((SrcTy == nullptr) != (DestTy == nullptr))
1382       return nullptr;
1383 
1384     // If vectors, verify that they have the same number of elements.
1385     if (SrcTy && SrcTy->getElementCount() != DestTy->getElementCount())
1386       return nullptr;
1387   }
1388 
1389   // Test if a FCmpInst instruction is used exclusively by a select as
1390   // part of a minimum or maximum operation. If so, refrain from doing
1391   // any other folding. This helps out other analyses which understand
1392   // non-obfuscated minimum and maximum idioms. And in this case, at
1393   // least one of the comparison operands has at least one user besides
1394   // the compare (the select), which would often largely negate the
1395   // benefit of folding anyway.
1396   if (auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1397     if (CI->hasOneUse()) {
1398       Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1399       if ((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1))
1400         return nullptr;
1401     }
1402   }
1403 
1404   // Make sure that one of the select arms constant folds successfully.
1405   Value *NewTV = constantFoldOperationIntoSelectOperand(Op, SI, /*IsTrueArm*/ true);
1406   Value *NewFV = constantFoldOperationIntoSelectOperand(Op, SI, /*IsTrueArm*/ false);
1407   if (!NewTV && !NewFV)
1408     return nullptr;
1409 
1410   // Create an instruction for the arm that did not fold.
1411   if (!NewTV)
1412     NewTV = foldOperationIntoSelectOperand(Op, SI, TV, *this);
1413   if (!NewFV)
1414     NewFV = foldOperationIntoSelectOperand(Op, SI, FV, *this);
1415   return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
1416 }
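
// Hedged example of FoldOpIntoSelect (illustrative IR, assuming the select
// has a single use): with
//   %sel = select i1 %c, i32 0, i32 8
//   %r   = lshr i32 %sel, 3
// both arms constant-fold, giving
//   %r = select i1 %c, i32 0, i32 1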

static Value *simplifyInstructionWithPHI(Instruction &I, PHINode *PN,
                                         Value *InValue, BasicBlock *InBB,
                                         const DataLayout &DL,
                                         const SimplifyQuery SQ) {
  // NB: It is a precondition of this transform that the operands be
  // phi translatable! This is usually trivially satisfied by limiting it
  // to constant ops, and for selects we do a more sophisticated check.
  SmallVector<Value *> Ops;
  for (Value *Op : I.operands()) {
    if (Op == PN)
      Ops.push_back(InValue);
    else
      Ops.push_back(Op->DoPHITranslation(PN->getParent(), InBB));
  }

  // Don't consider the simplification successful if we get back a constant
  // expression. That's just an instruction in hiding.
  // Also reject the case where we simplify back to the phi node. We wouldn't
  // be able to remove it in that case.
  Value *NewVal = simplifyInstructionWithOperands(
      &I, Ops, SQ.getWithInstruction(InBB->getTerminator()));
  if (NewVal && NewVal != PN && !match(NewVal, m_ConstantExpr()))
    return NewVal;

  // Check if the incoming PHI value can be replaced with a constant
  // based on an implied condition.
  BranchInst *TerminatorBI = dyn_cast<BranchInst>(InBB->getTerminator());
  const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I);
  if (TerminatorBI && TerminatorBI->isConditional() &&
      TerminatorBI->getSuccessor(0) != TerminatorBI->getSuccessor(1) && ICmp) {
    bool LHSIsTrue = TerminatorBI->getSuccessor(0) == PN->getParent();
    std::optional<bool> ImpliedCond =
        isImpliedCondition(TerminatorBI->getCondition(), ICmp->getPredicate(),
                           Ops[0], Ops[1], DL, LHSIsTrue);
    if (ImpliedCond)
      return ConstantInt::getBool(I.getType(), ImpliedCond.value());
  }

  return nullptr;
}
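
// Sketch of the implied-condition case above (assumed IR):
//   bb:
//     %lt10 = icmp slt i32 %a, 10
//     br i1 %lt10, label %phibb, label %exit
//   phibb:
//     %p    = phi i32 [ %a, %bb ], ...
//     %lt20 = icmp slt i32 %p, 20
// On the %bb edge, "%a slt 10" implies "%a slt 20", so that incoming value
// of the compare folds to true.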

Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN) {
  unsigned NumPHIValues = PN->getNumIncomingValues();
  if (NumPHIValues == 0)
    return nullptr;

  // We normally only transform phis with a single use.  However, if a PHI has
  // multiple uses and they are all the same operation, we can fold *all* of the
  // uses into the PHI.
  if (!PN->hasOneUse()) {
    // Walk the use list for the instruction, comparing them to I.
    for (User *U : PN->users()) {
      Instruction *UI = cast<Instruction>(U);
      if (UI != &I && !I.isIdenticalTo(UI))
        return nullptr;
    }
    // Otherwise, we can replace *all* users with the new PHI we form.
  }

  // Check to see whether the instruction can be folded into each phi operand.
  // If there is one operand that does not fold, remember the BB it is in.
  // If there is more than one or if *it* is a PHI, bail out.
  SmallVector<Value *> NewPhiValues;
  BasicBlock *NonSimplifiedBB = nullptr;
  Value *NonSimplifiedInVal = nullptr;
  for (unsigned i = 0; i != NumPHIValues; ++i) {
    Value *InVal = PN->getIncomingValue(i);
    BasicBlock *InBB = PN->getIncomingBlock(i);

    if (auto *NewVal = simplifyInstructionWithPHI(I, PN, InVal, InBB, DL, SQ)) {
      NewPhiValues.push_back(NewVal);
      continue;
    }

    if (NonSimplifiedBB) return nullptr;  // More than one non-simplified value.

    NonSimplifiedBB = InBB;
    NonSimplifiedInVal = InVal;
    NewPhiValues.push_back(nullptr);

    // If the InVal is an invoke at the end of the pred block, then we can't
    // insert a computation after it without breaking the edge.
    if (isa<InvokeInst>(InVal))
      if (cast<Instruction>(InVal)->getParent() == NonSimplifiedBB)
        return nullptr;

    // If the incoming non-constant value is reachable from the phi's block,
    // we'll push the operation across a loop backedge. This could result in
    // an infinite combine loop, and is generally non-profitable (especially
    // if the operation was originally outside the loop).
    if (isPotentiallyReachable(PN->getParent(), NonSimplifiedBB, nullptr, &DT,
                               LI))
      return nullptr;
  }

  // If there is exactly one non-simplified value, we can insert a copy of the
  // operation in that block.  However, if this is a critical edge, we would be
  // inserting the computation on some other paths (e.g. inside a loop).  Only
  // do this if the pred block is unconditionally branching into the phi block.
  // Also, make sure that the pred block is not dead code.
  if (NonSimplifiedBB != nullptr) {
    BranchInst *BI = dyn_cast<BranchInst>(NonSimplifiedBB->getTerminator());
    if (!BI || !BI->isUnconditional() ||
        !DT.isReachableFromEntry(NonSimplifiedBB))
      return nullptr;
  }

  // Okay, we can do the transformation: create the new PHI node.
  PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
  InsertNewInstBefore(NewPN, PN->getIterator());
  NewPN->takeName(PN);
  NewPN->setDebugLoc(PN->getDebugLoc());

  // If we are going to have to insert a new computation, do so right before the
  // predecessor's terminator.
  Instruction *Clone = nullptr;
  if (NonSimplifiedBB) {
    Clone = I.clone();
    for (Use &U : Clone->operands()) {
      if (U == PN)
        U = NonSimplifiedInVal;
      else
        U = U->DoPHITranslation(PN->getParent(), NonSimplifiedBB);
    }
    InsertNewInstBefore(Clone, NonSimplifiedBB->getTerminator()->getIterator());
  }

  for (unsigned i = 0; i != NumPHIValues; ++i) {
    if (NewPhiValues[i])
      NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
    else
      NewPN->addIncoming(Clone, PN->getIncomingBlock(i));
  }

  for (User *U : make_early_inc_range(PN->users())) {
    Instruction *User = cast<Instruction>(U);
    if (User == &I) continue;
    replaceInstUsesWith(*User, NewPN);
    eraseInstFromFunction(*User);
  }

  replaceAllDbgUsesWith(const_cast<PHINode &>(*PN),
                        const_cast<PHINode &>(*NewPN),
                        const_cast<PHINode &>(*PN), DT);
  return replaceInstUsesWith(I, NewPN);
}
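
// Example of foldOpIntoPhi (illustrative IR, assuming %bb1 branches
// unconditionally to the phi block):
//   %p = phi i32 [ 1, %bb0 ], [ %x, %bb1 ]
//   %r = add i32 %p, 4
// -->
//   (in %bb1)  %x4 = add i32 %x, 4
//   %r = phi i32 [ 5, %bb0 ], [ %x4, %bb1 ]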

Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {
  // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
  //       we are guarding against replicating the binop in >1 predecessor.
  //       This could miss matching a phi with 2 constant incoming values.
  auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
  auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
  if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
      Phi0->getNumOperands() != Phi1->getNumOperands())
    return nullptr;

  // TODO: Remove the restriction for binop being in the same block as the phis.
  if (BO.getParent() != Phi0->getParent() ||
      BO.getParent() != Phi1->getParent())
    return nullptr;

  if (BO.isCommutative()) {
    if (Value *V = SimplifyPhiCommutativeBinaryOp(BO, Phi0, Phi1))
      return replaceInstUsesWith(BO, V);
  }

  // Fold to a new phi when, for every predecessor, one of the two incoming
  // values is the binop's identity constant; the new phi then simply takes
  // the other incoming value from that predecessor. For example:
  // %phi0 = phi i32 [0, %bb0], [%i, %bb1]
  // %phi1 = phi i32 [%j, %bb0], [0, %bb1]
  // %add = add i32 %phi0, %phi1
  // ==>
  // %add = phi i32 [%j, %bb0], [%i, %bb1]
  Constant *C = ConstantExpr::getBinOpIdentity(BO.getOpcode(), BO.getType(),
                                               /*AllowRHSConstant*/ false);
  if (C) {
    SmallVector<Value *, 4> NewIncomingValues;
    auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {
      auto &Phi0Use = std::get<0>(T);
      auto &Phi1Use = std::get<1>(T);
      if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
        return false;
      Value *Phi0UseV = Phi0Use.get();
      Value *Phi1UseV = Phi1Use.get();
      if (Phi0UseV == C)
        NewIncomingValues.push_back(Phi1UseV);
      else if (Phi1UseV == C)
        NewIncomingValues.push_back(Phi0UseV);
      else
        return false;
      return true;
    };

    if (all_of(zip(Phi0->operands(), Phi1->operands()),
               CanFoldIncomingValuePair)) {
      PHINode *NewPhi =
          PHINode::Create(Phi0->getType(), Phi0->getNumOperands());
      assert(NewIncomingValues.size() == Phi0->getNumOperands() &&
             "The number of collected incoming values should equal the number "
             "of the original PHINode operands!");
      for (unsigned I = 0; I < Phi0->getNumOperands(); I++)
        NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));
      return NewPhi;
    }
  }

  if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
    return nullptr;

  // Match a pair of incoming constants for one of the predecessor blocks.
  BasicBlock *ConstBB, *OtherBB;
  Constant *C0, *C1;
  if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
    ConstBB = Phi0->getIncomingBlock(0);
    OtherBB = Phi0->getIncomingBlock(1);
  } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
    ConstBB = Phi0->getIncomingBlock(1);
    OtherBB = Phi0->getIncomingBlock(0);
  } else {
    return nullptr;
  }
  if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
    return nullptr;
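
  // At this point the operands look like (illustrative IR, assumed shapes):
  //   %phi0 = phi i32 [ 7, %ConstBB ], [ %x, %OtherBB ]
  //   %phi1 = phi i32 [ 3, %ConstBB ], [ %y, %OtherBB ]
  // The goal below is to hoist "binop %x, %y" into %OtherBB and replace the
  // binop with a phi of that result and the folded constant (10 for an add).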

  // The block that we are hoisting to must reach here unconditionally.
  // Otherwise, we could be speculatively executing an expensive or
  // non-speculatable op.
  auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
  if (!PredBlockBranch || PredBlockBranch->isConditional() ||
      !DT.isReachableFromEntry(OtherBB))
    return nullptr;

  // TODO: This check could be tightened to only apply to binops (div/rem) that
  //       are not safe to speculatively execute. But that could allow hoisting
  //       potentially expensive instructions (fdiv for example).
  for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
    if (!isGuaranteedToTransferExecutionToSuccessor(&*BBIter))
      return nullptr;

  // Fold constants for the predecessor block with constant incoming values.
  Constant *NewC = ConstantFoldBinaryOpOperands(BO.getOpcode(), C0, C1, DL);
  if (!NewC)
    return nullptr;

  // Make a new binop in the predecessor block with the non-constant incoming
  // values.
  Builder.SetInsertPoint(PredBlockBranch);
  Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
                                     Phi0->getIncomingValueForBlock(OtherBB),
                                     Phi1->getIncomingValueForBlock(OtherBB));
  if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
    NotFoldedNewBO->copyIRFlags(&BO);

  // Replace the binop with a phi of the new values. The old phis are dead.
  PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
  NewPhi->addIncoming(NewBO, OtherBB);
  NewPhi->addIncoming(NewC, ConstBB);
  return NewPhi;
}

Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {
  if (!isa<Constant>(I.getOperand(1)))
    return nullptr;

  if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
    if (Instruction *NewSel = FoldOpIntoSelect(I, Sel))
      return NewSel;
  } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
    if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
      return NewPhi;
  }
  return nullptr;
}

static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
  // If this GEP has only 0 indices, it is the same pointer as
  // Src. If Src is not a trivial GEP too, don't combine
  // the indices.
  if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
      !Src.hasOneUse())
    return false;
  return true;
}

Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
  if (!isa<VectorType>(Inst.getType()))
    return nullptr;

  BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
  Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
  assert(cast<VectorType>(LHS->getType())->getElementCount() ==
         cast<VectorType>(Inst.getType())->getElementCount());
  assert(cast<VectorType>(RHS->getType())->getElementCount() ==
         cast<VectorType>(Inst.getType())->getElementCount());

  // If both operands of the binop are vector concatenations, then perform the
  // narrow binop on each pair of the source operands followed by concatenation
  // of the results.
  Value *L0, *L1, *R0, *R1;
  ArrayRef<int> Mask;
  if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
      match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
      LHS->hasOneUse() && RHS->hasOneUse() &&
      cast<ShuffleVectorInst>(LHS)->isConcat() &&
      cast<ShuffleVectorInst>(RHS)->isConcat()) {
    // This transform does not have the speculative execution constraint as
    // below because the shuffle is a concatenation. The new binops are
    // operating on exactly the same elements as the existing binop.
    // TODO: We could ease the mask requirement to allow different undef lanes,
    //       but that requires an analysis of the binop-with-undef output value.
    Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
    if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
      BO->copyIRFlags(&Inst);
    Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
    if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
      BO->copyIRFlags(&Inst);
    return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
  }
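
  // Illustrative shape of the concat fold above (assumed IR):
  //   %l = shufflevector <2 x i8> %l0, <2 x i8> %l1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  //   %r = shufflevector <2 x i8> %r0, <2 x i8> %r1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  //   %b = add <4 x i8> %l, %r
  // -->
  //   %b0 = add <2 x i8> %l0, %r0
  //   %b1 = add <2 x i8> %l1, %r1
  //   %b  = shufflevector <2 x i8> %b0, <2 x i8> %b1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>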

  auto createBinOpReverse = [&](Value *X, Value *Y) {
    Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
    if (auto *BO = dyn_cast<BinaryOperator>(V))
      BO->copyIRFlags(&Inst);
    Module *M = Inst.getModule();
    Function *F = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_vector_reverse, V->getType());
    return CallInst::Create(F, V);
  };

  // NOTE: Reverse shuffles don't require the speculative execution protection
  // below because they don't affect which lanes take part in the computation.

  Value *V1, *V2;
  if (match(LHS, m_VecReverse(m_Value(V1)))) {
    // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
    if (match(RHS, m_VecReverse(m_Value(V2))) &&
        (LHS->hasOneUse() || RHS->hasOneUse() ||
         (LHS == RHS && LHS->hasNUses(2))))
      return createBinOpReverse(V1, V2);

    // Op(rev(V1), RHSSplat) -> rev(Op(V1, RHSSplat))
    if (LHS->hasOneUse() && isSplatValue(RHS))
      return createBinOpReverse(V1, RHS);
  }
  // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
  else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
    return createBinOpReverse(LHS, V2);

  // It may not be safe to reorder shuffles and things like div, urem, etc.
  // because we may trap when executing those ops on unknown vector elements.
  // See PR20059.
  if (!isSafeToSpeculativelyExecute(&Inst))
    return nullptr;

  auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
    Value *XY = Builder.CreateBinOp(Opcode, X, Y);
    if (auto *BO = dyn_cast<BinaryOperator>(XY))
      BO->copyIRFlags(&Inst);
    return new ShuffleVectorInst(XY, M);
  };

  // If both arguments of the binary operation are shuffles that use the same
  // mask and shuffle within a single vector, move the shuffle after the binop.
  if (match(LHS, m_Shuffle(m_Value(V1), m_Poison(), m_Mask(Mask))) &&
      match(RHS, m_Shuffle(m_Value(V2), m_Poison(), m_SpecificMask(Mask))) &&
      V1->getType() == V2->getType() &&
      (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
    // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
    return createBinOpShuffle(V1, V2, Mask);
  }

  // If both arguments of a commutative binop are select-shuffles that use the
  // same mask with commuted operands, the shuffles are unnecessary.
  if (Inst.isCommutative() &&
      match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
      match(RHS,
            m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
    auto *LShuf = cast<ShuffleVectorInst>(LHS);
    auto *RShuf = cast<ShuffleVectorInst>(RHS);
    // TODO: Allow shuffles that contain undefs in the mask?
    //       That is legal, but it reduces undef knowledge.
    // TODO: Allow arbitrary shuffles by shuffling after binop?
    //       That might be legal, but we have to deal with poison.
    if (LShuf->isSelect() &&
        !is_contained(LShuf->getShuffleMask(), PoisonMaskElem) &&
        RShuf->isSelect() &&
        !is_contained(RShuf->getShuffleMask(), PoisonMaskElem)) {
      // Example:
      // LHS = shuffle V1, V2, <0, 5, 6, 3>
      // RHS = shuffle V2, V1, <0, 5, 6, 3>
      // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
      Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
      NewBO->copyIRFlags(&Inst);
      return NewBO;
    }
  }

  // If one argument is a shuffle within one vector and the other is a constant,
  // try moving the shuffle after the binary operation. This canonicalization
  // intends to move shuffles closer to other shuffles and binops closer to
  // other binops, so they can be folded. It may also enable demanded elements
  // transforms.
  Constant *C;
  auto *InstVTy = dyn_cast<FixedVectorType>(Inst.getType());
  if (InstVTy &&
      match(&Inst, m_c_BinOp(m_OneUse(m_Shuffle(m_Value(V1), m_Poison(),
                                                m_Mask(Mask))),
                             m_ImmConstant(C))) &&
      cast<FixedVectorType>(V1->getType())->getNumElements() <=
          InstVTy->getNumElements()) {
    assert(InstVTy->getScalarType() == V1->getType()->getScalarType() &&
           "Shuffle should not change scalar type");

    // Find constant NewC that has property:
    //   shuffle(NewC, ShMask) = C
    // If such a constant does not exist (example: ShMask=<0,0> and C=<1,2>),
    // the reorder is not possible. A 1-to-1 mapping is not required. Example:
    // ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <undef,5,6,undef>
    bool ConstOp1 = isa<Constant>(RHS);
    ArrayRef<int> ShMask = Mask;
    unsigned SrcVecNumElts =
        cast<FixedVectorType>(V1->getType())->getNumElements();
    PoisonValue *PoisonScalar = PoisonValue::get(C->getType()->getScalarType());
    SmallVector<Constant *, 16> NewVecC(SrcVecNumElts, PoisonScalar);
    bool MayChange = true;
    unsigned NumElts = InstVTy->getNumElements();
    for (unsigned I = 0; I < NumElts; ++I) {
      Constant *CElt = C->getAggregateElement(I);
      if (ShMask[I] >= 0) {
        assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
        Constant *NewCElt = NewVecC[ShMask[I]];
        // Bail out if:
        // 1. The constant vector contains a constant expression.
        // 2. The shuffle needs an element of the constant vector that can't
        //    be mapped to a new constant vector.
        // 3. This is a widening shuffle that copies elements of V1 into the
        //    extended elements (extending with poison is allowed).
        if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
            I >= SrcVecNumElts) {
          MayChange = false;
          break;
        }
        NewVecC[ShMask[I]] = CElt;
      }
      // If this is a widening shuffle, we must be able to extend with poison
      // elements. If the original binop does not produce a poison in the high
      // lanes, then this transform is not safe.
      // Similarly for poison lanes due to the shuffle mask, we can only
      // transform binops that preserve poison.
      // TODO: We could shuffle those non-poison constant values into the
      //       result by using a constant vector (rather than a poison vector)
      //       as operand 1 of the new binop, but that might be too aggressive
      //       for target-independent shuffle creation.
      if (I >= SrcVecNumElts || ShMask[I] < 0) {
        Constant *MaybePoison =
            ConstOp1
                ? ConstantFoldBinaryOpOperands(Opcode, PoisonScalar, CElt, DL)
                : ConstantFoldBinaryOpOperands(Opcode, CElt, PoisonScalar, DL);
        if (!MaybePoison || !isa<PoisonValue>(MaybePoison)) {
          MayChange = false;
          break;
        }
      }
    }
    if (MayChange) {
      Constant *NewC = ConstantVector::get(NewVecC);
      // It may not be safe to execute a binop on a vector with poison elements
      // because the entire instruction can be folded to undef or create poison
      // that did not exist in the original code.
      // TODO: The shift case should not be necessary.
      if (Inst.isIntDivRem() || (Inst.isShift() && ConstOp1))
        NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);

      // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask)
      // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask)
      Value *NewLHS = ConstOp1 ? V1 : NewC;
      Value *NewRHS = ConstOp1 ? NewC : V1;
      return createBinOpShuffle(NewLHS, NewRHS, Mask);
    }
  }

  // Try to reassociate to sink a splat shuffle after a binary operation.
  if (Inst.isAssociative() && Inst.isCommutative()) {
    // Canonicalize shuffle operand as LHS.
    if (isa<ShuffleVectorInst>(RHS))
      std::swap(LHS, RHS);

    Value *X;
    ArrayRef<int> MaskC;
    int SplatIndex;
    Value *Y, *OtherOp;
    if (!match(LHS,
               m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
        !match(MaskC, m_SplatOrUndefMask(SplatIndex)) ||
        X->getType() != Inst.getType() ||
        !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
      return nullptr;

    // FIXME: This may not be safe if the analysis allows undef elements. By
    //        moving 'Y' before the splat shuffle, we are implicitly assuming
    //        that it is not undef/poison at the splat index.
    if (isSplatValue(OtherOp, SplatIndex)) {
      std::swap(Y, OtherOp);
    } else if (!isSplatValue(Y, SplatIndex)) {
      return nullptr;
    }

    // X and Y are splatted values, so perform the binary operation on those
    // values followed by a splat followed by the 2nd binary operation:
    // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
    Value *NewBO = Builder.CreateBinOp(Opcode, X, Y);
    SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex);
    Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);
    Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp);

    // Intersect FMF on both new binops. Other (poison-generating) flags are
    // dropped to be safe.
    if (isa<FPMathOperator>(R)) {
      R->copyFastMathFlags(&Inst);
      R->andIRFlags(RHS);
    }
    if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
      NewInstBO->copyIRFlags(R);
    return R;
  }

  return nullptr;
}

/// Try to narrow the width of a binop if at least 1 operand is an extend of
/// a value. This requires a potentially expensive known bits check to make
/// sure the narrow op does not overflow.
Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
  // We need at least one extended operand.
  Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);

  // If this is a sub, we swap the operands since we always want an extension
  // on the RHS. The LHS can be an extension or a constant.
  if (BO.getOpcode() == Instruction::Sub)
    std::swap(Op0, Op1);

  Value *X;
  bool IsSext = match(Op0, m_SExt(m_Value(X)));
  if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
    return nullptr;

  // If both operands are the same extension from the same source type and we
  // can eliminate at least one (hasOneUse), this might work.
  CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
  Value *Y;
  if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
        cast<Operator>(Op1)->getOpcode() == CastOpc &&
        (Op0->hasOneUse() || Op1->hasOneUse()))) {
    // If that did not match, see if we have a suitable constant operand.
    // Truncating and extending must produce the same constant.
    Constant *WideC;
    if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC)))
      return nullptr;
    Constant *NarrowC = getLosslessTrunc(WideC, X->getType(), CastOpc);
    if (!NarrowC)
      return nullptr;
    Y = NarrowC;
  }

  // Swap back now that we found our operands.
  if (BO.getOpcode() == Instruction::Sub)
    std::swap(X, Y);

  // Both operands have narrow versions. Last step: the math must not overflow
  // in the narrow width.
  if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
    return nullptr;

  // bo (ext X), (ext Y) --> ext (bo X, Y)
  // bo (ext X), C       --> ext (bo X, C')
  Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
  if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
    if (IsSext)
      NewBinOp->setHasNoSignedWrap();
    else
      NewBinOp->setHasNoUnsignedWrap();
  }
  return CastInst::Create(CastOpc, NarrowBO, BO.getType());
}
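
// Hedged example of the narrowing above (assumed IR, with known bits proving
// that the i8 add cannot wrap unsigned):
//   %zx = zext i8 %x to i32
//   %zy = zext i8 %y to i32
//   %r  = add i32 %zx, %zy
// -->
//   %narrow = add nuw i8 %x, %y
//   %r      = zext i8 %narrow to i32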

static bool isMergedGEPInBounds(GEPOperator &GEP1, GEPOperator &GEP2) {
  // At least one GEP must be inbounds.
  if (!GEP1.isInBounds() && !GEP2.isInBounds())
    return false;

  return (GEP1.isInBounds() || GEP1.hasAllZeroIndices()) &&
         (GEP2.isInBounds() || GEP2.hasAllZeroIndices());
}

/// Thread a GEP operation with constant indices through the constant true/false
/// arms of a select.
static Instruction *foldSelectGEP(GetElementPtrInst &GEP,
                                  InstCombiner::BuilderTy &Builder) {
  if (!GEP.hasAllConstantIndices())
    return nullptr;

  Instruction *Sel;
  Value *Cond;
  Constant *TrueC, *FalseC;
  if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
      !match(Sel,
             m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
    return nullptr;

  // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
  // Propagate 'inbounds' and metadata from existing instructions.
  // Note: using IRBuilder to create the constants for efficiency.
  SmallVector<Value *, 4> IndexC(GEP.indices());
  bool IsInBounds = GEP.isInBounds();
  Type *Ty = GEP.getSourceElementType();
  Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", IsInBounds);
  Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", IsInBounds);
  return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
}
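
// A small sketch of foldSelectGEP (illustrative IR; @a and @b are assumed
// globals):
//   %p = select i1 %c, ptr @a, ptr @b
//   %g = getelementptr inbounds i32, ptr %p, i64 1
// -->
//   %g = select i1 %c, ptr getelementptr inbounds (i32, ptr @a, i64 1),
//                      ptr getelementptr inbounds (i32, ptr @b, i64 1)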

Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
                                             GEPOperator *Src) {
  // Combine Indices - If the source pointer to this getelementptr instruction
  // is a getelementptr instruction with matching element type, combine the
  // indices of the two getelementptr instructions into a single instruction.
  if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
    return nullptr;

  // For constant GEPs, use a more general offset-based folding approach.
  Type *PtrTy = Src->getType()->getScalarType();
  if (GEP.hasAllConstantIndices() &&
      (Src->hasOneUse() || Src->hasAllConstantIndices())) {
    // Split Src into a variable part and a constant suffix.
    gep_type_iterator GTI = gep_type_begin(*Src);
    Type *BaseType = GTI.getIndexedType();
    bool IsFirstType = true;
    unsigned NumVarIndices = 0;
    for (auto Pair : enumerate(Src->indices())) {
      if (!isa<ConstantInt>(Pair.value())) {
        BaseType = GTI.getIndexedType();
        IsFirstType = false;
        NumVarIndices = Pair.index() + 1;
      }
      ++GTI;
    }

    // Determine the offset for the constant suffix of Src.
    APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), 0);
    if (NumVarIndices != Src->getNumIndices()) {
      // FIXME: getIndexedOffsetInType() does not handle scalable vectors.
      if (BaseType->isScalableTy())
        return nullptr;

      SmallVector<Value *> ConstantIndices;
      if (!IsFirstType)
        ConstantIndices.push_back(
            Constant::getNullValue(Type::getInt32Ty(GEP.getContext())));
      append_range(ConstantIndices, drop_begin(Src->indices(), NumVarIndices));
      Offset += DL.getIndexedOffsetInType(BaseType, ConstantIndices);
    }

    // Add the offset for GEP (which is fully constant).
    if (!GEP.accumulateConstantOffset(DL, Offset))
      return nullptr;

    APInt OffsetOld = Offset;
    // Convert the total offset back into indices.
    SmallVector<APInt> ConstIndices =
        DL.getGEPIndicesForOffset(BaseType, Offset);
    if (!Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero())) {
      // If both GEPs are constant-indexed and cannot be merged either way,
      // convert them to a GEP of i8.
      if (Src->hasAllConstantIndices())
        return replaceInstUsesWith(
            GEP, Builder.CreateGEP(
                     Builder.getInt8Ty(), Src->getOperand(0),
                     Builder.getInt(OffsetOld), "",
                     isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP))));
      return nullptr;
    }

    bool IsInBounds = isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP));
    SmallVector<Value *> Indices;
    append_range(Indices, drop_end(Src->indices(),
                                   Src->getNumIndices() - NumVarIndices));
    for (const APInt &Idx : drop_begin(ConstIndices, !IsFirstType)) {
      Indices.push_back(ConstantInt::get(GEP.getContext(), Idx));
      // Even if the total offset is inbounds, we may end up representing it
      // by first performing a larger negative offset, and then a smaller
      // positive one. The large negative offset might go out of bounds. Only
      // preserve inbounds if all signs are the same.
      IsInBounds &= Idx.isNonNegative() == ConstIndices[0].isNonNegative();
    }

    return replaceInstUsesWith(
        GEP, Builder.CreateGEP(Src->getSourceElementType(), Src->getOperand(0),
                               Indices, "", IsInBounds));
  }
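
  // Worked example of the offset-based merge above (assumed IR):
  //   %s = getelementptr inbounds [10 x i32], ptr %p, i64 %i, i64 2
  //   %g = getelementptr inbounds i32, ptr %s, i64 3
  // The constant suffix of %s contributes 8 bytes, %g adds 12 more, and
  // converting the 20-byte total back into [10 x i32] indices yields
  //   %g = getelementptr inbounds [10 x i32], ptr %p, i64 %i, i64 5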

  if (Src->getResultElementType() != GEP.getSourceElementType())
    return nullptr;

  SmallVector<Value*, 8> Indices;

  // Find out whether the last index in the source GEP is a sequential index.
  bool EndsWithSequential = false;
  for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
       I != E; ++I)
    EndsWithSequential = I.isSequential();

  // Can we combine the two pointer arithmetic offsets?
  if (EndsWithSequential) {
    // Replace: gep (gep %P, long B), long A, ...
    // With:    T = long A+B; gep %P, T, ...
    Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
    Value *GO1 = GEP.getOperand(1);

    // If they aren't the same type, then the input hasn't been processed
    // by the loop above yet (which canonicalizes sequential index types to
    // intptr_t).  Just avoid transforming this until the input has been
    // normalized.
    if (SO1->getType() != GO1->getType())
      return nullptr;

    Value *Sum =
        simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
    // Only do the combine when we are sure the cost after the
    // merge is never more than that before the merge.
    if (Sum == nullptr)
      return nullptr;

    // Update the GEP in place if possible.
    if (Src->getNumOperands() == 2) {
      GEP.setIsInBounds(isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP)));
      replaceOperand(GEP, 0, Src->getOperand(0));
      replaceOperand(GEP, 1, Sum);
      return &GEP;
    }
    Indices.append(Src->op_begin()+1, Src->op_end()-1);
    Indices.push_back(Sum);
    Indices.append(GEP.op_begin()+2, GEP.op_end());
  } else if (isa<Constant>(*GEP.idx_begin()) &&
             cast<Constant>(*GEP.idx_begin())->isNullValue() &&
             Src->getNumOperands() != 1) {
    // Otherwise we can do the fold if the first index of the GEP is a zero.
    Indices.append(Src->op_begin()+1, Src->op_end());
    Indices.append(GEP.idx_begin()+1, GEP.idx_end());
  }

  if (!Indices.empty())
    return replaceInstUsesWith(
        GEP, Builder.CreateGEP(
                 Src->getSourceElementType(), Src->getOperand(0), Indices, "",
                 isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP))));

  return nullptr;
}

Value *InstCombiner::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
                                           BuilderTy *Builder,
                                           bool &DoesConsume, unsigned Depth) {
  static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1));
  // ~(~(X)) -> X.
  Value *A, *B;
  if (match(V, m_Not(m_Value(A)))) {
    DoesConsume = true;
    return A;
  }

  Constant *C;
  // Constants can be considered to be not'ed values.
  if (match(V, m_ImmConstant(C)))
    return ConstantExpr::getNot(C);

  if (Depth++ >= MaxAnalysisRecursionDepth)
    return nullptr;

  // The rest of the cases require that we invert all uses so don't bother
  // doing the analysis if we know we can't use the result.
  if (!WillInvertAllUses)
    return nullptr;

  // Compares can be inverted if all of their uses are being modified to use
  // the ~V.
  if (auto *I = dyn_cast<CmpInst>(V)) {
    if (Builder != nullptr)
      return Builder->CreateCmp(I->getInversePredicate(), I->getOperand(0),
                                I->getOperand(1));
    return NonNull;
  }

  // If `V` is of the form `A + B` then `-1 - V` can be folded into
  // `(-1 - B) - A` if we are willing to invert all of the uses.
  if (match(V, m_Add(m_Value(A), m_Value(B)))) {
    if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
                                         DoesConsume, Depth))
      return Builder ? Builder->CreateSub(BV, A) : NonNull;
    if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
                                         DoesConsume, Depth))
      return Builder ? Builder->CreateSub(AV, B) : NonNull;
    return nullptr;
  }
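
  // (Derivation, added for clarity: ~V == -1 - V, so for V == A + B we get
  //  ~V == -1 - (A + B) == (-1 - B) - A == (~B) - A; a free invert of either
  //  addend turns the add into a sub. The xor and sub cases below follow the
  //  same kind of arithmetic.)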

  // If `V` is of the form `A ^ ~B` then `~(A ^ ~B)` can be folded
  // into `A ^ B` if we are willing to invert all of the uses.
  if (match(V, m_Xor(m_Value(A), m_Value(B)))) {
    if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
                                         DoesConsume, Depth))
      return Builder ? Builder->CreateXor(A, BV) : NonNull;
    if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
                                         DoesConsume, Depth))
      return Builder ? Builder->CreateXor(AV, B) : NonNull;
    return nullptr;
  }

  // If `V` is of the form `A - B` then `-1 - V` can be folded into
  // `(-1 - A) + B` if we are willing to invert all of the uses.
  if (match(V, m_Sub(m_Value(A), m_Value(B)))) {
    if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
                                         DoesConsume, Depth))
      return Builder ? Builder->CreateAdd(AV, B) : NonNull;
    return nullptr;
  }

  // If `V` is of the form `(~A) s>> B` then `~((~A) s>> B)` can be folded
  // into `A s>> B` if we are willing to invert all of the uses.
  if (match(V, m_AShr(m_Value(A), m_Value(B)))) {
    if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
                                         DoesConsume, Depth))
      return Builder ? Builder->CreateAShr(AV, B) : NonNull;
    return nullptr;
  }

  Value *Cond;
  // LogicOps are special in that we canonicalize them at the cost of an
  // instruction.
  bool IsSelect = match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
                  !shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(V));
  // Selects/min/max with invertible operands are freely invertible.
  if (IsSelect || match(V, m_MaxOrMin(m_Value(A), m_Value(B)))) {
    if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder*/ nullptr,
                               DoesConsume, Depth))
      return nullptr;
    if (Value *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
                                            DoesConsume, Depth)) {
      if (Builder != nullptr) {
        Value *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
                                            DoesConsume, Depth);
        assert(NotB != nullptr &&
               "Unable to build inverted value for known freely invertible op");
        if (auto *II = dyn_cast<IntrinsicInst>(V))
          return Builder->CreateBinaryIntrinsic(
              getInverseMinMaxIntrinsic(II->getIntrinsicID()), NotA, NotB);
        return Builder->CreateSelect(Cond, NotA, NotB);
      }
      return NonNull;
    }
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  Value *PtrOp = GEP.getOperand(0);
  SmallVector<Value *, 8> Indices(GEP.indices());
  Type *GEPType = GEP.getType();
  Type *GEPEltType = GEP.getSourceElementType();
  bool IsGEPSrcEleScalable = GEPEltType->isScalableTy();
  if (Value *V = simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.isInBounds(),
                                 SQ.getWithInstruction(&GEP)))
    return replaceInstUsesWith(GEP, V);

  // For vector geps, use the generic demanded vector support.
  // Skip if GEP return type is scalable. The number of elements is unknown at
  // compile-time.
  if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
    auto VWidth = GEPFVTy->getNumElements();
    APInt PoisonElts(VWidth, 0);
    APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
    if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
                                              PoisonElts)) {
      if (V != &GEP)
        return replaceInstUsesWith(GEP, V);
      return &GEP;
    }

    // TODO: 1) Scalarize splat operands, 2) scalarize entire instruction if
    // possible (decide on canonical form for pointer broadcast), 3) exploit
    // undef elements to decrease demanded bits
  }

  // Eliminate unneeded casts for indices, and replace any index over a
  // zero-size element type with zero (such an index displaces by a multiple
  // of zero).
  bool MadeChange = false;

  // Index width may not be the same width as pointer width.
  // Data layout chooses the right type based on supported integer types.
  Type *NewScalarIndexTy =
      DL.getIndexType(GEP.getPointerOperandType()->getScalarType());

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
       ++I, ++GTI) {
    // Skip indices into struct types.
    if (GTI.isStruct())
      continue;

    Type *IndexTy = (*I)->getType();
    Type *NewIndexType =
        IndexTy->isVectorTy()
            ? VectorType::get(NewScalarIndexTy,
                              cast<VectorType>(IndexTy)->getElementCount())
            : NewScalarIndexTy;

    // If the element type has zero size then any index over it is equivalent
    // to an index of zero, so replace it with zero if it is not zero already.
    Type *EltTy = GTI.getIndexedType();
    if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())
      if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) {
        *I = Constant::getNullValue(NewIndexType);
        MadeChange = true;
      }

    if (IndexTy != NewIndexType) {
      // If we are using a wider index than needed for this platform, shrink
      // it to what we need.  If narrower, sign-extend it to what we need.
      // This explicit cast can make subsequent optimizations more obvious.
      *I = Builder.CreateIntCast(*I, NewIndexType, true);
      MadeChange = true;
    }
  }
  if (MadeChange)
    return &GEP;

  // Check to see if the inputs to the PHI node are getelementptr instructions.
  if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
    auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
    if (!Op1)
      return nullptr;

    // Don't fold a GEP into itself through a PHI node. This can only happen
    // through the back-edge of a loop. Folding a GEP into itself means that
    // the value of the previous iteration needs to be stored in the meantime,
    // thus requiring an additional register variable to be live, but not
    // actually achieving anything (the GEP still needs to be executed once per
    // loop iteration).
    if (Op1 == &GEP)
      return nullptr;

    int DI = -1;

    for (auto I = PN->op_begin()+1, E = PN->op_end(); I != E; ++I) {
      auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
      if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
          Op1->getSourceElementType() != Op2->getSourceElementType())
        return nullptr;

      // As for Op1 above, don't try to fold a GEP into itself.
      if (Op2 == &GEP)
        return nullptr;

      // Keep track of the type as we walk the GEP.
      Type *CurTy = nullptr;

      for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
        if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
          return nullptr;

        if (Op1->getOperand(J) != Op2->getOperand(J)) {
          if (DI == -1) {
            // We have not seen any differences in the GEPs feeding the PHI
            // yet, so we record this one if it is allowed to be a variable.

            // The first two arguments can vary for any GEP; the rest have to
            // be static for struct slots.
            if (J > 1) {
              assert(CurTy && "No current type?");
              if (CurTy->isStructTy())
                return nullptr;
            }

            DI = J;
          } else {
            // The GEPs differ by more than one input. While this could be
            // extended to support GEPs that vary by more than one variable,
            // it doesn't make sense: it greatly increases the complexity,
            // and the result would be an R+R+R addressing mode, which no
            // backend supports directly and which would need to be broken
            // into several simpler instructions anyway.
            return nullptr;
          }
        }

        // Sink down a layer of the type for the next iteration.
        if (J > 0) {
          if (J == 1) {
            CurTy = Op1->getSourceElementType();
          } else {
            CurTy =
                GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J));
          }
        }
      }
    }

    // If not all GEPs are identical we'll have to create a new PHI node.
    // Check that the old PHI node has only one use so that it will get
    // removed.
    if (DI != -1 && !PN->hasOneUse())
      return nullptr;

    auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
    if (DI == -1) {
      // All the GEPs feeding the PHI are identical. Clone one down into our
      // BB so that it can be merged with the current GEP.
    } else {
      // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
      // into the current block so it can be merged, and create a new PHI to
      // set that index.
      PHINode *NewPN;
      {
        IRBuilderBase::InsertPointGuard Guard(Builder);
        Builder.SetInsertPoint(PN);
        NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
                                  PN->getNumOperands());
      }

      for (auto &I : PN->operands())
        NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
                           PN->getIncomingBlock(I));

      NewGEP->setOperand(DI, NewPN);
    }

    NewGEP->insertBefore(*GEP.getParent(), GEP.getParent()->getFirstInsertionPt());
    return replaceOperand(GEP, 0, NewGEP);
  }

  if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
    if (Instruction *I = visitGEPOfGEP(GEP, Src))
      return I;

  // Skip if GEP source element type is scalable. The type alloc size is unknown
  // at compile-time.
  if (GEP.getNumIndices() == 1 && !IsGEPSrcEleScalable) {
    unsigned AS = GEP.getPointerAddressSpace();
    if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
        DL.getIndexSizeInBits(AS)) {
      uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();

      if (TyAllocSize == 1) {
        // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y),
        // but only if the result pointer is only used as if it were an integer,
        // or both point to the same underlying object (otherwise provenance is
        // not necessarily retained).
        Value *X = GEP.getPointerOperand();
        Value *Y;
        if (match(GEP.getOperand(1),
                  m_Sub(m_PtrToInt(m_Value(Y)), m_PtrToInt(m_Specific(X)))) &&
            GEPType == Y->getType()) {
          bool HasSameUnderlyingObject =
              getUnderlyingObject(X) == getUnderlyingObject(Y);
          bool Changed = false;
          GEP.replaceUsesWithIf(Y, [&](Use &U) {
            bool ShouldReplace = HasSameUnderlyingObject ||
                                 isa<ICmpInst>(U.getUser()) ||
                                 isa<PtrToIntInst>(U.getUser());
            Changed |= ShouldReplace;
            return ShouldReplace;
          });
          return Changed ? &GEP : nullptr;
        }
      } else {
        // Canonicalize (gep T* X, V / sizeof(T)) to (gep i8* X, V)
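        // E.g. (illustrative, assuming T = i32 so sizeof(T) == 4):
        //   %g = getelementptr i32, ptr %x, i64 (ashr exact i64 %v, 2)
        // becomes
        //   %g = getelementptr i8, ptr %x, i64 %v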
        Value *V;
        if ((has_single_bit(TyAllocSize) &&
             match(GEP.getOperand(1),
                   m_Exact(m_AShr(m_Value(V),
                                  m_SpecificInt(countr_zero(TyAllocSize)))))) ||
            match(GEP.getOperand(1),
                  m_Exact(m_SDiv(m_Value(V), m_SpecificInt(TyAllocSize))))) {
          GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
              Builder.getInt8Ty(), GEP.getPointerOperand(), V);
          NewGEP->setIsInBounds(GEP.isInBounds());
          return NewGEP;
        }
      }
    }
  }
  // We do not handle pointer-vector geps here.
  if (GEPType->isVectorTy())
    return nullptr;

  if (GEP.getNumIndices() == 1) {
    // Try to replace ADD + GEP with GEP + GEP.
    Value *Idx1, *Idx2;
    if (match(GEP.getOperand(1),
              m_OneUse(m_Add(m_Value(Idx1), m_Value(Idx2))))) {
      //   %idx = add i64 %idx1, %idx2
      //   %gep = getelementptr i32, ptr %ptr, i64 %idx
      // as:
      //   %newptr = getelementptr i32, ptr %ptr, i64 %idx1
      //   %newgep = getelementptr i32, ptr %newptr, i64 %idx2
      auto *NewPtr = Builder.CreateGEP(GEP.getResultElementType(),
                                       GEP.getPointerOperand(), Idx1);
      return GetElementPtrInst::Create(GEP.getResultElementType(), NewPtr,
                                       Idx2);
    }
    ConstantInt *C;
    if (match(GEP.getOperand(1), m_OneUse(m_SExtLike(m_OneUse(m_NSWAdd(
                                     m_Value(Idx1), m_ConstantInt(C))))))) {
      // %add = add nsw i32 %idx1, idx2
      // %sidx = sext i32 %add to i64
      // %gep = getelementptr i32, ptr %ptr, i64 %sidx
      // as:
      // %newptr = getelementptr i32, ptr %ptr, i32 %idx1
      // %newgep = getelementptr i32, ptr %newptr, i32 idx2
      auto *NewPtr = Builder.CreateGEP(
          GEP.getResultElementType(), GEP.getPointerOperand(),
          Builder.CreateSExt(Idx1, GEP.getOperand(1)->getType()));
      return GetElementPtrInst::Create(
          GEP.getResultElementType(), NewPtr,
          Builder.CreateSExt(C, GEP.getOperand(1)->getType()));
    }
  }

  if (!GEP.isInBounds()) {
    unsigned IdxWidth =
        DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace());
    APInt BasePtrOffset(IdxWidth, 0);
    Value *UnderlyingPtrOp =
            PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL,
                                                             BasePtrOffset);
    bool CanBeNull, CanBeFreed;
    uint64_t DerefBytes = UnderlyingPtrOp->getPointerDereferenceableBytes(
        DL, CanBeNull, CanBeFreed);
    if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
      if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
          BasePtrOffset.isNonNegative()) {
        APInt AllocSize(IdxWidth, DerefBytes);
        if (BasePtrOffset.ule(AllocSize)) {
          return GetElementPtrInst::CreateInBounds(
              GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
        }
      }
    }
  }

  if (Instruction *R = foldSelectGEP(GEP, Builder))
    return R;

  return nullptr;
}

static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI,
                                         Instruction *AI) {
  if (isa<ConstantPointerNull>(V))
    return true;
  if (auto *LI = dyn_cast<LoadInst>(V))
    return isa<GlobalVariable>(LI->getPointerOperand());
  // Two distinct allocations will never be equal.
  return isAllocLikeFn(V, &TLI) && V != AI;
}

/// Given a call CB which uses an address UsedV, return true if we can prove the
/// call's only possible effect is storing to UsedV.
static bool isRemovableWrite(CallBase &CB, Value *UsedV,
                             const TargetLibraryInfo &TLI) {
  if (!CB.use_empty())
    // TODO: add recursion if returned attribute is present
    return false;

  if (CB.isTerminator())
    // TODO: remove implementation restriction
    return false;

  if (!CB.willReturn() || !CB.doesNotThrow())
    return false;

  // If the only possible side effect of the call is writing to the alloca,
  // and the result isn't used, we can safely remove any reads implied by the
  // call including those which might read the alloca itself.
  std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
  return Dest && Dest->Ptr == UsedV;
}
2607 
2608 static bool isAllocSiteRemovable(Instruction *AI,
2609                                  SmallVectorImpl<WeakTrackingVH> &Users,
2610                                  const TargetLibraryInfo &TLI) {
2611   SmallVector<Instruction*, 4> Worklist;
2612   const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI);
2613   Worklist.push_back(AI);
2614 
2615   do {
2616     Instruction *PI = Worklist.pop_back_val();
2617     for (User *U : PI->users()) {
2618       Instruction *I = cast<Instruction>(U);
2619       switch (I->getOpcode()) {
2620       default:
2621         // Give up the moment we see something we can't handle.
2622         return false;
2623 
2624       case Instruction::AddrSpaceCast:
2625       case Instruction::BitCast:
2626       case Instruction::GetElementPtr:
2627         Users.emplace_back(I);
2628         Worklist.push_back(I);
2629         continue;
2630 
2631       case Instruction::ICmp: {
2632         ICmpInst *ICI = cast<ICmpInst>(I);
2633         // We can fold eq/ne comparisons with null to false/true, respectively.
2634         // We also fold comparisons under some conditions, provided the alloc has
2635         // not escaped (see isNeverEqualToUnescapedAlloc).
2636         if (!ICI->isEquality())
2637           return false;
2638         unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
2639         if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
2640           return false;
2641 
2642         // Do not fold compares to aligned_alloc calls, as they may have to
2643         // return null in case the required alignment cannot be satisfied,
2644         // unless we can prove that both alignment and size are valid.
2645         auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
2646           // Check if the alignment and size of a call to aligned_alloc are
2647           // valid, that is, the alignment is a power of 2 and the size is a
2648           // multiple of the alignment.
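               // E.g. (illustrative): aligned_alloc(32, 64) is known valid,
               // while aligned_alloc(17, 100) is not (17 is not a power of 2).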
2649           const APInt *Alignment;
2650           const APInt *Size;
2651           return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
2652                  match(CB->getArgOperand(1), m_APInt(Size)) &&
2653                  Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
2654         };
2655         auto *CB = dyn_cast<CallBase>(AI);
2656         LibFunc TheLibFunc;
2657         if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
2658             TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
2659             !AlignmentAndSizeKnownValid(CB))
2660           return false;
2661         Users.emplace_back(I);
2662         continue;
2663       }
2664 
2665       case Instruction::Call:
2666         // Ignore no-op and store intrinsics.
2667         if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
2668           switch (II->getIntrinsicID()) {
2669           default:
2670             return false;
2671 
2672           case Intrinsic::memmove:
2673           case Intrinsic::memcpy:
2674           case Intrinsic::memset: {
2675             MemIntrinsic *MI = cast<MemIntrinsic>(II);
2676             if (MI->isVolatile() || MI->getRawDest() != PI)
2677               return false;
2678             [[fallthrough]];
2679           }
2680           case Intrinsic::assume:
2681           case Intrinsic::invariant_start:
2682           case Intrinsic::invariant_end:
2683           case Intrinsic::lifetime_start:
2684           case Intrinsic::lifetime_end:
2685           case Intrinsic::objectsize:
2686             Users.emplace_back(I);
2687             continue;
2688           case Intrinsic::launder_invariant_group:
2689           case Intrinsic::strip_invariant_group:
2690             Users.emplace_back(I);
2691             Worklist.push_back(I);
2692             continue;
2693           }
2694         }
2695 
2696         if (isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
2697           Users.emplace_back(I);
2698           continue;
2699         }
2700 
2701         if (getFreedOperand(cast<CallBase>(I), &TLI) == PI &&
2702             getAllocationFamily(I, &TLI) == Family) {
2703           assert(Family);
2704           Users.emplace_back(I);
2705           continue;
2706         }
2707 
2708         if (getReallocatedOperand(cast<CallBase>(I)) == PI &&
2709             getAllocationFamily(I, &TLI) == Family) {
2710           assert(Family);
2711           Users.emplace_back(I);
2712           Worklist.push_back(I);
2713           continue;
2714         }
2715 
2716         return false;
2717 
2718       case Instruction::Store: {
2719         StoreInst *SI = cast<StoreInst>(I);
2720         if (SI->isVolatile() || SI->getPointerOperand() != PI)
2721           return false;
2722         Users.emplace_back(I);
2723         continue;
2724       }
2725       }
2726       llvm_unreachable("missing a return?");
2727     }
2728   } while (!Worklist.empty());
2729   return true;
2730 }
2731 
2732 Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
2733   assert(isa<AllocaInst>(MI) || isRemovableAlloc(&cast<CallBase>(MI), &TLI));
2734 
2735   // If we have a malloc call which is used only in comparisons to null and in
2736   // free calls, delete the calls and replace the comparisons with true or
2737   // false as appropriate.
2738 
2739   // This is based on the principle that we can substitute our own allocation
2740   // function (which will never return null) rather than relying on knowledge
2741   // of the specific function being called. In some sense this can change the
2742   // permitted outputs of a program (when we convert a malloc to an alloca, the
2743   // fact that the allocation is now on the stack is potentially visible, for
2744   // example), but we believe it does so in a permissible manner.
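       // For example (illustrative IR), the calls and the compare below are all
       // removed, with %cmp replaced by 'false':
       //   %p = call ptr @malloc(i64 4)
       //   %cmp = icmp eq ptr %p, null
       //   call void @free(ptr %p)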
2745   SmallVector<WeakTrackingVH, 64> Users;
2746 
2747   // If we are removing an alloca with a dbg.declare, insert dbg.value calls
2748   // before each store.
2749   SmallVector<DbgVariableIntrinsic *, 8> DVIs;
2750   SmallVector<DPValue *, 8> DPVs;
2751   std::unique_ptr<DIBuilder> DIB;
2752   if (isa<AllocaInst>(MI)) {
2753     findDbgUsers(DVIs, &MI, &DPVs);
2754     DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
2755   }
2756 
2757   if (isAllocSiteRemovable(&MI, Users, TLI)) {
2758     for (unsigned i = 0, e = Users.size(); i != e; ++i) {
2759       // Lower all @llvm.objectsize calls first because they may
2760       // use a bitcast/GEP of the alloca we are removing.
2761       if (!Users[i])
2762         continue;
2763 
2764       Instruction *I = cast<Instruction>(&*Users[i]);
2765 
2766       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
2767         if (II->getIntrinsicID() == Intrinsic::objectsize) {
2768           SmallVector<Instruction *> InsertedInstructions;
2769           Value *Result = lowerObjectSizeCall(
2770               II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions);
2771           for (Instruction *Inserted : InsertedInstructions)
2772             Worklist.add(Inserted);
2773           replaceInstUsesWith(*I, Result);
2774           eraseInstFromFunction(*I);
2775           Users[i] = nullptr; // Skip examining in the next loop.
2776         }
2777       }
2778     }
2779     for (unsigned i = 0, e = Users.size(); i != e; ++i) {
2780       if (!Users[i])
2781         continue;
2782 
2783       Instruction *I = cast<Instruction>(&*Users[i]);
2784 
2785       if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
2786         replaceInstUsesWith(*C,
2787                             ConstantInt::get(Type::getInt1Ty(C->getContext()),
2788                                              C->isFalseWhenEqual()));
2789       } else if (auto *SI = dyn_cast<StoreInst>(I)) {
2790         for (auto *DVI : DVIs)
2791           if (DVI->isAddressOfVariable())
2792             ConvertDebugDeclareToDebugValue(DVI, SI, *DIB);
2793         for (auto *DPV : DPVs)
2794           if (DPV->isAddressOfVariable())
2795             ConvertDebugDeclareToDebugValue(DPV, SI, *DIB);
2796       } else {
2797         // Casts, GEP, or anything else: we're about to delete this instruction,
2798         // so it cannot have any valid uses.
2799         replaceInstUsesWith(*I, PoisonValue::get(I->getType()));
2800       }
2801       eraseInstFromFunction(*I);
2802     }
2803 
2804     if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
2805       // Replace invoke with a NOP intrinsic to maintain the original CFG
2806       Module *M = II->getModule();
2807       Function *F = Intrinsic::getDeclaration(M, Intrinsic::donothing);
2808       InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(),
2809                          std::nullopt, "", II->getParent());
2810     }
2811 
2812     // Remove debug intrinsics which describe the value contained within the
2813     // alloca. In addition to removing dbg.{declare,addr} which simply point to
2814     // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.:
2815     //
2816     // ```
2817     //   define void @foo(i32 %0) {
2818     //     %a = alloca i32                              ; Deleted.
2819     //     store i32 %0, i32* %a
2820     //     dbg.value(i32 %0, "arg0")                    ; Not deleted.
2821     //     dbg.value(i32* %a, "arg0", DW_OP_deref)      ; Deleted.
2822     //     call void @trivially_inlinable_no_op(i32* %a)
2823     //     ret void
2824     //  }
2825     // ```
2826     //
2827     // This may not be required if we stop describing the contents of allocas
2828     // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in
2829     // the LowerDbgDeclare utility.
2830     //
2831     // If there is a dead store to `%a` in @trivially_inlinable_no_op, the
2832     // "arg0" dbg.value may be stale after the call. However, failing to remove
2833     // the DW_OP_deref dbg.value causes large gaps in location coverage.
2834     //
2835     // FIXME: the Assignment Tracking project has now likely made this
2836     // redundant (and it's sometimes harmful).
2837     for (auto *DVI : DVIs)
2838       if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref())
2839         DVI->eraseFromParent();
2840     for (auto *DPV : DPVs)
2841       if (DPV->isAddressOfVariable() || DPV->getExpression()->startsWithDeref())
2842         DPV->eraseFromParent();
2843 
2844     return eraseInstFromFunction(MI);
2845   }
2846   return nullptr;
2847 }
2848 
2849 /// Move the call to free before a NULL test.
2850 ///
2851 /// Check if this free is accessed after its argument has been tested
2852 /// against NULL (property 0).
2853 /// If yes, it is legal to move this call into its predecessor block.
2854 ///
2855 /// The move is performed only if the block containing the call to free
2856 /// will be removed, i.e.:
2857 /// 1. it has only one predecessor P, and P has two successors
2858 /// 2. it contains the call, noops, and an unconditional branch
2859 /// 3. its successor is the same as its predecessor's successor
2860 ///
2861 /// Profitability is not a concern here; this function should be called only
2862 /// if the caller knows this transformation would be profitable (e.g., for
2863 /// code size).
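     ///
     /// For example (illustrative IR; free(null) is a no-op, so the call can be
     /// made unconditional):
     ///   entry:
     ///     %c = icmp eq ptr %p, null
     ///     br i1 %c, label %exit, label %do_free
     ///   do_free:
     ///     call void @free(ptr %p)
     ///     br label %exit
     /// becomes a call to free in 'entry' before the branch, after which
     /// SimplifyCFG can remove the now-empty 'do_free' block.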
2864 static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI,
2865                                                 const DataLayout &DL) {
2866   Value *Op = FI.getArgOperand(0);
2867   BasicBlock *FreeInstrBB = FI.getParent();
2868   BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
2869 
2870   // Validate part of constraint #1: Only one predecessor
2871   // FIXME: We can extend the number of predecessors, but in that case, we
2872   //        would duplicate the call to free in each predecessor and it may
2873   //        not be profitable even for code size.
2874   if (!PredBB)
2875     return nullptr;
2876 
2877   // Validate constraint #2: Does this block contain only the call to
2878   //                         free, noops, and an unconditional branch?
2879   BasicBlock *SuccBB;
2880   Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
2881   if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
2882     return nullptr;
2883 
2884   // If there are only 2 instructions in the block, at this point,
2885   // they are the call to free and the unconditional branch.
2886   // If there are more than 2 instructions, check that they are noops
2887   // i.e., they won't hurt the performance of the generated code.
2888   if (FreeInstrBB->size() != 2) {
2889     for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
2890       if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
2891         continue;
2892       auto *Cast = dyn_cast<CastInst>(&Inst);
2893       if (!Cast || !Cast->isNoopCast(DL))
2894         return nullptr;
2895     }
2896   }
2897   // Validate the rest of constraint #1 by matching on the pred branch.
2898   Instruction *TI = PredBB->getTerminator();
2899   BasicBlock *TrueBB, *FalseBB;
2900   ICmpInst::Predicate Pred;
2901   if (!match(TI, m_Br(m_ICmp(Pred,
2902                              m_CombineOr(m_Specific(Op),
2903                                          m_Specific(Op->stripPointerCasts())),
2904                              m_Zero()),
2905                       TrueBB, FalseBB)))
2906     return nullptr;
2907   if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
2908     return nullptr;
2909 
2910   // Validate constraint #3: Ensure the null case just falls through.
2911   if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
2912     return nullptr;
2913   assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
2914          "Broken CFG: missing edge from predecessor to successor");
2915 
2916   // At this point, we know that everything in FreeInstrBB can be moved
2917   // before TI.
2918   for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
2919     if (&Instr == FreeInstrBBTerminator)
2920       break;
2921     Instr.moveBeforePreserving(TI);
2922   }
2923   assert(FreeInstrBB->size() == 1 &&
2924          "Only the branch instruction should remain");
2925 
2926   // Now that we've moved the call to free before the NULL check, we have to
2927   // remove any attributes on its parameter that imply it's non-null, because
2928   // those attributes might have only been valid because of the NULL check, and
2929   // we can get miscompiles if we keep them. This is conservative if non-null is
2930   // also implied by something other than the NULL check, but it's guaranteed to
2931   // be correct, and the conservativeness won't matter in practice, since the
2932   // attributes are irrelevant for the call to free itself and the pointer
2933   // shouldn't be used after the call.
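       // E.g. (illustrative): 'call void @free(ptr nonnull dereferenceable(4) %p)'
       // becomes 'call void @free(ptr dereferenceable_or_null(4) %p)'.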
2934   AttributeList Attrs = FI.getAttributes();
2935   Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
2936   Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
2937   if (Dereferenceable.isValid()) {
2938     uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
2939     Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
2940                                        Attribute::Dereferenceable);
2941     Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
2942   }
2943   FI.setAttributes(Attrs);
2944 
2945   return &FI;
2946 }
2947 
2948 Instruction *InstCombinerImpl::visitFree(CallInst &FI, Value *Op) {
2949   // free undef -> unreachable.
2950   if (isa<UndefValue>(Op)) {
2951     // Leave a marker since we can't modify the CFG here.
2952     CreateNonTerminatorUnreachable(&FI);
2953     return eraseInstFromFunction(FI);
2954   }
2955 
2956   // If we have 'free null', delete the instruction. This can happen in STL code
2957   // when lots of inlining happens.
2958   if (isa<ConstantPointerNull>(Op))
2959     return eraseInstFromFunction(FI);
2960 
2961   // If we had free(realloc(...)) with no intervening uses, then eliminate the
2962   // realloc() entirely.
2963   CallInst *CI = dyn_cast<CallInst>(Op);
2964   if (CI && CI->hasOneUse())
2965     if (Value *ReallocatedOp = getReallocatedOperand(CI))
2966       return eraseInstFromFunction(*replaceInstUsesWith(*CI, ReallocatedOp));
2967 
2968   // If we optimize for code size, try to move the call to free before the null
2969   // test so that SimplifyCFG can remove the empty block and dead code
2970   // elimination can remove the branch. I.e., this helps to turn something like:
2971   // if (foo) free(foo);
2972   // into
2973   // free(foo);
2974   //
2975   // Note that we can only do this for 'free' and not for any flavor of
2976   // 'operator delete'; there is no 'operator delete' symbol for which we are
2977   // permitted to invent a call, even if we're passing in a null pointer.
2978   if (MinimizeSize) {
2979     LibFunc Func;
2980     if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
2981       if (Instruction *I = tryToMoveFreeBeforeNullTest(FI, DL))
2982         return I;
2983   }
2984 
2985   return nullptr;
2986 }
2987 
2988 Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {
2989   // Nothing for now.
2990   return nullptr;
2991 }
2992 
2993 // WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
2994 bool InstCombinerImpl::removeInstructionsBeforeUnreachable(Instruction &I) {
2995   // Try to remove the previous instruction if it must lead to unreachable.
2996   // This includes instructions like stores and "llvm.assume" that may not get
2997   // removed by simple dead code elimination.
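       // For example (illustrative IR), both instructions before the
       // 'unreachable' below are guaranteed to transfer execution to it, so
       // both can be erased even though the store is not otherwise dead:
       //   store i32 1, ptr @g
       //   call void @llvm.assume(i1 true)
       //   unreachable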
2998   bool Changed = false;
2999   while (Instruction *Prev = I.getPrevNonDebugInstruction()) {
3000     // While we theoretically can erase EH, that would result in a block that
3001     // used to start with an EH no longer starting with EH, which is invalid.
3002     // To make it valid, we'd need to fixup predecessors to no longer refer to
3003     // this block, but that changes CFG, which is not allowed in InstCombine.
3004     if (Prev->isEHPad())
3005       break; // Can not drop any more instructions. We're done here.
3006 
3007     if (!isGuaranteedToTransferExecutionToSuccessor(Prev))
3008       break; // Can not drop any more instructions. We're done here.
3009     // Otherwise, this instruction can be freely erased,
3010     // even if it is not side-effect free.
3011 
3012     // A value may still have uses before we process it here (for example, in
3013     // another unreachable block), so convert those to poison.
3014     replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
3015     eraseInstFromFunction(*Prev);
3016     Changed = true;
3017   }
3018   return Changed;
3019 }
3020 
3021 Instruction *InstCombinerImpl::visitUnreachableInst(UnreachableInst &I) {
3022   removeInstructionsBeforeUnreachable(I);
3023   return nullptr;
3024 }
3025 
3026 Instruction *InstCombinerImpl::visitUnconditionalBranchInst(BranchInst &BI) {
3027   assert(BI.isUnconditional() && "Only for unconditional branches.");
3028 
3029   // If the second-to-last instruction in this basic block is a store
3030   // (excluding debug info and bitcasts of pointers) and the block ends with
3031   // an unconditional branch, try to move the store to the successor block.
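       // For example (illustrative IR), the store below may be merged into
       // %succ by mergeStoreIntoSuccessor, provided %succ's other predecessor
       // stores to the same address:
       //   bb:
       //     store i32 0, ptr %p
       //     br label %succ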
3032 
3033   auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
3034     auto IsNoopInstrForStoreMerging = [](BasicBlock::iterator BBI) {
3035       return BBI->isDebugOrPseudoInst() ||
3036              (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy());
3037     };
3038 
3039     BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
3040     do {
3041       if (BBI != FirstInstr)
3042         --BBI;
3043     } while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI));
3044 
3045     return dyn_cast<StoreInst>(BBI);
3046   };
3047 
3048   if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
3049     if (mergeStoreIntoSuccessor(*SI))
3050       return &BI;
3051 
3052   return nullptr;
3053 }
3054 
3055 void InstCombinerImpl::addDeadEdge(BasicBlock *From, BasicBlock *To,
3056                                    SmallVectorImpl<BasicBlock *> &Worklist) {
3057   if (!DeadEdges.insert({From, To}).second)
3058     return;
3059 
3060   // Replace phi node operands in successor with poison.
3061   for (PHINode &PN : To->phis())
3062     for (Use &U : PN.incoming_values())
3063       if (PN.getIncomingBlock(U) == From && !isa<PoisonValue>(U)) {
3064         replaceUse(U, PoisonValue::get(PN.getType()));
3065         addToWorklist(&PN);
3066         MadeIRChange = true;
3067       }
3068 
3069   Worklist.push_back(To);
3070 }
3071 
3072 // Under the assumption that I is unreachable, remove it and the following
3073 // instructions. Changes are reported directly to MadeIRChange.
3074 void InstCombinerImpl::handleUnreachableFrom(
3075     Instruction *I, SmallVectorImpl<BasicBlock *> &Worklist) {
3076   BasicBlock *BB = I->getParent();
3077   for (Instruction &Inst : make_early_inc_range(
3078            make_range(std::next(BB->getTerminator()->getReverseIterator()),
3079                       std::next(I->getReverseIterator())))) {
3080     if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
3081       replaceInstUsesWith(Inst, PoisonValue::get(Inst.getType()));
3082       MadeIRChange = true;
3083     }
3084     if (Inst.isEHPad() || Inst.getType()->isTokenTy())
3085       continue;
3086     // RemoveDIs: erase debug-info on this instruction manually.
3087     Inst.dropDbgValues();
3088     eraseInstFromFunction(Inst);
3089     MadeIRChange = true;
3090   }
3091 
3092   // RemoveDIs: to match behaviour in dbg.value mode, drop debug-info on
3093   // terminator too.
3094   BB->getTerminator()->dropDbgValues();
3095 
3096   // Handle potentially dead successors.
3097   for (BasicBlock *Succ : successors(BB))
3098     addDeadEdge(BB, Succ, Worklist);
3099 }
3100 
3101 void InstCombinerImpl::handlePotentiallyDeadBlocks(
3102     SmallVectorImpl<BasicBlock *> &Worklist) {
3103   while (!Worklist.empty()) {
3104     BasicBlock *BB = Worklist.pop_back_val();
3105     if (!all_of(predecessors(BB), [&](BasicBlock *Pred) {
3106           return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
3107         }))
3108       continue;
3109 
3110     handleUnreachableFrom(&BB->front(), Worklist);
3111   }
3112 }
3113 
3114 void InstCombinerImpl::handlePotentiallyDeadSuccessors(BasicBlock *BB,
3115                                                        BasicBlock *LiveSucc) {
3116   SmallVector<BasicBlock *> Worklist;
3117   for (BasicBlock *Succ : successors(BB)) {
3118     // Skip the successor that is known to be live.
3119     if (Succ == LiveSucc)
3120       continue;
3121 
3122     addDeadEdge(BB, Succ, Worklist);
3123   }
3124 
3125   handlePotentiallyDeadBlocks(Worklist);
3126 }
3127 
3128 Instruction *InstCombinerImpl::visitBranchInst(BranchInst &BI) {
3129   if (BI.isUnconditional())
3130     return visitUnconditionalBranchInst(BI);
3131 
3132   // Change br (not X), label True, label False to: br X, label False, True
3133   Value *Cond = BI.getCondition();
3134   Value *X;
3135   if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) {
3136     // Swap Destinations and condition...
3137     BI.swapSuccessors();
3138     return replaceOperand(BI, 0, X);
3139   }
3140 
3141   // Canonicalize logical-and-with-invert as logical-or-with-invert.
3142   // This is done by inverting the condition and swapping successors:
3143   // br (X && !Y), T, F --> br !(X && !Y), F, T --> br (!X || Y), F, T
3144   Value *Y;
3145   if (isa<SelectInst>(Cond) &&
3146       match(Cond,
3147             m_OneUse(m_LogicalAnd(m_Value(X), m_OneUse(m_Not(m_Value(Y))))))) {
3148     Value *NotX = Builder.CreateNot(X, "not." + X->getName());
3149     Value *Or = Builder.CreateLogicalOr(NotX, Y);
3150     BI.swapSuccessors();
3151     return replaceOperand(BI, 0, Or);
3152   }
3153 
3154   // If the condition is irrelevant, remove the use so that other
3155   // transforms on the condition become more effective.
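       // For example (illustrative IR):
       //   br i1 %cond, label %bb, label %bb
       // becomes
       //   br i1 false, label %bb, label %bb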
3156   if (!isa<ConstantInt>(Cond) && BI.getSuccessor(0) == BI.getSuccessor(1))
3157     return replaceOperand(BI, 0, ConstantInt::getFalse(Cond->getType()));
3158 
3159   // Canonicalize, for example, fcmp_one -> fcmp_oeq.
3160   CmpInst::Predicate Pred;
3161   if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) &&
3162       !isCanonicalPredicate(Pred)) {
3163     // Swap destinations and condition.
3164     auto *Cmp = cast<CmpInst>(Cond);
3165     Cmp->setPredicate(CmpInst::getInversePredicate(Pred));
3166     BI.swapSuccessors();
3167     Worklist.push(Cmp);
3168     return &BI;
3169   }
3170 
3171   if (isa<UndefValue>(Cond)) {
3172     handlePotentiallyDeadSuccessors(BI.getParent(), /*LiveSucc*/ nullptr);
3173     return nullptr;
3174   }
3175   if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
3176     handlePotentiallyDeadSuccessors(BI.getParent(),
3177                                     BI.getSuccessor(!CI->getZExtValue()));
3178     return nullptr;
3179   }
3180 
3181   DC.registerBranch(&BI);
3182   return nullptr;
3183 }
3184 
3185 Instruction *InstCombinerImpl::visitSwitchInst(SwitchInst &SI) {
3186   Value *Cond = SI.getCondition();
3187   Value *Op0;
3188   ConstantInt *AddRHS;
3189   if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) {
3190     // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
3191     for (auto Case : SI.cases()) {
3192       Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS);
3193       assert(isa<ConstantInt>(NewCase) &&
3194              "Result of expression should be constant");
3195       Case.setValue(cast<ConstantInt>(NewCase));
3196     }
3197     return replaceOperand(SI, 0, Op0);
3198   }
3199 
3200   KnownBits Known = computeKnownBits(Cond, 0, &SI);
3201   unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
3202   unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
3203 
3204   // Compute the number of leading bits we can ignore.
3205   // TODO: A better way to determine this would use ComputeNumSignBits().
3206   for (const auto &C : SI.cases()) {
3207     LeadingKnownZeros =
3208         std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());
3209     LeadingKnownOnes =
3210         std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());
3211   }
3212 
3213   unsigned NewWidth =
           Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
3214 
3215   // Shrink the condition operand if the new type is smaller than the old type.
3216   // But do not shrink to a non-standard type, because the backend can't generate
3217   // good code for that yet.
3218   // TODO: We can make it aggressive again after fixing PR39569.
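       // For example (illustrative IR), with the upper 32 bits of %x known to
       // be zero:
       //   switch i64 %x, label %default [ i64 1, label %bb ]
       // becomes
       //   %t = trunc i64 %x to i32
       //   switch i32 %t, label %default [ i32 1, label %bb ]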
3219   if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
3220       shouldChangeType(Known.getBitWidth(), NewWidth)) {
3221     IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
3222     Builder.SetInsertPoint(&SI);
3223     Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
3224 
3225     for (auto Case : SI.cases()) {
3226       APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
3227       Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3228     }
3229     return replaceOperand(SI, 0, NewCond);
3230   }
3231 
3232   if (isa<UndefValue>(Cond)) {
3233     handlePotentiallyDeadSuccessors(SI.getParent(), /*LiveSucc*/ nullptr);
3234     return nullptr;
3235   }
3236   if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
3237     handlePotentiallyDeadSuccessors(SI.getParent(),
3238                                     SI.findCaseValue(CI)->getCaseSuccessor());
3239     return nullptr;
3240   }
3241 
3242   return nullptr;
3243 }
3244 
3245 Instruction *
3246 InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {
3247   auto *WO = dyn_cast<WithOverflowInst>(EV.getAggregateOperand());
3248   if (!WO)
3249     return nullptr;
3250 
3251   Intrinsic::ID OvID = WO->getIntrinsicID();
3252   const APInt *C = nullptr;
3253   if (match(WO->getRHS(), m_APIntAllowUndef(C))) {
3254     if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
3255                                  OvID == Intrinsic::umul_with_overflow)) {
3256       // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
3257       if (C->isAllOnes())
3258         return BinaryOperator::CreateNeg(WO->getLHS());
3259       // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n
3260       if (C->isPowerOf2()) {
3261         return BinaryOperator::CreateShl(
3262             WO->getLHS(),
3263             ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));
3264       }
3265     }
3266   }
3267 
3268   // We're extracting from an overflow intrinsic. See if we're the only user.
3269   // That allows us to simplify multiple-result intrinsics to simpler things
3270   // that just get one value.
3271   if (!WO->hasOneUse())
3272     return nullptr;
3273 
3274   // Check if we're grabbing only the result of a 'with overflow' intrinsic
3275   // and replace it with a traditional binary instruction.
3276   if (*EV.idx_begin() == 0) {
3277     Instruction::BinaryOps BinOp = WO->getBinaryOp();
3278     Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
3279     // Replace the old instruction's uses with poison.
3280     replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
3281     eraseInstFromFunction(*WO);
3282     return BinaryOperator::Create(BinOp, LHS, RHS);
3283   }
3284 
3285   assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");
3286 
3287   // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS.
3288   if (OvID == Intrinsic::usub_with_overflow)
3289     return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS());
3290 
3291   // smul with i1 types overflows when both sides are set: -1 * -1 == +1, but
3292   // +1 is not representable as a signed i1 value.
3293   if (OvID == Intrinsic::smul_with_overflow &&
3294       WO->getLHS()->getType()->isIntOrIntVectorTy(1))
3295     return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
3296 
3297   // If only the overflow result is used, and the right hand side is a
3298   // constant (or constant splat), we can remove the intrinsic by directly
3299   // checking for overflow.
3300   if (C) {
3301     // Compute the no-wrap range for LHS given RHS=C, then construct an
3302     // equivalent icmp, potentially using an offset.
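         // For example (illustrative IR):
         //   %s = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %x, i8 42)
         //   %ov = extractvalue { i8, i1 } %s, 1
         // becomes (up to the exact predicate and constant chosen) something
         // like:
         //   %ov = icmp uge i8 %x, -42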
3303     ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
3304         WO->getBinaryOp(), *C, WO->getNoWrapKind());
3305 
3306     CmpInst::Predicate Pred;
3307     APInt NewRHSC, Offset;
3308     NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
3309     auto *OpTy = WO->getRHS()->getType();
3310     auto *NewLHS = WO->getLHS();
3311     if (Offset != 0)
3312       NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
3313     return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
3314                         ConstantInt::get(OpTy, NewRHSC));
3315   }
3316 
3317   return nullptr;
3318 }
3319 
3320 Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) {
3321   Value *Agg = EV.getAggregateOperand();
3322 
3323   if (!EV.hasIndices())
3324     return replaceInstUsesWith(EV, Agg);
3325 
3326   if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(),
3327                                           SQ.getWithInstruction(&EV)))
3328     return replaceInstUsesWith(EV, V);
3329 
3330   if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
3331     // We're extracting from an insertvalue instruction, compare the indices
3332     const unsigned *exti, *exte, *insi, *inse;
3333     for (exti = EV.idx_begin(), insi = IV->idx_begin(),
3334          exte = EV.idx_end(), inse = IV->idx_end();
3335          exti != exte && insi != inse;
3336          ++exti, ++insi) {
3337       if (*insi != *exti)
3338         // The insert and extract reference two different elements.
3339         // This means the extract is not influenced by the insert, and we can
3340         // replace the aggregate operand of the extract with the aggregate
3341         // operand of the insert. i.e., replace
3342         // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
3343         // %E = extractvalue { i32, { i32 } } %I, 0
3344         // with
3345         // %E = extractvalue { i32, { i32 } } %A, 0
3346         return ExtractValueInst::Create(IV->getAggregateOperand(),
3347                                         EV.getIndices());
3348     }
3349     if (exti == exte && insi == inse)
3350       // Both iterators are at the end: Index lists are identical. Replace
3351       // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
3352       // %C = extractvalue { i32, { i32 } } %B, 1, 0
3353       // with "i32 42"
3354       return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
3355     if (exti == exte) {
3356       // The extract list is a prefix of the insert list. i.e. replace
3357       // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
3358       // %E = extractvalue { i32, { i32 } } %I, 1
3359       // with
3360       // %X = extractvalue { i32, { i32 } } %A, 1
3361       // %E = insertvalue { i32 } %X, i32 42, 0
3362       // by switching the order of the insert and extract (though the
3363       // insertvalue should be left in, since it may have other uses).
3364       Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
3365                                                 EV.getIndices());
3366       return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
3367                                      ArrayRef(insi, inse));
3368     }
3369     if (insi == inse)
3370       // The insert list is a prefix of the extract list
3371       // We can simply remove the common indices from the extract and make it
3372       // operate on the inserted value instead of the insertvalue result.
3373       // i.e., replace
3374       // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
3375       // %E = extractvalue { i32, { i32 } } %I, 1, 0
3376       // with
3377       // %E = extractvalue { i32 } { i32 42 }, 0
3378       return ExtractValueInst::Create(IV->getInsertedValueOperand(),
3379                                       ArrayRef(exti, exte));
3380   }
3381 
3382   if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
3383     return R;
3384 
3385   if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
3386     // Bail out if the aggregate contains a scalable vector type.
3387     if (auto *STy = dyn_cast<StructType>(Agg->getType());
3388         STy && STy->containsScalableVectorType())
3389       return nullptr;
3390 
3391     // If the (non-volatile) load only has one use, we can rewrite this to a
3392     // load from a GEP. This reduces the size of the load. If a load is used
3393     // only by extractvalue instructions then this either must have been
3394     // optimized before, or it is a struct with padding, in which case we
3395     // don't want to do the transformation as it loses padding knowledge.
3396     if (L->isSimple() && L->hasOneUse()) {
3397       // extractvalue has integer indices, getelementptr has Value*s. Convert.
3398       SmallVector<Value*, 4> Indices;
3399       // Prefix an i32 0 since we need the first element.
3400       Indices.push_back(Builder.getInt32(0));
3401       for (unsigned Idx : EV.indices())
3402         Indices.push_back(Builder.getInt32(Idx));
3403 
3404       // We need to insert these at the location of the old load, not at that of
3405       // the extractvalue.
3406       Builder.SetInsertPoint(L);
3407       Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
3408                                              L->getPointerOperand(), Indices);
3409       Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
3410       // Whatever aliasing information we had for the original load must also
3411       // hold for the smaller load, so propagate the annotations.
3412       NL->setAAMetadata(L->getAAMetadata());
3413       // Returning the load directly will cause the main loop to insert it in
3414       // the wrong spot, so use replaceInstUsesWith().
3415       return replaceInstUsesWith(EV, NL);
3416     }
3417   }
3418 
3419   if (auto *PN = dyn_cast<PHINode>(Agg))
3420     if (Instruction *Res = foldOpIntoPhi(EV, PN))
3421       return Res;
3422 
3423   // We could simplify extracts from other values. Note that nested extracts may
3424   // already be simplified implicitly by the above: extract (extract (insert) )
3425   // will be translated into extract ( insert ( extract ) ) first and then just
3426   // the value inserted, if appropriate. Similarly for extracts from single-use
3427   // loads: extract (extract (load)) will be translated to extract (load (gep))
3428   // and if again single-use then via load (gep (gep)) to load (gep).
3429   // However, double extracts from e.g. function arguments or return values
3430   // aren't handled yet.
3431   return nullptr;
3432 }
3433 
3434 /// Return 'true' if the given typeinfo will match anything.
3435 static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
3436   switch (Personality) {
3437   case EHPersonality::GNU_C:
3438   case EHPersonality::GNU_C_SjLj:
3439   case EHPersonality::Rust:
3440     // The GCC C EH and Rust personalities exist only to support cleanups, so
3441     // it's not clear what the semantics of catch clauses are.
3442     return false;
3443   case EHPersonality::Unknown:
3444     return false;
3445   case EHPersonality::GNU_Ada:
3446     // While __gnat_all_others_value will match any Ada exception, it doesn't
3447     // match foreign exceptions (or didn't, before gcc-4.7).
3448     return false;
3449   case EHPersonality::GNU_CXX:
3450   case EHPersonality::GNU_CXX_SjLj:
3451   case EHPersonality::GNU_ObjC:
3452   case EHPersonality::MSVC_X86SEH:
3453   case EHPersonality::MSVC_TableSEH:
3454   case EHPersonality::MSVC_CXX:
3455   case EHPersonality::CoreCLR:
3456   case EHPersonality::Wasm_CXX:
3457   case EHPersonality::XL_CXX:
3458     return TypeInfo->isNullValue();
3459   }
3460   llvm_unreachable("invalid enum");
3461 }
3462 
3463 static bool shorter_filter(const Value *LHS, const Value *RHS) {
3464   return cast<ArrayType>(LHS->getType())->getNumElements() <
3465          cast<ArrayType>(RHS->getType())->getNumElements();
3468 }
3469 
3470 Instruction *InstCombinerImpl::visitLandingPadInst(LandingPadInst &LI) {
3471   // The logic here should be correct for any real-world personality function.
3472   // However if that turns out not to be true, the offending logic can always
3473   // be conditioned on the personality function, like the catch-all logic is.
3474   EHPersonality Personality =
3475       classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
3476 
3477   // Simplify the list of clauses, e.g. by removing repeated catch clauses
3478   // (these are often created by inlining).
3479   bool MakeNewInstruction = false; // If true, recreate using the following:
3480   SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
3481   bool CleanupFlag = LI.isCleanup();   // - The new instruction is a cleanup.
3482 
3483   SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
3484   for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
3485     bool isLastClause = i + 1 == e;
3486     if (LI.isCatch(i)) {
3487       // A catch clause.
3488       Constant *CatchClause = LI.getClause(i);
3489       Constant *TypeInfo = CatchClause->stripPointerCasts();
3490 
3491       // If we already saw this clause, there is no point in having a second
3492       // copy of it.
3493       if (AlreadyCaught.insert(TypeInfo).second) {
3494         // This catch clause was not already seen.
3495         NewClauses.push_back(CatchClause);
3496       } else {
3497         // Repeated catch clause - drop the redundant copy.
3498         MakeNewInstruction = true;
3499       }
3500 
3501       // If this is a catch-all then there is no point in keeping any following
3502       // clauses or marking the landingpad as having a cleanup.
3503       if (isCatchAll(Personality, TypeInfo)) {
3504         if (!isLastClause)
3505           MakeNewInstruction = true;
3506         CleanupFlag = false;
3507         break;
3508       }
3509     } else {
3510       // A filter clause.  If any of the filter elements were already caught
3511       // then they can be dropped from the filter.  It is tempting to try to
3512       // exploit the filter further by saying that any typeinfo that does not
3513       // occur in the filter can't be caught later (and thus can be dropped).
3514       // However this would be wrong, since typeinfos can match without being
3515       // equal (for example if one represents a C++ class, and the other some
3516       // class derived from it).
3517       assert(LI.isFilter(i) && "Unsupported landingpad clause!");
3518       Constant *FilterClause = LI.getClause(i);
3519       ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
3520       unsigned NumTypeInfos = FilterType->getNumElements();
3521 
3522       // An empty filter catches everything, so there is no point in keeping any
3523       // following clauses or marking the landingpad as having a cleanup.  By
3524       // dealing with this case here the following code is made a bit simpler.
3525       if (!NumTypeInfos) {
3526         NewClauses.push_back(FilterClause);
3527         if (!isLastClause)
3528           MakeNewInstruction = true;
3529         CleanupFlag = false;
3530         break;
3531       }
3532 
3533       bool MakeNewFilter = false; // If true, make a new filter.
3534       SmallVector<Constant *, 16> NewFilterElts; // New elements.
3535       if (isa<ConstantAggregateZero>(FilterClause)) {
3536         // Not an empty filter - it contains at least one null typeinfo.
3537         assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
3538         Constant *TypeInfo =
3539           Constant::getNullValue(FilterType->getElementType());
3540         // If this typeinfo is a catch-all then the filter can never match.
3541         if (isCatchAll(Personality, TypeInfo)) {
3542           // Throw the filter away.
3543           MakeNewInstruction = true;
3544           continue;
3545         }
3546 
3547         // There is no point in having multiple copies of this typeinfo, so
3548         // discard all but the first copy if there is more than one.
3549         NewFilterElts.push_back(TypeInfo);
3550         if (NumTypeInfos > 1)
3551           MakeNewFilter = true;
3552       } else {
3553         ConstantArray *Filter = cast<ConstantArray>(FilterClause);
3554         SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
3555         NewFilterElts.reserve(NumTypeInfos);
3556 
3557         // Remove any filter elements that were already caught or that already
3558         // occurred in the filter.  While there, see if any of the elements are
3559         // catch-alls.  If so, the filter can be discarded.
3560         bool SawCatchAll = false;
3561         for (unsigned j = 0; j != NumTypeInfos; ++j) {
3562           Constant *Elt = Filter->getOperand(j);
3563           Constant *TypeInfo = Elt->stripPointerCasts();
3564           if (isCatchAll(Personality, TypeInfo)) {
3565             // This element is a catch-all.  Bail out, noting this fact.
3566             SawCatchAll = true;
3567             break;
3568           }
3569 
3570           // Even if we've seen a type in a catch clause, we don't want to
3571           // remove it from the filter.  An unexpected type handler may be
3572           // set up for a call site which throws an exception of the same
3573           // type caught.  In order for the exception thrown by the unexpected
3574           // handler to propagate correctly, the filter must be correctly
3575           // described for the call site.
3576           //
3577           // Example:
3578           //
3579           // void unexpected() { throw 1;}
3580           // void foo() throw (int) {
3581           //   std::set_unexpected(unexpected);
3582           //   try {
3583           //     throw 2.0;
3584           //   } catch (int i) {}
3585           // }
3586 
3587           // There is no point in having multiple copies of the same typeinfo in
3588           // a filter, so only add it if we didn't already.
3589           if (SeenInFilter.insert(TypeInfo).second)
3590             NewFilterElts.push_back(cast<Constant>(Elt));
3591         }
3592         // A filter containing a catch-all cannot match anything by definition.
3593         if (SawCatchAll) {
3594           // Throw the filter away.
3595           MakeNewInstruction = true;
3596           continue;
3597         }
3598 
3599         // If we dropped something from the filter, make a new one.
3600         if (NewFilterElts.size() < NumTypeInfos)
3601           MakeNewFilter = true;
3602       }
3603       if (MakeNewFilter) {
3604         FilterType = ArrayType::get(FilterType->getElementType(),
3605                                     NewFilterElts.size());
3606         FilterClause = ConstantArray::get(FilterType, NewFilterElts);
3607         MakeNewInstruction = true;
3608       }
3609 
3610       NewClauses.push_back(FilterClause);
3611 
3612       // If the new filter is empty then it will catch everything so there is
3613       // no point in keeping any following clauses or marking the landingpad
3614       // as having a cleanup.  The case of the original filter being empty was
3615       // already handled above.
3616       if (MakeNewFilter && !NewFilterElts.size()) {
3617         assert(MakeNewInstruction && "New filter but not a new instruction!");
3618         CleanupFlag = false;
3619         break;
3620       }
3621     }
3622   }
3623 
3624   // If several filters occur in a row then reorder them so that the shortest
3625   // filters come first (those with the smallest number of elements).  This is
3626   // advantageous because shorter filters are more likely to match, speeding up
3627   // unwinding, but mostly because it increases the effectiveness of the other
3628   // filter optimizations below.
3629   for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
3630     unsigned j;
3631     // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
3632     for (j = i; j != e; ++j)
3633       if (!isa<ArrayType>(NewClauses[j]->getType()))
3634         break;
3635 
3636     // Check whether the filters are already sorted by length.  We need to know
3637     // if sorting them is actually going to do anything so that we only make a
3638     // new landingpad instruction if it does.
3639     for (unsigned k = i; k + 1 < j; ++k)
3640       if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
3641         // Not sorted, so sort the filters now.  Doing an unstable sort would be
3642         // correct too but reordering filters pointlessly might confuse users.
3643         std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
3644                          shorter_filter);
3645         MakeNewInstruction = true;
3646         break;
3647       }
3648 
3649     // Look for the next batch of filters.
3650     i = j + 1;
3651   }
3652 
3653   // If typeinfos matched if and only if equal, then the elements of a filter L
3654   // that occurs later than a filter F could be replaced by the intersection of
3655   // the elements of F and L.  In reality two typeinfos can match without being
3656   // equal (for example if one represents a C++ class, and the other some class
3657   // derived from it) so it would be wrong to perform this transform in general.
3658   // However the transform is correct and useful if F is a subset of L.  In that
3659   // case L can be replaced by F, and thus removed altogether since repeating a
3660   // filter is pointless.  So here we look at all pairs of filters F and L where
3661   // L follows F in the list of clauses, and remove L if every element of F is
3662   // an element of L.  This can occur when inlining C++ functions with exception
3663   // specifications.
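       // For example (illustrative typeinfos), given the clauses
       //   filter [1 x ptr] [ptr @_ZTIi]
       //   filter [2 x ptr] [ptr @_ZTIi, ptr @_ZTId]
       // every element of the first filter is also in the second, so the
       // second filter can be removed.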
3664   for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
3665     // Examine each filter in turn.
3666     Value *Filter = NewClauses[i];
3667     ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
3668     if (!FTy)
3669       // Not a filter - skip it.
3670       continue;
3671     unsigned FElts = FTy->getNumElements();
3672     // Examine each filter following this one.  Doing this backwards means that
3673     // we don't have to worry about filters disappearing under us when removed.
3674     for (unsigned j = NewClauses.size() - 1; j != i; --j) {
3675       Value *LFilter = NewClauses[j];
3676       ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
3677       if (!LTy)
3678         // Not a filter - skip it.
3679         continue;
3680       // If Filter is a subset of LFilter, i.e. every element of Filter is also
3681       // an element of LFilter, then discard LFilter.
3682       SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
3683       // If Filter is empty then it is a subset of LFilter.
3684       if (!FElts) {
3685         // Discard LFilter.
3686         NewClauses.erase(J);
3687         MakeNewInstruction = true;
3688         // Move on to the next filter.
3689         continue;
3690       }
3691       unsigned LElts = LTy->getNumElements();
3692       // If Filter is longer than LFilter then it cannot be a subset of it.
3693       if (FElts > LElts)
3694         // Move on to the next filter.
3695         continue;
3696       // At this point we know that LFilter has at least one element.
3697       if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
3698         // Filter is a subset of LFilter iff Filter contains only zeros (as we
3699         // already know that Filter is not longer than LFilter).
3700         if (isa<ConstantAggregateZero>(Filter)) {
3701           assert(FElts <= LElts && "Should have handled this case earlier!");
3702           // Discard LFilter.
3703           NewClauses.erase(J);
3704           MakeNewInstruction = true;
3705         }
3706         // Move on to the next filter.
3707         continue;
3708       }
3709       ConstantArray *LArray = cast<ConstantArray>(LFilter);
3710       if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
3711         // Since Filter is non-empty and contains only zeros, it is a subset of
3712         // LFilter iff LFilter contains a zero.
3713         assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
3714         for (unsigned l = 0; l != LElts; ++l)
3715           if (LArray->getOperand(l)->isNullValue()) {
3716             // LFilter contains a zero - discard it.
3717             NewClauses.erase(J);
3718             MakeNewInstruction = true;
3719             break;
3720           }
3721         // Move on to the next filter.
3722         continue;
3723       }
3724       // At this point we know that both filters are ConstantArrays.  Loop over
3725       // operands to see whether every element of Filter is also an element of
3726       // LFilter.  Since filters tend to be short this is probably faster than
3727       // using a method that scales nicely.
3728       ConstantArray *FArray = cast<ConstantArray>(Filter);
3729       bool AllFound = true;
3730       for (unsigned f = 0; f != FElts; ++f) {
3731         Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
3732         AllFound = false;
3733         for (unsigned l = 0; l != LElts; ++l) {
3734           Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
3735           if (LTypeInfo == FTypeInfo) {
3736             AllFound = true;
3737             break;
3738           }
3739         }
3740         if (!AllFound)
3741           break;
3742       }
3743       if (AllFound) {
3744         // Discard LFilter.
3745         NewClauses.erase(J);
3746         MakeNewInstruction = true;
3747       }
3748       // Move on to the next filter.
3749     }
3750   }
3751 
3752   // If we changed any of the clauses, replace the old landingpad instruction
3753   // with a new one.
3754   if (MakeNewInstruction) {
3755     LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
3756                                                  NewClauses.size());
3757     for (unsigned i = 0, e = NewClauses.size(); i != e; ++i)
3758       NLI->addClause(NewClauses[i]);
3759     // A landing pad with no clauses must have the cleanup flag set.  It is
3760     // theoretically possible, though highly unlikely, that we eliminated all
3761     // clauses.  If so, force the cleanup flag to true.
3762     if (NewClauses.empty())
3763       CleanupFlag = true;
3764     NLI->setCleanup(CleanupFlag);
3765     return NLI;
3766   }
3767 
3768   // Even if none of the clauses changed, we may nonetheless have understood
3769   // that the cleanup flag is pointless.  Clear it if so.
3770   if (LI.isCleanup() != CleanupFlag) {
3771     assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
3772     LI.setCleanup(CleanupFlag);
3773     return &LI;
3774   }
3775 
3776   return nullptr;
3777 }
3778 
3779 Value *
3780 InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating(FreezeInst &OrigFI) {
3781   // Try to push freeze through instructions that propagate but don't produce
3782   // poison as far as possible.  If the operand of the freeze satisfies three
3783   // conditions: 1) it has one use, 2) it does not produce poison, and 3) all
3784   // but one of its operands are guaranteed non-poison, then push the freeze
3785   // through to the one operand that is not guaranteed non-poison.  The actual
3786   // transform is as follows.
3787   //   Op1 = ...                        ; Op1 can be poison
3788   //   Op0 = Inst(Op1, NonPoisonOps...) ; Op0 has only one use and Op1 is its
3789   //                                    ; only maybe-poison operand
3790   //   ... = Freeze(Op0)
3791   // =>
3792   //   Op1 = ...
3793   //   Op1.fr = Freeze(Op1)
3794   //   ... = Inst(Op1.fr, NonPoisonOps...)
3795   auto *OrigOp = OrigFI.getOperand(0);
3796   auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);
3797 
3798   // While we could change the other users of OrigOp to use freeze(OrigOp), that
3799   // potentially reduces their optimization potential, so let's only do this if
3800   // the OrigOp is only used by the freeze.
3801   if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
3802     return nullptr;
3803 
3804   // We can't push the freeze through an instruction which can itself create
3805   // poison.  If the only source of new poison is flags, we can simply
3806   // strip them (since we know the only use is the freeze and nothing can
3807   // benefit from them.)
3808   if (canCreateUndefOrPoison(cast<Operator>(OrigOp),
3809                              /*ConsiderFlagsAndMetadata*/ false))
3810     return nullptr;
3811 
3812   // If an operand is guaranteed not to be poison, there is no need to add a
3813   // freeze to it. So we first find the operand that is not guaranteed to be
3814   // non-poison.
3815   Use *MaybePoisonOperand = nullptr;
3816   for (Use &U : OrigOpInst->operands()) {
3817     if (isa<MetadataAsValue>(U.get()) ||
3818         isGuaranteedNotToBeUndefOrPoison(U.get()))
3819       continue;
3820     if (!MaybePoisonOperand)
3821       MaybePoisonOperand = &U;
3822     else
3823       return nullptr;
3824   }
3825 
3826   OrigOpInst->dropPoisonGeneratingFlagsAndMetadata();
3827 
3828   // If all operands are guaranteed to be non-poison, we can drop freeze.
3829   if (!MaybePoisonOperand)
3830     return OrigOp;
3831 
3832   Builder.SetInsertPoint(OrigOpInst);
3833   auto *FrozenMaybePoisonOperand = Builder.CreateFreeze(
3834       MaybePoisonOperand->get(), MaybePoisonOperand->get()->getName() + ".fr");
3835 
3836   replaceUse(*MaybePoisonOperand, FrozenMaybePoisonOperand);
3837   return OrigOp;
3838 }
3839 
3840 Instruction *InstCombinerImpl::foldFreezeIntoRecurrence(FreezeInst &FI,
3841                                                         PHINode *PN) {
3842   // Detect whether this is a recurrence with a start value and some number of
3843   // backedge values. We'll check whether we can push the freeze through the
3844   // backedge values (possibly dropping poison flags along the way) until we
3845   // reach the phi again. In that case, we can move the freeze to the start
3846   // value.
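       // As an illustrative sketch (hypothetical IR, not taken from a test), a
       // freeze of a simple counting recurrence:
       //   loop:
       //     %iv = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
       //     %iv.next = add nuw i32 %iv, 1
       //     ...
       //   %fr = freeze i32 %iv
       // could become (the nuw flag is dropped along the backedge):
       //   entry:
       //     %start.fr = freeze i32 %start
       //   loop:
       //     %iv = phi i32 [ %start.fr, %entry ], [ %iv.next, %loop ]
       //     %iv.next = add i32 %iv, 1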
3847   Use *StartU = nullptr;
3848   SmallVector<Value *> Worklist;
3849   for (Use &U : PN->incoming_values()) {
3850     if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
3851       // Add backedge value to worklist.
3852       Worklist.push_back(U.get());
3853       continue;
3854     }
3855 
3856     // Don't bother handling multiple start values.
3857     if (StartU)
3858       return nullptr;
3859     StartU = &U;
3860   }
3861 
3862   if (!StartU || Worklist.empty())
3863     return nullptr; // Not a recurrence.
3864 
3865   Value *StartV = StartU->get();
3866   BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
3867   bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
3868   // We can't insert freeze if the start value is the result of the
3869   // terminator (e.g. an invoke).
3870   if (StartNeedsFreeze && StartBB->getTerminator() == StartV)
3871     return nullptr;
3872 
3873   SmallPtrSet<Value *, 32> Visited;
3874   SmallVector<Instruction *> DropFlags;
3875   while (!Worklist.empty()) {
3876     Value *V = Worklist.pop_back_val();
3877     if (!Visited.insert(V).second)
3878       continue;
3879 
3880     if (Visited.size() > 32)
3881       return nullptr; // Limit the total number of values we inspect.
3882 
3883     // Assume that PN is non-poison, because it will be after the transform.
3884     if (V == PN || isGuaranteedNotToBeUndefOrPoison(V))
3885       continue;
3886 
3887     Instruction *I = dyn_cast<Instruction>(V);
3888     if (!I || canCreateUndefOrPoison(cast<Operator>(I),
3889                                      /*ConsiderFlagsAndMetadata*/ false))
3890       return nullptr;
3891 
3892     DropFlags.push_back(I);
3893     append_range(Worklist, I->operands());
3894   }
3895 
3896   for (Instruction *I : DropFlags)
3897     I->dropPoisonGeneratingFlagsAndMetadata();
3898 
3899   if (StartNeedsFreeze) {
3900     Builder.SetInsertPoint(StartBB->getTerminator());
3901     Value *FrozenStartV = Builder.CreateFreeze(StartV,
3902                                                StartV->getName() + ".fr");
3903     replaceUse(*StartU, FrozenStartV);
3904   }
3905   return replaceInstUsesWith(FI, PN);
3906 }
3907 
3908 bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) {
3909   Value *Op = FI.getOperand(0);
3910 
3911   if (isa<Constant>(Op) || Op->hasOneUse())
3912     return false;
3913 
3914   // Move the freeze directly after the definition of its operand, so that
3915   // it dominates the maximum number of uses. Note that it may not dominate
3916   // *all* uses if the operand is an invoke/callbr and the use is in a phi on
3917   // the normal/default destination. This is why the domination check in the
3918   // replacement below is still necessary.
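       // For intuition (hypothetical IR, not from the source tree):
       //   %x = call i32 @f()
       //   %u = add i32 %x, 1
       //   %fr = freeze i32 %x
       // Hoisting the freeze to just after the definition of %x lets the use in
       // %u be rewritten to %fr, so every dominated use observes one frozen value.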
3919   BasicBlock::iterator MoveBefore;
3920   if (isa<Argument>(Op)) {
3921     MoveBefore =
3922         FI.getFunction()->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
3923   } else {
3924     auto MoveBeforeOpt = cast<Instruction>(Op)->getInsertionPointAfterDef();
3925     if (!MoveBeforeOpt)
3926       return false;
3927     MoveBefore = *MoveBeforeOpt;
3928   }
3929 
3930   // Don't move to the position of a debug intrinsic.
3931   if (isa<DbgInfoIntrinsic>(MoveBefore))
3932     MoveBefore = MoveBefore->getNextNonDebugInstruction()->getIterator();
3933   // Re-point the iterator to come after any debug-info records, if we're
3934   // running in "RemoveDIs" mode.
3935   MoveBefore.setHeadBit(false);
3936 
3937   bool Changed = false;
3938   if (&FI != &*MoveBefore) {
3939     FI.moveBefore(*MoveBefore->getParent(), MoveBefore);
3940     Changed = true;
3941   }
3942 
3943   Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {
3944     bool Dominates = DT.dominates(&FI, U);
3945     Changed |= Dominates;
3946     return Dominates;
3947   });
3948 
3949   return Changed;
3950 }
3951 
3952 // Check if any direct or bitcast user of this value is a shuffle instruction.
3953 static bool isUsedWithinShuffleVector(Value *V) {
3954   for (auto *U : V->users()) {
3955     if (isa<ShuffleVectorInst>(U))
3956       return true;
3957     else if (match(U, m_BitCast(m_Specific(V))) && isUsedWithinShuffleVector(U))
3958       return true;
3959   }
3960   return false;
3961 }
3962 
3963 Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
3964   Value *Op0 = I.getOperand(0);
3965 
3966   if (Value *V = simplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
3967     return replaceInstUsesWith(I, V);
3968 
3969   // freeze (phi const, x) --> phi const, (freeze x)
3970   if (auto *PN = dyn_cast<PHINode>(Op0)) {
3971     if (Instruction *NV = foldOpIntoPhi(I, PN))
3972       return NV;
3973     if (Instruction *NV = foldFreezeIntoRecurrence(I, PN))
3974       return NV;
3975   }
3976 
3977   if (Value *NI = pushFreezeToPreventPoisonFromPropagating(I))
3978     return replaceInstUsesWith(I, NI);
3979 
3980   // If I is freeze(undef), check its uses and fold it to a fixed constant.
3981   // - or: pick -1
3982   // - select's condition: if the true value is constant, choose it by making
3983   //                       the condition true.
3984   // - default: pick 0
3985   //
3986   // Note that this transform is intentionally done here rather than
3987   // via an analysis in InstSimplify or at individual user sites. That is
3988   // because we must produce the same value for all uses of the freeze -
3989   // it's the reason "freeze" exists!
3990   //
3991   // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid
3992   //       duplicating logic for binops at least.
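       // A hypothetical example of the rules above: if every user of
       //   %f = freeze i32 undef
       // is an 'or', picking -1 lets each 'or %f, %x' fold to -1; with users of
       // mixed kinds, BestValue falls back to the null value, 0.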
3993   auto getUndefReplacement = [&I](Type *Ty) {
3994     Constant *BestValue = nullptr;
3995     Constant *NullValue = Constant::getNullValue(Ty);
3996     for (const auto *U : I.users()) {
3997       Constant *C = NullValue;
3998       if (match(U, m_Or(m_Value(), m_Value())))
3999         C = ConstantInt::getAllOnesValue(Ty);
4000       else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
4001         C = ConstantInt::getTrue(Ty);
4002 
4003       if (!BestValue)
4004         BestValue = C;
4005       else if (BestValue != C)
4006         BestValue = NullValue;
4007     }
4008     assert(BestValue && "Must have at least one use");
4009     return BestValue;
4010   };
4011 
4012   if (match(Op0, m_Undef())) {
4013     // Don't fold freeze(undef/poison) if it's used as a vector operand in
4014     // a shuffle. This may improve codegen for shuffles that allow
4015     // unspecified inputs.
4016     if (isUsedWithinShuffleVector(&I))
4017       return nullptr;
4018     return replaceInstUsesWith(I, getUndefReplacement(I.getType()));
4019   }
4020 
4021   Constant *C;
4022   if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement()) {
4023     Constant *ReplaceC = getUndefReplacement(I.getType()->getScalarType());
4024     return replaceInstUsesWith(I, Constant::replaceUndefsWith(C, ReplaceC));
4025   }
4026 
4027   // Replace uses of Op with freeze(Op).
4028   if (freezeOtherUses(I))
4029     return &I;
4030 
4031   return nullptr;
4032 }
4033 
4034 /// Check for the case where the call writes to an otherwise dead alloca. This
4035 /// shows up for unused out-params in idiomatic C/C++ code. Note that this
4036 /// helper *only* analyzes the write; it doesn't check other legality aspects.
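     /// An illustrative (hypothetical) C idiom this aims at:
     ///   int ignored;
     ///   (void)frexp(x, &ignored);  // the write to 'ignored' is never read
     /// Here the call's only write lands in an alloca with no other users.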
4037 static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {
4038   auto *CB = dyn_cast<CallBase>(I);
4039   if (!CB)
4040     // TODO: handle e.g. store to alloca here - only worth doing if we extend
4041     // to allow reload along used path as described below.  Otherwise, this
4042     // is simply a store to a dead allocation which will be removed.
4043     return false;
4044   std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
4045   if (!Dest)
4046     return false;
4047   auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
4048   if (!AI)
4049     // TODO: allow malloc?
4050     return false;
4051   // TODO: allow memory access dominated by move point?  Note that since AI
4052   // could have a reference to itself captured by the call, we would need to
4053   // account for cycles in doing so.
4054   SmallVector<const User *> AllocaUsers;
4055   SmallPtrSet<const User *, 4> Visited;
4056   auto pushUsers = [&](const Instruction &I) {
4057     for (const User *U : I.users()) {
4058       if (Visited.insert(U).second)
4059         AllocaUsers.push_back(U);
4060     }
4061   };
4062   pushUsers(*AI);
4063   while (!AllocaUsers.empty()) {
4064     auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
4065     if (isa<BitCastInst>(UserI) || isa<GetElementPtrInst>(UserI) ||
4066         isa<AddrSpaceCastInst>(UserI)) {
4067       pushUsers(*UserI);
4068       continue;
4069     }
4070     if (UserI == CB)
4071       continue;
4072     // TODO: support lifetime.start/end here
4073     return false;
4074   }
4075   return true;
4076 }
4077 
4078 /// Try to move the specified instruction from its current block into the
4079 /// beginning of DestBlock, which can only happen if it's safe to move the
4080 /// instruction past all of the instructions between it and the end of its
4081 /// block.
4082 bool InstCombinerImpl::tryToSinkInstruction(Instruction *I,
4083                                             BasicBlock *DestBlock) {
4084   BasicBlock *SrcBlock = I->getParent();
4085 
4086   // Cannot move control-flow-involving instructions, volatile loads, vaarg, etc.
4087   if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
4088       I->isTerminator())
4089     return false;
4090 
4091   // Do not sink static or dynamic alloca instructions. Static allocas must
4092   // remain in the entry block, and dynamic allocas must not be sunk in between
4093   // a stacksave / stackrestore pair, which would incorrectly shorten its
4094   // lifetime.
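       // E.g. (hypothetical): if a dynamic alloca were sunk to a point between an
       // @llvm.stacksave and its matching @llvm.stackrestore, the restore would
       // release the allocation while it may still be in use.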
4095   if (isa<AllocaInst>(I))
4096     return false;
4097 
4098   // Do not sink into catchswitch blocks.
4099   if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
4100     return false;
4101 
4102   // Do not sink convergent call instructions.
4103   if (auto *CI = dyn_cast<CallInst>(I)) {
4104     if (CI->isConvergent())
4105       return false;
4106   }
4107 
4108   // Unless we can prove that the memory write isn't visible except on the
4109   // path we're sinking to, we must bail.
4110   if (I->mayWriteToMemory()) {
4111     if (!SoleWriteToDeadLocal(I, TLI))
4112       return false;
4113   }
4114 
4115   // We can only sink load instructions if there is nothing between the load and
4116   // the end of the block that could change the value.
4117   if (I->mayReadFromMemory()) {
4118     // We don't want to do any sophisticated alias analysis, so we only check
4119     // the instructions after I in I's parent block if we try to sink to its
4120     // successor block.
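         // E.g. (hypothetical): a load cannot be sunk past any intervening store,
         // even to an apparently unrelated address, since no aliasing is checked.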
4121     if (DestBlock->getUniquePredecessor() != I->getParent())
4122       return false;
4123     for (BasicBlock::iterator Scan = std::next(I->getIterator()),
4124                               E = I->getParent()->end();
4125          Scan != E; ++Scan)
4126       if (Scan->mayWriteToMemory())
4127         return false;
4128   }
4129 
4130   I->dropDroppableUses([&](const Use *U) {
4131     auto *I = dyn_cast<Instruction>(U->getUser());
4132     if (I && I->getParent() != DestBlock) {
4133       Worklist.add(I);
4134       return true;
4135     }
4136     return false;
4137   });
4138   /// FIXME: We could remove droppable uses that are not dominated by
4139   /// the new position.
4140 
4141   BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
4142   I->moveBefore(*DestBlock, InsertPos);
4143   ++NumSunkInst;
4144 
4145   // Also sink all related debug uses from the source basic block. Otherwise we
4146   // get a debug use before the def. Attempt to salvage debug uses first, to
4147   // maximise the range over which variables have a location. If we cannot
4148   // salvage, then mark the location undef: we know it was supposed to receive a
4149   // new location here, but that computation has been sunk.
4150   SmallVector<DbgVariableIntrinsic *, 2> DbgUsers;
4151   findDbgUsers(DbgUsers, I);
4152 
4153   // For all debug values in the destination block, the sunk instruction
4154   // will still be available, so they do not need to be dropped.
4155   SmallVector<DbgVariableIntrinsic *, 2> DbgUsersToSalvage;
4156   SmallVector<DPValue *, 2> DPValuesToSalvage;
4157   for (auto &DbgUser : DbgUsers)
4158     if (DbgUser->getParent() != DestBlock)
4159       DbgUsersToSalvage.push_back(DbgUser);
4160 
4161   // Process the debug users being sunk in reverse order, as we only want to
4162   // clone the last appearing debug intrinsic for each given variable.
4163   SmallVector<DbgVariableIntrinsic *, 2> DbgUsersToSink;
4164   for (DbgVariableIntrinsic *DVI : DbgUsersToSalvage)
4165     if (DVI->getParent() == SrcBlock)
4166       DbgUsersToSink.push_back(DVI);
4167   llvm::sort(DbgUsersToSink,
4168              [](auto *A, auto *B) { return B->comesBefore(A); });
4169 
4170   SmallVector<DbgVariableIntrinsic *, 2> DIIClones;
4171   SmallSet<DebugVariable, 4> SunkVariables;
4172   for (auto *User : DbgUsersToSink) {
4173     // A dbg.declare instruction should not be cloned, since there can only be
4174     // one per variable fragment. It should be left in the original place
4175     // because the sunk instruction is not an alloca (otherwise we could not be
4176     // here).
4177     if (isa<DbgDeclareInst>(User))
4178       continue;
4179 
4180     DebugVariable DbgUserVariable =
4181         DebugVariable(User->getVariable(), User->getExpression(),
4182                       User->getDebugLoc()->getInlinedAt());
4183 
4184     if (!SunkVariables.insert(DbgUserVariable).second)
4185       continue;
4186 
4187     // Leave dbg.assign intrinsics in their original positions; there should
4188     // be no need to insert a clone.
4189     if (isa<DbgAssignIntrinsic>(User))
4190       continue;
4191 
4192     DIIClones.emplace_back(cast<DbgVariableIntrinsic>(User->clone()));
4193     if (isa<DbgDeclareInst>(User) && isa<CastInst>(I))
4194       DIIClones.back()->replaceVariableLocationOp(I, I->getOperand(0));
4195     LLVM_DEBUG(dbgs() << "CLONE: " << *DIIClones.back() << '\n');
4196   }
4197 
4198   // Perform salvaging without the clones, then sink the clones.
4199   if (!DIIClones.empty()) {
4200     // RemoveDIs: pass in an empty vector of DPValues until we get to
4201     // instrumenting this pass.
4202     SmallVector<DPValue *, 1> DummyDPValues;
4203     salvageDebugInfoForDbgValues(*I, DbgUsersToSalvage, DummyDPValues);
4204     // The clones are in reverse order of original appearance, reverse again to
4205     // maintain the original order.
4206     for (auto &DIIClone : llvm::reverse(DIIClones)) {
4207       DIIClone->insertBefore(&*InsertPos);
4208       LLVM_DEBUG(dbgs() << "SINK: " << *DIIClone << '\n');
4209     }
4210   }
4211 
4212   return true;
4213 }
4214 
4215 bool InstCombinerImpl::run() {
4216   while (!Worklist.isEmpty()) {
4217     // Walk deferred instructions in reverse order, and push them to the
4218     // worklist, which means they'll end up popped from the worklist in-order.
4219     while (Instruction *I = Worklist.popDeferred()) {
4220       // Check to see if we can DCE the instruction. We already do this here to
4221       // reduce the number of uses and thus allow other folds to trigger.
4222       // Note that eraseInstFromFunction() may push additional instructions on
4223       // the deferred worklist, so this will DCE whole instruction chains.
4224       if (isInstructionTriviallyDead(I, &TLI)) {
4225         eraseInstFromFunction(*I);
4226         ++NumDeadInst;
4227         continue;
4228       }
4229 
4230       Worklist.push(I);
4231     }
4232 
4233     Instruction *I = Worklist.removeOne();
4234     if (I == nullptr) continue;  // skip null values.
4235 
4236     // Check to see if we can DCE the instruction.
4237     if (isInstructionTriviallyDead(I, &TLI)) {
4238       eraseInstFromFunction(*I);
4239       ++NumDeadInst;
4240       continue;
4241     }
4242 
4243     if (!DebugCounter::shouldExecute(VisitCounter))
4244       continue;
4245 
4246     // See if we can trivially sink this instruction to its user if we can
4247     // prove that the successor is not executed more frequently than our block.
4248     // Return the UserBlock if successful.
4249     auto getOptionalSinkBlockForInst =
4250         [this](Instruction *I) -> std::optional<BasicBlock *> {
4251       if (!EnableCodeSinking)
4252         return std::nullopt;
4253 
4254       BasicBlock *BB = I->getParent();
4255       BasicBlock *UserParent = nullptr;
4256       unsigned NumUsers = 0;
4257 
4258       for (auto *U : I->users()) {
4259         if (U->isDroppable())
4260           continue;
4261         if (NumUsers > MaxSinkNumUsers)
4262           return std::nullopt;
4263 
4264         Instruction *UserInst = cast<Instruction>(U);
4265         // Special handling for Phi nodes - get the block the use occurs in.
4266         if (PHINode *PN = dyn_cast<PHINode>(UserInst)) {
4267           for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
4268             if (PN->getIncomingValue(i) == I) {
4269               // Bail out if we have uses in different blocks. We don't do any
4270               // sophisticated analysis (i.e. finding the NearestCommonDominator
4271               // of these use blocks).
4272               if (UserParent && UserParent != PN->getIncomingBlock(i))
4273                 return std::nullopt;
4274               UserParent = PN->getIncomingBlock(i);
4275             }
4276           }
4277           assert(UserParent && "expected to find user block!");
4278         } else {
4279           if (UserParent && UserParent != UserInst->getParent())
4280             return std::nullopt;
4281           UserParent = UserInst->getParent();
4282         }
4283 
4284         // Make sure these checks are done only once; naturally, we do them the
4285         // first time we find UserParent, which saves compile time.
4286         if (NumUsers == 0) {
4287           // Try sinking to another block. If that block is unreachable, then do
4288           // not bother. SimplifyCFG should handle it.
4289           if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
4290             return std::nullopt;
4291 
4292           auto *Term = UserParent->getTerminator();
4293           // See if the user is one of our successors that has only one
4294           // predecessor, so that we don't have to split the critical edge.
4295           // Another option where we can sink is a block that ends with a
4296           // terminator that does not pass control to another block (such as
4297           // return, unreachable, or resume). In this case:
4298           //   - I dominates the User (by SSA form);
4299           //   - the User will be executed at most once.
4300           // So sinking I down to User is always profitable or neutral.
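               // E.g. (hypothetical): sinking a division from this block into a
               // single-predecessor return block that contains its only user
               // avoids computing it on paths that never need the result.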
4301           if (UserParent->getUniquePredecessor() != BB && !succ_empty(Term))
4302             return std::nullopt;
4303 
4304           assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
4305         }
4306 
4307         NumUsers++;
4308       }
4309 
4310       // No users, or only droppable users.
4311       if (!UserParent)
4312         return std::nullopt;
4313 
4314       return UserParent;
4315     };
4316 
4317     auto OptBB = getOptionalSinkBlockForInst(I);
4318     if (OptBB) {
4319       auto *UserParent = *OptBB;
4320       // Okay, the CFG is simple enough, try to sink this instruction.
4321       if (tryToSinkInstruction(I, UserParent)) {
4322         LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
4323         MadeIRChange = true;
4324         // We'll add uses of the sunk instruction below, but since
4325         // sinking can expose opportunities for its *operands*, add
4326         // them to the worklist.
4327         for (Use &U : I->operands())
4328           if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
4329             Worklist.push(OpI);
4330       }
4331     }
4332 
4333     // Now that we have an instruction, try combining it to simplify it.
4334     Builder.SetInsertPoint(I);
4335     Builder.CollectMetadataToCopy(
4336         I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
4337 
4338 #ifndef NDEBUG
4339     std::string OrigI;
4340 #endif
4341     LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
4342     LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
4343 
4344     if (Instruction *Result = visit(*I)) {
4345       ++NumCombined;
4346       // Should we replace the old instruction with a new one?
4347       if (Result != I) {
4348         LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
4349                           << "    New = " << *Result << '\n');
4350 
4351         Result->copyMetadata(*I,
4352                              {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
4353         // Everything uses the new instruction now.
4354         I->replaceAllUsesWith(Result);
4355 
4356         // Move the name to the new instruction first.
4357         Result->takeName(I);
4358 
4359         // Insert the new instruction into the basic block...
4360         BasicBlock *InstParent = I->getParent();
4361         BasicBlock::iterator InsertPos = I->getIterator();
4362 
4363         // Are we replacing a PHI with something that isn't a PHI, or vice versa?
4364         if (isa<PHINode>(Result) != isa<PHINode>(I)) {
4365           // We need to fix up the insertion point.
4366           if (isa<PHINode>(I)) // PHI -> Non-PHI
4367             InsertPos = InstParent->getFirstInsertionPt();
4368           else // Non-PHI -> PHI
4369             InsertPos = InstParent->getFirstNonPHI()->getIterator();
4370         }
4371 
4372         Result->insertInto(InstParent, InsertPos);
4373 
4374         // Push the new instruction and any users onto the worklist.
4375         Worklist.pushUsersToWorkList(*Result);
4376         Worklist.push(Result);
4377 
4378         eraseInstFromFunction(*I);
4379       } else {
4380         LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
4381                           << "    New = " << *I << '\n');
4382 
4383         // If the instruction was modified, it's possible that it is now dead.
4384         // If so, remove it.
4385         if (isInstructionTriviallyDead(I, &TLI)) {
4386           eraseInstFromFunction(*I);
4387         } else {
4388           Worklist.pushUsersToWorkList(*I);
4389           Worklist.push(I);
4390         }
4391       }
4392       MadeIRChange = true;
4393     }
4394   }
4395 
4396   Worklist.zap();
4397   return MadeIRChange;
4398 }
4399 
4400 // Track the scopes used by !alias.scope and !noalias. In a function, a
4401 // @llvm.experimental.noalias.scope.decl is only useful if that scope is used
4402 // by both sets. If not, the declaration of the scope can be safely omitted.
4403 // The MDNode of the scope can be omitted as well for the instructions that are
4404 // part of this function. We do not do that at this point, as this might become
4405 // too time-consuming to do.
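     // For example (hypothetical metadata): a declaration such as
     //   call void @llvm.experimental.noalias.scope.decl(metadata !2)
     // can be dropped unless scope !2 still appears both in some instruction's
     // !alias.scope list and in some instruction's !noalias list.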
4406 class AliasScopeTracker {
4407   SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
4408   SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;
4409 
4410 public:
4411   void analyse(Instruction *I) {
4412     // This seems to be faster than checking 'mayReadOrWriteMemory()'.
4413     if (!I->hasMetadataOtherThanDebugLoc())
4414       return;
4415 
4416     auto Track = [](Metadata *ScopeList, auto &Container) {
4417       const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
4418       if (!MDScopeList || !Container.insert(MDScopeList).second)
4419         return;
4420       for (const auto &MDOperand : MDScopeList->operands())
4421         if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
4422           Container.insert(MDScope);
4423     };
4424 
4425     Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
4426     Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
4427   }
4428 
4429   bool isNoAliasScopeDeclDead(Instruction *Inst) {
4430     NoAliasScopeDeclInst *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst);
4431     if (!Decl)
4432       return false;
4433 
4434     assert(Decl->use_empty() &&
4435            "llvm.experimental.noalias.scope.decl in use?");
4436     const MDNode *MDSL = Decl->getScopeList();
4437     assert(MDSL->getNumOperands() == 1 &&
4438            "llvm.experimental.noalias.scope should refer to a single scope");
4439     auto &MDOperand = MDSL->getOperand(0);
4440     if (auto *MD = dyn_cast<MDNode>(MDOperand))
4441       return !UsedAliasScopesAndLists.contains(MD) ||
4442              !UsedNoAliasScopesAndLists.contains(MD);
4443 
4444     // Not an MDNode? Throw it away.
4445     return true;
4446   }
4447 };
4448 
4449 /// Populate the IC worklist from a function, by walking it in reverse
4450 /// post-order and adding all reachable code to the worklist.
4451 ///
4452 /// This has a couple of tricks to make the code faster and more powerful.  In
4453 /// particular, we constant fold and DCE instructions as we go, to avoid adding
4454 /// them to the worklist (this significantly speeds up instcombine on code where
4455 /// many instructions are dead or constant).  Additionally, if we find a branch
4456 /// whose condition is a known constant, we only visit the reachable successors.
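     /// As a sketch (hypothetical IR): given 'br i1 true, label %t, label %f',
     /// only %t is treated as live; %f's phi inputs along the dead edge are
     /// replaced with poison, and %f is skipped entirely if it has no other live
     /// predecessors.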
4457 bool InstCombinerImpl::prepareWorklist(
4458     Function &F, ReversePostOrderTraversal<BasicBlock *> &RPOT) {
4459   bool MadeIRChange = false;
4460   SmallPtrSet<BasicBlock *, 32> LiveBlocks;
4461   SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
4462   DenseMap<Constant *, Constant *> FoldedConstants;
4463   AliasScopeTracker SeenAliasScopes;
4464 
4465   auto HandleOnlyLiveSuccessor = [&](BasicBlock *BB, BasicBlock *LiveSucc) {
4466     for (BasicBlock *Succ : successors(BB))
4467       if (Succ != LiveSucc && DeadEdges.insert({BB, Succ}).second)
4468         for (PHINode &PN : Succ->phis())
4469           for (Use &U : PN.incoming_values())
4470             if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
4471               U.set(PoisonValue::get(PN.getType()));
4472               MadeIRChange = true;
4473             }
4474   };
4475 
4476   for (BasicBlock *BB : RPOT) {
4477     if (!BB->isEntryBlock() && all_of(predecessors(BB), [&](BasicBlock *Pred) {
4478           return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
4479         })) {
4480       HandleOnlyLiveSuccessor(BB, nullptr);
4481       continue;
4482     }
4483     LiveBlocks.insert(BB);
4484 
4485     for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
4486       // Constant-fold the instruction if it is trivially constant.
4487       if (!Inst.use_empty() &&
4488           (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
4489         if (Constant *C = ConstantFoldInstruction(&Inst, DL, &TLI)) {
4490           LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
4491                             << '\n');
4492           Inst.replaceAllUsesWith(C);
4493           ++NumConstProp;
4494           if (isInstructionTriviallyDead(&Inst, &TLI))
4495             Inst.eraseFromParent();
4496           MadeIRChange = true;
4497           continue;
4498         }
4499 
4500       // See if we can constant fold its operands.
4501       for (Use &U : Inst.operands()) {
4502         if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
4503           continue;
4504 
4505         auto *C = cast<Constant>(U);
4506         Constant *&FoldRes = FoldedConstants[C];
4507         if (!FoldRes)
4508           FoldRes = ConstantFoldConstant(C, DL, &TLI);
4509 
4510         if (FoldRes != C) {
4511           LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
4512                             << "\n    Old = " << *C
4513                             << "\n    New = " << *FoldRes << '\n');
4514           U = FoldRes;
4515           MadeIRChange = true;
4516         }
4517       }
4518 
4519       // Skip processing debug and pseudo intrinsics in InstCombine. Processing
4520       // these call instructions consumes a non-trivial amount of time and
4521       // provides no value for the optimization.
4522       if (!Inst.isDebugOrPseudoInst()) {
4523         InstrsForInstructionWorklist.push_back(&Inst);
4524         SeenAliasScopes.analyse(&Inst);
4525       }
4526     }
4527 
4528     // If this is a branch or switch on a constant, mark only the single
4529     // live successor. Otherwise assume all successors are live.
4530     Instruction *TI = BB->getTerminator();
4531     if (BranchInst *BI = dyn_cast<BranchInst>(TI); BI && BI->isConditional()) {
4532       if (isa<UndefValue>(BI->getCondition())) {
4533         // Branch on undef is UB.
4534         HandleOnlyLiveSuccessor(BB, nullptr);
4535         continue;
4536       }
4537       if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
4538         bool CondVal = Cond->getZExtValue();
4539         HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
4540         continue;
4541       }
4542     } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
4543       if (isa<UndefValue>(SI->getCondition())) {
4544         // Switch on undef is UB.
4545         HandleOnlyLiveSuccessor(BB, nullptr);
4546         continue;
4547       }
4548       if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
4549         HandleOnlyLiveSuccessor(BB,
4550                                 SI->findCaseValue(Cond)->getCaseSuccessor());
4551         continue;
4552       }
4553     }
4554   }
4555 
4556   // Remove instructions inside unreachable blocks. This prevents the
4557   // instcombine code from having to deal with some bad special cases, and
4558   // reduces use counts of instructions.
4559   for (BasicBlock &BB : F) {
4560     if (LiveBlocks.count(&BB))
4561       continue;
4562 
4563     unsigned NumDeadInstInBB;
4564     unsigned NumDeadDbgInstInBB;
4565     std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) =
4566         removeAllNonTerminatorAndEHPadInstructions(&BB);
4567 
4568     MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0;
4569     NumDeadInst += NumDeadInstInBB;
4570   }
4571 
4572   // Once we've found all of the instructions to add to instcombine's worklist,
4573   // add them in reverse order.  This way instcombine will visit from the top
4574   // of the function down.  This jibes well with the way that it adds all uses
4575   // of instructions to the worklist after doing a transformation, thus avoiding
4576   // some N^2 behavior in pathological cases.
4577   Worklist.reserve(InstrsForInstructionWorklist.size());
4578   for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
4579     // DCE instruction if trivially dead. As we iterate in reverse program
4580     // order here, we will clean up whole chains of dead instructions.
4581     if (isInstructionTriviallyDead(Inst, &TLI) ||
4582         SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
4583       ++NumDeadInst;
4584       LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
4585       salvageDebugInfo(*Inst);
4586       Inst->eraseFromParent();
4587       MadeIRChange = true;
4588       continue;
4589     }
4590 
4591     Worklist.push(Inst);
4592   }
4593 
4594   return MadeIRChange;
4595 }
4596 
4597 static bool combineInstructionsOverFunction(
4598     Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA,
4599     AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
4600     DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
4601     ProfileSummaryInfo *PSI, LoopInfo *LI, const InstCombineOptions &Opts) {
4602   auto &DL = F.getParent()->getDataLayout();
4603 
4604   /// Builder - This is an IRBuilder that automatically adds newly created
4605   /// instructions to the worklist.
4606   IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
4607       F.getContext(), TargetFolder(DL),
4608       IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
4609         Worklist.add(I);
4610         if (auto *Assume = dyn_cast<AssumeInst>(I))
4611           AC.registerAssumption(Assume);
4612       }));
4613 
4614   ReversePostOrderTraversal<BasicBlock *> RPOT(&F.front());
4615 
4616   // Lower dbg.declare intrinsics, otherwise their value may be clobbered
4617   // by the instcombiner.
4618   bool MadeIRChange = false;
4619   if (ShouldLowerDbgDeclare)
4620     MadeIRChange = LowerDbgDeclare(F);
4621 
4622   // Iterate while there is work to do.
4623   unsigned Iteration = 0;
4624   while (true) {
4625     ++Iteration;
4626 
4627     if (Iteration > Opts.MaxIterations && !Opts.VerifyFixpoint) {
4628       LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << Opts.MaxIterations
4629                         << " on " << F.getName()
4630                         << " reached; stopping without verifying fixpoint\n");
4631       break;
4632     }
4633 
4634     ++NumWorklistIterations;
4635     LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
4636                       << F.getName() << "\n");
4637 
4638     InstCombinerImpl IC(Worklist, Builder, F.hasMinSize(), AA, AC, TLI, TTI, DT,
4639                         ORE, BFI, PSI, DL, LI);
4640     IC.MaxArraySizeForCombine = MaxArraySize;
4641     bool MadeChangeInThisIteration = IC.prepareWorklist(F, RPOT);
4642     MadeChangeInThisIteration |= IC.run();
4643     if (!MadeChangeInThisIteration)
4644       break;
4645 
4646     MadeIRChange = true;
4647     if (Iteration > Opts.MaxIterations) {
4648       report_fatal_error(
4649           "Instruction Combining did not reach a fixpoint after " +
4650           Twine(Opts.MaxIterations) + " iterations");
4651     }
4652   }
4653 
4654   if (Iteration == 1)
4655     ++NumOneIteration;
4656   else if (Iteration == 2)
4657     ++NumTwoIterations;
4658   else if (Iteration == 3)
4659     ++NumThreeIterations;
4660   else
4661     ++NumFourOrMoreIterations;
4662 
4663   return MadeIRChange;
4664 }
4665 
4666 InstCombinePass::InstCombinePass(InstCombineOptions Opts) : Options(Opts) {}
4667 
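     // For reference (option values are illustrative): with MaxIterations=1,
     // UseLoopInfo disabled, and VerifyFixpoint enabled, printPipeline below
     // would emit:
     //   instcombine<max-iterations=1;no-use-loop-info;verify-fixpoint>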
4668 void InstCombinePass::printPipeline(
4669     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
4670   static_cast<PassInfoMixin<InstCombinePass> *>(this)->printPipeline(
4671       OS, MapClassName2PassName);
4672   OS << '<';
4673   OS << "max-iterations=" << Options.MaxIterations << ";";
4674   OS << (Options.UseLoopInfo ? "" : "no-") << "use-loop-info;";
4675   OS << (Options.VerifyFixpoint ? "" : "no-") << "verify-fixpoint";
4676   OS << '>';
4677 }
4678 
4679 PreservedAnalyses InstCombinePass::run(Function &F,
4680                                        FunctionAnalysisManager &AM) {
4681   auto &AC = AM.getResult<AssumptionAnalysis>(F);
4682   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
4683   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
4684   auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
4685   auto &TTI = AM.getResult<TargetIRAnalysis>(F);
4686 
4687   // TODO: Only use LoopInfo when the option is set. This requires that the
4688   //       callers in the pass pipeline explicitly set the option.
4689   auto *LI = AM.getCachedResult<LoopAnalysis>(F);
4690   if (!LI && Options.UseLoopInfo)
4691     LI = &AM.getResult<LoopAnalysis>(F);
4692 
4693   auto *AA = &AM.getResult<AAManager>(F);
4694   auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
4695   ProfileSummaryInfo *PSI =
4696       MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
4697   auto *BFI = (PSI && PSI->hasProfileSummary()) ?
4698       &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
4699 
4700   if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
4701                                        BFI, PSI, LI, Options))
4702     // No changes, all analyses are preserved.
4703     return PreservedAnalyses::all();
4704 
4705   // Mark all the analyses that instcombine updates as preserved.
4706   PreservedAnalyses PA;
4707   PA.preserveSet<CFGAnalyses>();
4708   return PA;
4709 }
4710 
4711 void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
4712   AU.setPreservesCFG();
4713   AU.addRequired<AAResultsWrapperPass>();
4714   AU.addRequired<AssumptionCacheTracker>();
4715   AU.addRequired<TargetLibraryInfoWrapperPass>();
4716   AU.addRequired<TargetTransformInfoWrapperPass>();
4717   AU.addRequired<DominatorTreeWrapperPass>();
4718   AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
4719   AU.addPreserved<DominatorTreeWrapperPass>();
4720   AU.addPreserved<AAResultsWrapperPass>();
4721   AU.addPreserved<BasicAAWrapperPass>();
4722   AU.addPreserved<GlobalsAAWrapperPass>();
4723   AU.addRequired<ProfileSummaryInfoWrapperPass>();
4724   LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
4725 }
4726 
4727 bool InstructionCombiningPass::runOnFunction(Function &F) {
4728   if (skipFunction(F))
4729     return false;
4730 
4731   // Required analyses.
4732   auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
4733   auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
4734   auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
4735   auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
4736   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
4737   auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
4738 
4739   // Optional analyses.
4740   auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
4741   auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
4742   ProfileSummaryInfo *PSI =
4743       &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
4744   BlockFrequencyInfo *BFI =
4745       (PSI && PSI->hasProfileSummary()) ?
4746       &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
4747       nullptr;
4748 
4749   return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
4750                                          BFI, PSI, LI, InstCombineOptions());
4751 }
4752 
4753 char InstructionCombiningPass::ID = 0;
4754 
4755 InstructionCombiningPass::InstructionCombiningPass() : FunctionPass(ID) {
4756   initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry());
4757 }
4758 
4759 INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
4760                       "Combine redundant instructions", false, false)
4761 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
4762 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
4763 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
4764 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
4765 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
4766 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
4767 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
4768 INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
4769 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
4770 INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",
4771                     "Combine redundant instructions", false, false)
4772 
4773 // Initialization Routines
4774 void llvm::initializeInstCombine(PassRegistry &Registry) {
4775   initializeInstructionCombiningPassPass(Registry);
4776 }
4777 
4778 FunctionPass *llvm::createInstructionCombiningPass() {
4779   return new InstructionCombiningPass();
4780 }
4781