xref: /netbsd-src/external/apache2/llvm/dist/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp (revision 82d56013d7b633d116a93943de88e08335357a7c)
1 //===- InstCombineCalls.cpp -----------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visitCall, visitInvoke, and visitCallBr functions.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "InstCombineInternal.h"
14 #include "llvm/ADT/APFloat.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/APSInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/FloatingPointMode.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/Optional.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/ADT/Twine.h"
25 #include "llvm/Analysis/AliasAnalysis.h"
26 #include "llvm/Analysis/AssumeBundleQueries.h"
27 #include "llvm/Analysis/AssumptionCache.h"
28 #include "llvm/Analysis/InstructionSimplify.h"
29 #include "llvm/Analysis/Loads.h"
30 #include "llvm/Analysis/MemoryBuiltins.h"
31 #include "llvm/Analysis/TargetTransformInfo.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/Analysis/VectorUtils.h"
34 #include "llvm/IR/Attributes.h"
35 #include "llvm/IR/BasicBlock.h"
36 #include "llvm/IR/Constant.h"
37 #include "llvm/IR/Constants.h"
38 #include "llvm/IR/DataLayout.h"
39 #include "llvm/IR/DerivedTypes.h"
40 #include "llvm/IR/Function.h"
41 #include "llvm/IR/GlobalVariable.h"
42 #include "llvm/IR/InlineAsm.h"
43 #include "llvm/IR/InstrTypes.h"
44 #include "llvm/IR/Instruction.h"
45 #include "llvm/IR/Instructions.h"
46 #include "llvm/IR/IntrinsicInst.h"
47 #include "llvm/IR/Intrinsics.h"
48 #include "llvm/IR/IntrinsicsAArch64.h"
49 #include "llvm/IR/IntrinsicsAMDGPU.h"
50 #include "llvm/IR/IntrinsicsARM.h"
51 #include "llvm/IR/IntrinsicsHexagon.h"
52 #include "llvm/IR/LLVMContext.h"
53 #include "llvm/IR/Metadata.h"
54 #include "llvm/IR/PatternMatch.h"
55 #include "llvm/IR/Statepoint.h"
56 #include "llvm/IR/Type.h"
57 #include "llvm/IR/User.h"
58 #include "llvm/IR/Value.h"
59 #include "llvm/IR/ValueHandle.h"
60 #include "llvm/Support/AtomicOrdering.h"
61 #include "llvm/Support/Casting.h"
62 #include "llvm/Support/CommandLine.h"
63 #include "llvm/Support/Compiler.h"
64 #include "llvm/Support/Debug.h"
65 #include "llvm/Support/ErrorHandling.h"
66 #include "llvm/Support/KnownBits.h"
67 #include "llvm/Support/MathExtras.h"
68 #include "llvm/Support/raw_ostream.h"
69 #include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
70 #include "llvm/Transforms/InstCombine/InstCombiner.h"
71 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
72 #include "llvm/Transforms/Utils/Local.h"
73 #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
74 #include <algorithm>
75 #include <cassert>
76 #include <cstdint>
77 #include <cstring>
78 #include <utility>
79 #include <vector>
80 
81 using namespace llvm;
82 using namespace PatternMatch;
83 
84 #define DEBUG_TYPE "instcombine"
85 
86 STATISTIC(NumSimplified, "Number of library calls simplified");
87 
88 static cl::opt<unsigned> GuardWideningWindow(
89     "instcombine-guard-widening-window",
90     cl::init(3),
91     cl::desc("How wide an instruction window to bypass looking for "
92              "another guard"));
93 
94 namespace llvm {
95 /// Enable preservation of attributes in assume calls like:
96 /// call void @llvm.assume(i1 true) [ "nonnull"(i32* %PTR) ]
97 extern cl::opt<bool> EnableKnowledgeRetention;
98 } // namespace llvm
99 
100 /// Return the specified type promoted as it would be to pass through a va_arg
101 /// area.
102 static Type *getPromotedType(Type *Ty) {
103   if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
104     if (ITy->getBitWidth() < 32)
105       return Type::getInt32Ty(Ty->getContext());
106   }
107   return Ty;
108 }
109 
110 Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
111   Align DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
112   MaybeAlign CopyDstAlign = MI->getDestAlign();
113   if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
114     MI->setDestAlignment(DstAlign);
115     return MI;
116   }
117 
118   Align SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
119   MaybeAlign CopySrcAlign = MI->getSourceAlign();
120   if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
121     MI->setSourceAlignment(SrcAlign);
122     return MI;
123   }
124 
125   // If we have a store to a location which is known constant, we can conclude
126   // that the store must be storing the constant value (else the memory
127   // wouldn't be constant), and this must be a noop.
128   if (AA->pointsToConstantMemory(MI->getDest())) {
129     // Set the size of the copy to 0; it will be deleted on the next iteration.
130     MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
131     return MI;
132   }
133 
134   // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
135   // load/store.
136   ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
137   if (!MemOpLength) return nullptr;
138 
139   // Source and destination pointer types are always "i8*" for the intrinsic.  See
140   // if the size is something we can handle with a single primitive load/store.
141   // A single load+store correctly handles overlapping memory in the memmove
142   // case.
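  // Illustrative sketch of the rewrite (value names are placeholders):
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i1 false)
  //     -->
  //   %sp = bitcast i8* %s to i64*
  //   %dp = bitcast i8* %d to i64*
  //   %v  = load i64, i64* %sp
  //   store i64 %v, i64* %dp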
143   uint64_t Size = MemOpLength->getLimitedValue();
144   assert(Size && "0-sized memory transferring should be removed already.");
145 
146   if (Size > 8 || (Size&(Size-1)))
147     return nullptr;  // If not 1/2/4/8 bytes, exit.
148 
149   // If it is an atomic transfer and the alignment is less than the size, we
150   // would introduce an unaligned memory access, which CodeGen would later
151   // turn into a libcall. That is not an evident performance gain, so disable
152   // the transform for now.
153   if (isa<AtomicMemTransferInst>(MI))
154     if (*CopyDstAlign < Size || *CopySrcAlign < Size)
155       return nullptr;
156 
157   // Use an integer load+store unless we can find something better.
158   unsigned SrcAddrSp =
159     cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
160   unsigned DstAddrSp =
161     cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();
162 
163   IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
164   Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
165   Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);
166 
167   // If the memcpy has metadata describing the members, see if we can get the
168   // TBAA tag describing our copy.
169   MDNode *CopyMD = nullptr;
170   if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa)) {
171     CopyMD = M;
172   } else if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
173     if (M->getNumOperands() == 3 && M->getOperand(0) &&
174         mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
175         mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
176         M->getOperand(1) &&
177         mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
178         mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
179         Size &&
180         M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
181       CopyMD = cast<MDNode>(M->getOperand(2));
182   }
183 
184   Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
185   Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
186   LoadInst *L = Builder.CreateLoad(IntType, Src);
187   // Alignment from the mem intrinsic will be better, so use it.
188   L->setAlignment(*CopySrcAlign);
189   if (CopyMD)
190     L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
191   MDNode *LoopMemParallelMD =
192     MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
193   if (LoopMemParallelMD)
194     L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
195   MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
196   if (AccessGroupMD)
197     L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
198 
199   StoreInst *S = Builder.CreateStore(L, Dest);
200   // Alignment from the mem intrinsic will be better, so use it.
201   S->setAlignment(*CopyDstAlign);
202   if (CopyMD)
203     S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
204   if (LoopMemParallelMD)
205     S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
206   if (AccessGroupMD)
207     S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
208 
209   if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
210     // non-atomics can be volatile
211     L->setVolatile(MT->isVolatile());
212     S->setVolatile(MT->isVolatile());
213   }
214   if (isa<AtomicMemTransferInst>(MI)) {
215     // atomics have to be unordered
216     L->setOrdering(AtomicOrdering::Unordered);
217     S->setOrdering(AtomicOrdering::Unordered);
218   }
219 
220   // Set the size of the copy to 0; it will be deleted on the next iteration.
221   MI->setLength(Constant::getNullValue(MemOpLength->getType()));
222   return MI;
223 }
224 
225 Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) {
226   const Align KnownAlignment =
227       getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
228   MaybeAlign MemSetAlign = MI->getDestAlign();
229   if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
230     MI->setDestAlignment(KnownAlignment);
231     return MI;
232   }
233 
234   // If we have a store to a location which is known constant, we can conclude
235   // that the store must be storing the constant value (else the memory
236   // wouldn't be constant), and this must be a noop.
237   if (AA->pointsToConstantMemory(MI->getDest())) {
238     // Set the length of the memset to 0; it will be deleted on the next iteration.
239     MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
240     return MI;
241   }
242 
243   // Extract the length and alignment and fill if they are constant.
244   ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
245   ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
246   if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
247     return nullptr;
248   const uint64_t Len = LenC->getLimitedValue();
249   assert(Len && "0-sized memory setting should be removed already.");
250   const Align Alignment = assumeAligned(MI->getDestAlignment());
251 
252   // If it is an atomic memset and the alignment is less than the size, we
253   // would introduce an unaligned memory access, which CodeGen would later
254   // turn into a libcall. That is not an evident performance gain, so disable
255   // the transform for now.
256   if (isa<AtomicMemSetInst>(MI))
257     if (Alignment < Len)
258       return nullptr;
259 
260   // memset(s,c,n) -> store s, c (for n=1,2,4,8)
261   if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
262     Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.
263 
264     Value *Dest = MI->getDest();
265     unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
266     Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
267     Dest = Builder.CreateBitCast(Dest, NewDstPtrTy);
268 
269     // Extract the fill value and store.
270     uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
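    // Illustrative example: multiplying the fill byte by 0x0101010101010101
    // replicates it into every byte of a 64-bit value, e.g. 0xAB becomes
    // 0xABABABABABABABAB; ConstantInt::get(ITy, Fill) then keeps only the low
    // bits that fit the store width (0xABAB for i16, 0xABABABAB for i32).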
271     StoreInst *S = Builder.CreateStore(ConstantInt::get(ITy, Fill), Dest,
272                                        MI->isVolatile());
273     S->setAlignment(Alignment);
274     if (isa<AtomicMemSetInst>(MI))
275       S->setOrdering(AtomicOrdering::Unordered);
276 
277     // Set the length of the memset to 0; it will be deleted on the next iteration.
278     MI->setLength(Constant::getNullValue(LenC->getType()));
279     return MI;
280   }
281 
282   return nullptr;
283 }
284 
285 // TODO, Obvious Missing Transforms:
286 // * Narrow width by halfs excluding zero/undef lanes
287 Value *InstCombinerImpl::simplifyMaskedLoad(IntrinsicInst &II) {
288   Value *LoadPtr = II.getArgOperand(0);
289   const Align Alignment =
290       cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
291 
292   // If the mask is all ones or undefs, this is a plain vector load of the 1st
293   // argument.
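  // Illustrative sketch (types and value names are placeholders):
  //   %v = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(
  //            <4 x i32>* %p, i32 4,
  //            <4 x i1> <i1 true, i1 true, i1 true, i1 true>,
  //            <4 x i32> %passthru)
  //     -->
  //   %v = load <4 x i32>, <4 x i32>* %p, align 4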
294   if (maskIsAllOneOrUndef(II.getArgOperand(2))) {
295     LoadInst *L = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
296                                             "unmaskedload");
297     L->copyMetadata(II);
298     return L;
299   }
300 
301   // If we can unconditionally load from this address, replace with a
302   // load/select idiom. TODO: use DT for context sensitive query
303   if (isDereferenceablePointer(LoadPtr, II.getType(),
304                                II.getModule()->getDataLayout(), &II, nullptr)) {
305     LoadInst *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
306                                              "unmaskedload");
307     LI->copyMetadata(II);
308     return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
309   }
310 
311   return nullptr;
312 }
313 
314 // TODO, Obvious Missing Transforms:
315 // * Single constant active lane -> store
316 // * Narrow width by halfs excluding zero/undef lanes
317 Instruction *InstCombinerImpl::simplifyMaskedStore(IntrinsicInst &II) {
318   auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
319   if (!ConstMask)
320     return nullptr;
321 
322   // If the mask is all zeros, this instruction does nothing.
323   if (ConstMask->isNullValue())
324     return eraseInstFromFunction(II);
325 
326   // If the mask is all ones, this is a plain vector store of the 1st argument.
327   if (ConstMask->isAllOnesValue()) {
328     Value *StorePtr = II.getArgOperand(1);
329     Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
330     StoreInst *S =
331         new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
332     S->copyMetadata(II);
333     return S;
334   }
335 
336   if (isa<ScalableVectorType>(ConstMask->getType()))
337     return nullptr;
338 
339   // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
340   APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
341   APInt UndefElts(DemandedElts.getBitWidth(), 0);
342   if (Value *V =
343           SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, UndefElts))
344     return replaceOperand(II, 0, V);
345 
346   return nullptr;
347 }
348 
349 // TODO, Obvious Missing Transforms:
350 // * Single constant active lane load -> load
351 // * Dereferenceable address & few lanes -> scalarize speculative load/selects
352 // * Adjacent vector addresses -> masked.load
353 // * Narrow width by halfs excluding zero/undef lanes
354 // * Vector splat address w/known mask -> scalar load
355 // * Vector incrementing address -> vector masked load
356 Instruction *InstCombinerImpl::simplifyMaskedGather(IntrinsicInst &II) {
357   return nullptr;
358 }
359 
360 // TODO, Obvious Missing Transforms:
361 // * Single constant active lane -> store
362 // * Adjacent vector addresses -> masked.store
363 // * Narrow store width by halfs excluding zero/undef lanes
364 // * Vector splat address w/known mask -> scalar store
365 // * Vector incrementing address -> vector masked store
366 Instruction *InstCombinerImpl::simplifyMaskedScatter(IntrinsicInst &II) {
367   auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
368   if (!ConstMask)
369     return nullptr;
370 
371   // If the mask is all zeros, a scatter does nothing.
372   if (ConstMask->isNullValue())
373     return eraseInstFromFunction(II);
374 
375   if (isa<ScalableVectorType>(ConstMask->getType()))
376     return nullptr;
377 
378   // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
379   APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
380   APInt UndefElts(DemandedElts.getBitWidth(), 0);
381   if (Value *V =
382           SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, UndefElts))
383     return replaceOperand(II, 0, V);
384   if (Value *V =
385           SimplifyDemandedVectorElts(II.getOperand(1), DemandedElts, UndefElts))
386     return replaceOperand(II, 1, V);
387 
388   return nullptr;
389 }
390 
391 /// This function transforms launder.invariant.group and strip.invariant.group
392 /// like:
393 /// launder(launder(%x)) -> launder(%x)       (the result is not the argument)
394 /// launder(strip(%x)) -> launder(%x)
395 /// strip(strip(%x)) -> strip(%x)             (the result is not the argument)
396 /// strip(launder(%x)) -> strip(%x)
397 /// This is legal because it preserves the most recent information about
398 /// the presence or absence of invariant.group.
399 static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
400                                                     InstCombinerImpl &IC) {
401   auto *Arg = II.getArgOperand(0);
402   auto *StrippedArg = Arg->stripPointerCasts();
403   auto *StrippedInvariantGroupsArg = StrippedArg;
404   while (auto *Intr = dyn_cast<IntrinsicInst>(StrippedInvariantGroupsArg)) {
405     if (Intr->getIntrinsicID() != Intrinsic::launder_invariant_group &&
406         Intr->getIntrinsicID() != Intrinsic::strip_invariant_group)
407       break;
408     StrippedInvariantGroupsArg = Intr->getArgOperand(0)->stripPointerCasts();
409   }
410   if (StrippedArg == StrippedInvariantGroupsArg)
411     return nullptr; // No launders/strips to remove.
412 
413   Value *Result = nullptr;
414 
415   if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
416     Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
417   else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
418     Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
419   else
420     llvm_unreachable(
421         "simplifyInvariantGroupIntrinsic only handles launder and strip");
422   if (Result->getType()->getPointerAddressSpace() !=
423       II.getType()->getPointerAddressSpace())
424     Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());
425   if (Result->getType() != II.getType())
426     Result = IC.Builder.CreateBitCast(Result, II.getType());
427 
428   return cast<Instruction>(Result);
429 }
430 
431 static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC) {
432   assert((II.getIntrinsicID() == Intrinsic::cttz ||
433           II.getIntrinsicID() == Intrinsic::ctlz) &&
434          "Expected cttz or ctlz intrinsic");
435   bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
436   Value *Op0 = II.getArgOperand(0);
437   Value *Op1 = II.getArgOperand(1);
438   Value *X;
439   // ctlz(bitreverse(x)) -> cttz(x)
440   // cttz(bitreverse(x)) -> ctlz(x)
441   if (match(Op0, m_BitReverse(m_Value(X)))) {
442     Intrinsic::ID ID = IsTZ ? Intrinsic::ctlz : Intrinsic::cttz;
443     Function *F = Intrinsic::getDeclaration(II.getModule(), ID, II.getType());
444     return CallInst::Create(F, {X, II.getArgOperand(1)});
445   }
446 
447   if (IsTZ) {
448     // cttz(-x) -> cttz(x)
449     if (match(Op0, m_Neg(m_Value(X))))
450       return IC.replaceOperand(II, 0, X);
451 
452     // cttz(sext(x)) -> cttz(zext(x))
453     if (match(Op0, m_OneUse(m_SExt(m_Value(X))))) {
454       auto *Zext = IC.Builder.CreateZExt(X, II.getType());
455       auto *CttzZext =
456           IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, Zext, Op1);
457       return IC.replaceInstUsesWith(II, CttzZext);
458     }
459 
460     // Zext doesn't change the number of trailing zeros, so narrow:
461     // cttz(zext(x)) -> zext(cttz(x)) if the 'ZeroIsUndef' parameter is 'true'.
462     if (match(Op0, m_OneUse(m_ZExt(m_Value(X)))) && match(Op1, m_One())) {
463       auto *Cttz = IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, X,
464                                                     IC.Builder.getTrue());
465       auto *ZextCttz = IC.Builder.CreateZExt(Cttz, II.getType());
466       return IC.replaceInstUsesWith(II, ZextCttz);
467     }
468 
469     // cttz(abs(x)) -> cttz(x)
470     // cttz(nabs(x)) -> cttz(x)
471     Value *Y;
472     SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
473     if (SPF == SPF_ABS || SPF == SPF_NABS)
474       return IC.replaceOperand(II, 0, X);
475 
476     if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X))))
477       return IC.replaceOperand(II, 0, X);
478   }
479 
480   KnownBits Known = IC.computeKnownBits(Op0, 0, &II);
481 
482   // Create a mask for bits above (ctlz) or below (cttz) the first known one.
483   unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros()
484                                 : Known.countMaxLeadingZeros();
485   unsigned DefiniteZeros = IsTZ ? Known.countMinTrailingZeros()
486                                 : Known.countMinLeadingZeros();
487 
488   // If all bits above (ctlz) or below (cttz) the first known one are known
489   // zero, this value is constant.
490   // FIXME: This should be in InstSimplify because we're replacing an
491   // instruction with a constant.
492   if (PossibleZeros == DefiniteZeros) {
493     auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
494     return IC.replaceInstUsesWith(II, C);
495   }
496 
497   // If the input to cttz/ctlz is known to be non-zero,
498   // then change the 'ZeroIsUndef' parameter to 'true'
499   // because we know the zero behavior can't affect the result.
500   if (!Known.One.isNullValue() ||
501       isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
502                      &IC.getDominatorTree())) {
503     if (!match(II.getArgOperand(1), m_One()))
504       return IC.replaceOperand(II, 1, IC.Builder.getTrue());
505   }
506 
507   // Add range metadata since known bits can't completely reflect what we know.
508   // TODO: Handle splat vectors.
509   auto *IT = dyn_cast<IntegerType>(Op0->getType());
510   if (IT && IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
511     Metadata *LowAndHigh[] = {
512         ConstantAsMetadata::get(ConstantInt::get(IT, DefiniteZeros)),
513         ConstantAsMetadata::get(ConstantInt::get(IT, PossibleZeros + 1))};
514     II.setMetadata(LLVMContext::MD_range,
515                    MDNode::get(II.getContext(), LowAndHigh));
516     return &II;
517   }
518 
519   return nullptr;
520 }
521 
522 static Instruction *foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC) {
523   assert(II.getIntrinsicID() == Intrinsic::ctpop &&
524          "Expected ctpop intrinsic");
525   Type *Ty = II.getType();
526   unsigned BitWidth = Ty->getScalarSizeInBits();
527   Value *Op0 = II.getArgOperand(0);
528   Value *X, *Y;
529 
530   // ctpop(bitreverse(x)) -> ctpop(x)
531   // ctpop(bswap(x)) -> ctpop(x)
532   if (match(Op0, m_BitReverse(m_Value(X))) || match(Op0, m_BSwap(m_Value(X))))
533     return IC.replaceOperand(II, 0, X);
534 
535   // ctpop(rot(x)) -> ctpop(x)
536   if ((match(Op0, m_FShl(m_Value(X), m_Value(Y), m_Value())) ||
537        match(Op0, m_FShr(m_Value(X), m_Value(Y), m_Value()))) &&
538       X == Y)
539     return IC.replaceOperand(II, 0, X);
540 
541   // ctpop(x | -x) -> bitwidth - cttz(x, false)
542   if (Op0->hasOneUse() &&
543       match(Op0, m_c_Or(m_Value(X), m_Neg(m_Deferred(X))))) {
544     Function *F =
545         Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
546     auto *Cttz = IC.Builder.CreateCall(F, {X, IC.Builder.getFalse()});
547     auto *Bw = ConstantInt::get(Ty, APInt(BitWidth, BitWidth));
548     return IC.replaceInstUsesWith(II, IC.Builder.CreateSub(Bw, Cttz));
549   }
550 
551   // ctpop(~x & (x - 1)) -> cttz(x, false)
552   if (match(Op0,
553             m_c_And(m_Not(m_Value(X)), m_Add(m_Deferred(X), m_AllOnes())))) {
554     Function *F =
555         Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
556     return CallInst::Create(F, {X, IC.Builder.getFalse()});
557   }
558 
559   // Zext doesn't change the number of set bits, so narrow:
560   // ctpop (zext X) --> zext (ctpop X)
561   if (match(Op0, m_OneUse(m_ZExt(m_Value(X))))) {
562     Value *NarrowPop = IC.Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, X);
563     return CastInst::Create(Instruction::ZExt, NarrowPop, Ty);
564   }
565 
566   KnownBits Known(BitWidth);
567   IC.computeKnownBits(Op0, Known, 0, &II);
568 
569   // If all bits are zero except for exactly one fixed bit, then the result
570   // must be 0 or 1, and we can get that answer by shifting to LSB:
571   // ctpop (X & 32) --> (X & 32) >> 5
572   if ((~Known.Zero).isPowerOf2())
573     return BinaryOperator::CreateLShr(
574         Op0, ConstantInt::get(Ty, (~Known.Zero).exactLogBase2()));
575 
576   // FIXME: Try to simplify vectors of integers.
577   auto *IT = dyn_cast<IntegerType>(Ty);
578   if (!IT)
579     return nullptr;
580 
581   // Add range metadata since known bits can't completely reflect what we know.
582   unsigned MinCount = Known.countMinPopulation();
583   unsigned MaxCount = Known.countMaxPopulation();
584   if (IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
585     Metadata *LowAndHigh[] = {
586         ConstantAsMetadata::get(ConstantInt::get(IT, MinCount)),
587         ConstantAsMetadata::get(ConstantInt::get(IT, MaxCount + 1))};
588     II.setMetadata(LLVMContext::MD_range,
589                    MDNode::get(II.getContext(), LowAndHigh));
590     return &II;
591   }
592 
593   return nullptr;
594 }
595 
596 /// Convert a table lookup to shufflevector if the mask is constant.
597 /// This could benefit tbl1 if the mask is { 7,6,5,4,3,2,1,0 }, in
598 /// which case we could lower the shufflevector with rev64 instructions
599 /// as it's actually a byte reverse.
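/// Illustrative sketch (value names are placeholders; the exact tbl1 intrinsic
/// name depends on the target):
///   tbl1 with mask <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
///     -->
///   %r = shufflevector <8 x i8> %src, <8 x i8> zeroinitializer,
///                      <8 x i32> <i32 7, i32 6, i32 5, i32 4,
///                                 i32 3, i32 2, i32 1, i32 0>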
600 static Value *simplifyNeonTbl1(const IntrinsicInst &II,
601                                InstCombiner::BuilderTy &Builder) {
602   // Bail out if the mask is not a constant.
603   auto *C = dyn_cast<Constant>(II.getArgOperand(1));
604   if (!C)
605     return nullptr;
606 
607   auto *VecTy = cast<FixedVectorType>(II.getType());
608   unsigned NumElts = VecTy->getNumElements();
609 
610   // Only perform this transformation for <8 x i8> vector types.
611   if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
612     return nullptr;
613 
614   int Indexes[8];
615 
616   for (unsigned I = 0; I < NumElts; ++I) {
617     Constant *COp = C->getAggregateElement(I);
618 
619     if (!COp || !isa<ConstantInt>(COp))
620       return nullptr;
621 
622     Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();
623 
624     // Make sure the mask indices are in range.
625     if ((unsigned)Indexes[I] >= NumElts)
626       return nullptr;
627   }
628 
629   auto *V1 = II.getArgOperand(0);
630   auto *V2 = Constant::getNullValue(V1->getType());
631   return Builder.CreateShuffleVector(V1, V2, makeArrayRef(Indexes));
632 }
633 
634 // Returns true iff the 2 intrinsics have the same operands, limiting the
635 // comparison to the first NumOperands.
636 static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
637                              unsigned NumOperands) {
638   assert(I.getNumArgOperands() >= NumOperands && "Not enough operands");
639   assert(E.getNumArgOperands() >= NumOperands && "Not enough operands");
640   for (unsigned i = 0; i < NumOperands; i++)
641     if (I.getArgOperand(i) != E.getArgOperand(i))
642       return false;
643   return true;
644 }
645 
646 // Remove trivially empty start/end intrinsic ranges, i.e. a start
647 // immediately followed by an end (ignoring debuginfo or other
648 // start/end intrinsics in between). As this handles only the most trivial
649 // cases, tracking the nesting level is not needed:
650 //
651 //   call @llvm.foo.start(i1 0)
652 //   call @llvm.foo.start(i1 0) ; This one won't be skipped: it will be removed
653 //   call @llvm.foo.end(i1 0)
654 //   call @llvm.foo.end(i1 0) ; &I
655 static bool
656 removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC,
657                           std::function<bool(const IntrinsicInst &)> IsStart) {
658   // We start from the end intrinsic and scan backwards, so that InstCombine
659   // has already processed (and potentially removed) all the instructions
660   // before the end intrinsic.
661   BasicBlock::reverse_iterator BI(EndI), BE(EndI.getParent()->rend());
662   for (; BI != BE; ++BI) {
663     if (auto *I = dyn_cast<IntrinsicInst>(&*BI)) {
664       if (isa<DbgInfoIntrinsic>(I) ||
665           I->getIntrinsicID() == EndI.getIntrinsicID())
666         continue;
667       if (IsStart(*I)) {
668         if (haveSameOperands(EndI, *I, EndI.getNumArgOperands())) {
669           IC.eraseInstFromFunction(*I);
670           IC.eraseInstFromFunction(EndI);
671           return true;
672         }
673         // Skip start intrinsics that don't pair with this end intrinsic.
674         continue;
675       }
676     }
677     break;
678   }
679 
680   return false;
681 }
682 
683 Instruction *InstCombinerImpl::visitVAEndInst(VAEndInst &I) {
684   removeTriviallyEmptyRange(I, *this, [](const IntrinsicInst &I) {
685     return I.getIntrinsicID() == Intrinsic::vastart ||
686            I.getIntrinsicID() == Intrinsic::vacopy;
687   });
688   return nullptr;
689 }
690 
691 static CallInst *canonicalizeConstantArg0ToArg1(CallInst &Call) {
692   assert(Call.getNumArgOperands() > 1 && "Need at least 2 args to swap");
693   Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
694   if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
695     Call.setArgOperand(0, Arg1);
696     Call.setArgOperand(1, Arg0);
697     return &Call;
698   }
699   return nullptr;
700 }
701 
702 /// Creates a result tuple for an overflow intrinsic \p II with a given
703 /// \p Result and a constant \p Overflow value.
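/// Illustrative result for an i32 intrinsic, with %res standing for \p Result
/// and the i1 constant standing for \p Overflow:
///   %agg = insertvalue { i32, i1 } { i32 undef, i1 false }, i32 %res, 0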
704 static Instruction *createOverflowTuple(IntrinsicInst *II, Value *Result,
705                                         Constant *Overflow) {
706   Constant *V[] = {UndefValue::get(Result->getType()), Overflow};
707   StructType *ST = cast<StructType>(II->getType());
708   Constant *Struct = ConstantStruct::get(ST, V);
709   return InsertValueInst::Create(Struct, Result, 0);
710 }
711 
712 Instruction *
713 InstCombinerImpl::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
714   WithOverflowInst *WO = cast<WithOverflowInst>(II);
715   Value *OperationResult = nullptr;
716   Constant *OverflowResult = nullptr;
717   if (OptimizeOverflowCheck(WO->getBinaryOp(), WO->isSigned(), WO->getLHS(),
718                             WO->getRHS(), *WO, OperationResult, OverflowResult))
719     return createOverflowTuple(WO, OperationResult, OverflowResult);
720   return nullptr;
721 }
722 
723 static Optional<bool> getKnownSign(Value *Op, Instruction *CxtI,
724                                    const DataLayout &DL, AssumptionCache *AC,
725                                    DominatorTree *DT) {
726   KnownBits Known = computeKnownBits(Op, DL, 0, AC, CxtI, DT);
727   if (Known.isNonNegative())
728     return false;
729   if (Known.isNegative())
730     return true;
731 
732   return isImpliedByDomCondition(
733       ICmpInst::ICMP_SLT, Op, Constant::getNullValue(Op->getType()), CxtI, DL);
734 }
735 
736 /// If we have a clamp pattern like max (min X, 42), 41 -- where the output
737 /// can only be one of two possible constant values -- turn that into a select
738 /// of constants.
739 static Instruction *foldClampRangeOfTwo(IntrinsicInst *II,
740                                         InstCombiner::BuilderTy &Builder) {
741   Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
742   Value *X;
743   const APInt *C0, *C1;
744   if (!match(I1, m_APInt(C1)) || !I0->hasOneUse())
745     return nullptr;
746 
747   CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
748   switch (II->getIntrinsicID()) {
749   case Intrinsic::smax:
750     if (match(I0, m_SMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1)
751       Pred = ICmpInst::ICMP_SGT;
752     break;
753   case Intrinsic::smin:
754     if (match(I0, m_SMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1)
755       Pred = ICmpInst::ICMP_SLT;
756     break;
757   case Intrinsic::umax:
758     if (match(I0, m_UMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1)
759       Pred = ICmpInst::ICMP_UGT;
760     break;
761   case Intrinsic::umin:
762     if (match(I0, m_UMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1)
763       Pred = ICmpInst::ICMP_ULT;
764     break;
765   default:
766     llvm_unreachable("Expected min/max intrinsic");
767   }
768   if (Pred == CmpInst::BAD_ICMP_PREDICATE)
769     return nullptr;
770 
771   // max (min X, 42), 41 --> X > 41 ? 42 : 41
772   // min (max X, 42), 43 --> X < 43 ? 42 : 43
773   Value *Cmp = Builder.CreateICmp(Pred, X, I1);
774   return SelectInst::Create(Cmp, ConstantInt::get(II->getType(), *C0), I1);
775 }
776 
777 /// CallInst simplification. This mostly only handles folding of intrinsic
778 /// instructions. For normal calls, it allows visitCallBase to do the heavy
779 /// lifting.
780 Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
781   // Don't try to simplify calls without uses. It will not do anything useful,
782   // but will result in the following folds being skipped.
783   if (!CI.use_empty())
784     if (Value *V = SimplifyCall(&CI, SQ.getWithInstruction(&CI)))
785       return replaceInstUsesWith(CI, V);
786 
787   if (isFreeCall(&CI, &TLI))
788     return visitFree(CI);
789 
790   // If the caller function is nounwind, mark the call as nounwind, even if the
791   // callee isn't.
792   if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) {
793     CI.setDoesNotThrow();
794     return &CI;
795   }
796 
797   IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
798   if (!II) return visitCallBase(CI);
799 
800   // For atomic unordered mem intrinsics, if the length is not positive or not
801   // a multiple of the element size, then the behavior is undefined.
802   if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(II))
803     if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(AMI->getLength()))
804       if (NumBytes->getSExtValue() < 0 ||
805           (NumBytes->getZExtValue() % AMI->getElementSizeInBytes() != 0)) {
806         CreateNonTerminatorUnreachable(AMI);
807         assert(AMI->getType()->isVoidTy() &&
808                "non void atomic unordered mem intrinsic");
809         return eraseInstFromFunction(*AMI);
810       }
811 
812   // Intrinsics cannot occur in an invoke or a callbr, so handle them here
813   // instead of in visitCallBase.
814   if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
815     bool Changed = false;
816 
817     // memmove/cpy/set of zero bytes is a noop.
818     if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
819       if (NumBytes->isNullValue())
820         return eraseInstFromFunction(CI);
821 
822       if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
823         if (CI->getZExtValue() == 1) {
824           // Replace the instruction with just byte operations.  We would
825           // transform other cases to loads/stores, but we don't know if
826           // alignment is sufficient.
827         }
828     }
829 
830     // No other transformations apply to volatile transfers.
831     if (auto *M = dyn_cast<MemIntrinsic>(MI))
832       if (M->isVolatile())
833         return nullptr;
834 
835     // If we have a memmove and the source operation is a constant global,
836     // then the source and dest pointers can't alias, so we can change this
837     // into a call to memcpy.
838     if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
839       if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
840         if (GVSrc->isConstant()) {
841           Module *M = CI.getModule();
842           Intrinsic::ID MemCpyID =
843               isa<AtomicMemMoveInst>(MMI)
844                   ? Intrinsic::memcpy_element_unordered_atomic
845                   : Intrinsic::memcpy;
846           Type *Tys[3] = { CI.getArgOperand(0)->getType(),
847                            CI.getArgOperand(1)->getType(),
848                            CI.getArgOperand(2)->getType() };
849           CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
850           Changed = true;
851         }
852     }
853 
854     if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
855       // memmove(x,x,size) -> noop.
856       if (MTI->getSource() == MTI->getDest())
857         return eraseInstFromFunction(CI);
858     }
859 
860     // If we can determine a pointer alignment that is bigger than currently
861     // set, update the alignment.
862     if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
863       if (Instruction *I = SimplifyAnyMemTransfer(MTI))
864         return I;
865     } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
866       if (Instruction *I = SimplifyAnyMemSet(MSI))
867         return I;
868     }
869 
870     if (Changed) return II;
871   }
872 
873   // For fixed width vector result intrinsics, use the generic demanded vector
874   // support.
875   if (auto *IIFVTy = dyn_cast<FixedVectorType>(II->getType())) {
876     auto VWidth = IIFVTy->getNumElements();
877     APInt UndefElts(VWidth, 0);
878     APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
879     if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
880       if (V != II)
881         return replaceInstUsesWith(*II, V);
882       return II;
883     }
884   }
885 
886   if (II->isCommutative()) {
887     if (CallInst *NewCall = canonicalizeConstantArg0ToArg1(CI))
888       return NewCall;
889   }
890 
891   Intrinsic::ID IID = II->getIntrinsicID();
892   switch (IID) {
893   case Intrinsic::objectsize:
894     if (Value *V = lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/false))
895       return replaceInstUsesWith(CI, V);
896     return nullptr;
897   case Intrinsic::abs: {
898     Value *IIOperand = II->getArgOperand(0);
899     bool IntMinIsPoison = cast<Constant>(II->getArgOperand(1))->isOneValue();
900 
901     // abs(-x) -> abs(x)
902     // TODO: Copy nsw if it was present on the neg?
903     Value *X;
904     if (match(IIOperand, m_Neg(m_Value(X))))
905       return replaceOperand(*II, 0, X);
906     if (match(IIOperand, m_Select(m_Value(), m_Value(X), m_Neg(m_Deferred(X)))))
907       return replaceOperand(*II, 0, X);
908     if (match(IIOperand, m_Select(m_Value(), m_Neg(m_Value(X)), m_Deferred(X))))
909       return replaceOperand(*II, 0, X);
910 
911     if (Optional<bool> Sign = getKnownSign(IIOperand, II, DL, &AC, &DT)) {
912       // abs(x) -> x if x >= 0
913       if (!*Sign)
914         return replaceInstUsesWith(*II, IIOperand);
915 
916       // abs(x) -> -x if x < 0
917       if (IntMinIsPoison)
918         return BinaryOperator::CreateNSWNeg(IIOperand);
919       return BinaryOperator::CreateNeg(IIOperand);
920     }
921 
922     // abs (sext X) --> zext (abs X*)
923     // Clear the IsIntMin (nsw) bit on the abs to allow narrowing.
924     if (match(IIOperand, m_OneUse(m_SExt(m_Value(X))))) {
925       Value *NarrowAbs =
926           Builder.CreateBinaryIntrinsic(Intrinsic::abs, X, Builder.getFalse());
927       return CastInst::Create(Instruction::ZExt, NarrowAbs, II->getType());
928     }
929 
930     // Match a complicated way to check if a number is odd/even:
931     // abs (srem X, 2) --> and X, 1
932     const APInt *C;
933     if (match(IIOperand, m_SRem(m_Value(X), m_APInt(C))) && *C == 2)
934       return BinaryOperator::CreateAnd(X, ConstantInt::get(II->getType(), 1));
935 
936     break;
937   }
938   case Intrinsic::umax:
939   case Intrinsic::umin: {
940     Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
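    // Illustrative example: if both operands are zero-extended from the same
    // narrow type, the unsigned min/max can be performed in the narrow type:
    //   umax(zext i8 %x to i32, zext i8 %y to i32)
    //     --> zext (umax %x, %y) to i32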
941     Value *X, *Y;
942     if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_ZExt(m_Value(Y))) &&
943         (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
944       Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
945       return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());
946     }
947     Constant *C;
948     if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_Constant(C)) &&
949         I0->hasOneUse()) {
950       Constant *NarrowC = ConstantExpr::getTrunc(C, X->getType());
951       if (ConstantExpr::getZExt(NarrowC, II->getType()) == C) {
952         Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);
953         return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());
954       }
955     }
956     // If both operands of unsigned min/max are sign-extended, it is still ok
957     // to narrow the operation.
958     LLVM_FALLTHROUGH;
959   }
960   case Intrinsic::smax:
961   case Intrinsic::smin: {
962     Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
963     Value *X, *Y;
964     if (match(I0, m_SExt(m_Value(X))) && match(I1, m_SExt(m_Value(Y))) &&
965         (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
966       Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
967       return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());
968     }
969 
970     Constant *C;
971     if (match(I0, m_SExt(m_Value(X))) && match(I1, m_Constant(C)) &&
972         I0->hasOneUse()) {
973       Constant *NarrowC = ConstantExpr::getTrunc(C, X->getType());
974       if (ConstantExpr::getSExt(NarrowC, II->getType()) == C) {
975         Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);
976         return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());
977       }
978     }
979 
980     if (match(I0, m_Not(m_Value(X)))) {
981       // max (not X), (not Y) --> not (min X, Y)
982       Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID);
983       if (match(I1, m_Not(m_Value(Y))) &&
984           (I0->hasOneUse() || I1->hasOneUse())) {
985         Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, Y);
986         return BinaryOperator::CreateNot(InvMaxMin);
987       }
988       // max (not X), C --> not(min X, ~C)
989       if (match(I1, m_Constant(C)) && I0->hasOneUse()) {
990         Constant *NotC = ConstantExpr::getNot(C);
991         Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, NotC);
992         return BinaryOperator::CreateNot(InvMaxMin);
993       }
994     }
995 
996     // smax(X, -X) --> abs(X)
997     // smin(X, -X) --> -abs(X)
998     // umax(X, -X) --> -abs(X)
999     // umin(X, -X) --> abs(X)
1000     if (isKnownNegation(I0, I1)) {
1001       // We can choose either operand as the input to abs(), but if we can
1002       // eliminate the only use of a value, that's better for subsequent
1003       // transforms/analysis.
1004       if (I0->hasOneUse() && !I1->hasOneUse())
1005         std::swap(I0, I1);
1006 
1007       // This is some variant of abs(). See if we can propagate 'nsw' to the abs
1008       // operation and potentially its negation.
1009       bool IntMinIsPoison = isKnownNegation(I0, I1, /* NeedNSW */ true);
1010       Value *Abs = Builder.CreateBinaryIntrinsic(
1011           Intrinsic::abs, I0,
1012           ConstantInt::getBool(II->getContext(), IntMinIsPoison));
1013 
1014       // We don't have a "nabs" intrinsic, so negate if needed based on the
1015       // max/min operation.
1016       if (IID == Intrinsic::smin || IID == Intrinsic::umax)
1017         Abs = Builder.CreateNeg(Abs, "nabs", /* NUW */ false, IntMinIsPoison);
1018       return replaceInstUsesWith(CI, Abs);
1019     }
1020 
1021     if (Instruction *Sel = foldClampRangeOfTwo(II, Builder))
1022       return Sel;
1023 
1024     break;
1025   }
1026   case Intrinsic::bswap: {
1027     Value *IIOperand = II->getArgOperand(0);
1028     Value *X = nullptr;
1029 
1030     // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
1031     if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
1032       unsigned C = X->getType()->getScalarSizeInBits() -
1033                    IIOperand->getType()->getScalarSizeInBits();
1034       Value *CV = ConstantInt::get(X->getType(), C);
1035       Value *V = Builder.CreateLShr(X, CV);
1036       return new TruncInst(V, IIOperand->getType());
1037     }
1038     break;
1039   }
1040   case Intrinsic::masked_load:
1041     if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
1042       return replaceInstUsesWith(CI, SimplifiedMaskedOp);
1043     break;
1044   case Intrinsic::masked_store:
1045     return simplifyMaskedStore(*II);
1046   case Intrinsic::masked_gather:
1047     return simplifyMaskedGather(*II);
1048   case Intrinsic::masked_scatter:
1049     return simplifyMaskedScatter(*II);
1050   case Intrinsic::launder_invariant_group:
1051   case Intrinsic::strip_invariant_group:
1052     if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this))
1053       return replaceInstUsesWith(*II, SkippedBarrier);
1054     break;
1055   case Intrinsic::powi:
1056     if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
1057       // 0 and 1 are handled in instsimplify
1058       // powi(x, -1) -> 1/x
1059       if (Power->isMinusOne())
1060         return BinaryOperator::CreateFDivFMF(ConstantFP::get(CI.getType(), 1.0),
1061                                              II->getArgOperand(0), II);
1062       // powi(x, 2) -> x*x
1063       if (Power->equalsInt(2))
1064         return BinaryOperator::CreateFMulFMF(II->getArgOperand(0),
1065                                              II->getArgOperand(0), II);
1066     }
1067     break;
1068 
1069   case Intrinsic::cttz:
1070   case Intrinsic::ctlz:
1071     if (auto *I = foldCttzCtlz(*II, *this))
1072       return I;
1073     break;
1074 
1075   case Intrinsic::ctpop:
1076     if (auto *I = foldCtpop(*II, *this))
1077       return I;
1078     break;
1079 
1080   case Intrinsic::fshl:
1081   case Intrinsic::fshr: {
1082     Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
1083     Type *Ty = II->getType();
1084     unsigned BitWidth = Ty->getScalarSizeInBits();
1085     Constant *ShAmtC;
1086     if (match(II->getArgOperand(2), m_ImmConstant(ShAmtC)) &&
1087         !ShAmtC->containsConstantExpression()) {
1088       // Canonicalize a shift amount constant operand to modulo the bit-width.
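      // Illustrative example: fshl i32 %x, %y, 37 becomes fshl i32 %x, %y, 5,
      // since funnel-shift amounts are interpreted modulo the bit width
      // (37 % 32 == 5).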
1089       Constant *WidthC = ConstantInt::get(Ty, BitWidth);
1090       Constant *ModuloC = ConstantExpr::getURem(ShAmtC, WidthC);
1091       if (ModuloC != ShAmtC)
1092         return replaceOperand(*II, 2, ModuloC);
1093 
1094       assert(ConstantExpr::getICmp(ICmpInst::ICMP_UGT, WidthC, ShAmtC) ==
1095                  ConstantInt::getTrue(CmpInst::makeCmpResultType(Ty)) &&
1096              "Shift amount expected to be modulo bitwidth");
1097 
1098       // Canonicalize funnel shift right by constant to funnel shift left. This
1099       // is not entirely arbitrary. For historical reasons, the backend may
1100       // recognize rotate left patterns but miss rotate right patterns.
1101       if (IID == Intrinsic::fshr) {
1102         // fshr X, Y, C --> fshl X, Y, (BitWidth - C)
1103         Constant *LeftShiftC = ConstantExpr::getSub(WidthC, ShAmtC);
1104         Module *Mod = II->getModule();
1105         Function *Fshl = Intrinsic::getDeclaration(Mod, Intrinsic::fshl, Ty);
1106         return CallInst::Create(Fshl, { Op0, Op1, LeftShiftC });
1107       }
1108       assert(IID == Intrinsic::fshl &&
1109              "All funnel shifts by simple constants should go left");
1110 
1111       // fshl(X, 0, C) --> shl X, C
1112       // fshl(X, undef, C) --> shl X, C
1113       if (match(Op1, m_ZeroInt()) || match(Op1, m_Undef()))
1114         return BinaryOperator::CreateShl(Op0, ShAmtC);
1115 
1116       // fshl(0, X, C) --> lshr X, (BW-C)
1117       // fshl(undef, X, C) --> lshr X, (BW-C)
1118       if (match(Op0, m_ZeroInt()) || match(Op0, m_Undef()))
1119         return BinaryOperator::CreateLShr(Op1,
1120                                           ConstantExpr::getSub(WidthC, ShAmtC));
1121 
1122       // fshl i16 X, X, 8 --> bswap i16 X (reduce to more-specific form)
1123       if (Op0 == Op1 && BitWidth == 16 && match(ShAmtC, m_SpecificInt(8))) {
1124         Module *Mod = II->getModule();
1125         Function *Bswap = Intrinsic::getDeclaration(Mod, Intrinsic::bswap, Ty);
1126         return CallInst::Create(Bswap, { Op0 });
1127       }
1128     }
1129 
1130     // Left or right might be masked.
1131     if (SimplifyDemandedInstructionBits(*II))
1132       return &CI;
1133 
1134     // The shift amount (operand 2) of a funnel shift is modulo the bitwidth,
1135     // so only the low bits of the shift amount are demanded if the bitwidth is
1136     // a power-of-2.
1137     if (!isPowerOf2_32(BitWidth))
1138       break;
1139     APInt Op2Demanded = APInt::getLowBitsSet(BitWidth, Log2_32_Ceil(BitWidth));
1140     KnownBits Op2Known(BitWidth);
1141     if (SimplifyDemandedBits(II, 2, Op2Demanded, Op2Known))
1142       return &CI;
1143     break;
1144   }
1145   case Intrinsic::uadd_with_overflow:
1146   case Intrinsic::sadd_with_overflow: {
1147     if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
1148       return I;
1149 
1150     // Given 2 constant operands whose sum does not overflow:
1151     // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1
1152     // saddo (X +nsw C0), C1 -> saddo X, C0 + C1
1153     Value *X;
1154     const APInt *C0, *C1;
1155     Value *Arg0 = II->getArgOperand(0);
1156     Value *Arg1 = II->getArgOperand(1);
1157     bool IsSigned = IID == Intrinsic::sadd_with_overflow;
1158     bool HasNWAdd = IsSigned ? match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0)))
1159                              : match(Arg0, m_NUWAdd(m_Value(X), m_APInt(C0)));
1160     if (HasNWAdd && match(Arg1, m_APInt(C1))) {
1161       bool Overflow;
1162       APInt NewC =
1163           IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);
1164       if (!Overflow)
1165         return replaceInstUsesWith(
1166             *II, Builder.CreateBinaryIntrinsic(
1167                      IID, X, ConstantInt::get(Arg1->getType(), NewC)));
1168     }
1169     break;
1170   }
1171 
1172   case Intrinsic::umul_with_overflow:
1173   case Intrinsic::smul_with_overflow:
1174   case Intrinsic::usub_with_overflow:
1175     if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
1176       return I;
1177     break;
1178 
1179   case Intrinsic::ssub_with_overflow: {
1180     if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
1181       return I;
1182 
1183     Constant *C;
1184     Value *Arg0 = II->getArgOperand(0);
1185     Value *Arg1 = II->getArgOperand(1);
1186     // Given a constant C that is not the minimum signed value
1187     // for an integer of a given bit width:
1188     //
1189     // ssubo X, C -> saddo X, -C
1190     if (match(Arg1, m_Constant(C)) && C->isNotMinSignedValue()) {
1191       Value *NegVal = ConstantExpr::getNeg(C);
1192       // Build a saddo call that is equivalent to the discovered
1193       // ssubo call.
1194       return replaceInstUsesWith(
1195           *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow,
1196                                              Arg0, NegVal));
1197     }
1198 
1199     break;
1200   }
1201 
1202   case Intrinsic::uadd_sat:
1203   case Intrinsic::sadd_sat:
1204   case Intrinsic::usub_sat:
1205   case Intrinsic::ssub_sat: {
1206     SaturatingInst *SI = cast<SaturatingInst>(II);
1207     Type *Ty = SI->getType();
1208     Value *Arg0 = SI->getLHS();
1209     Value *Arg1 = SI->getRHS();
1210 
1211     // Make use of known overflow information.
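    // Illustrative example: if uadd.sat(X, Y) can be proven never to overflow,
    // it becomes 'add nuw X, Y'; if it is known to always overflow on the high
    // side, it folds to the saturation constant (UINT_MAX for the unsigned
    // case).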
1212     OverflowResult OR = computeOverflow(SI->getBinaryOp(), SI->isSigned(),
1213                                         Arg0, Arg1, SI);
1214     switch (OR) {
1215       case OverflowResult::MayOverflow:
1216         break;
1217       case OverflowResult::NeverOverflows:
1218         if (SI->isSigned())
1219           return BinaryOperator::CreateNSW(SI->getBinaryOp(), Arg0, Arg1);
1220         else
1221           return BinaryOperator::CreateNUW(SI->getBinaryOp(), Arg0, Arg1);
1222       case OverflowResult::AlwaysOverflowsLow: {
1223         unsigned BitWidth = Ty->getScalarSizeInBits();
1224         APInt Min = APSInt::getMinValue(BitWidth, !SI->isSigned());
1225         return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Min));
1226       }
1227       case OverflowResult::AlwaysOverflowsHigh: {
1228         unsigned BitWidth = Ty->getScalarSizeInBits();
1229         APInt Max = APSInt::getMaxValue(BitWidth, !SI->isSigned());
1230         return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Max));
1231       }
1232     }
1233 
1234     // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN
1235     Constant *C;
1236     if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) &&
1237         C->isNotMinSignedValue()) {
1238       Value *NegVal = ConstantExpr::getNeg(C);
1239       return replaceInstUsesWith(
1240           *II, Builder.CreateBinaryIntrinsic(
1241               Intrinsic::sadd_sat, Arg0, NegVal));
1242     }
1243 
1244     // sat(sat(X + Val2) + Val) -> sat(X + (Val+Val2))
1245     // sat(sat(X - Val2) - Val) -> sat(X - (Val+Val2))
1246     // if Val and Val2 have the same sign
1247     if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) {
1248       Value *X;
1249       const APInt *Val, *Val2;
1250       APInt NewVal;
1251       bool IsUnsigned =
1252           IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
1253       if (Other->getIntrinsicID() == IID &&
1254           match(Arg1, m_APInt(Val)) &&
1255           match(Other->getArgOperand(0), m_Value(X)) &&
1256           match(Other->getArgOperand(1), m_APInt(Val2))) {
1257         if (IsUnsigned)
1258           NewVal = Val->uadd_sat(*Val2);
1259         else if (Val->isNonNegative() == Val2->isNonNegative()) {
1260           bool Overflow;
1261           NewVal = Val->sadd_ov(*Val2, Overflow);
1262           if (Overflow) {
1263             // Both adds together may add more than SignedMaxValue
1264             // without saturating the final result.
1265             break;
1266           }
1267         } else {
1268           // Cannot fold saturated addition with different signs.
1269           break;
1270         }
1271 
1272         return replaceInstUsesWith(
1273             *II, Builder.CreateBinaryIntrinsic(
1274                      IID, X, ConstantInt::get(II->getType(), NewVal)));
1275       }
1276     }
1277     break;
1278   }
1279 
1280   case Intrinsic::minnum:
1281   case Intrinsic::maxnum:
1282   case Intrinsic::minimum:
1283   case Intrinsic::maximum: {
1284     Value *Arg0 = II->getArgOperand(0);
1285     Value *Arg1 = II->getArgOperand(1);
1286     Value *X, *Y;
1287     if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) &&
1288         (Arg0->hasOneUse() || Arg1->hasOneUse())) {
1289       // If both operands are negated, invert the call and negate the result:
1290       // min(-X, -Y) --> -(max(X, Y))
1291       // max(-X, -Y) --> -(min(X, Y))
1292       Intrinsic::ID NewIID;
1293       switch (IID) {
1294       case Intrinsic::maxnum:
1295         NewIID = Intrinsic::minnum;
1296         break;
1297       case Intrinsic::minnum:
1298         NewIID = Intrinsic::maxnum;
1299         break;
1300       case Intrinsic::maximum:
1301         NewIID = Intrinsic::minimum;
1302         break;
1303       case Intrinsic::minimum:
1304         NewIID = Intrinsic::maximum;
1305         break;
1306       default:
1307         llvm_unreachable("unexpected intrinsic ID");
1308       }
1309       Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II);
1310       Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall);
1311       FNeg->copyIRFlags(II);
1312       return FNeg;
1313     }
1314 
1315     // m(m(X, C2), C1) -> m(X, C)
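    // For example (illustrative, with hypothetical constants):
    //   maxnum(maxnum(%x, 2.0), 4.0) --> maxnum(%x, 4.0)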
1316     const APFloat *C1, *C2;
1317     if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) {
1318       if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) &&
1319           ((match(M->getArgOperand(0), m_Value(X)) &&
1320             match(M->getArgOperand(1), m_APFloat(C2))) ||
1321            (match(M->getArgOperand(1), m_Value(X)) &&
1322             match(M->getArgOperand(0), m_APFloat(C2))))) {
1323         APFloat Res(0.0);
1324         switch (IID) {
1325         case Intrinsic::maxnum:
1326           Res = maxnum(*C1, *C2);
1327           break;
1328         case Intrinsic::minnum:
1329           Res = minnum(*C1, *C2);
1330           break;
1331         case Intrinsic::maximum:
1332           Res = maximum(*C1, *C2);
1333           break;
1334         case Intrinsic::minimum:
1335           Res = minimum(*C1, *C2);
1336           break;
1337         default:
1338           llvm_unreachable("unexpected intrinsic ID");
1339         }
1340         Instruction *NewCall = Builder.CreateBinaryIntrinsic(
1341             IID, X, ConstantFP::get(Arg0->getType(), Res), II);
1342         // TODO: Conservatively intersecting FMF. If Res == C2, the transform
1343         //       was a simplification (so Arg0 and its original flags could
1344         //       propagate?)
1345         NewCall->andIRFlags(M);
1346         return replaceInstUsesWith(*II, NewCall);
1347       }
1348     }
1349 
1350     Value *ExtSrc0;
1351     Value *ExtSrc1;
1352 
1353     // minnum (fpext x), (fpext y) -> minnum x, y
1354     // maxnum (fpext x), (fpext y) -> maxnum x, y
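    // For example (illustrative, float extended to double):
    //   minnum(fpext float %a to double, fpext float %b to double)
    //     --> fpext (minnum float %a, float %b) to double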
1355     if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc0)))) &&
1356         match(II->getArgOperand(1), m_OneUse(m_FPExt(m_Value(ExtSrc1)))) &&
1357         ExtSrc0->getType() == ExtSrc1->getType()) {
1358       Function *F = Intrinsic::getDeclaration(
1359           II->getModule(), II->getIntrinsicID(), {ExtSrc0->getType()});
1360       CallInst *NewCall = Builder.CreateCall(F, { ExtSrc0, ExtSrc1 });
1361       NewCall->copyFastMathFlags(II);
1362       NewCall->takeName(II);
1363       return new FPExtInst(NewCall, II->getType());
1364     }
1365 
1366     break;
1367   }
1368   case Intrinsic::fmuladd: {
1369     // Canonicalize fast fmuladd to the separate fmul + fadd.
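    // For example (illustrative):
    //   %r = call fast float @llvm.fmuladd.f32(float %a, float %b, float %c)
    // becomes
    //   %m = fmul fast float %a, %b
    //   %r = fadd fast float %m, %c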
1370     if (II->isFast()) {
1371       BuilderTy::FastMathFlagGuard Guard(Builder);
1372       Builder.setFastMathFlags(II->getFastMathFlags());
1373       Value *Mul = Builder.CreateFMul(II->getArgOperand(0),
1374                                       II->getArgOperand(1));
1375       Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2));
1376       Add->takeName(II);
1377       return replaceInstUsesWith(*II, Add);
1378     }
1379 
1380     // Try to simplify the underlying FMul.
1381     if (Value *V = SimplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1),
1382                                     II->getFastMathFlags(),
1383                                     SQ.getWithInstruction(II))) {
1384       auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));
1385       FAdd->copyFastMathFlags(II);
1386       return FAdd;
1387     }
1388 
1389     LLVM_FALLTHROUGH;
1390   }
1391   case Intrinsic::fma: {
1392     // fma fneg(x), fneg(y), z -> fma x, y, z
1393     Value *Src0 = II->getArgOperand(0);
1394     Value *Src1 = II->getArgOperand(1);
1395     Value *X, *Y;
1396     if (match(Src0, m_FNeg(m_Value(X))) && match(Src1, m_FNeg(m_Value(Y)))) {
1397       replaceOperand(*II, 0, X);
1398       replaceOperand(*II, 1, Y);
1399       return II;
1400     }
1401 
1402     // fma fabs(x), fabs(x), z -> fma x, x, z
1403     if (match(Src0, m_FAbs(m_Value(X))) &&
1404         match(Src1, m_FAbs(m_Specific(X)))) {
1405       replaceOperand(*II, 0, X);
1406       replaceOperand(*II, 1, X);
1407       return II;
1408     }
1409 
1410     // Try to simplify the underlying FMul. We can only apply simplifications
1411     // that do not require rounding.
1412     if (Value *V = SimplifyFMAFMul(II->getArgOperand(0), II->getArgOperand(1),
1413                                    II->getFastMathFlags(),
1414                                    SQ.getWithInstruction(II))) {
1415       auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));
1416       FAdd->copyFastMathFlags(II);
1417       return FAdd;
1418     }
1419 
1420     // fma x, y, 0 -> fmul x, y
1421     // This is always valid for -0.0, but requires nsz for +0.0 as
1422     // -0.0 + 0.0 = 0.0, which would not be the same as the fmul on its own.
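    // For example (illustrative):
    //   fma(%x, %y, -0.0) --> fmul %x, %y    ; always valid
    //   fma(%x, %y, +0.0) --> fmul %x, %y    ; only with 'nsz'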
1423     if (match(II->getArgOperand(2), m_NegZeroFP()) ||
1424         (match(II->getArgOperand(2), m_PosZeroFP()) &&
1425          II->getFastMathFlags().noSignedZeros()))
1426       return BinaryOperator::CreateFMulFMF(Src0, Src1, II);
1427 
1428     break;
1429   }
1430   case Intrinsic::copysign: {
1431     Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1);
1432     if (SignBitMustBeZero(Sign, &TLI)) {
1433       // If we know that the sign argument is positive, reduce to FABS:
1434       // copysign Mag, +Sign --> fabs Mag
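      // For example (illustrative, with a hypothetical positive constant):
      //   copysign(%mag, 2.5) --> fabs(%mag)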
1435       Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);
1436       return replaceInstUsesWith(*II, Fabs);
1437     }
1438     // TODO: There should be a ValueTracking sibling like SignBitMustBeOne.
1439     const APFloat *C;
1440     if (match(Sign, m_APFloat(C)) && C->isNegative()) {
1441       // If we know that the sign argument is negative, reduce to FNABS:
1442       // copysign Mag, -Sign --> fneg (fabs Mag)
1443       Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);
1444       return replaceInstUsesWith(*II, Builder.CreateFNegFMF(Fabs, II));
1445     }
1446 
1447     // Propagate sign argument through nested calls:
1448     // copysign Mag, (copysign ?, X) --> copysign Mag, X
1449     Value *X;
1450     if (match(Sign, m_Intrinsic<Intrinsic::copysign>(m_Value(), m_Value(X))))
1451       return replaceOperand(*II, 1, X);
1452 
1453     // Peek through changes of magnitude's sign-bit. This call rewrites those:
1454     // copysign (fabs X), Sign --> copysign X, Sign
1455     // copysign (fneg X), Sign --> copysign X, Sign
1456     if (match(Mag, m_FAbs(m_Value(X))) || match(Mag, m_FNeg(m_Value(X))))
1457       return replaceOperand(*II, 0, X);
1458 
1459     break;
1460   }
1461   case Intrinsic::fabs: {
1462     Value *Cond, *TVal, *FVal;
1463     if (match(II->getArgOperand(0),
1464               m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))) {
1465       // fabs (select Cond, TrueC, FalseC) --> select Cond, AbsT, AbsF
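      // For example (illustrative): fabs(select %c, -1.0, 2.0)
      //   --> select %c, fabs(-1.0), fabs(2.0), which later folds to
      //       select %c, 1.0, 2.0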
1466       if (isa<Constant>(TVal) && isa<Constant>(FVal)) {
1467         CallInst *AbsT = Builder.CreateCall(II->getCalledFunction(), {TVal});
1468         CallInst *AbsF = Builder.CreateCall(II->getCalledFunction(), {FVal});
1469         return SelectInst::Create(Cond, AbsT, AbsF);
1470       }
1471       // fabs (select Cond, -FVal, FVal) --> fabs FVal
1472       if (match(TVal, m_FNeg(m_Specific(FVal))))
1473         return replaceOperand(*II, 0, FVal);
1474       // fabs (select Cond, TVal, -TVal) --> fabs TVal
1475       if (match(FVal, m_FNeg(m_Specific(TVal))))
1476         return replaceOperand(*II, 0, TVal);
1477     }
1478 
1479     LLVM_FALLTHROUGH;
1480   }
1481   case Intrinsic::ceil:
1482   case Intrinsic::floor:
1483   case Intrinsic::round:
1484   case Intrinsic::roundeven:
1485   case Intrinsic::nearbyint:
1486   case Intrinsic::rint:
1487   case Intrinsic::trunc: {
1488     Value *ExtSrc;
1489     if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) {
1490       // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x)
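      // For example (illustrative): ceil(fpext float %x to double)
      //   --> fpext (ceil float %x) to double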
1491       Value *NarrowII = Builder.CreateUnaryIntrinsic(IID, ExtSrc, II);
1492       return new FPExtInst(NarrowII, II->getType());
1493     }
1494     break;
1495   }
1496   case Intrinsic::cos:
1497   case Intrinsic::amdgcn_cos: {
1498     Value *X;
1499     Value *Src = II->getArgOperand(0);
1500     if (match(Src, m_FNeg(m_Value(X))) || match(Src, m_FAbs(m_Value(X)))) {
1501       // cos(-x) -> cos(x)
1502       // cos(fabs(x)) -> cos(x)
1503       return replaceOperand(*II, 0, X);
1504     }
1505     break;
1506   }
1507   case Intrinsic::sin: {
1508     Value *X;
1509     if (match(II->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X))))) {
1510       // sin(-x) --> -sin(x)
1511       Value *NewSin = Builder.CreateUnaryIntrinsic(Intrinsic::sin, X, II);
1512       Instruction *FNeg = UnaryOperator::CreateFNeg(NewSin);
1513       FNeg->copyFastMathFlags(II);
1514       return FNeg;
1515     }
1516     break;
1517   }
1518 
1519   case Intrinsic::arm_neon_vtbl1:
1520   case Intrinsic::aarch64_neon_tbl1:
1521     if (Value *V = simplifyNeonTbl1(*II, Builder))
1522       return replaceInstUsesWith(*II, V);
1523     break;
1524 
1525   case Intrinsic::arm_neon_vmulls:
1526   case Intrinsic::arm_neon_vmullu:
1527   case Intrinsic::aarch64_neon_smull:
1528   case Intrinsic::aarch64_neon_umull: {
1529     Value *Arg0 = II->getArgOperand(0);
1530     Value *Arg1 = II->getArgOperand(1);
1531 
1532     // Handle mul by zero first:
1533     if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
1534       return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
1535     }
1536 
1537     // Check for constant LHS & RHS - in this case we just simplify.
1538     bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
1539                  IID == Intrinsic::aarch64_neon_umull);
1540     VectorType *NewVT = cast<VectorType>(II->getType());
1541     if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
1542       if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
1543         CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
1544         CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);
1545 
1546         return replaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
1547       }
1548 
1549       // Couldn't simplify - canonicalize constant to the RHS.
1550       std::swap(Arg0, Arg1);
1551     }
1552 
1553     // Handle mul by one:
1554     if (Constant *CV1 = dyn_cast<Constant>(Arg1))
1555       if (ConstantInt *Splat =
1556               dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
1557         if (Splat->isOne())
1558           return CastInst::CreateIntegerCast(Arg0, II->getType(),
1559                                              /*isSigned=*/!Zext);
1560 
1561     break;
1562   }
1563   case Intrinsic::arm_neon_aesd:
1564   case Intrinsic::arm_neon_aese:
1565   case Intrinsic::aarch64_crypto_aesd:
1566   case Intrinsic::aarch64_crypto_aese: {
1567     Value *DataArg = II->getArgOperand(0);
1568     Value *KeyArg  = II->getArgOperand(1);
1569 
1570     // Try to use the builtin XOR in AESE and AESD to eliminate a prior XOR
1571     Value *Data, *Key;
1572     if (match(KeyArg, m_ZeroInt()) &&
1573         match(DataArg, m_Xor(m_Value(Data), m_Value(Key)))) {
1574       replaceOperand(*II, 0, Data);
1575       replaceOperand(*II, 1, Key);
1576       return II;
1577     }
1578     break;
1579   }
1580   case Intrinsic::hexagon_V6_vandvrt:
1581   case Intrinsic::hexagon_V6_vandvrt_128B: {
1582     // Simplify Q -> V -> Q conversion.
1583     if (auto Op0 = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
1584       Intrinsic::ID ID0 = Op0->getIntrinsicID();
1585       if (ID0 != Intrinsic::hexagon_V6_vandqrt &&
1586           ID0 != Intrinsic::hexagon_V6_vandqrt_128B)
1587         break;
1588       Value *Bytes = Op0->getArgOperand(1), *Mask = II->getArgOperand(1);
1589       uint64_t Bytes1 = computeKnownBits(Bytes, 0, Op0).One.getZExtValue();
1590       uint64_t Mask1 = computeKnownBits(Mask, 0, II).One.getZExtValue();
1591       // Check if every byte has common bits in Bytes and Mask.
1592       uint64_t C = Bytes1 & Mask1;
1593       if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000))
1594         return replaceInstUsesWith(*II, Op0->getArgOperand(0));
1595     }
1596     break;
1597   }
1598   case Intrinsic::stackrestore: {
1599     // If the save is right next to the restore, remove the restore.  This can
1600     // happen when variable allocas are DCE'd.
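    // For example (illustrative):
    //   %sp = call i8* @llvm.stacksave()
    //   call void @llvm.stackrestore(i8* %sp)  ; removable, restores the state
    //                                          ; that was just saved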
1601     if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
1602       if (SS->getIntrinsicID() == Intrinsic::stacksave) {
1603         // Skip over debug info.
1604         if (SS->getNextNonDebugInstruction() == II) {
1605           return eraseInstFromFunction(CI);
1606         }
1607       }
1608     }
1609 
1610     // Scan down this block to see if there is another stack restore in the
1611     // same block without an intervening call/alloca.
1612     BasicBlock::iterator BI(II);
1613     Instruction *TI = II->getParent()->getTerminator();
1614     bool CannotRemove = false;
1615     for (++BI; &*BI != TI; ++BI) {
1616       if (isa<AllocaInst>(BI)) {
1617         CannotRemove = true;
1618         break;
1619       }
1620       if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
1621         if (auto *II2 = dyn_cast<IntrinsicInst>(BCI)) {
1622           // If there is a stackrestore below this one, remove this one.
1623           if (II2->getIntrinsicID() == Intrinsic::stackrestore)
1624             return eraseInstFromFunction(CI);
1625 
1626           // Bail if we cross over an intrinsic with side effects, such as
1627           // llvm.stacksave or llvm.read_register.
1628           if (II2->mayHaveSideEffects()) {
1629             CannotRemove = true;
1630             break;
1631           }
1632         } else {
1633           // If we found a non-intrinsic call, we can't remove the stack
1634           // restore.
1635           CannotRemove = true;
1636           break;
1637         }
1638       }
1639     }
1640 
1641     // If the stack restore is in a return, resume, or unwind block and if there
1642     // are no allocas or calls between the restore and the return, nuke the
1643     // restore.
1644     if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
1645       return eraseInstFromFunction(CI);
1646     break;
1647   }
1648   case Intrinsic::lifetime_end:
1649     // Asan needs to poison memory to detect invalid accesses, which are
1650     // possible even for an empty lifetime range.
1651     if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
1652         II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
1653         II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
1654       break;
1655 
1656     if (removeTriviallyEmptyRange(*II, *this, [](const IntrinsicInst &I) {
1657           return I.getIntrinsicID() == Intrinsic::lifetime_start;
1658         }))
1659       return nullptr;
1660     break;
1661   case Intrinsic::assume: {
1662     Value *IIOperand = II->getArgOperand(0);
1663     SmallVector<OperandBundleDef, 4> OpBundles;
1664     II->getOperandBundlesAsDefs(OpBundles);
1665 
1666     /// This will remove the boolean Condition from the assume given as
1667     /// argument and remove the assume if it becomes useless.
1668     /// It always returns nullptr for use as a return value.
1669     auto RemoveConditionFromAssume = [&](Instruction *Assume) -> Instruction * {
1670       assert(isa<AssumeInst>(Assume));
1671       if (isAssumeWithEmptyBundle(*cast<AssumeInst>(II)))
1672         return eraseInstFromFunction(CI);
1673       replaceUse(II->getOperandUse(0), ConstantInt::getTrue(II->getContext()));
1674       return nullptr;
1675     };
1676     // Remove an assume if it is followed by an identical assume.
1677     // TODO: Do we need this? Unless there are conflicting assumptions, the
1678     // computeKnownBits(IIOperand) below here eliminates redundant assumes.
1679     Instruction *Next = II->getNextNonDebugInstruction();
1680     if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
1681       return RemoveConditionFromAssume(Next);
1682 
1683     // Canonicalize assume(a && b) -> assume(a); assume(b);
1684     // Note: New assumption intrinsics created here are registered by
1685     // the InstCombineIRInserter object.
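    // For example (illustrative):
    //   %cond = and i1 %a, %b
    //   call void @llvm.assume(i1 %cond)
    // becomes
    //   call void @llvm.assume(i1 %a)
    //   call void @llvm.assume(i1 %b)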
1686     FunctionType *AssumeIntrinsicTy = II->getFunctionType();
1687     Value *AssumeIntrinsic = II->getCalledOperand();
1688     Value *A, *B;
1689     if (match(IIOperand, m_LogicalAnd(m_Value(A), m_Value(B)))) {
1690       Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, OpBundles,
1691                          II->getName());
1692       Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName());
1693       return eraseInstFromFunction(*II);
1694     }
1695     // assume(!(a || b)) -> assume(!a); assume(!b);
1696     if (match(IIOperand, m_Not(m_LogicalOr(m_Value(A), m_Value(B))))) {
1697       Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
1698                          Builder.CreateNot(A), OpBundles, II->getName());
1699       Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
1700                          Builder.CreateNot(B), II->getName());
1701       return eraseInstFromFunction(*II);
1702     }
1703 
1704     // assume( (load addr) != null ) -> add 'nonnull' metadata to load
1705     // (if assume is valid at the load)
1706     CmpInst::Predicate Pred;
1707     Instruction *LHS;
1708     if (match(IIOperand, m_ICmp(Pred, m_Instruction(LHS), m_Zero())) &&
1709         Pred == ICmpInst::ICMP_NE && LHS->getOpcode() == Instruction::Load &&
1710         LHS->getType()->isPointerTy() &&
1711         isValidAssumeForContext(II, LHS, &DT)) {
1712       MDNode *MD = MDNode::get(II->getContext(), None);
1713       LHS->setMetadata(LLVMContext::MD_nonnull, MD);
1714       return RemoveConditionFromAssume(II);
1715 
1716       // TODO: apply nonnull return attributes to calls and invokes
1717       // TODO: apply range metadata for range check patterns?
1718     }
1719 
1720     // Convert nonnull assume like:
1721     // %A = icmp ne i32* %PTR, null
1722     // call void @llvm.assume(i1 %A)
1723     // into
1724     // call void @llvm.assume(i1 true) [ "nonnull"(i32* %PTR) ]
1725     if (EnableKnowledgeRetention &&
1726         match(IIOperand, m_Cmp(Pred, m_Value(A), m_Zero())) &&
1727         Pred == CmpInst::ICMP_NE && A->getType()->isPointerTy()) {
1728       if (auto *Replacement = buildAssumeFromKnowledge(
1729               {RetainedKnowledge{Attribute::NonNull, 0, A}}, Next, &AC, &DT)) {
1730 
1731         Replacement->insertBefore(Next);
1732         AC.registerAssumption(Replacement);
1733         return RemoveConditionFromAssume(II);
1734       }
1735     }
1736 
1737     // Convert alignment assume like:
1738     // %B = ptrtoint i32* %A to i64
1739     // %C = and i64 %B, Constant
1740     // %D = icmp eq i64 %C, 0
1741     // call void @llvm.assume(i1 %D)
1742     // into
1743     // call void @llvm.assume(i1 true) [ "align"(i32* %A, i64 Constant + 1) ]
1744     uint64_t AlignMask;
1745     if (EnableKnowledgeRetention &&
1746         match(IIOperand,
1747               m_Cmp(Pred, m_And(m_Value(A), m_ConstantInt(AlignMask)),
1748                     m_Zero())) &&
1749         Pred == CmpInst::ICMP_EQ) {
1750       if (isPowerOf2_64(AlignMask + 1)) {
1751         uint64_t Offset = 0;
1752         match(A, m_Add(m_Value(A), m_ConstantInt(Offset)));
1753         if (match(A, m_PtrToInt(m_Value(A)))) {
1754           /// Note: this doesn't preserve the offset information but merges
1755           /// offset and alignment.
1756           /// TODO: we can generate a GEP instead of merging the alignment with
1757           /// the offset.
1758           RetainedKnowledge RK{Attribute::Alignment,
1759                                (unsigned)MinAlign(Offset, AlignMask + 1), A};
1760           if (auto *Replacement =
1761                   buildAssumeFromKnowledge(RK, Next, &AC, &DT)) {
1762 
1763             Replacement->insertAfter(II);
1764             AC.registerAssumption(Replacement);
1765           }
1766           return RemoveConditionFromAssume(II);
1767         }
1768       }
1769     }
1770 
1771     /// Canonicalize Knowledge in operand bundles.
1772     if (EnableKnowledgeRetention && II->hasOperandBundles()) {
1773       for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {
1774         auto &BOI = II->bundle_op_info_begin()[Idx];
1775         RetainedKnowledge RK =
1776           llvm::getKnowledgeFromBundle(cast<AssumeInst>(*II), BOI);
1777         if (BOI.End - BOI.Begin > 2)
1778           continue; // Prevent reducing knowledge in an align bundle with an
1779                     // offset, since extracting a RetainedKnowledge from it
1780                     // loses the offset information.
1781         RetainedKnowledge CanonRK =
1782           llvm::simplifyRetainedKnowledge(cast<AssumeInst>(II), RK,
1783                                           &getAssumptionCache(),
1784                                           &getDominatorTree());
1785         if (CanonRK == RK)
1786           continue;
1787         if (!CanonRK) {
1788           if (BOI.End - BOI.Begin > 0) {
1789             Worklist.pushValue(II->op_begin()[BOI.Begin]);
1790             Value::dropDroppableUse(II->op_begin()[BOI.Begin]);
1791           }
1792           continue;
1793         }
1794         assert(RK.AttrKind == CanonRK.AttrKind);
1795         if (BOI.End - BOI.Begin > 0)
1796           II->op_begin()[BOI.Begin].set(CanonRK.WasOn);
1797         if (BOI.End - BOI.Begin > 1)
1798           II->op_begin()[BOI.Begin + 1].set(ConstantInt::get(
1799               Type::getInt64Ty(II->getContext()), CanonRK.ArgValue));
1800         if (RK.WasOn)
1801           Worklist.pushValue(RK.WasOn);
1802         return II;
1803       }
1804     }
1805 
1806     // If there is a dominating assume with the same condition as this one,
1807     // then this one is redundant, and should be removed.
1808     KnownBits Known(1);
1809     computeKnownBits(IIOperand, Known, 0, II);
1810     if (Known.isAllOnes() && isAssumeWithEmptyBundle(cast<AssumeInst>(*II)))
1811       return eraseInstFromFunction(*II);
1812 
1813     // Update the cache of affected values for this assumption (we might be
1814     // here because we just simplified the condition).
1815     AC.updateAffectedValues(cast<AssumeInst>(II));
1816     break;
1817   }
1818   case Intrinsic::experimental_guard: {
1819     // Is this guard followed by another guard?  We scan forward over a small
1820     // fixed window of instructions to handle common cases with conditions
1821     // computed between guards.
1822     Instruction *NextInst = II->getNextNonDebugInstruction();
1823     for (unsigned i = 0; i < GuardWideningWindow; i++) {
1824       // Note: Using context-free form to avoid compile time blow up
1825       if (!isSafeToSpeculativelyExecute(NextInst))
1826         break;
1827       NextInst = NextInst->getNextNonDebugInstruction();
1828     }
1829     Value *NextCond = nullptr;
1830     if (match(NextInst,
1831               m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
1832       Value *CurrCond = II->getArgOperand(0);
1833 
1834       // Remove a guard that is immediately preceded by an identical guard.
1835       // Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
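      // For example (illustrative):
      //   guard(%a); guard(%b) --> guard(and i1 %a, %b)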
1836       if (CurrCond != NextCond) {
1837         Instruction *MoveI = II->getNextNonDebugInstruction();
1838         while (MoveI != NextInst) {
1839           auto *Temp = MoveI;
1840           MoveI = MoveI->getNextNonDebugInstruction();
1841           Temp->moveBefore(II);
1842         }
1843         replaceOperand(*II, 0, Builder.CreateAnd(CurrCond, NextCond));
1844       }
1845       eraseInstFromFunction(*NextInst);
1846       return II;
1847     }
1848     break;
1849   }
1850   case Intrinsic::experimental_vector_insert: {
1851     Value *Vec = II->getArgOperand(0);
1852     Value *SubVec = II->getArgOperand(1);
1853     Value *Idx = II->getArgOperand(2);
1854     auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
1855     auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
1856     auto *SubVecTy = dyn_cast<FixedVectorType>(SubVec->getType());
1857 
1858     // Only canonicalize if the destination vector, Vec, and SubVec are all
1859     // fixed vectors.
1860     if (DstTy && VecTy && SubVecTy) {
1861       unsigned DstNumElts = DstTy->getNumElements();
1862       unsigned VecNumElts = VecTy->getNumElements();
1863       unsigned SubVecNumElts = SubVecTy->getNumElements();
1864       unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
1865 
1866       // The result of this call is undefined if IdxN is not a constant multiple
1867       // of the SubVec's minimum vector length OR the insertion overruns Vec.
1868       if (IdxN % SubVecNumElts != 0 || IdxN + SubVecNumElts > VecNumElts) {
1869         replaceInstUsesWith(CI, UndefValue::get(CI.getType()));
1870         return eraseInstFromFunction(CI);
1871       }
1872 
1873       // An insert that entirely overwrites Vec with SubVec is a nop.
1874       if (VecNumElts == SubVecNumElts) {
1875         replaceInstUsesWith(CI, SubVec);
1876         return eraseInstFromFunction(CI);
1877       }
1878 
1879       // Widen SubVec into a vector of the same width as Vec, since
1880       // shufflevector requires the two input vectors to be the same width.
1881       // Elements beyond the bounds of SubVec within the widened vector are
1882       // undefined.
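      // For example (illustrative, inserting <2 x i32> into <4 x i32> at index 2):
      //   widen:  shufflevector <2 x i32> %sub, <2 x i32> undef, <0, 1, undef, undef>
      //   insert: shufflevector <4 x i32> %vec, <4 x i32> %widened, <0, 1, 4, 5>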
1883       SmallVector<int, 8> WidenMask;
1884       unsigned i;
1885       for (i = 0; i != SubVecNumElts; ++i)
1886         WidenMask.push_back(i);
1887       for (; i != VecNumElts; ++i)
1888         WidenMask.push_back(UndefMaskElem);
1889 
1890       Value *WidenShuffle = Builder.CreateShuffleVector(SubVec, WidenMask);
1891 
1892       SmallVector<int, 8> Mask;
1893       for (unsigned i = 0; i != IdxN; ++i)
1894         Mask.push_back(i);
1895       for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)
1896         Mask.push_back(i);
1897       for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)
1898         Mask.push_back(i);
1899 
1900       Value *Shuffle = Builder.CreateShuffleVector(Vec, WidenShuffle, Mask);
1901       replaceInstUsesWith(CI, Shuffle);
1902       return eraseInstFromFunction(CI);
1903     }
1904     break;
1905   }
1906   case Intrinsic::experimental_vector_extract: {
1907     Value *Vec = II->getArgOperand(0);
1908     Value *Idx = II->getArgOperand(1);
1909 
1910     auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
1911     auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
1912 
1913     // Only canonicalize if the destination vector and Vec are fixed
1914     // vectors.
1915     if (DstTy && VecTy) {
1916       unsigned DstNumElts = DstTy->getNumElements();
1917       unsigned VecNumElts = VecTy->getNumElements();
1918       unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
1919 
1920       // The result of this call is undefined if IdxN is not a constant multiple
1921       // of the result type's minimum vector length OR the extraction overruns
1922       // Vec.
1923       if (IdxN % DstNumElts != 0 || IdxN + DstNumElts > VecNumElts) {
1924         replaceInstUsesWith(CI, UndefValue::get(CI.getType()));
1925         return eraseInstFromFunction(CI);
1926       }
1927 
1928       // Extracting the entirety of Vec is a nop.
1929       if (VecNumElts == DstNumElts) {
1930         replaceInstUsesWith(CI, Vec);
1931         return eraseInstFromFunction(CI);
1932       }
1933 
1934       SmallVector<int, 8> Mask;
1935       for (unsigned i = 0; i != DstNumElts; ++i)
1936         Mask.push_back(IdxN + i);
1937 
1938       Value *Shuffle =
1939           Builder.CreateShuffleVector(Vec, UndefValue::get(VecTy), Mask);
1940       replaceInstUsesWith(CI, Shuffle);
1941       return eraseInstFromFunction(CI);
1942     }
1943     break;
1944   }
1945   case Intrinsic::vector_reduce_or:
1946   case Intrinsic::vector_reduce_and: {
1947     // Canonicalize logical or/and reductions:
1948     // Or reduction for i1 is represented as:
1949     // %val = bitcast <ReduxWidth x i1> to iReduxWidth
1950     // %res = cmp ne iReduxWidth %val, 0
1951     // And reduction for i1 is represented as:
1952     // %val = bitcast <ReduxWidth x i1> to iReduxWidth
1953     // %res = cmp eq iReduxWidth %val, 11111
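    // For example (illustrative):
    //   vector_reduce_or(<4 x i1> %v) --> icmp ne i4 (bitcast <4 x i1> %v to i4), 0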
1954     Value *Arg = II->getArgOperand(0);
1955     Type *RetTy = II->getType();
1956     if (RetTy == Builder.getInt1Ty())
1957       if (auto *FVTy = dyn_cast<FixedVectorType>(Arg->getType())) {
1958         Value *Res = Builder.CreateBitCast(
1959             Arg, Builder.getIntNTy(FVTy->getNumElements()));
1960         if (IID == Intrinsic::vector_reduce_and) {
1961           Res = Builder.CreateICmpEQ(
1962               Res, ConstantInt::getAllOnesValue(Res->getType()));
1963         } else {
1964           assert(IID == Intrinsic::vector_reduce_or &&
1965                  "Expected or reduction.");
1966           Res = Builder.CreateIsNotNull(Res);
1967         }
1968         replaceInstUsesWith(CI, Res);
1969         return eraseInstFromFunction(CI);
1970       }
1971     break;
1972   }
1973   default: {
1974     // Handle target specific intrinsics
1975     Optional<Instruction *> V = targetInstCombineIntrinsic(*II);
1976     if (V.hasValue())
1977       return V.getValue();
1978     break;
1979   }
1980   }
1981   // Some intrinsics (like experimental_gc_statepoint) can be used in invoke
1982   // context, so they are handled in visitCallBase, which we trigger here.
1983   return visitCallBase(*II);
1984 }
1985 
1986 // Fence instruction simplification
1987 Instruction *InstCombinerImpl::visitFenceInst(FenceInst &FI) {
1988   // Remove identical consecutive fences.
1989   Instruction *Next = FI.getNextNonDebugInstruction();
1990   if (auto *NFI = dyn_cast<FenceInst>(Next))
1991     if (FI.isIdenticalTo(NFI))
1992       return eraseInstFromFunction(FI);
1993   return nullptr;
1994 }
1995 
1996 // InvokeInst simplification
1997 Instruction *InstCombinerImpl::visitInvokeInst(InvokeInst &II) {
1998   return visitCallBase(II);
1999 }
2000 
2001 // CallBrInst simplification
2002 Instruction *InstCombinerImpl::visitCallBrInst(CallBrInst &CBI) {
2003   return visitCallBase(CBI);
2004 }
2005 
2006 /// If this cast does not affect the value passed through the varargs area, we
2007 /// can eliminate the use of the cast.
2008 static bool isSafeToEliminateVarargsCast(const CallBase &Call,
2009                                          const DataLayout &DL,
2010                                          const CastInst *const CI,
2011                                          const int ix) {
2012   if (!CI->isLosslessCast())
2013     return false;
2014 
2015   // If this is a GC intrinsic, avoid munging types.  We need types for
2016   // statepoint reconstruction in SelectionDAG.
2017   // TODO: This is probably something which should be expanded to all
2018   // intrinsics since the entire point of intrinsics is that
2019   // they are understandable by the optimizer.
2020   if (isa<GCStatepointInst>(Call) || isa<GCRelocateInst>(Call) ||
2021       isa<GCResultInst>(Call))
2022     return false;
2023 
2024   // The size of ByVal or InAlloca arguments is derived from the type, so we
2025   // can't change to a type with a different size.  If the size were
2026   // passed explicitly we could avoid this check.
2027   if (!Call.isPassPointeeByValueArgument(ix))
2028     return true;
2029 
2030   Type* SrcTy =
2031             cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
2032   Type *DstTy = Call.isByValArgument(ix)
2033                     ? Call.getParamByValType(ix)
2034                     : cast<PointerType>(CI->getType())->getElementType();
2035   if (!SrcTy->isSized() || !DstTy->isSized())
2036     return false;
2037   if (DL.getTypeAllocSize(SrcTy) != DL.getTypeAllocSize(DstTy))
2038     return false;
2039   return true;
2040 }
2041 
2042 Instruction *InstCombinerImpl::tryOptimizeCall(CallInst *CI) {
2043   if (!CI->getCalledFunction()) return nullptr;
2044 
2045   auto InstCombineRAUW = [this](Instruction *From, Value *With) {
2046     replaceInstUsesWith(*From, With);
2047   };
2048   auto InstCombineErase = [this](Instruction *I) {
2049     eraseInstFromFunction(*I);
2050   };
2051   LibCallSimplifier Simplifier(DL, &TLI, ORE, BFI, PSI, InstCombineRAUW,
2052                                InstCombineErase);
2053   if (Value *With = Simplifier.optimizeCall(CI, Builder)) {
2054     ++NumSimplified;
2055     return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
2056   }
2057 
2058   return nullptr;
2059 }
2060 
2061 static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
2062   // Strip off at most one level of pointer casts, looking for an alloca.  This
2063   // is good enough in practice and simpler than handling any number of casts.
2064   Value *Underlying = TrampMem->stripPointerCasts();
2065   if (Underlying != TrampMem &&
2066       (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
2067     return nullptr;
2068   if (!isa<AllocaInst>(Underlying))
2069     return nullptr;
2070 
2071   IntrinsicInst *InitTrampoline = nullptr;
2072   for (User *U : TrampMem->users()) {
2073     IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
2074     if (!II)
2075       return nullptr;
2076     if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
2077       if (InitTrampoline)
2078         // More than one init_trampoline writes to this value.  Give up.
2079         return nullptr;
2080       InitTrampoline = II;
2081       continue;
2082     }
2083     if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
2084       // Allow any number of calls to adjust.trampoline.
2085       continue;
2086     return nullptr;
2087   }
2088 
2089   // No call to init.trampoline found.
2090   if (!InitTrampoline)
2091     return nullptr;
2092 
2093   // Check that the alloca is being used in the expected way.
2094   if (InitTrampoline->getOperand(0) != TrampMem)
2095     return nullptr;
2096 
2097   return InitTrampoline;
2098 }
2099 
2100 static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
2101                                                Value *TrampMem) {
2102   // Visit all the previous instructions in the basic block, and try to find
2103   // an init.trampoline which has a direct path to the adjust.trampoline.
2104   for (BasicBlock::iterator I = AdjustTramp->getIterator(),
2105                             E = AdjustTramp->getParent()->begin();
2106        I != E;) {
2107     Instruction *Inst = &*--I;
2108     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
2109       if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
2110           II->getOperand(0) == TrampMem)
2111         return II;
2112     if (Inst->mayWriteToMemory())
2113       return nullptr;
2114   }
2115   return nullptr;
2116 }
2117 
2118 // Given a call to llvm.adjust.trampoline, find and return the corresponding
2119 // call to llvm.init.trampoline if the call to the trampoline can be optimized
2120 // to a direct call to a function.  Otherwise return NULL.
2121 static IntrinsicInst *findInitTrampoline(Value *Callee) {
2122   Callee = Callee->stripPointerCasts();
2123   IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
2124   if (!AdjustTramp ||
2125       AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
2126     return nullptr;
2127 
2128   Value *TrampMem = AdjustTramp->getOperand(0);
2129 
2130   if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem))
2131     return IT;
2132   if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
2133     return IT;
2134   return nullptr;
2135 }
2136 
2137 void InstCombinerImpl::annotateAnyAllocSite(CallBase &Call, const TargetLibraryInfo *TLI) {
2138   unsigned NumArgs = Call.getNumArgOperands();
2139   ConstantInt *Op0C = dyn_cast<ConstantInt>(Call.getOperand(0));
2140   ConstantInt *Op1C =
2141       (NumArgs == 1) ? nullptr : dyn_cast<ConstantInt>(Call.getOperand(1));
2142   // Bail out if the allocation size is zero (or an invalid alignment of zero
2143   // with aligned_alloc).
2144   if ((Op0C && Op0C->isNullValue()) || (Op1C && Op1C->isNullValue()))
2145     return;
2146 
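  // For example (illustrative): a call like 'malloc(16)' gets
  // 'dereferenceable_or_null(16)' attached to its return value.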
2147   if (isMallocLikeFn(&Call, TLI) && Op0C) {
2148     if (isOpNewLikeFn(&Call, TLI))
2149       Call.addAttribute(AttributeList::ReturnIndex,
2150                         Attribute::getWithDereferenceableBytes(
2151                             Call.getContext(), Op0C->getZExtValue()));
2152     else
2153       Call.addAttribute(AttributeList::ReturnIndex,
2154                         Attribute::getWithDereferenceableOrNullBytes(
2155                             Call.getContext(), Op0C->getZExtValue()));
2156   } else if (isAlignedAllocLikeFn(&Call, TLI)) {
2157     if (Op1C)
2158       Call.addAttribute(AttributeList::ReturnIndex,
2159                         Attribute::getWithDereferenceableOrNullBytes(
2160                             Call.getContext(), Op1C->getZExtValue()));
2161     // Add alignment attribute if alignment is a power of two constant.
2162     if (Op0C && Op0C->getValue().ult(llvm::Value::MaximumAlignment) &&
2163         isKnownNonZero(Call.getOperand(1), DL, 0, &AC, &Call, &DT)) {
2164       uint64_t AlignmentVal = Op0C->getZExtValue();
2165       if (llvm::isPowerOf2_64(AlignmentVal)) {
2166         Call.removeAttribute(AttributeList::ReturnIndex, Attribute::Alignment);
2167         Call.addAttribute(AttributeList::ReturnIndex,
2168                           Attribute::getWithAlignment(Call.getContext(),
2169                                                       Align(AlignmentVal)));
2170       }
2171     }
2172   } else if (isReallocLikeFn(&Call, TLI) && Op1C) {
2173     Call.addAttribute(AttributeList::ReturnIndex,
2174                       Attribute::getWithDereferenceableOrNullBytes(
2175                           Call.getContext(), Op1C->getZExtValue()));
2176   } else if (isCallocLikeFn(&Call, TLI) && Op0C && Op1C) {
2177     bool Overflow;
2178     const APInt &N = Op0C->getValue();
2179     APInt Size = N.umul_ov(Op1C->getValue(), Overflow);
2180     if (!Overflow)
2181       Call.addAttribute(AttributeList::ReturnIndex,
2182                         Attribute::getWithDereferenceableOrNullBytes(
2183                             Call.getContext(), Size.getZExtValue()));
2184   } else if (isStrdupLikeFn(&Call, TLI)) {
2185     uint64_t Len = GetStringLength(Call.getOperand(0));
2186     if (Len) {
2187       // strdup
2188       if (NumArgs == 1)
2189         Call.addAttribute(AttributeList::ReturnIndex,
2190                           Attribute::getWithDereferenceableOrNullBytes(
2191                               Call.getContext(), Len));
2192       // strndup
2193       else if (NumArgs == 2 && Op1C)
2194         Call.addAttribute(
2195             AttributeList::ReturnIndex,
2196             Attribute::getWithDereferenceableOrNullBytes(
2197                 Call.getContext(), std::min(Len, Op1C->getZExtValue() + 1)));
2198     }
2199   }
2200 }
2201 
2202 /// Improvements for call, callbr and invoke instructions.
2203 Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) {
2204   if (isAllocationFn(&Call, &TLI))
2205     annotateAnyAllocSite(Call, &TLI);
2206 
2207   bool Changed = false;
2208 
2209   // Mark any parameters that are known to be non-null with the nonnull
2210   // attribute.  This is helpful for inlining calls to functions with null
2211   // checks on their arguments.
2212   SmallVector<unsigned, 4> ArgNos;
2213   unsigned ArgNo = 0;
2214 
2215   for (Value *V : Call.args()) {
2216     if (V->getType()->isPointerTy() &&
2217         !Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
2218         isKnownNonZero(V, DL, 0, &AC, &Call, &DT))
2219       ArgNos.push_back(ArgNo);
2220     ArgNo++;
2221   }
2222 
2223   assert(ArgNo == Call.arg_size() && "sanity check");
2224 
2225   if (!ArgNos.empty()) {
2226     AttributeList AS = Call.getAttributes();
2227     LLVMContext &Ctx = Call.getContext();
2228     AS = AS.addParamAttribute(Ctx, ArgNos,
2229                               Attribute::get(Ctx, Attribute::NonNull));
2230     Call.setAttributes(AS);
2231     Changed = true;
2232   }
2233 
2234   // If the callee is a pointer to a function, attempt to move any casts to the
2235   // arguments of the call/callbr/invoke.
2236   Value *Callee = Call.getCalledOperand();
2237   if (!isa<Function>(Callee) && transformConstExprCastCall(Call))
2238     return nullptr;
2239 
2240   if (Function *CalleeF = dyn_cast<Function>(Callee)) {
2241     // Remove the convergent attr on calls when the callee is not convergent.
2242     if (Call.isConvergent() && !CalleeF->isConvergent() &&
2243         !CalleeF->isIntrinsic()) {
2244       LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
2245                         << "\n");
2246       Call.setNotConvergent();
2247       return &Call;
2248     }
2249 
2250     // If the call and callee calling conventions don't match, and neither one
2251     // of the calling conventions is compatible with the C calling convention,
2252     // this call must be unreachable, as the call is undefined.
2253     if ((CalleeF->getCallingConv() != Call.getCallingConv() &&
2254          !(CalleeF->getCallingConv() == llvm::CallingConv::C &&
2255            TargetLibraryInfoImpl::isCallingConvCCompatible(&Call)) &&
2256          !(Call.getCallingConv() == llvm::CallingConv::C &&
2257            TargetLibraryInfoImpl::isCallingConvCCompatible(CalleeF))) &&
2258         // Only do this for calls to a function with a body.  A prototype may
2259         // not actually end up matching the implementation's calling conv for a
2260         // variety of reasons (e.g. it may be written in assembly).
2261         !CalleeF->isDeclaration()) {
2262       Instruction *OldCall = &Call;
2263       CreateNonTerminatorUnreachable(OldCall);
2264       // If OldCall does not return void then replaceInstUsesWith undef.
2265       // This allows ValueHandlers and custom metadata to adjust themselves.
2266       if (!OldCall->getType()->isVoidTy())
2267         replaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
2268       if (isa<CallInst>(OldCall))
2269         return eraseInstFromFunction(*OldCall);
2270 
2271       // We cannot remove an invoke or a callbr, because it would change the
2272       // CFG; just change the callee to a null pointer.
2273       cast<CallBase>(OldCall)->setCalledFunction(
2274           CalleeF->getFunctionType(),
2275           Constant::getNullValue(CalleeF->getType()));
2276       return nullptr;
2277     }
2278   }
2279 
2280   if ((isa<ConstantPointerNull>(Callee) &&
2281        !NullPointerIsDefined(Call.getFunction())) ||
2282       isa<UndefValue>(Callee)) {
2283     // If Call does not return void then replaceInstUsesWith undef.
2284     // This allows ValueHandlers and custom metadata to adjust themselves.
2285     if (!Call.getType()->isVoidTy())
2286       replaceInstUsesWith(Call, UndefValue::get(Call.getType()));
2287 
2288     if (Call.isTerminator()) {
2289       // Can't remove an invoke or callbr because we cannot change the CFG.
2290       return nullptr;
2291     }
2292 
2293     // This instruction is not reachable, just remove it.
2294     CreateNonTerminatorUnreachable(&Call);
2295     return eraseInstFromFunction(Call);
2296   }
2297 
2298   if (IntrinsicInst *II = findInitTrampoline(Callee))
2299     return transformCallThroughTrampoline(Call, *II);
2300 
2301   PointerType *PTy = cast<PointerType>(Callee->getType());
2302   FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
2303   if (FTy->isVarArg()) {
2304     int ix = FTy->getNumParams();
2305     // See if we can optimize any arguments passed through the varargs area of
2306     // the call.
2307     for (auto I = Call.arg_begin() + FTy->getNumParams(), E = Call.arg_end();
2308          I != E; ++I, ++ix) {
2309       CastInst *CI = dyn_cast<CastInst>(*I);
2310       if (CI && isSafeToEliminateVarargsCast(Call, DL, CI, ix)) {
2311         replaceUse(*I, CI->getOperand(0));
2312 
2313         // Update the byval type to match the argument type.
2314         if (Call.isByValArgument(ix)) {
2315           Call.removeParamAttr(ix, Attribute::ByVal);
2316           Call.addParamAttr(
2317               ix, Attribute::getWithByValType(
2318                       Call.getContext(),
2319                       CI->getOperand(0)->getType()->getPointerElementType()));
2320         }
2321         Changed = true;
2322       }
2323     }
2324   }
2325 
2326   if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
2327     InlineAsm *IA = cast<InlineAsm>(Callee);
2328     if (!IA->canThrow()) {
2329       // Normal inline asm calls cannot throw - mark them
2330       // 'nounwind'.
2331       Call.setDoesNotThrow();
2332       Changed = true;
2333     }
2334   }
2335 
2336   // Try to optimize the call if possible; we require DataLayout for most of
2337   // this.  None of these calls are seen as possibly dead, so go ahead and
2338   // delete the instruction now.
2339   if (CallInst *CI = dyn_cast<CallInst>(&Call)) {
2340     Instruction *I = tryOptimizeCall(CI);
2341     // If we changed something, return the result. Otherwise let
2342     // the fallthrough checks continue.
2343     if (I) return eraseInstFromFunction(*I);
2344   }
2345 
2346   if (!Call.use_empty() && !Call.isMustTailCall())
2347     if (Value *ReturnedArg = Call.getReturnedArgOperand()) {
2348       Type *CallTy = Call.getType();
2349       Type *RetArgTy = ReturnedArg->getType();
2350       if (RetArgTy->canLosslesslyBitCastTo(CallTy))
2351         return replaceInstUsesWith(
2352             Call, Builder.CreateBitOrPointerCast(ReturnedArg, CallTy));
2353     }
2354 
2355   if (isAllocLikeFn(&Call, &TLI))
2356     return visitAllocSite(Call);
2357 
2358   // Handle intrinsics which can be used in both call and invoke context.
2359   switch (Call.getIntrinsicID()) {
2360   case Intrinsic::experimental_gc_statepoint: {
2361     GCStatepointInst &GCSP = *cast<GCStatepointInst>(&Call);
2362     SmallPtrSet<Value *, 32> LiveGcValues;
2363     for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {
2364       GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);
2365 
2366       // Remove the relocation if unused.
2367       if (GCR.use_empty()) {
2368         eraseInstFromFunction(GCR);
2369         continue;
2370       }
2371 
2372       Value *DerivedPtr = GCR.getDerivedPtr();
2373       Value *BasePtr = GCR.getBasePtr();
2374 
2375       // Undef is undef, even after relocation.
2376       if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
2377         replaceInstUsesWith(GCR, UndefValue::get(GCR.getType()));
2378         eraseInstFromFunction(GCR);
2379         continue;
2380       }
2381 
2382       if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
2383         // The relocation of null will be null for most any collector.
2384         // TODO: provide a hook for this in GCStrategy.  There might be some
2385         // weird collector this property does not hold for.
2386         if (isa<ConstantPointerNull>(DerivedPtr)) {
2387           // Use null-pointer of gc_relocate's type to replace it.
2388           replaceInstUsesWith(GCR, ConstantPointerNull::get(PT));
2389           eraseInstFromFunction(GCR);
2390           continue;
2391         }
2392 
2393         // isKnownNonNull -> nonnull attribute
2394         if (!GCR.hasRetAttr(Attribute::NonNull) &&
2395             isKnownNonZero(DerivedPtr, DL, 0, &AC, &Call, &DT)) {
2396           GCR.addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
2397           // We discovered new fact, re-check users.
2398           Worklist.pushUsersToWorkList(GCR);
2399         }
2400       }
2401 
2402       // If we have two copies of the same pointer in the statepoint argument
2403       // list, canonicalize to one.  This may let us common gc.relocates.
2404       if (GCR.getBasePtr() == GCR.getDerivedPtr() &&
2405           GCR.getBasePtrIndex() != GCR.getDerivedPtrIndex()) {
2406         auto *OpIntTy = GCR.getOperand(2)->getType();
2407         GCR.setOperand(2, ConstantInt::get(OpIntTy, GCR.getBasePtrIndex()));
2408       }
2409 
2410       // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
2411       // Canonicalize on the type from the uses to the defs
2412 
2413       // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
2414       LiveGcValues.insert(BasePtr);
2415       LiveGcValues.insert(DerivedPtr);
2416     }
2417     Optional<OperandBundleUse> Bundle =
2418         GCSP.getOperandBundle(LLVMContext::OB_gc_live);
2419     unsigned NumOfGCLives = LiveGcValues.size();
2420     if (!Bundle.hasValue() || NumOfGCLives == Bundle->Inputs.size())
2421       break;
2422     // We can reduce the size of gc live bundle.
2423     DenseMap<Value *, unsigned> Val2Idx;
2424     std::vector<Value *> NewLiveGc;
2425     for (unsigned I = 0, E = Bundle->Inputs.size(); I < E; ++I) {
2426       Value *V = Bundle->Inputs[I];
2427       if (Val2Idx.count(V))
2428         continue;
2429       if (LiveGcValues.count(V)) {
2430         Val2Idx[V] = NewLiveGc.size();
2431         NewLiveGc.push_back(V);
2432       } else
2433         Val2Idx[V] = NumOfGCLives;
2434     }
2435     // Update all gc.relocates
2436     for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {
2437       GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);
2438       Value *BasePtr = GCR.getBasePtr();
2439       assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives &&
2440              "Missed live gc for base pointer");
2441       auto *OpIntTy1 = GCR.getOperand(1)->getType();
2442       GCR.setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr]));
2443       Value *DerivedPtr = GCR.getDerivedPtr();
2444       assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives &&
2445              "Missed live gc for derived pointer");
2446       auto *OpIntTy2 = GCR.getOperand(2)->getType();
2447       GCR.setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr]));
2448     }
2449     // Create new statepoint instruction.
2450     OperandBundleDef NewBundle("gc-live", NewLiveGc);
2451     return CallBase::Create(&Call, NewBundle);
2452   }
2453   default: { break; }
2454   }
2455 
2456   return Changed ? &Call : nullptr;
2457 }
2458 
2459 /// If the callee is a constexpr cast of a function, attempt to move the cast to
2460 /// the arguments of the call/callbr/invoke.
2461 bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
2462   auto *Callee =
2463       dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
2464   if (!Callee)
2465     return false;
2466 
2467   // If this is a call to a thunk function, don't remove the cast. Thunks are
2468   // used to transparently forward all incoming parameters and outgoing return
2469   // values, so it's important to leave the cast in place.
2470   if (Callee->hasFnAttribute("thunk"))
2471     return false;
2472 
2473   // If this is a musttail call, the callee's prototype must match the caller's
2474   // prototype with the exception of pointee types. The code below doesn't
2475   // implement that, so we can't do this transform.
2476   // TODO: Do the transform if it only requires adding pointer casts.
2477   if (Call.isMustTailCall())
2478     return false;
2479 
2480   Instruction *Caller = &Call;
2481   const AttributeList &CallerPAL = Call.getAttributes();
2482 
2483   // Okay, this is a cast from a function to a different type.  Unless doing so
2484   // would cause a type conversion of one of our arguments, change this call to
2485   // be a direct call with arguments casted to the appropriate types.
2486   FunctionType *FT = Callee->getFunctionType();
2487   Type *OldRetTy = Caller->getType();
2488   Type *NewRetTy = FT->getReturnType();
2489 
2490   // Check to see if we are changing the return type...
2491   if (OldRetTy != NewRetTy) {
2492 
2493     if (NewRetTy->isStructTy())
2494       return false; // TODO: Handle multiple return values.
2495 
2496     if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
2497       if (Callee->isDeclaration())
2498         return false;   // Cannot transform this return value.
2499 
2500       if (!Caller->use_empty() &&
2501           // void -> non-void is handled specially
2502           !NewRetTy->isVoidTy())
2503         return false;   // Cannot transform this return value.
2504     }
2505 
2506     if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
2507       AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
2508       if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
2509         return false;   // Attribute not compatible with transformed value.
2510     }
2511 
2512     // If the callbase is an invoke/callbr instruction, and the return value is
2513     // used by a PHI node in a successor, we cannot change the return type of
2514     // the call because there is no place to put the cast instruction (without
2515     // breaking the critical edge).  Bail out in this case.
2516     if (!Caller->use_empty()) {
2517       if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
2518         for (User *U : II->users())
2519           if (PHINode *PN = dyn_cast<PHINode>(U))
2520             if (PN->getParent() == II->getNormalDest() ||
2521                 PN->getParent() == II->getUnwindDest())
2522               return false;
2523       // FIXME: Be conservative for callbr to avoid a quadratic search.
2524       if (isa<CallBrInst>(Caller))
2525         return false;
2526     }
2527   }
2528 
2529   unsigned NumActualArgs = Call.arg_size();
2530   unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
2531 
2532   // Prevent us from turning:
2533   // declare void @takes_i32_inalloca(i32* inalloca)
2534   //  call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
2535   //
2536   // into:
2537   //  call void @takes_i32_inalloca(i32* null)
2538   //
2539   //  Similarly, avoid folding away bitcasts of byval calls.
2540   if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
2541       Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated) ||
2542       Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
2543     return false;
2544 
2545   auto AI = Call.arg_begin();
2546   for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
2547     Type *ParamTy = FT->getParamType(i);
2548     Type *ActTy = (*AI)->getType();
2549 
2550     if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
2551       return false;   // Cannot transform this parameter value.
2552 
2553     if (AttrBuilder(CallerPAL.getParamAttributes(i))
2554             .overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
2555       return false;   // Attribute not compatible with transformed value.
2556 
2557     if (Call.isInAllocaArgument(i))
2558       return false;   // Cannot transform to and from inalloca.
2559 
2560     if (CallerPAL.hasParamAttribute(i, Attribute::SwiftError))
2561       return false;
2562 
2563     // If the parameter is passed as a byval argument, then we have to have a
2564     // sized type and the sized type has to have the same size as the old type.
2565     if (ParamTy != ActTy && CallerPAL.hasParamAttribute(i, Attribute::ByVal)) {
2566       PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
2567       if (!ParamPTy || !ParamPTy->getElementType()->isSized())
2568         return false;
2569 
2570       Type *CurElTy = Call.getParamByValType(i);
2571       if (DL.getTypeAllocSize(CurElTy) !=
2572           DL.getTypeAllocSize(ParamPTy->getElementType()))
2573         return false;
2574     }
2575   }
2576 
2577   if (Callee->isDeclaration()) {
2578     // Do not delete arguments unless we have a function body.
2579     if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
2580       return false;
2581 
2582     // If the callee is just a declaration, don't change the varargsness of the
2583     // call.  We don't want to introduce a varargs call where one doesn't
2584     // already exist.
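    // For example (illustrative): a call through
    //   bitcast (void (i32)* @f to void (i32, ...)*)
    // is left alone when @f is only a declaration, because rewriting it would
    // change whether the call site is varargs.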
2585     PointerType *APTy = cast<PointerType>(Call.getCalledOperand()->getType());
2586     if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
2587       return false;
2588 
2589     // If both the callee and the cast type are varargs, we still have to make
2590     // sure the number of fixed parameters is the same, or we have the same
2591     // ABI issues as if we introduced a varargs call.
2592     if (FT->isVarArg() &&
2593         cast<FunctionType>(APTy->getElementType())->isVarArg() &&
2594         FT->getNumParams() !=
2595         cast<FunctionType>(APTy->getElementType())->getNumParams())
2596       return false;
2597   }
2598 
2599   if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
2600       !CallerPAL.isEmpty()) {
2601     // In this case we have more arguments than the new function type, but we
2602     // won't be dropping them.  Check that these extra arguments have attributes
2603     // that are compatible with being a vararg call argument.
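    // For example, an 'sret' argument that would land beyond the last fixed
    // parameter would have to be passed as a variadic argument, which is not
    // legal, so we bail out below.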
2604     unsigned SRetIdx;
2605     if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
2606         SRetIdx > FT->getNumParams())
2607       return false;
2608   }
2609 
2610   // Okay, we decided that this is a safe thing to do: go ahead and start
2611   // inserting cast instructions as necessary.
2612   SmallVector<Value *, 8> Args;
2613   SmallVector<AttributeSet, 8> ArgAttrs;
2614   Args.reserve(NumActualArgs);
2615   ArgAttrs.reserve(NumActualArgs);
2616 
2617   // Get any return attributes.
2618   AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
2619 
2620   // If the return value is not being used, the type may not be compatible
2621   // with the existing attributes.  Wipe out any problematic attributes.
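  // For example, pointer-only attributes such as noalias or nonnull cannot
  // survive if NewRetTy is an integer type, and zeroext/signext cannot survive
  // if it is a pointer type.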
2622   RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));
2623 
2624   LLVMContext &Ctx = Call.getContext();
2625   AI = Call.arg_begin();
2626   for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
2627     Type *ParamTy = FT->getParamType(i);
2628 
2629     Value *NewArg = *AI;
2630     if ((*AI)->getType() != ParamTy)
2631       NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
2632     Args.push_back(NewArg);
2633 
2634     // Add any parameter attributes.
2635     if (CallerPAL.hasParamAttribute(i, Attribute::ByVal)) {
2636       AttrBuilder AB(CallerPAL.getParamAttributes(i));
2637       AB.addByValAttr(NewArg->getType()->getPointerElementType());
2638       ArgAttrs.push_back(AttributeSet::get(Ctx, AB));
2639     } else
2640       ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
2641   }
2642 
2643   // If the function takes more arguments than the call was taking, add them
2644   // now.
2645   for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
2646     Args.push_back(Constant::getNullValue(FT->getParamType(i)));
2647     ArgAttrs.push_back(AttributeSet());
2648   }
2649 
2650   // If the call has more arguments than the new function type takes:
2651   if (FT->getNumParams() < NumActualArgs) {
2652     // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
2653     if (FT->isVarArg()) {
2654       // Add all of the arguments in their promoted form to the arg list.
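      // (getPromotedType widens integer arguments narrower than 32 bits, e.g.
      // i8 or i16, to i32; other types are passed unchanged.)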
2655       for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
2656         Type *PTy = getPromotedType((*AI)->getType());
2657         Value *NewArg = *AI;
2658         if (PTy != (*AI)->getType()) {
2659           // Must promote to pass through va_arg area!
2660           Instruction::CastOps opcode =
2661             CastInst::getCastOpcode(*AI, false, PTy, false);
2662           NewArg = Builder.CreateCast(opcode, *AI, PTy);
2663         }
2664         Args.push_back(NewArg);
2665 
2666         // Add any parameter attributes.
2667         ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
2668       }
2669     }
2670   }
2671 
2672   AttributeSet FnAttrs = CallerPAL.getFnAttributes();
2673 
2674   if (NewRetTy->isVoidTy())
2675     Caller->setName("");   // Void type should not have a name.
2676 
2677   assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
2678          "missing argument attributes");
2679   AttributeList NewCallerPAL = AttributeList::get(
2680       Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);
2681 
2682   SmallVector<OperandBundleDef, 1> OpBundles;
2683   Call.getOperandBundlesAsDefs(OpBundles);
2684 
2685   CallBase *NewCall;
2686   if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
2687     NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
2688                                    II->getUnwindDest(), Args, OpBundles);
2689   } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
2690     NewCall = Builder.CreateCallBr(Callee, CBI->getDefaultDest(),
2691                                    CBI->getIndirectDests(), Args, OpBundles);
2692   } else {
2693     NewCall = Builder.CreateCall(Callee, Args, OpBundles);
2694     cast<CallInst>(NewCall)->setTailCallKind(
2695         cast<CallInst>(Caller)->getTailCallKind());
2696   }
2697   NewCall->takeName(Caller);
2698   NewCall->setCallingConv(Call.getCallingConv());
2699   NewCall->setAttributes(NewCallerPAL);
2700 
2701   // Preserve prof metadata if any.
2702   NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof});
2703 
2704   // Insert a cast of the return type as necessary.
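  // For example (illustrative types): if the old call produced an i8* that is
  // still used but the new callee returns i32*, a bitcast of the new result
  // back to i8* is created for the existing users.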
2705   Instruction *NC = NewCall;
2706   Value *NV = NC;
2707   if (OldRetTy != NV->getType() && !Caller->use_empty()) {
2708     if (!NV->getType()->isVoidTy()) {
2709       NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
2710       NC->setDebugLoc(Caller->getDebugLoc());
2711 
2712       // If this is an invoke/callbr instruction, we should insert it after the
2713       // first non-phi instruction in the normal successor block.
2714       if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
2715         BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
2716         InsertNewInstBefore(NC, *I);
2717       } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
2718         BasicBlock::iterator I = CBI->getDefaultDest()->getFirstInsertionPt();
2719         InsertNewInstBefore(NC, *I);
2720       } else {
2721         // Otherwise, it's a call, just insert cast right after the call.
2722         InsertNewInstBefore(NC, *Caller);
2723       }
2724       Worklist.pushUsersToWorkList(*Caller);
2725     } else {
2726       NV = UndefValue::get(Caller->getType());
2727     }
2728   }
2729 
2730   if (!Caller->use_empty())
2731     replaceInstUsesWith(*Caller, NV);
2732   else if (Caller->hasValueHandle()) {
2733     if (OldRetTy == NV->getType())
2734       ValueHandleBase::ValueIsRAUWd(Caller, NV);
2735     else
2736       // We cannot call ValueIsRAUWd with a different type, and the
2737       // actual tracked value will disappear.
2738       ValueHandleBase::ValueIsDeleted(Caller);
2739   }
2740 
2741   eraseInstFromFunction(*Caller);
2742   return true;
2743 }
2744 
2745 /// Turn a call to a function created by the init_trampoline / adjust_trampoline
2746 /// intrinsic pair into a direct call to the underlying function.
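/// For example (illustrative IR), a call through the adjusted trampoline:
///   %p = call i8* @llvm.adjust.trampoline(i8* %tramp)
///   %f = bitcast i8* %p to i32 (i32)*
///   %r = call i32 %f(i32 %x)
/// becomes a direct call to the function that was passed to
/// llvm.init.trampoline, with the static chain value supplied through that
/// function's 'nest' parameter.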
2747 Instruction *
2748 InstCombinerImpl::transformCallThroughTrampoline(CallBase &Call,
2749                                                  IntrinsicInst &Tramp) {
2750   Value *Callee = Call.getCalledOperand();
2751   Type *CalleeTy = Callee->getType();
2752   FunctionType *FTy = Call.getFunctionType();
2753   AttributeList Attrs = Call.getAttributes();
2754 
2755   // If the call already has the 'nest' attribute somewhere then give up -
2756   // otherwise 'nest' would occur twice after splicing in the chain.
2757   if (Attrs.hasAttrSomewhere(Attribute::Nest))
2758     return nullptr;
2759 
2760   Function *NestF = cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts());
2761   FunctionType *NestFTy = NestF->getFunctionType();
2762 
2763   AttributeList NestAttrs = NestF->getAttributes();
2764   if (!NestAttrs.isEmpty()) {
2765     unsigned NestArgNo = 0;
2766     Type *NestTy = nullptr;
2767     AttributeSet NestAttr;
2768 
2769     // Look for a parameter marked with the 'nest' attribute.
2770     for (FunctionType::param_iterator I = NestFTy->param_begin(),
2771                                       E = NestFTy->param_end();
2772          I != E; ++NestArgNo, ++I) {
2773       AttributeSet AS = NestAttrs.getParamAttributes(NestArgNo);
2774       if (AS.hasAttribute(Attribute::Nest)) {
2775         // Record the parameter type and any other attributes.
2776         NestTy = *I;
2777         NestAttr = AS;
2778         break;
2779       }
2780     }
2781 
2782     if (NestTy) {
2783       std::vector<Value*> NewArgs;
2784       std::vector<AttributeSet> NewArgAttrs;
2785       NewArgs.reserve(Call.arg_size() + 1);
2786       NewArgAttrs.reserve(Call.arg_size());
2787 
2788       // Insert the nest argument into the call argument list, which may
2789       // mean appending it.  Likewise for attributes.
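      // For example (illustrative), with original arguments (%a, %b) and the
      // 'nest' parameter at index 1, the new list is (%a, %chain, %b); if the
      // 'nest' index equals the argument count, the chain is appended.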
2790 
2791       {
2792         unsigned ArgNo = 0;
2793         auto I = Call.arg_begin(), E = Call.arg_end();
2794         do {
2795           if (ArgNo == NestArgNo) {
2796             // Add the chain argument and attributes.
2797             Value *NestVal = Tramp.getArgOperand(2);
2798             if (NestVal->getType() != NestTy)
2799               NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
2800             NewArgs.push_back(NestVal);
2801             NewArgAttrs.push_back(NestAttr);
2802           }
2803 
2804           if (I == E)
2805             break;
2806 
2807           // Add the original argument and attributes.
2808           NewArgs.push_back(*I);
2809           NewArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
2810 
2811           ++ArgNo;
2812           ++I;
2813         } while (true);
2814       }
2815 
2816       // The trampoline may have been bitcast to a bogus type (FTy).
2817       // Handle this by synthesizing a new function type, equal to FTy
2818       // with the chain parameter inserted.
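      // For example (illustrative), if FTy is i32 (i32) and the chain
      // parameter sits at index 0 with type i8*, the synthesized type is
      // i32 (i8*, i32).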
2819 
2820       std::vector<Type*> NewTypes;
2821       NewTypes.reserve(FTy->getNumParams()+1);
2822 
2823       // Insert the chain's type into the list of parameter types, which may
2824       // mean appending it.
2825       {
2826         unsigned ArgNo = 0;
2827         FunctionType::param_iterator I = FTy->param_begin(),
2828           E = FTy->param_end();
2829 
2830         do {
2831           if (ArgNo == NestArgNo)
2832             // Add the chain's type.
2833             NewTypes.push_back(NestTy);
2834 
2835           if (I == E)
2836             break;
2837 
2838           // Add the original type.
2839           NewTypes.push_back(*I);
2840 
2841           ++ArgNo;
2842           ++I;
2843         } while (true);
2844       }
2845 
2846       // Replace the trampoline call with a direct call.  Let the generic
2847       // code sort out any function type mismatches.
2848       FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
2849                                                 FTy->isVarArg());
2850       Constant *NewCallee =
2851         NestF->getType() == PointerType::getUnqual(NewFTy) ?
2852         NestF : ConstantExpr::getBitCast(NestF,
2853                                          PointerType::getUnqual(NewFTy));
2854       AttributeList NewPAL =
2855           AttributeList::get(FTy->getContext(), Attrs.getFnAttributes(),
2856                              Attrs.getRetAttributes(), NewArgAttrs);
2857 
2858       SmallVector<OperandBundleDef, 1> OpBundles;
2859       Call.getOperandBundlesAsDefs(OpBundles);
2860 
2861       Instruction *NewCaller;
2862       if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
2863         NewCaller = InvokeInst::Create(NewFTy, NewCallee,
2864                                        II->getNormalDest(), II->getUnwindDest(),
2865                                        NewArgs, OpBundles);
2866         cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
2867         cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
2868       } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
2869         NewCaller =
2870             CallBrInst::Create(NewFTy, NewCallee, CBI->getDefaultDest(),
2871                                CBI->getIndirectDests(), NewArgs, OpBundles);
2872         cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
2873         cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
2874       } else {
2875         NewCaller = CallInst::Create(NewFTy, NewCallee, NewArgs, OpBundles);
2876         cast<CallInst>(NewCaller)->setTailCallKind(
2877             cast<CallInst>(Call).getTailCallKind());
2878         cast<CallInst>(NewCaller)->setCallingConv(
2879             cast<CallInst>(Call).getCallingConv());
2880         cast<CallInst>(NewCaller)->setAttributes(NewPAL);
2881       }
2882       NewCaller->setDebugLoc(Call.getDebugLoc());
2883 
2884       return NewCaller;
2885     }
2886   }
2887 
2888   // Replace the trampoline call with a direct call.  Since there is no 'nest'
2889   // parameter, there is no need to adjust the argument list.  Let the generic
2890   // code sort out any function type mismatches.
2891   Constant *NewCallee = ConstantExpr::getBitCast(NestF, CalleeTy);
2892   Call.setCalledFunction(FTy, NewCallee);
2893   return &Call;
2894 }
2895