//===- InstCombineCasts.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for cast operations.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <optional>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// Analyze 'Val', seeing if it is a simple linear expression.
/// If so, decompose it, returning some value X, such that Val is
/// X*Scale+Offset.
///
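/// Illustrative example (not in the original source): given
/// Val = (X << 2) + 12, with nuw on both operations, this returns X with
/// Scale = 4 and Offset = 12, since ((X << 2) + 12) == X*4 + 12.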
static Value *decomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
                                        uint64_t &Offset) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
    Offset = CI->getZExtValue();
    Scale  = 0;
    return ConstantInt::get(Val->getType(), 0);
  }

  if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
    // Cannot look past anything that might overflow.
    // We specifically require nuw because we store the Scale in an unsigned
    // and perform an unsigned divide on it.
    OverflowingBinaryOperator *OBI = dyn_cast<OverflowingBinaryOperator>(Val);
    if (OBI && !OBI->hasNoUnsignedWrap()) {
      Scale = 1;
      Offset = 0;
      return Val;
    }

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
      if (I->getOpcode() == Instruction::Shl) {
        // This is a value scaled by '1 << the shift amt'.
        Scale = UINT64_C(1) << RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Mul) {
        // This value is scaled by 'RHS'.
        Scale = RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Add) {
        // We have X+C.  Check to see if we really have (X*C2)+C1,
        // where C1 is divisible by C2.
        unsigned SubScale;
        Value *SubVal =
          decomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
        Offset += RHS->getZExtValue();
        Scale = SubScale;
        return SubVal;
      }
    }
  }

  // Otherwise, we can't look past this.
  Scale = 1;
  Offset = 0;
  return Val;
}

/// If we find a cast of an allocation instruction, try to eliminate the cast by
/// moving the type information into the alloc.
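/// Illustrative example with typed (non-opaque) pointers:
///   %a = alloca [2 x i32]
///   %c = bitcast [2 x i32]* %a to i64*
/// can be rewritten as
///   %a = alloca i64
/// with users of the cast pointed at the new alloca.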
Instruction *InstCombinerImpl::PromoteCastOfAllocation(BitCastInst &CI,
                                                       AllocaInst &AI) {
  PointerType *PTy = cast<PointerType>(CI.getType());
  // Opaque pointers don't have an element type we could replace with.
  if (PTy->isOpaque())
    return nullptr;

  IRBuilderBase::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(&AI);

  // Get the type really allocated and the type casted to.
  Type *AllocElTy = AI.getAllocatedType();
  Type *CastElTy = PTy->getNonOpaquePointerElementType();
  if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr;
  // This optimisation does not work for cases where the cast type
  // is scalable and the allocated type is not. This is because we need to
  // know how many times the casted type fits into the allocated type.
  // For the opposite case, where the allocated type is scalable and the
  // cast type is not, this leads to poor code quality due to the
  // introduction of 'vscale' into the calculations. It seems better to
  // bail out for this case too until we've done a proper cost-benefit
  // analysis.
  bool AllocIsScalable = isa<ScalableVectorType>(AllocElTy);
  bool CastIsScalable = isa<ScalableVectorType>(CastElTy);
  if (AllocIsScalable != CastIsScalable) return nullptr;

  Align AllocElTyAlign = DL.getABITypeAlign(AllocElTy);
  Align CastElTyAlign = DL.getABITypeAlign(CastElTy);
  if (CastElTyAlign < AllocElTyAlign) return nullptr;

  // If the allocation has multiple uses, only promote it if we are strictly
  // increasing the alignment of the resultant allocation.  If we keep it the
  // same, we open the door to infinite loops of various kinds.
  if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr;

  // The alloc and cast types should be either both fixed or both scalable.
  uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy).getKnownMinSize();
  uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy).getKnownMinSize();
  if (CastElTySize == 0 || AllocElTySize == 0) return nullptr;

  // If the allocation has multiple uses, only promote it if we're not
  // shrinking the amount of memory being allocated.
  uint64_t AllocElTyStoreSize = DL.getTypeStoreSize(AllocElTy).getKnownMinSize();
  uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy).getKnownMinSize();
  if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr;

  // See if we can satisfy the modulus by pulling a scale out of the array
  // size argument.
  unsigned ArraySizeScale;
  uint64_t ArrayOffset;
  Value *NumElements = // See if the array size is a decomposable linear expr.
    decomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);

  // If we can now satisfy the modulus by using a non-1 scale, we really can
  // do the xform.
  if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
      (AllocElTySize*ArrayOffset   ) % CastElTySize != 0) return nullptr;

  // We don't currently support arrays of scalable types.
  assert(!AllocIsScalable || (ArrayOffset == 1 && ArraySizeScale == 0));

  unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
  Value *Amt = nullptr;
  if (Scale == 1) {
    Amt = NumElements;
  } else {
    Amt = ConstantInt::get(AI.getArraySize()->getType(), Scale);
    // Insert before the alloca, not before the cast.
    Amt = Builder.CreateMul(Amt, NumElements);
  }

  if (uint64_t Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
    Value *Off = ConstantInt::get(AI.getArraySize()->getType(),
                                  Offset, true);
    Amt = Builder.CreateAdd(Amt, Off);
  }

  AllocaInst *New = Builder.CreateAlloca(CastElTy, AI.getAddressSpace(), Amt);
  New->setAlignment(AI.getAlign());
  New->takeName(&AI);
  New->setUsedWithInAlloca(AI.isUsedWithInAlloca());
  New->setMetadata(LLVMContext::MD_DIAssignID,
                   AI.getMetadata(LLVMContext::MD_DIAssignID));

  replaceAllDbgUsesWith(AI, *New, *New, DT);

  // If the allocation has multiple real uses, insert a cast and change all
  // things that used it to use the new cast.  This will also hack on CI, but it
  // will die soon.
  if (!AI.hasOneUse()) {
    // New is the allocation instruction, pointer typed. AI is the original
    // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
    Value *NewCast = Builder.CreateBitCast(New, AI.getType(), "tmpcast");
    replaceInstUsesWith(AI, NewCast);
    eraseInstFromFunction(AI);
  }
  return replaceInstUsesWith(CI, New);
}

/// Given an expression that CanEvaluateTruncated or CanEvaluateSExtd returns
/// true for, actually insert the code to evaluate the expression.
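/// Illustrative example (not in the original source): asked to evaluate
///   add i32 (zext i16 %a to i32), 42
/// in type i16, this rebuilds the tree as
///   add i16 %a, 42
/// reusing %a directly because its type already matches.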
Value *InstCombinerImpl::EvaluateInDifferentType(Value *V, Type *Ty,
                                                 bool isSigned) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
    // If we got a constantexpr back, try to simplify it with DL info.
    return ConstantFoldConstant(C, DL, &TLI);
  }

  // Otherwise, it must be an instruction.
  Instruction *I = cast<Instruction>(V);
  Instruction *Res = nullptr;
  unsigned Opc = I->getOpcode();
  switch (Opc) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::AShr:
  case Instruction::LShr:
  case Instruction::Shl:
  case Instruction::UDiv:
  case Instruction::URem: {
    Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
    Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
    break;
  }
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
    // If the source type of the cast is the type we're trying for then we can
    // just return the source.  There's no need to insert it because it is not
    // new.
    if (I->getOperand(0)->getType() == Ty)
      return I->getOperand(0);

    // Otherwise, must be the same type of cast, so just reinsert a new one.
    // This also handles the case of zext(trunc(x)) -> zext(x).
    Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty,
                                      Opc == Instruction::SExt);
    break;
  case Instruction::Select: {
    Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
    Res = SelectInst::Create(I->getOperand(0), True, False);
    break;
  }
  case Instruction::PHI: {
    PHINode *OPN = cast<PHINode>(I);
    PHINode *NPN = PHINode::Create(Ty, OPN->getNumIncomingValues());
    for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
      Value *V =
          EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
      NPN->addIncoming(V, OPN->getIncomingBlock(i));
    }
    Res = NPN;
    break;
  }
  default:
    // TODO: Can handle more cases here.
    llvm_unreachable("Unreachable!");
  }

  Res->takeName(I);
  return InsertNewInstWith(Res, *I);
}

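// Determine which single cast opcode (if any) can replace the pair of casts
// CI1;CI2. Illustrative example (not in the original source): zext i16 ->
// i32 followed by zext i32 -> i64 is eliminable and folds to a single
// zext i16 -> i64.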
Instruction::CastOps
InstCombinerImpl::isEliminableCastPair(const CastInst *CI1,
                                       const CastInst *CI2) {
  Type *SrcTy = CI1->getSrcTy();
  Type *MidTy = CI1->getDestTy();
  Type *DstTy = CI2->getDestTy();

  Instruction::CastOps firstOp = CI1->getOpcode();
  Instruction::CastOps secondOp = CI2->getOpcode();
  Type *SrcIntPtrTy =
      SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr;
  Type *MidIntPtrTy =
      MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr;
  Type *DstIntPtrTy =
      DstTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(DstTy) : nullptr;
  unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
                                                DstTy, SrcIntPtrTy, MidIntPtrTy,
                                                DstIntPtrTy);

  // We don't want to form an inttoptr or ptrtoint that converts to an integer
  // type that differs from the pointer size.
  if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
      (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
    Res = 0;

  return Instruction::CastOps(Res);
}

/// Implement the transforms common to all CastInst visitors.
Instruction *InstCombinerImpl::commonCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);
  Type *Ty = CI.getType();

  // Try to eliminate a cast of a cast.
  if (auto *CSrc = dyn_cast<CastInst>(Src)) {   // A->B->C cast
    if (Instruction::CastOps NewOpc = isEliminableCastPair(CSrc, &CI)) {
      // The first cast (CSrc) is eliminable so we need to fix up or replace
      // the second cast (CI). CSrc will then have a good chance of being dead.
      auto *Res = CastInst::Create(NewOpc, CSrc->getOperand(0), Ty);
      // Point debug users of the dying cast to the new one.
      if (CSrc->hasOneUse())
        replaceAllDbgUsesWith(*CSrc, *Res, CI, DT);
      return Res;
    }
  }

  if (auto *Sel = dyn_cast<SelectInst>(Src)) {
    // We are casting a select. Try to fold the cast into the select if the
    // select does not have a compare instruction with matching operand types
    // or the select is likely better done in a narrow type.
    // Creating a select with operands that are different sizes than its
    // condition may inhibit other folds and lead to worse codegen.
    auto *Cmp = dyn_cast<CmpInst>(Sel->getCondition());
    if (!Cmp || Cmp->getOperand(0)->getType() != Sel->getType() ||
        (CI.getOpcode() == Instruction::Trunc &&
         shouldChangeType(CI.getSrcTy(), CI.getType()))) {
      if (Instruction *NV = FoldOpIntoSelect(CI, Sel)) {
        replaceAllDbgUsesWith(*Sel, *NV, CI, DT);
        return NV;
      }
    }
  }

  // If we are casting a PHI, then fold the cast into the PHI.
  if (auto *PN = dyn_cast<PHINode>(Src)) {
    // Don't do this if it would create a PHI node with an illegal type from a
    // legal type.
    if (!Src->getType()->isIntegerTy() || !CI.getType()->isIntegerTy() ||
        shouldChangeType(CI.getSrcTy(), CI.getType()))
      if (Instruction *NV = foldOpIntoPhi(CI, PN))
        return NV;
  }

  // Canonicalize a unary shuffle after the cast if neither operation changes
  // the size or element size of the input vector.
  // TODO: We could allow size-changing ops if that doesn't harm codegen.
  // cast (shuffle X, Mask) --> shuffle (cast X), Mask
  Value *X;
  ArrayRef<int> Mask;
  if (match(Src, m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(Mask))))) {
    // TODO: Allow scalable vectors?
    auto *SrcTy = dyn_cast<FixedVectorType>(X->getType());
    auto *DestTy = dyn_cast<FixedVectorType>(Ty);
    if (SrcTy && DestTy &&
        SrcTy->getNumElements() == DestTy->getNumElements() &&
        SrcTy->getPrimitiveSizeInBits() == DestTy->getPrimitiveSizeInBits()) {
      Value *CastX = Builder.CreateCast(CI.getOpcode(), X, DestTy);
      return new ShuffleVectorInst(CastX, Mask);
    }
  }

  return nullptr;
}

/// Constants and extensions/truncates from the destination type are always
/// free to be evaluated in that type. This is a helper for canEvaluate*.
static bool canAlwaysEvaluateInType(Value *V, Type *Ty) {
  if (isa<Constant>(V))
    return true;
  Value *X;
  if ((match(V, m_ZExtOrSExt(m_Value(X))) || match(V, m_Trunc(m_Value(X)))) &&
      X->getType() == Ty)
    return true;

  return false;
}

/// Filter out values that we cannot evaluate in the destination type for free.
/// This is a helper for canEvaluate*.
static bool canNotEvaluateInType(Value *V, Type *Ty) {
  assert(!isa<Constant>(V) && "Constant should already be handled.");
  if (!isa<Instruction>(V))
    return true;
  // We don't extend or shrink something that has multiple uses -- doing so
  // would require duplicating the instruction, which isn't profitable.
  if (!V->hasOneUse())
    return true;

  return false;
}

/// Return true if we can evaluate the specified expression tree as type Ty
/// instead of its larger type, and arrive with the same value.
/// This is used by code that tries to eliminate truncates.
///
/// Ty will always be a type smaller than V.  We should return true if trunc(V)
/// can be computed by computing V in the smaller type.  If V is an instruction,
/// then trunc(inst(x,y)) can be computed as inst(trunc(x),trunc(y)), which only
/// makes sense if x and y can be efficiently truncated.
///
/// This function works on both vectors and scalars.
///
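/// Illustrative example (not in the original source):
///   trunc (lshr (zext i16 %a to i32), 4) to i16
/// can be evaluated as lshr i16 %a, 4 because the bits shifted in are known
/// to be zero.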
static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombinerImpl &IC,
                                 Instruction *CxtI) {
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  Type *OrigTy = V->getType();
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // These operators can all arbitrarily be extended or truncated.
    return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
           canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);

  case Instruction::UDiv:
  case Instruction::URem: {
    // UDiv and URem can be truncated if all the truncated bits are zero.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    assert(BitWidth < OrigBitWidth && "Unexpected bitwidths!");
    APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
    if (IC.MaskedValueIsZero(I->getOperand(0), Mask, 0, CxtI) &&
        IC.MaskedValueIsZero(I->getOperand(1), Mask, 0, CxtI)) {
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::Shl: {
    // If we are truncating the result of this SHL, and if it's a shift by an
    // in-range amount, we can always perform a SHL in a smaller type.
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    if (AmtKnownBits.getMaxValue().ult(BitWidth))
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    break;
  }
  case Instruction::LShr: {
    // If this is a truncate of a logical shr, we can truncate it to a smaller
    // lshr iff we know that the bits we would otherwise be shifting in are
    // already zeros.
    // TODO: It is enough to check that the bits we would be shifting in are
    //       zero - use AmtKnownBits.getMaxValue().
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    APInt ShiftedBits = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
    if (AmtKnownBits.getMaxValue().ult(BitWidth) &&
        IC.MaskedValueIsZero(I->getOperand(0), ShiftedBits, 0, CxtI)) {
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::AShr: {
    // If this is a truncate of an arithmetic shr, we can truncate it to a
    // smaller ashr iff we know that all the bits from the sign bit of the
    // truncated type up to the sign bit of the original type are copies of
    // the sign bit.
    // TODO: It is enough to check that the bits we would be shifting in are
    //       similar to the sign bit of the truncated type.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    unsigned ShiftedBits = OrigBitWidth - BitWidth;
    if (AmtKnownBits.getMaxValue().ult(BitWidth) &&
        ShiftedBits < IC.ComputeNumSignBits(I->getOperand(0), 0, CxtI))
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    break;
  }
  case Instruction::Trunc:
    // trunc(trunc(x)) -> trunc(x)
    return true;
  case Instruction::ZExt:
  case Instruction::SExt:
    // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest
    // trunc(ext(x)) -> trunc(x) if the source type is larger than the new dest
    return true;
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    return canEvaluateTruncated(SI->getTrueValue(), Ty, IC, CxtI) &&
           canEvaluateTruncated(SI->getFalseValue(), Ty, IC, CxtI);
  }
  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateTruncated(IncValue, Ty, IC, CxtI))
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}

/// Given a vector that is bitcast to an integer, optionally logically
/// right-shifted, and truncated, convert it to an extractelement.
/// Example (big endian):
///   trunc (lshr (bitcast <4 x i32> %X to i128), 32) to i32
///   --->
///   extractelement <4 x i32> %X, 1
static Instruction *foldVecTruncToExtElt(TruncInst &Trunc,
                                         InstCombinerImpl &IC) {
  Value *TruncOp = Trunc.getOperand(0);
  Type *DestType = Trunc.getType();
  if (!TruncOp->hasOneUse() || !isa<IntegerType>(DestType))
    return nullptr;

  Value *VecInput = nullptr;
  ConstantInt *ShiftVal = nullptr;
  if (!match(TruncOp, m_CombineOr(m_BitCast(m_Value(VecInput)),
                                  m_LShr(m_BitCast(m_Value(VecInput)),
                                         m_ConstantInt(ShiftVal)))) ||
      !isa<VectorType>(VecInput->getType()))
    return nullptr;

  VectorType *VecType = cast<VectorType>(VecInput->getType());
  unsigned VecWidth = VecType->getPrimitiveSizeInBits();
  unsigned DestWidth = DestType->getPrimitiveSizeInBits();
  unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0;

  if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0))
    return nullptr;

  // If the element type of the vector doesn't match the result type,
  // bitcast it to a vector type that we can extract from.
  unsigned NumVecElts = VecWidth / DestWidth;
  if (VecType->getElementType() != DestType) {
    VecType = FixedVectorType::get(DestType, NumVecElts);
    VecInput = IC.Builder.CreateBitCast(VecInput, VecType, "bc");
  }

  unsigned Elt = ShiftAmount / DestWidth;
  if (IC.getDataLayout().isBigEndian())
    Elt = NumVecElts - 1 - Elt;

  return ExtractElementInst::Create(VecInput, IC.Builder.getInt32(Elt));
}

/// Funnel/Rotate left/right may occur in a wider type than necessary because of
/// type promotion rules. Try to narrow the inputs and convert to funnel shift.
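/// A sketch of one matching pattern (illustrative, not from the original
/// source): with %x = zext i8 %v to i32,
///   trunc (or (shl i32 %x, %l), (lshr i32 %x, (sub 8, %l))) to i8
/// becomes call i8 @llvm.fshl.i8(i8 %v, i8 %v, i8 trunc(%l)).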
Instruction *InstCombinerImpl::narrowFunnelShift(TruncInst &Trunc) {
  assert((isa<VectorType>(Trunc.getSrcTy()) ||
          shouldChangeType(Trunc.getSrcTy(), Trunc.getType())) &&
         "Don't narrow to an illegal scalar type");

  // Bail out on strange types. It is possible to handle some of these patterns
  // even with non-power-of-2 sizes, but it is not a likely scenario.
  Type *DestTy = Trunc.getType();
  unsigned NarrowWidth = DestTy->getScalarSizeInBits();
  unsigned WideWidth = Trunc.getSrcTy()->getScalarSizeInBits();
  if (!isPowerOf2_32(NarrowWidth))
    return nullptr;

  // First, find an or'd pair of opposite shifts:
  // trunc (or (lshr ShVal0, ShAmt0), (shl ShVal1, ShAmt1))
  BinaryOperator *Or0, *Or1;
  if (!match(Trunc.getOperand(0), m_OneUse(m_Or(m_BinOp(Or0), m_BinOp(Or1)))))
    return nullptr;

  Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1;
  if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal0), m_Value(ShAmt0)))) ||
      !match(Or1, m_OneUse(m_LogicalShift(m_Value(ShVal1), m_Value(ShAmt1)))) ||
      Or0->getOpcode() == Or1->getOpcode())
    return nullptr;

  // Canonicalize to or(shl(ShVal0, ShAmt0), lshr(ShVal1, ShAmt1)).
  if (Or0->getOpcode() == BinaryOperator::LShr) {
    std::swap(Or0, Or1);
    std::swap(ShVal0, ShVal1);
    std::swap(ShAmt0, ShAmt1);
  }
  assert(Or0->getOpcode() == BinaryOperator::Shl &&
         Or1->getOpcode() == BinaryOperator::LShr &&
         "Illegal or(shift,shift) pair");

  // Match the shift amount operands for a funnel/rotate pattern. This always
  // matches a subtraction on the R operand.
  auto matchShiftAmount = [&](Value *L, Value *R, unsigned Width) -> Value * {
    // The shift amounts may add up to the narrow bit width:
    // (shl ShVal0, L) | (lshr ShVal1, Width - L)
    // If this is a funnel shift (different operands are shifted), then the
    // shift amount cannot over-shift (create poison) in the narrow type.
    unsigned MaxShiftAmountWidth = Log2_32(NarrowWidth);
    APInt HiBitMask = ~APInt::getLowBitsSet(WideWidth, MaxShiftAmountWidth);
    if (ShVal0 == ShVal1 || MaskedValueIsZero(L, HiBitMask))
      if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L)))))
        return L;

    // The following patterns currently only work for rotation patterns.
    // TODO: Add more general funnel-shift compatible patterns.
    if (ShVal0 != ShVal1)
      return nullptr;

    // The shift amount may be masked with negation:
    // (shl ShVal0, (X & (Width - 1))) | (lshr ShVal1, ((-X) & (Width - 1)))
    Value *X;
    unsigned Mask = Width - 1;
    if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
        match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
      return X;

    // Same as above, but the shift amount may be extended after masking:
    if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
        match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))))
      return X;

    return nullptr;
  };

  Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, NarrowWidth);
  bool IsFshl = true; // Sub on LSHR.
  if (!ShAmt) {
    ShAmt = matchShiftAmount(ShAmt1, ShAmt0, NarrowWidth);
    IsFshl = false; // Sub on SHL.
  }
  if (!ShAmt)
    return nullptr;

  // The right-shifted value must have high zeros in the wide type (for example
  // from 'zext', 'and' or 'shift'). High bits of the left-shifted value are
  // truncated, so those do not matter.
  APInt HiBitMask = APInt::getHighBitsSet(WideWidth, WideWidth - NarrowWidth);
  if (!MaskedValueIsZero(ShVal1, HiBitMask, 0, &Trunc))
    return nullptr;

  // We have an unnecessarily wide rotate!
  // trunc (or (shl ShVal0, ShAmt), (lshr ShVal1, BitWidth - ShAmt))
  // Narrow the inputs and convert to funnel shift intrinsic:
  // llvm.fshl.i8(trunc(ShVal), trunc(ShVal), trunc(ShAmt))
  Value *NarrowShAmt = Builder.CreateTrunc(ShAmt, DestTy);
  Value *X, *Y;
  X = Y = Builder.CreateTrunc(ShVal0, DestTy);
  if (ShVal0 != ShVal1)
    Y = Builder.CreateTrunc(ShVal1, DestTy);
  Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
  Function *F = Intrinsic::getDeclaration(Trunc.getModule(), IID, DestTy);
  return CallInst::Create(F, {X, Y, NarrowShAmt});
}

/// Try to narrow the width of math or bitwise logic instructions by pulling a
/// truncate ahead of binary operators.
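/// Illustrative example (not in the original source):
///   trunc (add i32 %x, 42) to i8
/// becomes add i8 (trunc i32 %x to i8), 42 when the wide add has no other
/// uses.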
Instruction *InstCombinerImpl::narrowBinOp(TruncInst &Trunc) {
  Type *SrcTy = Trunc.getSrcTy();
  Type *DestTy = Trunc.getType();
  unsigned SrcWidth = SrcTy->getScalarSizeInBits();
  unsigned DestWidth = DestTy->getScalarSizeInBits();

  if (!isa<VectorType>(SrcTy) && !shouldChangeType(SrcTy, DestTy))
    return nullptr;

  BinaryOperator *BinOp;
  if (!match(Trunc.getOperand(0), m_OneUse(m_BinOp(BinOp))))
    return nullptr;

  Value *BinOp0 = BinOp->getOperand(0);
  Value *BinOp1 = BinOp->getOperand(1);
  switch (BinOp->getOpcode()) {
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul: {
    Constant *C;
    if (match(BinOp0, m_Constant(C))) {
      // trunc (binop C, X) --> binop (trunc C', X)
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowC, TruncX);
    }
    if (match(BinOp1, m_Constant(C))) {
      // trunc (binop X, C) --> binop (trunc X, C')
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), TruncX, NarrowC);
    }
    Value *X;
    if (match(BinOp0, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop (ext X), Y) --> binop X, (trunc Y)
      Value *NarrowOp1 = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), X, NarrowOp1);
    }
    if (match(BinOp1, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop Y, (ext X)) --> binop (trunc Y), X
      Value *NarrowOp0 = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowOp0, X);
    }
    break;
  }
  case Instruction::LShr:
  case Instruction::AShr: {
    // trunc (*shr (trunc A), C) --> trunc(*shr A, C)
    Value *A;
    Constant *C;
    if (match(BinOp0, m_Trunc(m_Value(A))) && match(BinOp1, m_Constant(C))) {
      unsigned MaxShiftAmt = SrcWidth - DestWidth;
      // If the shift is small enough, all zero/sign bits created by the shift
      // are removed by the trunc.
      if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULE,
                                      APInt(SrcWidth, MaxShiftAmt)))) {
        auto *OldShift = cast<Instruction>(Trunc.getOperand(0));
        bool IsExact = OldShift->isExact();
        auto *ShAmt = ConstantExpr::getIntegerCast(C, A->getType(), true);
        ShAmt = Constant::mergeUndefsWith(ShAmt, C);
        Value *Shift =
            OldShift->getOpcode() == Instruction::AShr
                ? Builder.CreateAShr(A, ShAmt, OldShift->getName(), IsExact)
                : Builder.CreateLShr(A, ShAmt, OldShift->getName(), IsExact);
        return CastInst::CreateTruncOrBitCast(Shift, DestTy);
      }
    }
    break;
  }
  default: break;
  }

  if (Instruction *NarrowOr = narrowFunnelShift(Trunc))
    return NarrowOr;

  return nullptr;
}

/// Try to narrow the width of a splat shuffle. This could be generalized to any
/// shuffle with a constant operand, but we limit the transform to avoid
/// creating a shuffle type that targets may not be able to lower effectively.
static Instruction *shrinkSplatShuffle(TruncInst &Trunc,
                                       InstCombiner::BuilderTy &Builder) {
  auto *Shuf = dyn_cast<ShuffleVectorInst>(Trunc.getOperand(0));
  if (Shuf && Shuf->hasOneUse() && match(Shuf->getOperand(1), m_Undef()) &&
      all_equal(Shuf->getShuffleMask()) &&
      Shuf->getType() == Shuf->getOperand(0)->getType()) {
    // trunc (shuf X, Undef, SplatMask) --> shuf (trunc X), Poison, SplatMask
    // trunc (shuf X, Poison, SplatMask) --> shuf (trunc X), Poison, SplatMask
    Value *NarrowOp = Builder.CreateTrunc(Shuf->getOperand(0), Trunc.getType());
    return new ShuffleVectorInst(NarrowOp, Shuf->getShuffleMask());
  }

  return nullptr;
}

/// Try to narrow the width of an insert element. This could be generalized for
/// any vector constant, but we limit the transform to insertion into undef to
/// avoid potential backend problems from unsupported insertion widths. This
/// could also be extended to handle the case of inserting a scalar constant
/// into a vector variable.
static Instruction *shrinkInsertElt(CastInst &Trunc,
                                    InstCombiner::BuilderTy &Builder) {
  Instruction::CastOps Opcode = Trunc.getOpcode();
  assert((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
         "Unexpected instruction for shrinking");

  auto *InsElt = dyn_cast<InsertElementInst>(Trunc.getOperand(0));
  if (!InsElt || !InsElt->hasOneUse())
    return nullptr;

  Type *DestTy = Trunc.getType();
  Type *DestScalarTy = DestTy->getScalarType();
  Value *VecOp = InsElt->getOperand(0);
  Value *ScalarOp = InsElt->getOperand(1);
  Value *Index = InsElt->getOperand(2);

  if (match(VecOp, m_Undef())) {
    // trunc   (inselt undef, X, Index) --> inselt undef,   (trunc X), Index
    // fptrunc (inselt undef, X, Index) --> inselt undef, (fptrunc X), Index
    UndefValue *NarrowUndef = UndefValue::get(DestTy);
    Value *NarrowOp = Builder.CreateCast(Opcode, ScalarOp, DestScalarTy);
    return InsertElementInst::Create(NarrowUndef, NarrowOp, Index);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitTrunc(TruncInst &Trunc) {
  if (Instruction *Result = commonCastTransforms(Trunc))
    return Result;

  Value *Src = Trunc.getOperand(0);
  Type *DestTy = Trunc.getType(), *SrcTy = Src->getType();
  unsigned DestWidth = DestTy->getScalarSizeInBits();
  unsigned SrcWidth = SrcTy->getScalarSizeInBits();

  // Attempt to truncate the entire input expression tree to the destination
  // type.   Only do this if the dest type is a simple type, don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
  if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
      canEvaluateTruncated(Src, DestTy, *this, &Trunc)) {

    // If this cast is a truncate, evaluating in a different type always
    // eliminates the cast, so it is always a win.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid cast: "
               << Trunc << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);
    return replaceInstUsesWith(Trunc, Res);
  }

  // For integer types, check if we can shorten the entire input expression to
  // DestWidth * 2, which won't allow removing the truncate, but reducing the
  // width may enable further optimizations, e.g. allowing for larger
  // vectorization factors.
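  // Illustrative example (not in the original source): for trunc i64 %v to
  // i8 where %v is a one-use chain of i64 adds, the adds can be rewritten in
  // i16 (DestWidth * 2) and followed by a cheaper trunc i16 -> i8.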
  if (auto *DestITy = dyn_cast<IntegerType>(DestTy)) {
    if (DestWidth * 2 < SrcWidth) {
      auto *NewDestTy = DestITy->getExtendedType();
      if (shouldChangeType(SrcTy, NewDestTy) &&
          canEvaluateTruncated(Src, NewDestTy, *this, &Trunc)) {
        LLVM_DEBUG(
            dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                      " to reduce the width of operand of"
                   << Trunc << '\n');
        Value *Res = EvaluateInDifferentType(Src, NewDestTy, false);
        return new TruncInst(Res, DestTy);
      }
    }
  }

  // Test if the trunc is the user of a select which is part of a
  // minimum or maximum operation. If so, don't do any more simplification.
  // Even simplifying demanded bits can break the canonical form of a
  // min/max.
  Value *LHS, *RHS;
  if (SelectInst *Sel = dyn_cast<SelectInst>(Src))
    if (matchSelectPattern(Sel, LHS, RHS).Flavor != SPF_UNKNOWN)
      return nullptr;

  // See if we can simplify any instructions used by the input whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(Trunc))
    return &Trunc;

  if (DestWidth == 1) {
    Value *Zero = Constant::getNullValue(SrcTy);
    if (DestTy->isIntegerTy()) {
      // Canonicalize trunc x to i1 -> icmp ne (and x, 1), 0 (scalar only).
      // TODO: We canonicalize to more instructions here because we are probably
      // lacking equivalent analysis for trunc relative to icmp. There may also
      // be codegen concerns. If those trunc limitations were removed, we could
      // remove this transform.
      Value *And = Builder.CreateAnd(Src, ConstantInt::get(SrcTy, 1));
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }

    // For vectors, we do not canonicalize all truncs to icmp, so optimize
    // patterns that would be covered within visitICmpInst.
    Value *X;
    Constant *C;
    if (match(Src, m_OneUse(m_LShr(m_Value(X), m_Constant(C))))) {
      // trunc (lshr X, C) to i1 --> icmp ne (and X, C'), 0
      Constant *One = ConstantInt::get(SrcTy, APInt(SrcWidth, 1));
      Constant *MaskC = ConstantExpr::getShl(One, C);
      Value *And = Builder.CreateAnd(X, MaskC);
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
    if (match(Src, m_OneUse(m_c_Or(m_LShr(m_Value(X), m_Constant(C)),
                                   m_Deferred(X))))) {
      // trunc (or (lshr X, C), X) to i1 --> icmp ne (and X, C'), 0
      Constant *One = ConstantInt::get(SrcTy, APInt(SrcWidth, 1));
      Constant *MaskC = ConstantExpr::getShl(One, C);
      MaskC = ConstantExpr::getOr(MaskC, One);
      Value *And = Builder.CreateAnd(X, MaskC);
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
  }

  Value *A, *B;
  Constant *C;
  if (match(Src, m_LShr(m_SExt(m_Value(A)), m_Constant(C)))) {
    unsigned AWidth = A->getType()->getScalarSizeInBits();
    unsigned MaxShiftAmt = SrcWidth - std::max(DestWidth, AWidth);
    auto *OldSh = cast<Instruction>(Src);
    bool IsExact = OldSh->isExact();

    // If the shift is small enough, all zero bits created by the shift are
    // removed by the trunc.
    if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULE,
                                    APInt(SrcWidth, MaxShiftAmt)))) {
      // trunc (lshr (sext A), C) --> ashr A, C
      if (A->getType() == DestTy) {
        Constant *MaxAmt = ConstantInt::get(SrcTy, DestWidth - 1, false);
        Constant *ShAmt = ConstantExpr::getUMin(C, MaxAmt);
        ShAmt = ConstantExpr::getTrunc(ShAmt, A->getType());
        ShAmt = Constant::mergeUndefsWith(ShAmt, C);
        return IsExact ? BinaryOperator::CreateExactAShr(A, ShAmt)
                       : BinaryOperator::CreateAShr(A, ShAmt);
      }
      // The types are mismatched, so create a cast after shifting:
      // trunc (lshr (sext A), C) --> sext/trunc (ashr A, C)
      if (Src->hasOneUse()) {
        Constant *MaxAmt = ConstantInt::get(SrcTy, AWidth - 1, false);
        Constant *ShAmt = ConstantExpr::getUMin(C, MaxAmt);
        ShAmt = ConstantExpr::getTrunc(ShAmt, A->getType());
        Value *Shift = Builder.CreateAShr(A, ShAmt, "", IsExact);
        return CastInst::CreateIntegerCast(Shift, DestTy, true);
      }
    }
    // TODO: Mask high bits with 'and'.
  }

  if (Instruction *I = narrowBinOp(Trunc))
    return I;

  if (Instruction *I = shrinkSplatShuffle(Trunc, Builder))
    return I;

  if (Instruction *I = shrinkInsertElt(Trunc, Builder))
    return I;

  if (Src->hasOneUse() &&
      (isa<VectorType>(SrcTy) || shouldChangeType(SrcTy, DestTy))) {
    // Transform "trunc (shl X, cst)" -> "shl (trunc X), cst" so long as the
    // dest type is native and cst < dest size.
    if (match(Src, m_Shl(m_Value(A), m_Constant(C))) &&
        !match(A, m_Shr(m_Value(), m_Constant()))) {
      // Skip shifts of shift by constants. It undoes a combine in
      // FoldShiftByConstant and is the extend-in-register pattern.
      APInt Threshold = APInt(C->getType()->getScalarSizeInBits(), DestWidth);
      if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, Threshold))) {
        Value *NewTrunc = Builder.CreateTrunc(A, DestTy, A->getName() + ".tr");
        return BinaryOperator::Create(Instruction::Shl, NewTrunc,
                                      ConstantExpr::getTrunc(C, DestTy));
      }
    }
  }

  if (Instruction *I = foldVecTruncToExtElt(Trunc, *this))
    return I;

  // Whenever an element is extracted from a vector, and then truncated,
  // canonicalize by converting it to a bitcast followed by an
  // extractelement.
  //
  // Example (little endian):
  //   trunc (extractelement <4 x i64> %X, 0) to i32
  //   --->
  //   extractelement <8 x i32> (bitcast <4 x i64> %X to <8 x i32>), i32 0
  Value *VecOp;
  ConstantInt *Cst;
  if (match(Src, m_OneUse(m_ExtractElt(m_Value(VecOp), m_ConstantInt(Cst))))) {
    auto *VecOpTy = cast<VectorType>(VecOp->getType());
    auto VecElts = VecOpTy->getElementCount();

    // A badly fit destination size would result in an invalid cast.
    if (SrcWidth % DestWidth == 0) {
      uint64_t TruncRatio = SrcWidth / DestWidth;
      uint64_t BitCastNumElts = VecElts.getKnownMinValue() * TruncRatio;
      uint64_t VecOpIdx = Cst->getZExtValue();
      uint64_t NewIdx = DL.isBigEndian() ? (VecOpIdx + 1) * TruncRatio - 1
                                         : VecOpIdx * TruncRatio;
      assert(BitCastNumElts <= std::numeric_limits<uint32_t>::max() &&
             "overflow 32-bits");

      auto *BitCastTo =
          VectorType::get(DestTy, BitCastNumElts, VecElts.isScalable());
      Value *BitCast = Builder.CreateBitCast(VecOp, BitCastTo);
      return ExtractElementInst::Create(BitCast, Builder.getInt32(NewIdx));
    }
  }

  // trunc (ctlz_i32(zext(A), B)) --> add(ctlz_i16(A, B), WidthDiff)
  if (match(Src, m_OneUse(m_Intrinsic<Intrinsic::ctlz>(m_ZExt(m_Value(A)),
                                                       m_Value(B))))) {
    unsigned AWidth = A->getType()->getScalarSizeInBits();
    if (AWidth == DestWidth && AWidth > Log2_32(SrcWidth)) {
      Value *WidthDiff = ConstantInt::get(A->getType(), SrcWidth - AWidth);
      Value *NarrowCtlz =
          Builder.CreateIntrinsic(Intrinsic::ctlz, {Trunc.getType()}, {A, B});
      return BinaryOperator::CreateAdd(NarrowCtlz, WidthDiff);
    }
  }

  if (match(Src, m_VScale(DL))) {
    if (Trunc.getFunction() &&
        Trunc.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
      Attribute Attr =
          Trunc.getFunction()->getFnAttribute(Attribute::VScaleRange);
      if (std::optional<unsigned> MaxVScale = Attr.getVScaleRangeMax()) {
        if (Log2_32(*MaxVScale) < DestWidth) {
          Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1));
          return replaceInstUsesWith(Trunc, VScale);
        }
      }
    }
  }

  return nullptr;
}

Instruction *InstCombinerImpl::transformZExtICmp(ICmpInst *Cmp, ZExtInst &Zext) {
  // If we are just checking for an icmp eq of a single bit and zext'ing it
  // to an integer, then shift the bit to the appropriate place and then
  // cast to integer to avoid the comparison.

  // FIXME: This set of transforms does not check for extra uses and/or creates
  //        an extra instruction (an optional final cast is not included
  //        in the transform comments). We may also want to favor icmp over
  //        shifts in cases of equal instructions because icmp has better
  //        analysis in general (invert the transform).

  const APInt *Op1CV;
  if (match(Cmp->getOperand(1), m_APInt(Op1CV))) {

    // zext (x <s  0) to i32 --> x>>u31      true if signbit set.
    if (Cmp->getPredicate() == ICmpInst::ICMP_SLT && Op1CV->isZero()) {
      Value *In = Cmp->getOperand(0);
      Value *Sh = ConstantInt::get(In->getType(),
                                   In->getType()->getScalarSizeInBits() - 1);
      In = Builder.CreateLShr(In, Sh, In->getName() + ".lobit");
      if (In->getType() != Zext.getType())
        In = Builder.CreateIntCast(In, Zext.getType(), false /*ZExt*/);

      return replaceInstUsesWith(Zext, In);
    }

    // zext (X == 0) to i32 --> X^1      iff X has only the low bit set.
    // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    // zext (X != 0) to i32 --> X        iff X has only the low bit set.
    // zext (X != 0) to i32 --> X>>1     iff X has only the 2nd bit set.
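    // For instance (illustrative): if %x = and i32 %a, 2, then
    //   zext (icmp ne i32 %x, 0) to i32
    // becomes lshr i32 %x, 1.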
    if (Op1CV->isZero() && Cmp->isEquality()) {
      // If Op1C is some other power of two, convert:
      KnownBits Known = computeKnownBits(Cmp->getOperand(0), 0, &Zext);

      // Is exactly one bit possibly non-zero? But not the high bit of the
      // destination type, because a test of that bit is canonicalized to
      // this very form (see the signed-compare transform above).
      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2() &&
          (Zext.getType()->getScalarSizeInBits() !=
           KnownZeroMask.logBase2() + 1)) {
        bool isNE = Cmp->getPredicate() == ICmpInst::ICMP_NE;
        uint32_t ShAmt = KnownZeroMask.logBase2();
        Value *In = Cmp->getOperand(0);
        if (ShAmt) {
          // Perform a logical shr by shiftamt.
          // Insert the shift to put the result in the low bit.
          In = Builder.CreateLShr(In, ConstantInt::get(In->getType(), ShAmt),
                                  In->getName() + ".lobit");
        }

        if (!isNE) { // Toggle the low bit.
          Constant *One = ConstantInt::get(In->getType(), 1);
          In = Builder.CreateXor(In, One);
        }

        if (Zext.getType() == In->getType())
          return replaceInstUsesWith(Zext, In);

        Value *IntCast = Builder.CreateIntCast(In, Zext.getType(), false);
        return replaceInstUsesWith(Zext, IntCast);
      }
    }
  }

  if (Cmp->isEquality() && Zext.getType() == Cmp->getOperand(0)->getType()) {
    // Test if a bit is clear/set using a shifted-one mask:
    // zext (icmp eq (and X, (1 << ShAmt)), 0) --> and (lshr (not X), ShAmt), 1
    // zext (icmp ne (and X, (1 << ShAmt)), 0) --> and (lshr X, ShAmt), 1
    Value *X, *ShAmt;
    if (Cmp->hasOneUse() && match(Cmp->getOperand(1), m_ZeroInt()) &&
        match(Cmp->getOperand(0),
              m_OneUse(m_c_And(m_Shl(m_One(), m_Value(ShAmt)), m_Value(X))))) {
      if (Cmp->getPredicate() == ICmpInst::ICMP_EQ)
        X = Builder.CreateNot(X);
      Value *Lshr = Builder.CreateLShr(X, ShAmt);
      Value *And1 = Builder.CreateAnd(Lshr, ConstantInt::get(X->getType(), 1));
      return replaceInstUsesWith(Zext, And1);
    }

    // icmp ne A, B is equal to xor A, B when A and B only really have one bit.
    // It is also profitable to transform icmp eq into not(xor(A, B)) because
    // that may lead to additional simplifications.
    if (IntegerType *ITy = dyn_cast<IntegerType>(Zext.getType())) {
      Value *LHS = Cmp->getOperand(0);
      Value *RHS = Cmp->getOperand(1);

      KnownBits KnownLHS = computeKnownBits(LHS, 0, &Zext);
      KnownBits KnownRHS = computeKnownBits(RHS, 0, &Zext);

      if (KnownLHS == KnownRHS) {
        APInt KnownBits = KnownLHS.Zero | KnownLHS.One;
        APInt UnknownBit = ~KnownBits;
        if (UnknownBit.countPopulation() == 1) {
          Value *Result = Builder.CreateXor(LHS, RHS);

          // Mask off any bits that are set and won't be shifted away.
          if (KnownLHS.One.uge(UnknownBit))
            Result = Builder.CreateAnd(Result,
                                        ConstantInt::get(ITy, UnknownBit));

          // Shift the bit we're testing down to the lsb.
          Result = Builder.CreateLShr(
               Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros()));

          if (Cmp->getPredicate() == ICmpInst::ICMP_EQ)
            Result = Builder.CreateXor(Result, ConstantInt::get(ITy, 1));
          Result->takeName(Cmp);
          return replaceInstUsesWith(Zext, Result);
        }
      }
    }
  }

  return nullptr;
}

/// Determine if the specified value can be computed in the specified wider type
/// and produce the same low bits. If not, return false.
///
/// If this function returns true, it can also return a non-zero number of bits
/// (in BitsToClear) which indicates that the value it computes is correct for
/// the zero extend, but that the additional BitsToClear bits need to be zero'd
/// out.  For example, to promote something like:
///
///   %B = trunc i64 %A to i32
///   %C = lshr i32 %B, 8
///   %E = zext i32 %C to i64
///
/// CanEvaluateZExtd for the 'lshr' will return true, and BitsToClear will be
/// set to 8 to indicate that the promoted value needs to have bits 24-31
/// cleared in addition to bits 32-63.  Since an 'and' will be generated to
/// clear the top bits anyway, doing this has no extra cost.
///
/// This function works on both vectors and scalars.
static bool canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear,
                             InstCombinerImpl &IC, Instruction *CxtI) {
  BitsToClear = 0;
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  unsigned Tmp;
  switch (I->getOpcode()) {
  case Instruction::ZExt:  // zext(zext(x)) -> zext(x).
  case Instruction::SExt:  // zext(sext(x)) -> sext(x).
  case Instruction::Trunc: // zext(trunc(x)) -> trunc(x) or zext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI))
      return false;
    // These can all be promoted if neither operand has 'bits to clear'.
    if (BitsToClear == 0 && Tmp == 0)
      return true;

    // If the operation is an AND/OR/XOR and the bits to clear are zero in the
    // other side, BitsToClear is ok.
    if (Tmp == 0 && I->isBitwiseLogicOp()) {
      // We use MaskedValueIsZero here for generality, but the case we care
      // about the most is constant RHS.
      unsigned VSize = V->getType()->getScalarSizeInBits();
      if (IC.MaskedValueIsZero(I->getOperand(1),
                               APInt::getHighBitsSet(VSize, BitsToClear),
                               0, CxtI)) {
        // If this is an And instruction and all of the BitsToClear are
        // known to be zero we can reset BitsToClear.
        if (I->getOpcode() == Instruction::And)
          BitsToClear = 0;
        return true;
      }
    }

    // Otherwise, we don't know how to analyze this BitsToClear case yet.
    return false;

  case Instruction::Shl: {
    // We can promote shl(x, cst) if we can promote x.  Since shl overwrites the
    // upper bits we can reduce BitsToClear by the shift amount.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      uint64_t ShiftAmt = Amt->getZExtValue();
      BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0;
      return true;
    }
    return false;
  }
  case Instruction::LShr: {
    // We can promote lshr(x, cst) if we can promote x.  This requires the
    // ultimate 'and' to clear out the high zero bits we're clearing out though.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      BitsToClear += Amt->getZExtValue();
      if (BitsToClear > V->getType()->getScalarSizeInBits())
        BitsToClear = V->getType()->getScalarSizeInBits();
      return true;
    }
    // Cannot promote variable LSHR.
    return false;
  }
  case Instruction::Select:
    if (!canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(2), Ty, BitsToClear, IC, CxtI) ||
        // TODO: If important, we could handle the case when the BitsToClear are
        // known zero in the disagreeing side.
        Tmp != BitsToClear)
      return false;
    return true;

  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    if (!canEvaluateZExtd(PN->getIncomingValue(0), Ty, BitsToClear, IC, CxtI))
      return false;
    for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i)
      if (!canEvaluateZExtd(PN->getIncomingValue(i), Ty, Tmp, IC, CxtI) ||
          // TODO: If important, we could handle the case when the BitsToClear
          // are known zero in the disagreeing input.
          Tmp != BitsToClear)
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    return false;
  }
}

Instruction *InstCombinerImpl::visitZExt(ZExtInst &CI) {
  // If this zero extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this zext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  // If one of the common conversions will work, do it.
1242   if (Instruction *Result = commonCastTransforms(CI))
1243     return Result;
1244 
1245   Value *Src = CI.getOperand(0);
1246   Type *SrcTy = Src->getType(), *DestTy = CI.getType();
1247 
1248   // Try to extend the entire expression tree to the wide destination type.
1249   unsigned BitsToClear;
1250   if (shouldChangeType(SrcTy, DestTy) &&
1251       canEvaluateZExtd(Src, DestTy, BitsToClear, *this, &CI)) {
1252     assert(BitsToClear <= SrcTy->getScalarSizeInBits() &&
1253            "Can't clear more bits than in SrcTy");
1254 
1255     // Okay, we can transform this!  Insert the new expression now.
1256     LLVM_DEBUG(
1257         dbgs() << "ICE: EvaluateInDifferentType converting expression type"
1258                   " to avoid zero extend: "
1259                << CI << '\n');
1260     Value *Res = EvaluateInDifferentType(Src, DestTy, false);
1261     assert(Res->getType() == DestTy);
1262 
1263     // Preserve debug values referring to Src if the zext is its last use.
1264     if (auto *SrcOp = dyn_cast<Instruction>(Src))
1265       if (SrcOp->hasOneUse())
1266         replaceAllDbgUsesWith(*SrcOp, *Res, CI, DT);
1267 
1268     uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits()-BitsToClear;
1269     uint32_t DestBitSize = DestTy->getScalarSizeInBits();
1270 
1271     // If the high bits are already filled with zeros, just replace this
1272     // cast with the result.
1273     if (MaskedValueIsZero(Res,
1274                           APInt::getHighBitsSet(DestBitSize,
1275                                                 DestBitSize-SrcBitsKept),
1276                              0, &CI))
1277       return replaceInstUsesWith(CI, Res);
1278 
1279     // We need to emit an AND to clear the high bits.
1280     Constant *C = ConstantInt::get(Res->getType(),
1281                                APInt::getLowBitsSet(DestBitSize, SrcBitsKept));
1282     return BinaryOperator::CreateAnd(Res, C);
1283   }
1284 
1285   // If this is a TRUNC followed by a ZEXT then we are dealing with integral
1286   // types and if the sizes are just right we can convert this into a logical
1287   // 'and' which will be much cheaper than the pair of casts.
1288   if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) {   // A->B->C cast
1289     // TODO: Subsume this into EvaluateInDifferentType.
1290 
1291     // Get the sizes of the types involved.  We know that the intermediate type
1292     // will be smaller than A or C, but don't know the relation between A and C.
1293     Value *A = CSrc->getOperand(0);
1294     unsigned SrcSize = A->getType()->getScalarSizeInBits();
1295     unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
1296     unsigned DstSize = CI.getType()->getScalarSizeInBits();
1297     // If we're actually extending zero bits, then if
1298     // SrcSize <  DstSize: zext(a & mask)
1299     // SrcSize == DstSize: a & mask
1300     // SrcSize  > DstSize: trunc(a) & mask
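    // Illustrative IR for the SrcSize > DstSize case (names hypothetical):
    //   zext (trunc i32 %a to i8) to i16
    // becomes:
    //   %a.tr = trunc i32 %a to i16
    //   and i16 %a.tr, 255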
1301     if (SrcSize < DstSize) {
1302       APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
1303       Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
1304       Value *And = Builder.CreateAnd(A, AndConst, CSrc->getName() + ".mask");
1305       return new ZExtInst(And, CI.getType());
1306     }
1307 
1308     if (SrcSize == DstSize) {
1309       APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
1310       return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
1311                                                            AndValue));
1312     }
1313     if (SrcSize > DstSize) {
1314       Value *Trunc = Builder.CreateTrunc(A, CI.getType());
1315       APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
1316       return BinaryOperator::CreateAnd(Trunc,
1317                                        ConstantInt::get(Trunc->getType(),
1318                                                         AndValue));
1319     }
1320   }
1321 
1322   if (ICmpInst *Cmp = dyn_cast<ICmpInst>(Src))
1323     return transformZExtICmp(Cmp, CI);
1324 
1325   // zext(trunc(X) & C) -> (X & zext(C)).
1326   Constant *C;
1327   Value *X;
1328   if (match(Src, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Constant(C)))) &&
1329       X->getType() == CI.getType())
1330     return BinaryOperator::CreateAnd(X, ConstantExpr::getZExt(C, CI.getType()));
1331 
1332   // zext((trunc(X) & C) ^ C) -> ((X & zext(C)) ^ zext(C)).
1333   Value *And;
1334   if (match(Src, m_OneUse(m_Xor(m_Value(And), m_Constant(C)))) &&
1335       match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) &&
1336       X->getType() == CI.getType()) {
1337     Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
1338     return BinaryOperator::CreateXor(Builder.CreateAnd(X, ZC), ZC);
1339   }
1340 
1341   // If we are truncating, masking, and then zexting back to the original type,
1342   // that's just a mask. This is not handled by canEvaluateZExtd if the
1343   // intermediate values have extra uses. This could be generalized further for
1344   // a non-constant mask operand.
1345   // zext (and (trunc X), C) --> and X, (zext C)
1346   if (match(Src, m_And(m_Trunc(m_Value(X)), m_Constant(C))) &&
1347       X->getType() == DestTy) {
1348     Constant *ZextC = ConstantExpr::getZExt(C, DestTy);
1349     return BinaryOperator::CreateAnd(X, ZextC);
1350   }
1351 
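  // If the vscale_range attribute guarantees that vscale fits unsigned in the
  // narrow source type, then zext(vscale) equals vscale computed directly at
  // the wide type, so the cast can be dropped.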
1352   if (match(Src, m_VScale(DL))) {
1353     if (CI.getFunction() &&
1354         CI.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
1355       Attribute Attr = CI.getFunction()->getFnAttribute(Attribute::VScaleRange);
1356       if (std::optional<unsigned> MaxVScale = Attr.getVScaleRangeMax()) {
1357         unsigned TypeWidth = Src->getType()->getScalarSizeInBits();
1358         if (Log2_32(*MaxVScale) < TypeWidth) {
1359           Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1));
1360           return replaceInstUsesWith(CI, VScale);
1361         }
1362       }
1363     }
1364   }
1365 
1366   return nullptr;
1367 }
1368 
1369 /// Transform (sext icmp) to bitwise / integer operations to eliminate the icmp.
1370 Instruction *InstCombinerImpl::transformSExtICmp(ICmpInst *ICI,
1371                                                  Instruction &CI) {
1372   Value *Op0 = ICI->getOperand(0), *Op1 = ICI->getOperand(1);
1373   ICmpInst::Predicate Pred = ICI->getPredicate();
1374 
1375   // Don't bother if Op1 isn't of vector or integer type.
1376   if (!Op1->getType()->isIntOrIntVectorTy())
1377     return nullptr;
1378 
1379   if ((Pred == ICmpInst::ICMP_SLT && match(Op1, m_ZeroInt())) ||
1380       (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))) {
1381     // (x <s  0) ? -1 : 0 -> ashr x, 31        -> all ones if negative
1382     // (x >s -1) ? -1 : 0 -> not (ashr x, 31)  -> all ones if positive
1383     Value *Sh = ConstantInt::get(Op0->getType(),
1384                                  Op0->getType()->getScalarSizeInBits() - 1);
1385     Value *In = Builder.CreateAShr(Op0, Sh, Op0->getName() + ".lobit");
1386     if (In->getType() != CI.getType())
1387       In = Builder.CreateIntCast(In, CI.getType(), true /*SExt*/);
1388 
1389     if (Pred == ICmpInst::ICMP_SGT)
1390       In = Builder.CreateNot(In, In->getName() + ".not");
1391     return replaceInstUsesWith(CI, In);
1392   }
1393 
1394   if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
1395     // If we know that only one bit of the LHS of the icmp can be set and we
1396     // have an equality comparison with zero or a power of 2, we can transform
1397     // the icmp and sext into bitwise/integer operations.
1398     if (ICI->hasOneUse() &&
1399         ICI->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){
1400       KnownBits Known = computeKnownBits(Op0, 0, &CI);
1401 
1402       APInt KnownZeroMask(~Known.Zero);
1403       if (KnownZeroMask.isPowerOf2()) {
1404         Value *In = ICI->getOperand(0);
1405 
1406         // If the icmp tests for a known zero bit we can constant fold it.
1407         if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) {
1408           Value *V = Pred == ICmpInst::ICMP_NE ?
1409                        ConstantInt::getAllOnesValue(CI.getType()) :
1410                        ConstantInt::getNullValue(CI.getType());
1411           return replaceInstUsesWith(CI, V);
1412         }
1413 
1414         if (!Op1C->isZero() == (Pred == ICmpInst::ICMP_NE)) {
1415           // sext ((x & 2^n) == 0)   -> (x >> n) - 1
1416           // sext ((x & 2^n) != 2^n) -> (x >> n) - 1
1417           unsigned ShiftAmt = KnownZeroMask.countTrailingZeros();
1418           // Perform a right shift to place the desired bit in the LSB.
1419           if (ShiftAmt)
1420             In = Builder.CreateLShr(In,
1421                                     ConstantInt::get(In->getType(), ShiftAmt));
1422 
1423           // At this point "In" is either 1 or 0. Subtract 1 to turn
1424           // {1, 0} -> {0, -1}.
1425           In = Builder.CreateAdd(In,
1426                                  ConstantInt::getAllOnesValue(In->getType()),
1427                                  "sext");
1428         } else {
1429           // sext ((x & 2^n) != 0)   -> (x << (bitwidth-n-1)) a>> bitwidth-1
1430           // sext ((x & 2^n) == 2^n) -> (x << (bitwidth-n-1)) a>> bitwidth-1
1431           unsigned ShiftAmt = KnownZeroMask.countLeadingZeros();
1432           // Perform a left shift to place the desired bit in the MSB.
1433           if (ShiftAmt)
1434             In = Builder.CreateShl(In,
1435                                    ConstantInt::get(In->getType(), ShiftAmt));
1436 
1437           // Distribute the bit over the whole bit width.
1438           In = Builder.CreateAShr(In, ConstantInt::get(In->getType(),
1439                                   KnownZeroMask.getBitWidth() - 1), "sext");
1440         }
1441 
1442         if (CI.getType() == In->getType())
1443           return replaceInstUsesWith(CI, In);
1444         return CastInst::CreateIntegerCast(In, CI.getType(), true/*SExt*/);
1445       }
1446     }
1447   }
1448 
1449   return nullptr;
1450 }
1451 
1452 /// Return true if we can take the specified value and return it as type Ty
1453 /// without inserting any new casts and without changing the value of the common
1454 /// low bits.  This is used by code that tries to promote integer operations to
1455 /// a wider type, which will allow us to eliminate the extension.
1456 ///
1457 /// This function works on both vectors and scalars.
1458 ///
1459 static bool canEvaluateSExtd(Value *V, Type *Ty) {
1460   assert(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
1461          "Can't sign extend type to a smaller type");
1462   if (canAlwaysEvaluateInType(V, Ty))
1463     return true;
1464   if (canNotEvaluateInType(V, Ty))
1465     return false;
1466 
1467   auto *I = cast<Instruction>(V);
1468   switch (I->getOpcode()) {
1469   case Instruction::SExt:  // sext(sext(x)) -> sext(x)
1470   case Instruction::ZExt:  // sext(zext(x)) -> zext(x)
1471   case Instruction::Trunc: // sext(trunc(x)) -> trunc(x) or sext(x)
1472     return true;
1473   case Instruction::And:
1474   case Instruction::Or:
1475   case Instruction::Xor:
1476   case Instruction::Add:
1477   case Instruction::Sub:
1478   case Instruction::Mul:
1479     // These operators can all be arbitrarily extended if their inputs can.
1480     return canEvaluateSExtd(I->getOperand(0), Ty) &&
1481            canEvaluateSExtd(I->getOperand(1), Ty);
1482 
1483   //case Instruction::Shl:   TODO
1484   //case Instruction::LShr:  TODO
1485 
1486   case Instruction::Select:
1487     return canEvaluateSExtd(I->getOperand(1), Ty) &&
1488            canEvaluateSExtd(I->getOperand(2), Ty);
1489 
1490   case Instruction::PHI: {
1491     // We can change a phi if we can change all operands.  Note that we never
1492     // get into trouble with cyclic PHIs here because we only consider
1493     // instructions with a single use.
1494     PHINode *PN = cast<PHINode>(I);
1495     for (Value *IncValue : PN->incoming_values())
1496       if (!canEvaluateSExtd(IncValue, Ty)) return false;
1497     return true;
1498   }
1499   default:
1500     // TODO: Can handle more cases here.
1501     break;
1502   }
1503 
1504   return false;
1505 }
1506 
1507 Instruction *InstCombinerImpl::visitSExt(SExtInst &CI) {
1508   // If this sign extend is only used by a truncate, let the truncate be
1509   // eliminated before we try to optimize this sext.
1510   if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
1511     return nullptr;
1512 
1513   if (Instruction *I = commonCastTransforms(CI))
1514     return I;
1515 
1516   Value *Src = CI.getOperand(0);
1517   Type *SrcTy = Src->getType(), *DestTy = CI.getType();
1518   unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
1519   unsigned DestBitSize = DestTy->getScalarSizeInBits();
1520 
1521   // If the value being extended is zero or positive, use a zext instead.
1522   if (isKnownNonNegative(Src, DL, 0, &AC, &CI, &DT))
1523     return CastInst::Create(Instruction::ZExt, Src, DestTy);
1524 
1525   // Try to extend the entire expression tree to the wide destination type.
1526   if (shouldChangeType(SrcTy, DestTy) && canEvaluateSExtd(Src, DestTy)) {
1527     // Okay, we can transform this!  Insert the new expression now.
1528     LLVM_DEBUG(
1529         dbgs() << "ICE: EvaluateInDifferentType converting expression type"
1530                   " to avoid sign extend: "
1531                << CI << '\n');
1532     Value *Res = EvaluateInDifferentType(Src, DestTy, true);
1533     assert(Res->getType() == DestTy);
1534 
1535     // If the high bits are already filled with the sign bit, just replace this
1536     // cast with the result.
1537     if (ComputeNumSignBits(Res, 0, &CI) > DestBitSize - SrcBitSize)
1538       return replaceInstUsesWith(CI, Res);
1539 
1540     // We need to emit a shl + ashr to do the sign extend.
1541     Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
1542     return BinaryOperator::CreateAShr(Builder.CreateShl(Res, ShAmt, "sext"),
1543                                       ShAmt);
1544   }
1545 
1546   Value *X;
1547   if (match(Src, m_Trunc(m_Value(X)))) {
1548     // If the input has more sign bits than bits truncated, then convert
1549     // directly to the final type.
1550     unsigned XBitSize = X->getType()->getScalarSizeInBits();
1551     if (ComputeNumSignBits(X, 0, &CI) > XBitSize - SrcBitSize)
1552       return CastInst::CreateIntegerCast(X, DestTy, /* isSigned */ true);
1553 
1554     // If the input is a trunc from the destination type, then convert into shifts.
1555     if (Src->hasOneUse() && X->getType() == DestTy) {
1556       // sext (trunc X) --> ashr (shl X, C), C
1557       Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
1558       return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShAmt), ShAmt);
1559     }
1560 
1561     // If we are replacing shifted-in high zero bits with sign bits, convert
1562     // the logical shift to an arithmetic shift and eliminate the cast to the
1563     // intermediate type:
1564     // sext (trunc (lshr Y, C)) --> sext/trunc (ashr Y, C)
1565     Value *Y;
1566     if (Src->hasOneUse() &&
1567         match(X, m_LShr(m_Value(Y),
1568                         m_SpecificIntAllowUndef(XBitSize - SrcBitSize)))) {
1569       Value *Ashr = Builder.CreateAShr(Y, XBitSize - SrcBitSize);
1570       return CastInst::CreateIntegerCast(Ashr, DestTy, /* isSigned */ true);
1571     }
1572   }
1573 
1574   if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
1575     return transformSExtICmp(ICI, CI);
1576 
1577   // If the input is a shl/ashr pair of the same constant, then this is a sign
1578   // extension from a smaller value.  If we could trust arbitrary bitwidth
1579   // integers, we could turn this into a truncate to the smaller bit and then
1580   // use a sext for the whole extension.  Since we don't, look deeper and check
1581   // for a truncate.  If the source and dest are the same type, eliminate the
1582   // trunc and extend and just do shifts.  For example, turn:
1583   //   %a = trunc i32 %i to i8
1584   //   %b = shl i8 %a, C
1585   //   %c = ashr i8 %b, C
1586   //   %d = sext i8 %c to i32
1587   // into:
1588   //   %a = shl i32 %i, 32-(8-C)
1589   //   %d = ashr i32 %a, 32-(8-C)
1590   Value *A = nullptr;
1591   // TODO: Eventually this could be subsumed by EvaluateInDifferentType.
1592   Constant *BA = nullptr, *CA = nullptr;
1593   if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_Constant(BA)),
1594                         m_Constant(CA))) &&
1595       BA->isElementWiseEqual(CA) && A->getType() == DestTy) {
1596     Constant *WideCurrShAmt = ConstantExpr::getSExt(CA, DestTy);
1597     Constant *NumLowbitsLeft = ConstantExpr::getSub(
1598         ConstantInt::get(DestTy, SrcTy->getScalarSizeInBits()), WideCurrShAmt);
1599     Constant *NewShAmt = ConstantExpr::getSub(
1600         ConstantInt::get(DestTy, DestTy->getScalarSizeInBits()),
1601         NumLowbitsLeft);
1602     NewShAmt =
1603         Constant::mergeUndefsWith(Constant::mergeUndefsWith(NewShAmt, BA), CA);
1604     A = Builder.CreateShl(A, NewShAmt, CI.getName());
1605     return BinaryOperator::CreateAShr(A, NewShAmt);
1606   }
1607 
1608   // Splatting the bit at a constant index across a value:
1609   // sext (ashr (trunc iN X to iM), M-1) to iN --> ashr (shl X, N-M), N-1
1610   // If the dest type is different, use a cast (adjust use check).
1611   if (match(Src, m_OneUse(m_AShr(m_Trunc(m_Value(X)),
1612                                  m_SpecificInt(SrcBitSize - 1))))) {
1613     Type *XTy = X->getType();
1614     unsigned XBitSize = XTy->getScalarSizeInBits();
1615     Constant *ShlAmtC = ConstantInt::get(XTy, XBitSize - SrcBitSize);
1616     Constant *AshrAmtC = ConstantInt::get(XTy, XBitSize - 1);
1617     if (XTy == DestTy)
1618       return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShlAmtC),
1619                                         AshrAmtC);
1620     if (cast<BinaryOperator>(Src)->getOperand(0)->hasOneUse()) {
1621       Value *Ashr = Builder.CreateAShr(Builder.CreateShl(X, ShlAmtC), AshrAmtC);
1622       return CastInst::CreateIntegerCast(Ashr, DestTy, /* isSigned */ true);
1623     }
1624   }
1625 
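  // As in the zext case, but the sign bit of the narrow type must also be
  // known zero, hence the stricter SrcBitSize - 1 bound on the vscale_range
  // maximum.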
1626   if (match(Src, m_VScale(DL))) {
1627     if (CI.getFunction() &&
1628         CI.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
1629       Attribute Attr = CI.getFunction()->getFnAttribute(Attribute::VScaleRange);
1630       if (std::optional<unsigned> MaxVScale = Attr.getVScaleRangeMax()) {
1631         if (Log2_32(*MaxVScale) < (SrcBitSize - 1)) {
1632           Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1));
1633           return replaceInstUsesWith(CI, VScale);
1634         }
1635       }
1636     }
1637   }
1638 
1639   return nullptr;
1640 }
1641 
1642 /// Return true if the specified floating-point constant fits in the specified
1643 /// FP type without changing its value.
1644 static bool fitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
1645   bool losesInfo;
1646   APFloat F = CFP->getValueAPF();
1647   (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
1648   return !losesInfo;
1649 }
1650 
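// Illustrative behavior (hypothetical constants): a double 2.5 round-trips
// through half, so it shrinks to half; a double 0.1 does not survive even the
// float round-trip, so nullptr is returned and it stays double.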
1651 static Type *shrinkFPConstant(ConstantFP *CFP) {
1652   if (CFP->getType() == Type::getPPC_FP128Ty(CFP->getContext()))
1653     return nullptr;  // No constant folding of this.
1654   // See if the value can be truncated to half and then reextended.
1655   if (fitsInFPType(CFP, APFloat::IEEEhalf()))
1656     return Type::getHalfTy(CFP->getContext());
1657   // See if the value can be truncated to float and then reextended.
1658   if (fitsInFPType(CFP, APFloat::IEEEsingle()))
1659     return Type::getFloatTy(CFP->getContext());
1660   if (CFP->getType()->isDoubleTy())
1661     return nullptr;  // Won't shrink.
1662   if (fitsInFPType(CFP, APFloat::IEEEdouble()))
1663     return Type::getDoubleTy(CFP->getContext());
1664   // Don't try to shrink to various long double types.
1665   return nullptr;
1666 }
1667 
1668 // Determine if this is a vector of ConstantFPs and if so, return the minimal
1669 // type we can safely truncate all elements to.
1670 static Type *shrinkFPConstantVector(Value *V) {
1671   auto *CV = dyn_cast<Constant>(V);
1672   auto *CVVTy = dyn_cast<FixedVectorType>(V->getType());
1673   if (!CV || !CVVTy)
1674     return nullptr;
1675 
1676   Type *MinType = nullptr;
1677 
1678   unsigned NumElts = CVVTy->getNumElements();
1679 
1680   // For fixed-width vectors we find the minimal type by looking
1681   // through the constant values of the vector.
1682   for (unsigned i = 0; i != NumElts; ++i) {
1683     if (isa<UndefValue>(CV->getAggregateElement(i)))
1684       continue;
1685 
1686     auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
1687     if (!CFP)
1688       return nullptr;
1689 
1690     Type *T = shrinkFPConstant(CFP);
1691     if (!T)
1692       return nullptr;
1693 
1694     // If we haven't found a type yet or this type has a larger mantissa than
1695     // our previous type, this is our new minimal type.
1696     if (!MinType || T->getFPMantissaWidth() > MinType->getFPMantissaWidth())
1697       MinType = T;
1698   }
1699 
1700   // Make a vector type from the minimal type.
1701   return MinType ? FixedVectorType::get(MinType, NumElts) : nullptr;
1702 }
1703 
1704 /// Find the minimum FP type we can safely truncate to.
1705 static Type *getMinimumFPType(Value *V) {
1706   if (auto *FPExt = dyn_cast<FPExtInst>(V))
1707     return FPExt->getOperand(0)->getType();
1708 
1709   // If this value is a constant, return the constant in the smallest FP type
1710   // that can accurately represent it.  This allows us to turn
1711   // (float)((double)X+2.0) into x+2.0f.
1712   if (auto *CFP = dyn_cast<ConstantFP>(V))
1713     if (Type *T = shrinkFPConstant(CFP))
1714       return T;
1715 
1716   // We can only correctly find a minimum type for a scalable vector when it is
1717   // a splat. For splats of constant values the fpext is wrapped up as a
1718   // ConstantExpr.
1719   if (auto *FPCExt = dyn_cast<ConstantExpr>(V))
1720     if (FPCExt->getOpcode() == Instruction::FPExt)
1721       return FPCExt->getOperand(0)->getType();
1722 
1723   // Try to shrink a vector of FP constants. This returns nullptr on scalable
1724   // vectors.
1725   if (Type *T = shrinkFPConstantVector(V))
1726     return T;
1727 
1728   return V->getType();
1729 }
1730 
1731 /// Return true if the cast from integer to FP can be proven to be exact for all
1732 /// possible inputs (the conversion does not lose any precision).
1733 static bool isKnownExactCastIntToFP(CastInst &I, InstCombinerImpl &IC) {
1734   CastInst::CastOps Opcode = I.getOpcode();
1735   assert((Opcode == CastInst::SIToFP || Opcode == CastInst::UIToFP) &&
1736          "Unexpected cast");
1737   Value *Src = I.getOperand(0);
1738   Type *SrcTy = Src->getType();
1739   Type *FPTy = I.getType();
1740   bool IsSigned = Opcode == Instruction::SIToFP;
1741   int SrcSize = (int)SrcTy->getScalarSizeInBits() - IsSigned;
1742 
1743   // Easy case - if the source integer type has fewer bits than the FP mantissa,
1744   // then the cast must be exact.
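  // Illustrative (hypothetical types): sitofp i16 -> float is always exact,
  // since at most 15 significant source bits must fit in float's 24-bit
  // significand; sitofp i32 -> float is not, since 31 > 24.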
1745   int DestNumSigBits = FPTy->getFPMantissaWidth();
1746   if (SrcSize <= DestNumSigBits)
1747     return true;
1748 
1749   // Cast from FP to integer and back to FP is independent of the intermediate
1750   // integer width because of poison on overflow.
1751   Value *F;
1752   if (match(Src, m_FPToSI(m_Value(F))) || match(Src, m_FPToUI(m_Value(F)))) {
1753     // If this is uitofp (fptosi F), the source needs an extra bit to avoid
1754     // potential rounding of negative FP input values.
1755     int SrcNumSigBits = F->getType()->getFPMantissaWidth();
1756     if (!IsSigned && match(Src, m_FPToSI(m_Value())))
1757       SrcNumSigBits++;
1758 
1759     // [su]itofp (fpto[su]i F) --> exact if the source type has no more
1760     // significant bits than the destination (and make sure neither type is
1761     // weird -- ppc_fp128).
1762     if (SrcNumSigBits > 0 && DestNumSigBits > 0 &&
1763         SrcNumSigBits <= DestNumSigBits)
1764       return true;
1765   }
1766 
1767   // TODO:
1768   // Try harder to find if the source integer type has fewer significant bits.
1769   // For example, compute number of sign bits.
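  // Illustrative (hypothetical known bits): an i32 known to be a multiple of
  // 256 and below 2^16 spans at most 32 - 16 - 8 == 8 significant bits, so it
  // even fits half's 11-bit significand.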
1770   KnownBits SrcKnown = IC.computeKnownBits(Src, 0, &I);
1771   int SigBits = (int)SrcTy->getScalarSizeInBits() -
1772                 SrcKnown.countMinLeadingZeros() -
1773                 SrcKnown.countMinTrailingZeros();
1774   if (SigBits <= DestNumSigBits)
1775     return true;
1776 
1777   return false;
1778 }
1779 
1780 Instruction *InstCombinerImpl::visitFPTrunc(FPTruncInst &FPT) {
1781   if (Instruction *I = commonCastTransforms(FPT))
1782     return I;
1783 
1784   // If we have fptrunc(OpI (fpextend x), (fpextend y)), we would like to
1785   // simplify this expression to avoid one or more of the trunc/extend
1786   // operations if we can do so without changing the numerical results.
1787   //
1788   // The exact manner in which the widths of the operands interact to limit
1789   // what we can and cannot do safely varies from operation to operation, and
1790   // is explained below in the various case statements.
1791   Type *Ty = FPT.getType();
1792   auto *BO = dyn_cast<BinaryOperator>(FPT.getOperand(0));
1793   if (BO && BO->hasOneUse()) {
1794     Type *LHSMinType = getMinimumFPType(BO->getOperand(0));
1795     Type *RHSMinType = getMinimumFPType(BO->getOperand(1));
1796     unsigned OpWidth = BO->getType()->getFPMantissaWidth();
1797     unsigned LHSWidth = LHSMinType->getFPMantissaWidth();
1798     unsigned RHSWidth = RHSMinType->getFPMantissaWidth();
1799     unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
1800     unsigned DstWidth = Ty->getFPMantissaWidth();
1801     switch (BO->getOpcode()) {
1802       default: break;
1803       case Instruction::FAdd:
1804       case Instruction::FSub:
1805         // For addition and subtraction, the infinitely precise result can
1806         // essentially be arbitrarily wide; proving that double rounding
1807         // will not occur because the result of OpI is exact (as we will for
1808         // FMul, for example) is hopeless.  However, we *can* nonetheless
1809         // frequently know that double rounding cannot occur (or that it is
1810         // innocuous) by taking advantage of the specific structure of
1811         // infinitely-precise results that admit double rounding.
1812         //
1813         // Specifically, if OpWidth >= 2*DstWidth+1 and DstWidth is sufficient
1814         // to represent both sources, we can guarantee that the double
1815         // rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
1816         // "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
1817         // for proof of this fact).
1818         //
1819         // Note: Figueroa does not consider the case where DstFormat !=
1820         // SrcFormat.  It's possible (likely even!) that this analysis
1821         // could be tightened for those cases, but they are rare (the main
1822         // case of interest here is (float)((double)float + float)).
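        // Worked instance of the bound for that main case: OpWidth == 53,
        // DstWidth == SrcWidth == 24, and 53 >= 2 * 24 + 1 == 49, so the
        // operands can be truncated and the fadd/fsub done in float.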
1823         if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
1824           Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
1825           Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
1826           Instruction *RI = BinaryOperator::Create(BO->getOpcode(), LHS, RHS);
1827           RI->copyFastMathFlags(BO);
1828           return RI;
1829         }
1830         break;
1831       case Instruction::FMul:
1832         // For multiplication, the infinitely precise result has at most
1833         // LHSWidth + RHSWidth significant bits; if OpWidth is sufficient
1834         // that such a value can be exactly represented, then no double
1835         // rounding can possibly occur; we can safely perform the operation
1836         // in the destination format if it can represent both sources.
1837         if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
1838           Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
1839           Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
1840           return BinaryOperator::CreateFMulFMF(LHS, RHS, BO);
1841         }
1842         break;
1843       case Instruction::FDiv:
1844         // For division, we again use the bound from Figueroa's
1845         // dissertation.  I am entirely certain that this bound can be
1846         // tightened in the unbalanced operand case by an analysis based on
1847         // the diophantine rational approximation bound, but the well-known
1848         // condition used here is a good conservative first pass.
1849         // TODO: Tighten bound via rigorous analysis of the unbalanced case.
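        // The float/double case from above satisfies this bound as well:
        // OpWidth == 53 >= 2 * DstWidth == 48.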
1850         if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) {
1851           Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
1852           Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
1853           return BinaryOperator::CreateFDivFMF(LHS, RHS, BO);
1854         }
1855         break;
1856       case Instruction::FRem: {
1857         // Remainder is straightforward: it is always exact, so the
1858         // type of OpI doesn't enter into things at all.  We simply evaluate
1859         // in whichever source type is larger, then convert to the
1860         // destination type.
1861         if (SrcWidth == OpWidth)
1862           break;
1863         Value *LHS, *RHS;
1864         if (LHSWidth == SrcWidth) {
1865            LHS = Builder.CreateFPTrunc(BO->getOperand(0), LHSMinType);
1866            RHS = Builder.CreateFPTrunc(BO->getOperand(1), LHSMinType);
1867         } else {
1868            LHS = Builder.CreateFPTrunc(BO->getOperand(0), RHSMinType);
1869            RHS = Builder.CreateFPTrunc(BO->getOperand(1), RHSMinType);
1870         }
1871 
1872         Value *ExactResult = Builder.CreateFRemFMF(LHS, RHS, BO);
1873         return CastInst::CreateFPCast(ExactResult, Ty);
1874       }
1875     }
1876   }
1877 
1878   // (fptrunc (fneg x)) -> (fneg (fptrunc x))
1879   Value *X;
1880   Instruction *Op = dyn_cast<Instruction>(FPT.getOperand(0));
1881   if (Op && Op->hasOneUse()) {
1882     // FIXME: The FMF should propagate from the fptrunc, not the source op.
1883     IRBuilder<>::FastMathFlagGuard FMFG(Builder);
1884     if (isa<FPMathOperator>(Op))
1885       Builder.setFastMathFlags(Op->getFastMathFlags());
1886 
1887     if (match(Op, m_FNeg(m_Value(X)))) {
1888       Value *InnerTrunc = Builder.CreateFPTrunc(X, Ty);
1889 
1890       return UnaryOperator::CreateFNegFMF(InnerTrunc, Op);
1891     }
1892 
1893     // If we are truncating a select that has an extended operand, we can
1894     // narrow the other operand and do the select as a narrow op.
1895     Value *Cond, *Y;
1896     if (match(Op, m_Select(m_Value(Cond), m_FPExt(m_Value(X)), m_Value(Y))) &&
1897         X->getType() == Ty) {
1898       // fptrunc (select Cond, (fpext X), Y) --> select Cond, X, (fptrunc Y)
1899       Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
1900       Value *Sel = Builder.CreateSelect(Cond, X, NarrowY, "narrow.sel", Op);
1901       return replaceInstUsesWith(FPT, Sel);
1902     }
1903     if (match(Op, m_Select(m_Value(Cond), m_Value(Y), m_FPExt(m_Value(X)))) &&
1904         X->getType() == Ty) {
1905       // fptrunc (select Cond, Y, (fpext X)) --> select Cond, (fptrunc Y), X
1906       Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
1907       Value *Sel = Builder.CreateSelect(Cond, NarrowY, X, "narrow.sel", Op);
1908       return replaceInstUsesWith(FPT, Sel);
1909     }
1910   }
1911 
1912   if (auto *II = dyn_cast<IntrinsicInst>(FPT.getOperand(0))) {
1913     switch (II->getIntrinsicID()) {
1914     default: break;
1915     case Intrinsic::ceil:
1916     case Intrinsic::fabs:
1917     case Intrinsic::floor:
1918     case Intrinsic::nearbyint:
1919     case Intrinsic::rint:
1920     case Intrinsic::round:
1921     case Intrinsic::roundeven:
1922     case Intrinsic::trunc: {
1923       Value *Src = II->getArgOperand(0);
1924       if (!Src->hasOneUse())
1925         break;
1926 
1927       // Except for fabs, this transformation requires the input of the unary FP
1928       // operation to be itself an fpext from the type to which we're
1929       // truncating.
1930       if (II->getIntrinsicID() != Intrinsic::fabs) {
1931         FPExtInst *FPExtSrc = dyn_cast<FPExtInst>(Src);
1932         if (!FPExtSrc || FPExtSrc->getSrcTy() != Ty)
1933           break;
1934       }
1935 
1936       // Do unary FP operation on smaller type.
1937       // (fptrunc (fabs x)) -> (fabs (fptrunc x))
1938       Value *InnerTrunc = Builder.CreateFPTrunc(Src, Ty);
1939       Function *Overload = Intrinsic::getDeclaration(FPT.getModule(),
1940                                                      II->getIntrinsicID(), Ty);
1941       SmallVector<OperandBundleDef, 1> OpBundles;
1942       II->getOperandBundlesAsDefs(OpBundles);
1943       CallInst *NewCI =
1944           CallInst::Create(Overload, {InnerTrunc}, OpBundles, II->getName());
1945       NewCI->copyFastMathFlags(II);
1946       return NewCI;
1947     }
1948     }
1949   }
1950 
1951   if (Instruction *I = shrinkInsertElt(FPT, Builder))
1952     return I;
1953 
1954   Value *Src = FPT.getOperand(0);
1955   if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
1956     auto *FPCast = cast<CastInst>(Src);
1957     if (isKnownExactCastIntToFP(*FPCast, *this))
1958       return CastInst::Create(FPCast->getOpcode(), FPCast->getOperand(0), Ty);
1959   }
1960 
1961   return nullptr;
1962 }
1963 
1964 Instruction *InstCombinerImpl::visitFPExt(CastInst &FPExt) {
1965   // If the source operand is a cast from integer to FP and known exact, then
1966   // cast the integer operand directly to the destination type.
1967   Type *Ty = FPExt.getType();
1968   Value *Src = FPExt.getOperand(0);
1969   if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
1970     auto *FPCast = cast<CastInst>(Src);
1971     if (isKnownExactCastIntToFP(*FPCast, *this))
1972       return CastInst::Create(FPCast->getOpcode(), FPCast->getOperand(0), Ty);
1973   }
1974 
1975   return commonCastTransforms(FPExt);
1976 }
1977 
1978 /// fpto{s/u}i({u/s}itofp(X)) --> X or zext(X) or sext(X) or trunc(X)
1979 /// This is safe if the intermediate type has enough bits in its mantissa to
1980 /// accurately represent all values of X.  For example, this won't work with
1981 /// i64 -> float -> i64.
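/// Illustrative positive case (hypothetical IR):
///   fptosi (sitofp i16 %x to float) to i32
/// folds to sext i16 %x to i32, since every i16 value is exactly
/// representable in float's 24-bit significand.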
1982 Instruction *InstCombinerImpl::foldItoFPtoI(CastInst &FI) {
1983   if (!isa<UIToFPInst>(FI.getOperand(0)) && !isa<SIToFPInst>(FI.getOperand(0)))
1984     return nullptr;
1985 
1986   auto *OpI = cast<CastInst>(FI.getOperand(0));
1987   Value *X = OpI->getOperand(0);
1988   Type *XType = X->getType();
1989   Type *DestType = FI.getType();
1990   bool IsOutputSigned = isa<FPToSIInst>(FI);
1991 
1992   // Since we can assume the conversion won't overflow, our decision as to
1993   // whether the input will fit in the float should depend on the minimum
1994   // of the input range and output range.
1995 
1996   // This means this is also safe for a signed input and unsigned output, since
1997   // a negative input would lead to undefined behavior.
1998   if (!isKnownExactCastIntToFP(*OpI, *this)) {
1999     // The first cast may not round exactly based on the source integer width
2000     // and FP width, but the overflow UB rules can still allow this to fold.
2001     // If the destination type is narrow, that means the intermediate FP value
2002     // must be large enough to hold the source value exactly.
2003     // For example, (uint8_t)((float)(uint32_t)16777217) is undefined behavior.
2004     int OutputSize = (int)DestType->getScalarSizeInBits();
2005     if (OutputSize > OpI->getType()->getFPMantissaWidth())
2006       return nullptr;
2007   }
2008 
2009   if (DestType->getScalarSizeInBits() > XType->getScalarSizeInBits()) {
2010     bool IsInputSigned = isa<SIToFPInst>(OpI);
2011     if (IsInputSigned && IsOutputSigned)
2012       return new SExtInst(X, DestType);
2013     return new ZExtInst(X, DestType);
2014   }
2015   if (DestType->getScalarSizeInBits() < XType->getScalarSizeInBits())
2016     return new TruncInst(X, DestType);
2017 
2018   assert(XType == DestType && "Unexpected types for int to FP to int casts");
2019   return replaceInstUsesWith(FI, X);
2020 }
2021 
2022 Instruction *InstCombinerImpl::visitFPToUI(FPToUIInst &FI) {
2023   if (Instruction *I = foldItoFPtoI(FI))
2024     return I;
2025 
2026   return commonCastTransforms(FI);
2027 }
2028 
2029 Instruction *InstCombinerImpl::visitFPToSI(FPToSIInst &FI) {
2030   if (Instruction *I = foldItoFPtoI(FI))
2031     return I;
2032 
2033   return commonCastTransforms(FI);
2034 }
2035 
2036 Instruction *InstCombinerImpl::visitUIToFP(CastInst &CI) {
2037   return commonCastTransforms(CI);
2038 }
2039 
2040 Instruction *InstCombinerImpl::visitSIToFP(CastInst &CI) {
2041   return commonCastTransforms(CI);
2042 }
2043 
2044 Instruction *InstCombinerImpl::visitIntToPtr(IntToPtrInst &CI) {
2045   // If the source integer type is not the intptr_t type for this target, do a
2046   // trunc or zext to the intptr_t type, then inttoptr of it.  This allows the
2047   // cast to be exposed to other transforms.
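  // Illustrative IR (assuming 64-bit pointers in this address space):
  //   inttoptr i32 %x to ptr
  // becomes:
  //   %w = zext i32 %x to i64
  //   inttoptr i64 %w to ptr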
2048   unsigned AS = CI.getAddressSpace();
2049   if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
2050       DL.getPointerSizeInBits(AS)) {
2051     Type *Ty = CI.getOperand(0)->getType()->getWithNewType(
2052         DL.getIntPtrType(CI.getContext(), AS));
2053     Value *P = Builder.CreateZExtOrTrunc(CI.getOperand(0), Ty);
2054     return new IntToPtrInst(P, CI.getType());
2055   }
2056 
2057   if (Instruction *I = commonCastTransforms(CI))
2058     return I;
2059 
2060   return nullptr;
2061 }
2062 
2063 /// Implement the transforms for cast of pointer (bitcast/ptrtoint)
2064 Instruction *InstCombinerImpl::commonPointerCastTransforms(CastInst &CI) {
2065   Value *Src = CI.getOperand(0);
2066 
2067   if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
2068     // If casting the result of a getelementptr instruction with no offset, turn
2069     // this into a cast of the original pointer!
2070     if (GEP->hasAllZeroIndices() &&
2071         // If CI is an addrspacecast and GEP changes the pointer type, merging
2072         // GEP into CI would undo canonicalizing addrspacecast with different
2073         // pointer types, causing infinite loops.
2074         (!isa<AddrSpaceCastInst>(CI) ||
2075          GEP->getType() == GEP->getPointerOperandType())) {
2076       // Changing the cast operand is usually not a good idea but it is safe
2077       // here because the pointer operand is being replaced with another
2078       // pointer operand so the opcode doesn't need to change.
2079       return replaceOperand(CI, 0, GEP->getOperand(0));
2080     }
2081   }
2082 
2083   return commonCastTransforms(CI);
2084 }
2085 
2086 Instruction *InstCombinerImpl::visitPtrToInt(PtrToIntInst &CI) {
2087   // If the destination integer type is not the intptr_t type for this target,
2088   // do a ptrtoint to intptr_t then do a trunc or zext.  This allows the cast
2089   // to be exposed to other transforms.
2090   Value *SrcOp = CI.getPointerOperand();
2091   Type *SrcTy = SrcOp->getType();
2092   Type *Ty = CI.getType();
2093   unsigned AS = CI.getPointerAddressSpace();
2094   unsigned TySize = Ty->getScalarSizeInBits();
2095   unsigned PtrSize = DL.getPointerSizeInBits(AS);
2096   if (TySize != PtrSize) {
2097     Type *IntPtrTy =
2098         SrcTy->getWithNewType(DL.getIntPtrType(CI.getContext(), AS));
2099     Value *P = Builder.CreatePtrToInt(SrcOp, IntPtrTy);
2100     return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false);
2101   }
2102 
2103   if (auto *GEP = dyn_cast<GetElementPtrInst>(SrcOp)) {
2104     // Fold ptrtoint(gep null, x) to multiply + constant if the GEP has one use.
2105     // While this can increase the number of instructions it doesn't actually
2106     // increase the overall complexity since the arithmetic is just part of
2107     // the GEP otherwise.
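    // Illustrative IR (names hypothetical):
    //   ptrtoint (getelementptr i32, ptr null, i64 %i) to i64
    // becomes the offset computation mul i64 %i, 4, zero-extended or
    // truncated to the destination integer type when the sizes differ.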
2108     if (GEP->hasOneUse() &&
2109         isa<ConstantPointerNull>(GEP->getPointerOperand())) {
2110       return replaceInstUsesWith(CI,
2111                                  Builder.CreateIntCast(EmitGEPOffset(GEP), Ty,
2112                                                        /*isSigned=*/false));
2113     }
2114   }
2115 
2116   Value *Vec, *Scalar, *Index;
2117   if (match(SrcOp, m_OneUse(m_InsertElt(m_IntToPtr(m_Value(Vec)),
2118                                         m_Value(Scalar), m_Value(Index)))) &&
2119       Vec->getType() == Ty) {
2120     assert(Vec->getType()->getScalarSizeInBits() == PtrSize && "Wrong type");
2121     // Convert the scalar to int followed by insert to eliminate one cast:
2122     // p2i (ins (i2p Vec), Scalar, Index) --> ins Vec, (p2i Scalar), Index
2123     Value *NewCast = Builder.CreatePtrToInt(Scalar, Ty->getScalarType());
2124     return InsertElementInst::Create(Vec, NewCast, Index);
2125   }
2126 
2127   return commonPointerCastTransforms(CI);
2128 }
2129 
2130 /// This input value (which is known to have vector type) is being zero extended
2131 /// or truncated to the specified vector type. Since the zext/trunc is done
2132 /// using an integer type, we have a (bitcast(cast(bitcast))) pattern, and
2133 /// endianness will impact which end of the vector is extended or
2134 /// truncated.
2135 ///
2136 /// A vector is always stored with index 0 at the lowest address, which
2137 /// corresponds to the most significant bits for a big endian stored integer and
2138 /// the least significant bits for little endian. A trunc/zext of an integer
2139 /// impacts the big end of the integer. Thus, we need to add/remove elements at
2140 /// the front of the vector for big endian targets, and the back of the vector
2141 /// for little endian targets.
2142 ///
2143 /// Try to replace it with a shuffle (and vector/vector bitcast) if possible.
2144 ///
2145 /// The source and destination vector types may have different element types.
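/// Illustrative little-endian case (hypothetical types): resizing <4 x i16>
/// through an i64 -> i32 trunc down to <2 x i16> keeps elements 0 and 1:
///   shufflevector <4 x i16> %in, <4 x i16> poison, <2 x i32> <i32 0, i32 1>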
2146 static Instruction *
2147 optimizeVectorResizeWithIntegerBitCasts(Value *InVal, VectorType *DestTy,
2148                                         InstCombinerImpl &IC) {
2149   // We can only do this optimization if the output is a multiple of the input
2150   // element size, or the input is a multiple of the output element size.
2151   // Convert the input type to have the same element type as the output.
2152   VectorType *SrcTy = cast<VectorType>(InVal->getType());
2153 
2154   if (SrcTy->getElementType() != DestTy->getElementType()) {
2155     // The element types don't need to be identical, but for now they must be the
2156     // same size.  There is no specific reason we couldn't handle things like
2157     // <4 x i16> -> <4 x i32> by bitcasting to <2 x i32> but haven't gotten
2158     // there yet.
2159     if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
2160         DestTy->getElementType()->getPrimitiveSizeInBits())
2161       return nullptr;
2162 
2163     SrcTy =
2164         FixedVectorType::get(DestTy->getElementType(),
2165                              cast<FixedVectorType>(SrcTy)->getNumElements());
2166     InVal = IC.Builder.CreateBitCast(InVal, SrcTy);
2167   }
2168 
2169   bool IsBigEndian = IC.getDataLayout().isBigEndian();
2170   unsigned SrcElts = cast<FixedVectorType>(SrcTy)->getNumElements();
2171   unsigned DestElts = cast<FixedVectorType>(DestTy)->getNumElements();
2172 
2173   assert(SrcElts != DestElts && "Element counts should be different.");
2174 
2175   // Now that the element types match, get the shuffle mask and RHS of the
2176   // shuffle to use, which depends on whether we're increasing or decreasing the
2177   // size of the input.
2178   auto ShuffleMaskStorage = llvm::to_vector<16>(llvm::seq<int>(0, SrcElts));
2179   ArrayRef<int> ShuffleMask;
2180   Value *V2;
2181 
2182   if (SrcElts > DestElts) {
2183     // If we're shrinking the number of elements (rewriting an integer
2184     // truncate), just shuffle in the elements corresponding to the least
2185     // significant bits from the input and use poison as the second shuffle
2186     // input.
2187     V2 = PoisonValue::get(SrcTy);
2188     // Make sure the shuffle mask selects the "least significant bits" by
2189     // keeping elements from back of the src vector for big endian, and from the
2190     // front for little endian.
2191     ShuffleMask = ShuffleMaskStorage;
2192     if (IsBigEndian)
2193       ShuffleMask = ShuffleMask.take_back(DestElts);
2194     else
2195       ShuffleMask = ShuffleMask.take_front(DestElts);
2196   } else {
2197     // If we're increasing the number of elements (rewriting an integer zext),
2198     // shuffle in all of the elements from InVal. Fill the rest of the result
2199     // elements with zeros from a constant zero.
2200     V2 = Constant::getNullValue(SrcTy);
2201     // Use first elt from V2 when indicating zero in the shuffle mask.
2202     uint32_t NullElt = SrcElts;
2203     // Extend with null values in the "most significant bits" by adding elements
2204     // in front of the src vector for big endian, and at the back for little
2205     // endian.
2206     unsigned DeltaElts = DestElts - SrcElts;
2207     if (IsBigEndian)
2208       ShuffleMaskStorage.insert(ShuffleMaskStorage.begin(), DeltaElts, NullElt);
2209     else
2210       ShuffleMaskStorage.append(DeltaElts, NullElt);
2211     ShuffleMask = ShuffleMaskStorage;
2212   }
2213 
2214   return new ShuffleVectorInst(InVal, V2, ShuffleMask);
2215 }
2216 
2217 static bool isMultipleOfTypeSize(unsigned Value, Type *Ty) {
2218   return Value % Ty->getPrimitiveSizeInBits() == 0;
2219 }
2220 
2221 static unsigned getTypeSizeIndex(unsigned Value, Type *Ty) {
2222   return Value / Ty->getPrimitiveSizeInBits();
2223 }
2224 
2225 /// V is a value which is inserted into a vector of VecEltTy.
2226 /// Look through the value to see if we can decompose it into
2227 /// insertions into the vector.  See the example in the comment for
2228 /// OptimizeIntegerToVectorInsertions for the pattern this handles.
2229 /// The type of V is always a non-zero multiple of VecEltTy's size.
2230 /// Shift is the number of bits between the lsb of V and the lsb of
2231 /// the vector.
2232 ///
2233 /// This returns false if the pattern can't be matched or true if it can,
2234 /// filling in Elements with the elements found here.
2235 static bool collectInsertionElements(Value *V, unsigned Shift,
2236                                      SmallVectorImpl<Value *> &Elements,
2237                                      Type *VecEltTy, bool isBigEndian) {
2238   assert(isMultipleOfTypeSize(Shift, VecEltTy) &&
2239          "Shift should be a multiple of the element type size");
2240 
2241   // Undef values never contribute useful bits to the result.
2242   if (isa<UndefValue>(V)) return true;
2243 
2244   // If we got down to a value of the right type, we win; try inserting it
2245   // into the right element.
2246   if (V->getType() == VecEltTy) {
2247     // Inserting null doesn't actually insert any elements.
2248     if (Constant *C = dyn_cast<Constant>(V))
2249       if (C->isNullValue())
2250         return true;
2251 
2252     unsigned ElementIndex = getTypeSizeIndex(Shift, VecEltTy);
2253     if (isBigEndian)
2254       ElementIndex = Elements.size() - ElementIndex - 1;
2255 
2256     // Fail if multiple elements are inserted into this slot.
2257     if (Elements[ElementIndex])
2258       return false;
2259 
2260     Elements[ElementIndex] = V;
2261     return true;
2262   }
2263 
2264   if (Constant *C = dyn_cast<Constant>(V)) {
2265     // Figure out the # elements this provides, and bitcast it or slice it up
2266     // as required.
2267     unsigned NumElts = getTypeSizeIndex(C->getType()->getPrimitiveSizeInBits(),
2268                                         VecEltTy);
2269     // If the constant is the size of a vector element, we just need to bitcast
2270     // it to the right type so it gets properly inserted.
2271     if (NumElts == 1)
2272       return collectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy),
2273                                       Shift, Elements, VecEltTy, isBigEndian);
2274 
2275     // Okay, this is a constant that covers multiple elements.  Slice it up into
2276     // pieces and insert each element-sized piece into the vector.
2277     if (!isa<IntegerType>(C->getType()))
2278       C = ConstantExpr::getBitCast(C, IntegerType::get(V->getContext(),
2279                                        C->getType()->getPrimitiveSizeInBits()));
2280     unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
2281     Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);
2282 
2283     for (unsigned i = 0; i != NumElts; ++i) {
2284       unsigned ShiftI = Shift+i*ElementSize;
2285       Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
2286                                                                   ShiftI));
2287       Piece = ConstantExpr::getTrunc(Piece, ElementIntTy);
2288       if (!collectInsertionElements(Piece, ShiftI, Elements, VecEltTy,
2289                                     isBigEndian))
2290         return false;
2291     }
2292     return true;
2293   }
2294 
2295   if (!V->hasOneUse()) return false;
2296 
2297   Instruction *I = dyn_cast<Instruction>(V);
2298   if (!I) return false;
2299   switch (I->getOpcode()) {
2300   default: return false; // Unhandled case.
2301   case Instruction::BitCast:
2302     if (I->getOperand(0)->getType()->isVectorTy())
2303       return false;
2304     return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
2305                                     isBigEndian);
2306   case Instruction::ZExt:
2307     if (!isMultipleOfTypeSize(
2308                           I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
2309                               VecEltTy))
2310       return false;
2311     return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
2312                                     isBigEndian);
2313   case Instruction::Or:
2314     return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
2315                                     isBigEndian) &&
2316            collectInsertionElements(I->getOperand(1), Shift, Elements, VecEltTy,
2317                                     isBigEndian);
2318   case Instruction::Shl: {
2319     // Must be shifting by a constant that is a multiple of the element size.
2320     ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
2321     if (!CI) return false;
2322     Shift += CI->getZExtValue();
2323     if (!isMultipleOfTypeSize(Shift, VecEltTy)) return false;
2324     return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
2325                                     isBigEndian);
2326   }
2327 
2329 }
2330 
2331 
2333 /// assemble the elements of the vector manually.
2334 /// Try to rip the code out and replace it with insertelements.  This is to
2335 /// optimize code like this:
2336 ///
2337 ///    %tmp37 = bitcast float %inc to i32
2338 ///    %tmp38 = zext i32 %tmp37 to i64
2339 ///    %tmp31 = bitcast float %inc5 to i32
2340 ///    %tmp32 = zext i32 %tmp31 to i64
2341 ///    %tmp33 = shl i64 %tmp32, 32
2342 ///    %ins35 = or i64 %tmp33, %tmp38
2343 ///    %tmp43 = bitcast i64 %ins35 to <2 x float>
2344 ///
2345 /// Into two insertelements that do "buildvector{%inc, %inc5}".
2346 static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI,
2347                                                 InstCombinerImpl &IC) {
2348   auto *DestVecTy = cast<FixedVectorType>(CI.getType());
2349   Value *IntInput = CI.getOperand(0);
2350 
2351   SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
2352   if (!collectInsertionElements(IntInput, 0, Elements,
2353                                 DestVecTy->getElementType(),
2354                                 IC.getDataLayout().isBigEndian()))
2355     return nullptr;
2356 
2357   // If we succeeded, we know that all of the elements are specified by Elements
2358   // or are zero if Elements has a null entry.  Recast this as a set of
2359   // insertions.
2360   Value *Result = Constant::getNullValue(CI.getType());
2361   for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
2362     if (!Elements[i]) continue;  // Unset element.
2363 
2364     Result = IC.Builder.CreateInsertElement(Result, Elements[i],
2365                                             IC.Builder.getInt32(i));
2366   }
2367 
2368   return Result;
2369 }
2370 
2371 /// Canonicalize scalar bitcasts of extracted elements into a bitcast of the
2372 /// vector followed by extract element. The backend tends to handle bitcasts of
2373 /// vectors better than bitcasts of scalars because vector registers are
2374 /// usually not type-specific like scalar integer or scalar floating-point.
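/// Illustrative (hypothetical IR, assuming the extract has one use):
///   bitcast (extractelement <2 x i64> %v, i64 0) to double
/// becomes:
///   extractelement (bitcast <2 x i64> %v to <2 x double>), i64 0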
2375 static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast,
2376                                               InstCombinerImpl &IC) {
2377   Value *VecOp, *Index;
2378   if (!match(BitCast.getOperand(0),
2379              m_OneUse(m_ExtractElt(m_Value(VecOp), m_Value(Index)))))
2380     return nullptr;
2381 
2382   // The bitcast must be to a vectorizable type; otherwise we can't make a new
2383   // type to extract from.
2384   Type *DestType = BitCast.getType();
2385   VectorType *VecType = cast<VectorType>(VecOp->getType());
2386   if (VectorType::isValidElementType(DestType)) {
2387     auto *NewVecType = VectorType::get(DestType, VecType);
2388     auto *NewBC = IC.Builder.CreateBitCast(VecOp, NewVecType, "bc");
2389     return ExtractElementInst::Create(NewBC, Index);
2390   }
2391 
2392   // Only handle a vector DestType to avoid the inverse transform in visitBitCast.
2393   // bitcast (extractelement <1 x elt>, dest) -> bitcast(<1 x elt>, dest)
2394   auto *FixedVType = dyn_cast<FixedVectorType>(VecType);
2395   if (DestType->isVectorTy() && FixedVType && FixedVType->getNumElements() == 1)
2396     return CastInst::Create(Instruction::BitCast, VecOp, DestType);
2397 
2398   return nullptr;
2399 }
2400 
2401 /// Change the type of a bitwise logic operation if we can eliminate a bitcast.
2402 static Instruction *foldBitCastBitwiseLogic(BitCastInst &BitCast,
2403                                             InstCombiner::BuilderTy &Builder) {
2404   Type *DestTy = BitCast.getType();
2405   BinaryOperator *BO;
2406 
2407   if (!match(BitCast.getOperand(0), m_OneUse(m_BinOp(BO))) ||
2408       !BO->isBitwiseLogicOp())
2409     return nullptr;
2410 
2411   // FIXME: This transform is restricted to vector types to avoid backend
2412   // problems caused by creating potentially illegal operations. If a fix-up is
2413   // added to handle that situation, we can remove this check.
2414   if (!DestTy->isVectorTy() || !BO->getType()->isVectorTy())
2415     return nullptr;
2416 
2417   if (DestTy->isFPOrFPVectorTy()) {
2418     Value *X, *Y;
2419     // bitcast(logic(bitcast(X), bitcast(Y))) -> bitcast'(logic(bitcast'(X), Y))
    if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) &&
        match(BO->getOperand(1), m_OneUse(m_BitCast(m_Value(Y))))) {
      if (X->getType()->isFPOrFPVectorTy() &&
          Y->getType()->isIntOrIntVectorTy()) {
        Value *CastedOp =
            Builder.CreateBitCast(BO->getOperand(0), Y->getType());
        Value *NewBO = Builder.CreateBinOp(BO->getOpcode(), CastedOp, Y);
        return CastInst::CreateBitOrPointerCast(NewBO, DestTy);
      }
      if (X->getType()->isIntOrIntVectorTy() &&
          Y->getType()->isFPOrFPVectorTy()) {
        Value *CastedOp =
            Builder.CreateBitCast(BO->getOperand(1), X->getType());
        Value *NewBO = Builder.CreateBinOp(BO->getOpcode(), CastedOp, X);
        return CastInst::CreateBitOrPointerCast(NewBO, DestTy);
      }
    }
    return nullptr;
  }

  if (!DestTy->isIntOrIntVectorTy())
    return nullptr;

  Value *X;
  if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) &&
      X->getType() == DestTy && !isa<Constant>(X)) {
    // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
    Value *CastedOp1 = Builder.CreateBitCast(BO->getOperand(1), DestTy);
    return BinaryOperator::Create(BO->getOpcode(), X, CastedOp1);
  }

  if (match(BO->getOperand(1), m_OneUse(m_BitCast(m_Value(X)))) &&
      X->getType() == DestTy && !isa<Constant>(X)) {
    // bitcast(logic(Y, bitcast(X))) --> logic'(bitcast(Y), X)
    Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
    return BinaryOperator::Create(BO->getOpcode(), CastedOp0, X);
  }

  // Canonicalize vector bitcasts to come before vector bitwise logic with a
  // constant. This eases recognition of special constants for later ops.
  // Example:
  // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
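  //
  // A concrete sketch of the canonicalization (illustrative; the folded
  // constant assumes little-endian lane layout):
  //   %l = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
  //   %r = bitcast <4 x i32> %l to <2 x i64>
  // becomes
  //   %bx = bitcast <4 x i32> %x to <2 x i64>
  //   %r  = and <2 x i64> %bx, <i64 1095216660735, i64 1095216660735>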
  Constant *C;
  if (match(BO->getOperand(1), m_Constant(C))) {
    // bitcast (logic X, C) --> logic (bitcast X, C')
    Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
    Value *CastedC = Builder.CreateBitCast(C, DestTy);
    return BinaryOperator::Create(BO->getOpcode(), CastedOp0, CastedC);
  }

  return nullptr;
}

/// Change the type of a select if we can eliminate a bitcast.
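///
/// For example (a minimal sketch with assumed types), given
///   %bx = bitcast <2 x i64> %x to <4 x i32>
///   %s  = select i1 %c, <4 x i32> %bx, <4 x i32> %y
///   %r  = bitcast <4 x i32> %s to <2 x i64>
/// this can become
///   %by = bitcast <4 x i32> %y to <2 x i64>
///   %r  = select i1 %c, <2 x i64> %x, <2 x i64> %by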
static Instruction *foldBitCastSelect(BitCastInst &BitCast,
                                      InstCombiner::BuilderTy &Builder) {
  Value *Cond, *TVal, *FVal;
  if (!match(BitCast.getOperand(0),
             m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
    return nullptr;

  // A vector select must maintain the same number of elements in its operands.
  Type *CondTy = Cond->getType();
  Type *DestTy = BitCast.getType();
  if (auto *CondVTy = dyn_cast<VectorType>(CondTy))
    if (!DestTy->isVectorTy() ||
        CondVTy->getElementCount() !=
            cast<VectorType>(DestTy)->getElementCount())
      return nullptr;

  // FIXME: This transform is restricted from changing the select between
  // scalars and vectors to avoid backend problems caused by creating
  // potentially illegal operations. If a fix-up is added to handle that
  // situation, we can remove this check.
  if (DestTy->isVectorTy() != TVal->getType()->isVectorTy())
    return nullptr;

  auto *Sel = cast<Instruction>(BitCast.getOperand(0));
  Value *X;
  if (match(TVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
      !isa<Constant>(X)) {
    // bitcast(select(Cond, bitcast(X), Y)) --> select'(Cond, X, bitcast(Y))
    Value *CastedVal = Builder.CreateBitCast(FVal, DestTy);
    return SelectInst::Create(Cond, X, CastedVal, "", nullptr, Sel);
  }

  if (match(FVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
      !isa<Constant>(X)) {
    // bitcast(select(Cond, Y, bitcast(X))) --> select'(Cond, bitcast(Y), X)
    Value *CastedVal = Builder.CreateBitCast(TVal, DestTy);
    return SelectInst::Create(Cond, CastedVal, X, "", nullptr, Sel);
  }

  return nullptr;
}

/// Check if all users of CI are StoreInsts.
static bool hasStoreUsersOnly(CastInst &CI) {
  for (User *U : CI.users()) {
    if (!isa<StoreInst>(U))
      return false;
  }
  return true;
}

/// This function handles the following case:
///
///     A  ->  B    cast
///     PHI
///     B  ->  A    cast
///
/// All the related PHI nodes can be replaced by new PHI nodes with type A.
/// The uses of \p CI can be changed to the new PHI node corresponding to \p PN.
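///
/// For example (an illustrative sketch; A = float and B = i32 are assumed):
///   %b = bitcast float %a to i32               ; A -> B
///   %p = phi i32 [ %b, %bb0 ], [ %p2, %bb1 ]
///   %c = bitcast i32 %p to float               ; B -> A
/// The PHI web is rewritten to operate on float, and %c is replaced by the
/// corresponding new PHI node.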
Instruction *InstCombinerImpl::optimizeBitCastFromPhi(CastInst &CI,
                                                      PHINode *PN) {
  // BitCast used by Store can be handled in InstCombineLoadStoreAlloca.cpp.
  if (hasStoreUsersOnly(CI))
    return nullptr;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType();         // Type B
  Type *DestTy = CI.getType();          // Type A

  SmallVector<PHINode *, 4> PhiWorklist;
  SmallSetVector<PHINode *, 4> OldPhiNodes;

  // Find all of the A->B casts and PHI nodes.
  // We need to inspect all related PHI nodes, but PHIs can be cyclic, so
  // OldPhiNodes is used to track all known PHI nodes; before a new PHI is
  // added to PhiWorklist, it is first checked against (and inserted into)
  // OldPhiNodes.
  PhiWorklist.push_back(PN);
  OldPhiNodes.insert(PN);
  while (!PhiWorklist.empty()) {
    auto *OldPN = PhiWorklist.pop_back_val();
    for (Value *IncValue : OldPN->incoming_values()) {
      if (isa<Constant>(IncValue))
        continue;

      if (auto *LI = dyn_cast<LoadInst>(IncValue)) {
        // If there is a sequence of one or more load instructions, where each
        // loaded value is used as the address of a later load, a bitcast is
        // necessary to change the value type; don't optimize it. For
        // simplicity we give up if the load address comes from another load.
        Value *Addr = LI->getOperand(0);
        if (Addr == &CI || isa<LoadInst>(Addr))
          return nullptr;
        // Don't transform "load <256 x i32>, <256 x i32>*" to
        // "load x86_amx, x86_amx*", because x86_amx* is invalid.
        // TODO: Remove this check when bitcast between vector and x86_amx
        // is replaced with a specific intrinsic.
        if (DestTy->isX86_AMXTy())
          return nullptr;
        if (LI->hasOneUse() && LI->isSimple())
          continue;
        // If a LoadInst has more than one use, changing the type of the loaded
        // value may create another bitcast.
        return nullptr;
      }

      if (auto *PNode = dyn_cast<PHINode>(IncValue)) {
        if (OldPhiNodes.insert(PNode))
          PhiWorklist.push_back(PNode);
        continue;
      }

      auto *BCI = dyn_cast<BitCastInst>(IncValue);
      // We can't handle other instructions.
      if (!BCI)
        return nullptr;

      // Verify it's an A->B cast.
      Type *TyA = BCI->getOperand(0)->getType();
      Type *TyB = BCI->getType();
      if (TyA != DestTy || TyB != SrcTy)
        return nullptr;
    }
  }

  // Check that each user of each old PHI node is something that we can
  // rewrite, so that all of the old PHI nodes can be cleaned up afterwards.
  for (auto *OldPN : OldPhiNodes) {
    for (User *V : OldPN->users()) {
      if (auto *SI = dyn_cast<StoreInst>(V)) {
        if (!SI->isSimple() || SI->getOperand(0) != OldPN)
          return nullptr;
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        // Verify it's a B->A cast.
        Type *TyB = BCI->getOperand(0)->getType();
        Type *TyA = BCI->getType();
        if (TyA != DestTy || TyB != SrcTy)
          return nullptr;
      } else if (auto *PHI = dyn_cast<PHINode>(V)) {
        // As long as the user is another old PHI node, then even if we don't
        // rewrite it, the PHI web we're considering won't have any users
        // outside itself, so it'll be dead.
        if (!OldPhiNodes.contains(PHI))
          return nullptr;
      } else {
        return nullptr;
      }
    }
  }

  // For each old PHI node, create a corresponding new PHI node with type A.
  SmallDenseMap<PHINode *, PHINode *> NewPNodes;
  for (auto *OldPN : OldPhiNodes) {
    Builder.SetInsertPoint(OldPN);
    PHINode *NewPN = Builder.CreatePHI(DestTy, OldPN->getNumOperands());
    NewPNodes[OldPN] = NewPN;
  }

  // Fill in the operands of new PHI nodes.
  for (auto *OldPN : OldPhiNodes) {
    PHINode *NewPN = NewPNodes[OldPN];
    for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
      Value *V = OldPN->getOperand(j);
      Value *NewV = nullptr;
      if (auto *C = dyn_cast<Constant>(V)) {
        NewV = ConstantExpr::getBitCast(C, DestTy);
      } else if (auto *LI = dyn_cast<LoadInst>(V)) {
        // Explicitly perform load combine to make sure no opposing transform
        // can remove the bitcast in the meantime and trigger an infinite loop.
        Builder.SetInsertPoint(LI);
        NewV = combineLoadToNewType(*LI, DestTy);
        // Remove the old load and its use in the old phi, which itself becomes
        // dead once the whole transform finishes.
        replaceInstUsesWith(*LI, PoisonValue::get(LI->getType()));
        eraseInstFromFunction(*LI);
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        NewV = BCI->getOperand(0);
      } else if (auto *PrevPN = dyn_cast<PHINode>(V)) {
        NewV = NewPNodes[PrevPN];
      }
      assert(NewV);
      NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
    }
  }

  // Traverse all accumulated PHI nodes and process their users, which are
  // Stores and BitCasts. Without this processing, new PHI nodes could be
  // replicated, leading to extra moves generated after DeSSA.
  // If there is a store with type B, change it to type A.

  // Replace users of BitCast B->A with NewPHI. This will help later to get
  // rid of the closure formed by the old PHI nodes.
  Instruction *RetVal = nullptr;
  for (auto *OldPN : OldPhiNodes) {
    PHINode *NewPN = NewPNodes[OldPN];
    for (User *V : make_early_inc_range(OldPN->users())) {
      if (auto *SI = dyn_cast<StoreInst>(V)) {
        assert(SI->isSimple() && SI->getOperand(0) == OldPN);
        Builder.SetInsertPoint(SI);
        auto *NewBC =
          cast<BitCastInst>(Builder.CreateBitCast(NewPN, SrcTy));
        SI->setOperand(0, NewBC);
        Worklist.push(SI);
        assert(hasStoreUsersOnly(*NewBC));
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        Type *TyB = BCI->getOperand(0)->getType();
        Type *TyA = BCI->getType();
        assert(TyA == DestTy && TyB == SrcTy);
        (void) TyA;
        (void) TyB;
        Instruction *I = replaceInstUsesWith(*BCI, NewPN);
        if (BCI == &CI)
          RetVal = I;
      } else if (auto *PHI = dyn_cast<PHINode>(V)) {
        assert(OldPhiNodes.contains(PHI));
        (void) PHI;
      } else {
        llvm_unreachable("all uses should be handled");
      }
    }
  }

  return RetVal;
}

static Instruction *convertBitCastToGEP(BitCastInst &CI, IRBuilderBase &Builder,
                                        const DataLayout &DL) {
  Value *Src = CI.getOperand(0);
  PointerType *SrcPTy = cast<PointerType>(Src->getType());
  PointerType *DstPTy = cast<PointerType>(CI.getType());

  // Bitcasts involving opaque pointers cannot be converted into a GEP.
  if (SrcPTy->isOpaque() || DstPTy->isOpaque())
    return nullptr;

  Type *DstElTy = DstPTy->getNonOpaquePointerElementType();
  Type *SrcElTy = SrcPTy->getNonOpaquePointerElementType();

  // When the type pointed to is not sized, the cast cannot be turned into a
  // GEP.
  if (!SrcElTy->isSized())
    return nullptr;

  // If the source and destination are pointers, and this cast is equivalent
  // to a getelementptr X, 0, 0, 0..., turn it into the appropriate GEP.
  // This can enhance SROA and other transforms that want type-safe pointers.
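  //
  // For example (an illustrative sketch with typed pointers):
  //   %q = bitcast [4 x i32]* %p to i32*
  // becomes
  //   %q = getelementptr [4 x i32], [4 x i32]* %p, i32 0, i32 0
  // (marked inbounds when %p is known to be dereferenceable; see below).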
  unsigned NumZeros = 0;
  while (SrcElTy && SrcElTy != DstElTy) {
    SrcElTy = GetElementPtrInst::getTypeAtIndex(SrcElTy, (uint64_t)0);
    ++NumZeros;
  }

  // If we found a path from the src to dest, create the getelementptr now.
  if (SrcElTy == DstElTy) {
    SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder.getInt32(0));
    GetElementPtrInst *GEP = GetElementPtrInst::Create(
        SrcPTy->getNonOpaquePointerElementType(), Src, Idxs);

    // If the source pointer is dereferenceable, then assume it points to an
    // allocated object and apply "inbounds" to the GEP.
    bool CanBeNull, CanBeFreed;
    if (Src->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed)) {
      // In a non-default address space (not 0), a null pointer cannot be
      // assumed inbounds, so ignore that case (dereferenceable_or_null).
      // The reason is that 'null' is not treated differently in these address
      // spaces, and we consequently ignore the 'gep inbounds' special case
      // for 'null', which allows 'inbounds' on 'null' if the indices are
      // zeros.
      if (SrcPTy->getAddressSpace() == 0 || !CanBeNull)
        GEP->setIsInBounds();
    }
    return GEP;
  }
  return nullptr;
}

Instruction *InstCombinerImpl::visitBitCast(BitCastInst &CI) {
  // If the operands are integer typed, then apply the integer transforms;
  // otherwise just apply the common ones.
  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType();
  Type *DestTy = CI.getType();

  // Get rid of casts from one type to the same type. These are useless and can
  // be replaced by the operand.
  if (DestTy == SrcTy)
    return replaceInstUsesWith(CI, Src);

  if (isa<PointerType>(SrcTy) && isa<PointerType>(DestTy)) {
    // If we are casting an alloca to a pointer to a type of the same
    // size, rewrite the allocation instruction to allocate the "right" type.
    // There is no need to modify malloc calls because it is their bitcast that
    // needs to be cleaned up.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
      if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
        return V;

    if (Instruction *I = convertBitCastToGEP(CI, Builder, DL))
      return I;
  }

  if (FixedVectorType *DestVTy = dyn_cast<FixedVectorType>(DestTy)) {
    // Beware: messing with this target-specific oddity may cause trouble.
    if (DestVTy->getNumElements() == 1 && SrcTy->isX86_MMXTy()) {
      Value *Elem = Builder.CreateBitCast(Src, DestVTy->getElementType());
      return InsertElementInst::Create(PoisonValue::get(DestTy), Elem,
                     Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
    }

    if (isa<IntegerType>(SrcTy)) {
      // If this is a cast from an integer to vector, check to see if the input
      // is a trunc or zext of a bitcast from vector.  If so, we can replace all
      // the casts with a shuffle and (potentially) a bitcast.
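      //
      // For example (an illustrative sketch; exact elements chosen by the
      // shuffle depend on the sizes and endianness):
      //   %bc = bitcast <4 x i32> %v to i128
      //   %tr = trunc i128 %bc to i64
      //   %r  = bitcast i64 %tr to <2 x i32>
      // can be rewritten as a shufflevector that extracts two elements of %v.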
      if (isa<TruncInst>(Src) || isa<ZExtInst>(Src)) {
        CastInst *SrcCast = cast<CastInst>(Src);
        if (BitCastInst *BCIn = dyn_cast<BitCastInst>(SrcCast->getOperand(0)))
          if (isa<VectorType>(BCIn->getOperand(0)->getType()))
            if (Instruction *I = optimizeVectorResizeWithIntegerBitCasts(
                    BCIn->getOperand(0), cast<VectorType>(DestTy), *this))
              return I;
      }

      // If the input is an 'or' instruction, we may be doing shifts and ors to
      // assemble the elements of the vector manually.  Try to rip the code out
      // and replace it with insertelements.
      if (Value *V = optimizeIntegerToVectorInsertions(CI, *this))
        return replaceInstUsesWith(CI, V);
    }
  }

  if (FixedVectorType *SrcVTy = dyn_cast<FixedVectorType>(SrcTy)) {
    if (SrcVTy->getNumElements() == 1) {
      // If our destination is not a vector, then make this a straight
      // scalar-scalar cast.
      if (!DestTy->isVectorTy()) {
        Value *Elem =
          Builder.CreateExtractElement(Src,
                     Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
        return CastInst::Create(Instruction::BitCast, Elem, DestTy);
      }

      // Otherwise, see if our source is an insert. If so, then use the scalar
      // component directly:
      // bitcast (inselt <1 x elt> V, X, 0) to <n x m> --> bitcast X to <n x m>
      if (auto *InsElt = dyn_cast<InsertElementInst>(Src))
        return new BitCastInst(InsElt->getOperand(1), DestTy);
    }

    // Convert an artificial vector insert into more analyzable bitwise logic.
    unsigned BitWidth = DestTy->getScalarSizeInBits();
    Value *X, *Y;
    uint64_t IndexC;
    if (match(Src, m_OneUse(m_InsertElt(m_OneUse(m_BitCast(m_Value(X))),
                                        m_Value(Y), m_ConstantInt(IndexC)))) &&
        DestTy->isIntegerTy() && X->getType() == DestTy &&
        Y->getType()->isIntegerTy() && isDesirableIntType(BitWidth)) {
      // Adjust for big endian - the LSBs are at the high index.
      if (DL.isBigEndian())
        IndexC = SrcVTy->getNumElements() - 1 - IndexC;

      // We only handle (endian-normalized) insert to index 0. Any other insert
      // would require a left-shift, so that is an extra instruction.
      if (IndexC == 0) {
        // bitcast (inselt (bitcast X), Y, 0) --> or (and X, MaskC), (zext Y)
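        //
        // For example, with i64 <-> <2 x i32> on a little-endian target
        // (illustrative widths only):
        //   %v = bitcast i64 %x to <2 x i32>
        //   %i = insertelement <2 x i32> %v, i32 %y, i64 0
        //   %r = bitcast <2 x i32> %i to i64
        // becomes
        //   %hi = and i64 %x, -4294967296    ; keep the high 32 bits
        //   %zy = zext i32 %y to i64
        //   %r  = or i64 %hi, %zy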
        unsigned EltWidth = Y->getType()->getScalarSizeInBits();
        APInt MaskC = APInt::getHighBitsSet(BitWidth, BitWidth - EltWidth);
        Value *AndX = Builder.CreateAnd(X, MaskC);
        Value *ZextY = Builder.CreateZExt(Y, DestTy);
        return BinaryOperator::CreateOr(AndX, ZextY);
      }
    }
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(Src)) {
    // Okay, we have (bitcast (shuffle ..)).  Check to see if this is
    // a bitcast to a vector with the same # elts.
    Value *ShufOp0 = Shuf->getOperand(0);
    Value *ShufOp1 = Shuf->getOperand(1);
    auto ShufElts = cast<VectorType>(Shuf->getType())->getElementCount();
    auto SrcVecElts = cast<VectorType>(ShufOp0->getType())->getElementCount();
    if (Shuf->hasOneUse() && DestTy->isVectorTy() &&
        cast<VectorType>(DestTy)->getElementCount() == ShufElts &&
        ShufElts == SrcVecElts) {
      BitCastInst *Tmp;
      // If either of the operands is a cast from CI.getType(), then
      // evaluating the shuffle in the casted destination's type will allow
      // us to eliminate at least one cast.
      if (((Tmp = dyn_cast<BitCastInst>(ShufOp0)) &&
           Tmp->getOperand(0)->getType() == DestTy) ||
          ((Tmp = dyn_cast<BitCastInst>(ShufOp1)) &&
           Tmp->getOperand(0)->getType() == DestTy)) {
        Value *LHS = Builder.CreateBitCast(ShufOp0, DestTy);
        Value *RHS = Builder.CreateBitCast(ShufOp1, DestTy);
        // Return a new shuffle vector.  Use the same element IDs, since we
        // know the vector types have matching element counts.
        return new ShuffleVectorInst(LHS, RHS, Shuf->getShuffleMask());
      }
    }

    // A byte/bit-reversing shuffle whose result is bitcast to a scalar is
    // better recognized as a byte swap or a bit reversal:
    // bitcast <N x i8> (shuf X, undef, <N, N-1,...0>) -> bswap (bitcast X)
    // bitcast <N x i1> (shuf X, undef, <N, N-1,...0>) -> bitreverse (bitcast X)
    if (DestTy->isIntegerTy() && ShufElts.getKnownMinValue() % 2 == 0 &&
        Shuf->hasOneUse() && Shuf->isReverse()) {
      unsigned IntrinsicNum = 0;
      if (DL.isLegalInteger(DestTy->getScalarSizeInBits()) &&
          SrcTy->getScalarSizeInBits() == 8) {
        IntrinsicNum = Intrinsic::bswap;
      } else if (SrcTy->getScalarSizeInBits() == 1) {
        IntrinsicNum = Intrinsic::bitreverse;
      }
      if (IntrinsicNum != 0) {
        assert(ShufOp0->getType() == SrcTy && "Unexpected shuffle mask");
        assert(match(ShufOp1, m_Undef()) && "Unexpected shuffle op");
        Function *BswapOrBitreverse =
            Intrinsic::getDeclaration(CI.getModule(), IntrinsicNum, DestTy);
        Value *ScalarX = Builder.CreateBitCast(ShufOp0, DestTy);
        return CallInst::Create(BswapOrBitreverse, {ScalarX});
      }
    }
  }

  // Handle the A->B->A cast when there is an intervening PHI node.
  if (PHINode *PN = dyn_cast<PHINode>(Src))
    if (Instruction *I = optimizeBitCastFromPhi(CI, PN))
      return I;

  if (Instruction *I = canonicalizeBitCastExtElt(CI, *this))
    return I;

  if (Instruction *I = foldBitCastBitwiseLogic(CI, Builder))
    return I;

  if (Instruction *I = foldBitCastSelect(CI, Builder))
    return I;

  if (SrcTy->isPointerTy())
    return commonPointerCastTransforms(CI);
  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitAddrSpaceCast(AddrSpaceCastInst &CI) {
  // If the destination pointer element type is not the same as the source's,
  // first do a bitcast to the destination element type, and then the
  // addrspacecast. This allows the cast to be exposed to other transforms.
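  //
  // For example (an illustrative sketch with typed pointers):
  //   %r = addrspacecast i8 addrspace(1)* %p to i32*
  // becomes
  //   %b = bitcast i8 addrspace(1)* %p to i32 addrspace(1)*
  //   %r = addrspacecast i32 addrspace(1)* %b to i32*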
  Value *Src = CI.getOperand(0);
  PointerType *SrcTy = cast<PointerType>(Src->getType()->getScalarType());
  PointerType *DestTy = cast<PointerType>(CI.getType()->getScalarType());

  if (!SrcTy->hasSameElementTypeAs(DestTy)) {
    Type *MidTy =
        PointerType::getWithSamePointeeType(DestTy, SrcTy->getAddressSpace());
    // Handle vectors of pointers.
    if (VectorType *VT = dyn_cast<VectorType>(CI.getType()))
      MidTy = VectorType::get(MidTy, VT->getElementCount());

    Value *NewBitCast = Builder.CreateBitCast(Src, MidTy);
    return new AddrSpaceCastInst(NewBitCast, CI.getType());
  }

  return commonPointerCastTransforms(CI);
}