1 //===- InstCombineCasts.cpp -----------------------------------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file implements the visit functions for cast operations. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "InstCombineInternal.h" 15 #include "llvm/ADT/SetVector.h" 16 #include "llvm/Analysis/ConstantFolding.h" 17 #include "llvm/Analysis/TargetLibraryInfo.h" 18 #include "llvm/IR/DataLayout.h" 19 #include "llvm/IR/PatternMatch.h" 20 #include "llvm/Support/KnownBits.h" 21 using namespace llvm; 22 using namespace PatternMatch; 23 24 #define DEBUG_TYPE "instcombine" 25 26 /// Analyze 'Val', seeing if it is a simple linear expression. 27 /// If so, decompose it, returning some value X, such that Val is 28 /// X*Scale+Offset. 29 /// 30 static Value *decomposeSimpleLinearExpr(Value *Val, unsigned &Scale, 31 uint64_t &Offset) { 32 if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) { 33 Offset = CI->getZExtValue(); 34 Scale = 0; 35 return ConstantInt::get(Val->getType(), 0); 36 } 37 38 if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) { 39 // Cannot look past anything that might overflow. 40 OverflowingBinaryOperator *OBI = dyn_cast<OverflowingBinaryOperator>(Val); 41 if (OBI && !OBI->hasNoUnsignedWrap() && !OBI->hasNoSignedWrap()) { 42 Scale = 1; 43 Offset = 0; 44 return Val; 45 } 46 47 if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) { 48 if (I->getOpcode() == Instruction::Shl) { 49 // This is a value scaled by '1 << the shift amt'. 50 Scale = UINT64_C(1) << RHS->getZExtValue(); 51 Offset = 0; 52 return I->getOperand(0); 53 } 54 55 if (I->getOpcode() == Instruction::Mul) { 56 // This value is scaled by 'RHS'. 57 Scale = RHS->getZExtValue(); 58 Offset = 0; 59 return I->getOperand(0); 60 } 61 62 if (I->getOpcode() == Instruction::Add) { 63 // We have X+C. Check to see if we really have (X*C2)+C1, 64 // where C1 is divisible by C2. 65 unsigned SubScale; 66 Value *SubVal = 67 decomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset); 68 Offset += RHS->getZExtValue(); 69 Scale = SubScale; 70 return SubVal; 71 } 72 } 73 } 74 75 // Otherwise, we can't look past this. 76 Scale = 1; 77 Offset = 0; 78 return Val; 79 } 80 81 /// If we find a cast of an allocation instruction, try to eliminate the cast by 82 /// moving the type information into the alloc. 83 Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI, 84 AllocaInst &AI) { 85 PointerType *PTy = cast<PointerType>(CI.getType()); 86 87 BuilderTy AllocaBuilder(Builder); 88 AllocaBuilder.SetInsertPoint(&AI); 89 90 // Get the type really allocated and the type casted to. 91 Type *AllocElTy = AI.getAllocatedType(); 92 Type *CastElTy = PTy->getElementType(); 93 if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr; 94 95 unsigned AllocElTyAlign = DL.getABITypeAlignment(AllocElTy); 96 unsigned CastElTyAlign = DL.getABITypeAlignment(CastElTy); 97 if (CastElTyAlign < AllocElTyAlign) return nullptr; 98 99 // If the allocation has multiple uses, only promote it if we are strictly 100 // increasing the alignment of the resultant allocation. If we keep it the 101 // same, we open the door to infinite loops of various kinds. 
102 if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr; 103 104 uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy); 105 uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy); 106 if (CastElTySize == 0 || AllocElTySize == 0) return nullptr; 107 108 // If the allocation has multiple uses, only promote it if we're not 109 // shrinking the amount of memory being allocated. 110 uint64_t AllocElTyStoreSize = DL.getTypeStoreSize(AllocElTy); 111 uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy); 112 if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr; 113 114 // See if we can satisfy the modulus by pulling a scale out of the array 115 // size argument. 116 unsigned ArraySizeScale; 117 uint64_t ArrayOffset; 118 Value *NumElements = // See if the array size is a decomposable linear expr. 119 decomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset); 120 121 // If we can now satisfy the modulus, by using a non-1 scale, we really can 122 // do the xform. 123 if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 || 124 (AllocElTySize*ArrayOffset ) % CastElTySize != 0) return nullptr; 125 126 unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize; 127 Value *Amt = nullptr; 128 if (Scale == 1) { 129 Amt = NumElements; 130 } else { 131 Amt = ConstantInt::get(AI.getArraySize()->getType(), Scale); 132 // Insert before the alloca, not before the cast. 133 Amt = AllocaBuilder.CreateMul(Amt, NumElements); 134 } 135 136 if (uint64_t Offset = (AllocElTySize*ArrayOffset)/CastElTySize) { 137 Value *Off = ConstantInt::get(AI.getArraySize()->getType(), 138 Offset, true); 139 Amt = AllocaBuilder.CreateAdd(Amt, Off); 140 } 141 142 AllocaInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt); 143 New->setAlignment(AI.getAlignment()); 144 New->takeName(&AI); 145 New->setUsedWithInAlloca(AI.isUsedWithInAlloca()); 146 147 // If the allocation has multiple real uses, insert a cast and change all 148 // things that used it to use the new cast. This will also hack on CI, but it 149 // will die soon. 150 if (!AI.hasOneUse()) { 151 // New is the allocation instruction, pointer typed. AI is the original 152 // allocation instruction, also pointer typed. Thus, cast to use is BitCast. 153 Value *NewCast = AllocaBuilder.CreateBitCast(New, AI.getType(), "tmpcast"); 154 replaceInstUsesWith(AI, NewCast); 155 } 156 return replaceInstUsesWith(CI, New); 157 } 158 159 /// Given an expression that CanEvaluateTruncated or CanEvaluateSExtd returns 160 /// true for, actually insert the code to evaluate the expression. 161 Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty, 162 bool isSigned) { 163 if (Constant *C = dyn_cast<Constant>(V)) { 164 C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/); 165 // If we got a constantexpr back, try to simplify it with DL info. 166 if (Constant *FoldedC = ConstantFoldConstant(C, DL, &TLI)) 167 C = FoldedC; 168 return C; 169 } 170 171 // Otherwise, it must be an instruction. 
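  // (Illustrative sketch, not taken from a test case: evaluating
  //    add i32 (zext i8 %a to i32), 42
  //  in i8 rebuilds both operands in i8 and recreates the add there as
  //    add i8 %a, 42
  //  assuming a caller such as visitTrunc has already proved this is safe.)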
172 Instruction *I = cast<Instruction>(V); 173 Instruction *Res = nullptr; 174 unsigned Opc = I->getOpcode(); 175 switch (Opc) { 176 case Instruction::Add: 177 case Instruction::Sub: 178 case Instruction::Mul: 179 case Instruction::And: 180 case Instruction::Or: 181 case Instruction::Xor: 182 case Instruction::AShr: 183 case Instruction::LShr: 184 case Instruction::Shl: 185 case Instruction::UDiv: 186 case Instruction::URem: { 187 Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned); 188 Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned); 189 Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS); 190 break; 191 } 192 case Instruction::Trunc: 193 case Instruction::ZExt: 194 case Instruction::SExt: 195 // If the source type of the cast is the type we're trying for then we can 196 // just return the source. There's no need to insert it because it is not 197 // new. 198 if (I->getOperand(0)->getType() == Ty) 199 return I->getOperand(0); 200 201 // Otherwise, must be the same type of cast, so just reinsert a new one. 202 // This also handles the case of zext(trunc(x)) -> zext(x). 203 Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty, 204 Opc == Instruction::SExt); 205 break; 206 case Instruction::Select: { 207 Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned); 208 Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned); 209 Res = SelectInst::Create(I->getOperand(0), True, False); 210 break; 211 } 212 case Instruction::PHI: { 213 PHINode *OPN = cast<PHINode>(I); 214 PHINode *NPN = PHINode::Create(Ty, OPN->getNumIncomingValues()); 215 for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) { 216 Value *V = 217 EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned); 218 NPN->addIncoming(V, OPN->getIncomingBlock(i)); 219 } 220 Res = NPN; 221 break; 222 } 223 default: 224 // TODO: Can handle more cases here. 225 llvm_unreachable("Unreachable!"); 226 } 227 228 Res->takeName(I); 229 return InsertNewInstWith(Res, *I); 230 } 231 232 Instruction::CastOps InstCombiner::isEliminableCastPair(const CastInst *CI1, 233 const CastInst *CI2) { 234 Type *SrcTy = CI1->getSrcTy(); 235 Type *MidTy = CI1->getDestTy(); 236 Type *DstTy = CI2->getDestTy(); 237 238 Instruction::CastOps firstOp = CI1->getOpcode(); 239 Instruction::CastOps secondOp = CI2->getOpcode(); 240 Type *SrcIntPtrTy = 241 SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr; 242 Type *MidIntPtrTy = 243 MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr; 244 Type *DstIntPtrTy = 245 DstTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(DstTy) : nullptr; 246 unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy, 247 DstTy, SrcIntPtrTy, MidIntPtrTy, 248 DstIntPtrTy); 249 250 // We don't want to form an inttoptr or ptrtoint that converts to an integer 251 // type that differs from the pointer size. 252 if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) || 253 (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy)) 254 Res = 0; 255 256 return Instruction::CastOps(Res); 257 } 258 259 /// @brief Implement the transforms common to all CastInst visitors. 260 Instruction *InstCombiner::commonCastTransforms(CastInst &CI) { 261 Value *Src = CI.getOperand(0); 262 263 // Try to eliminate a cast of a cast. 
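  // For example (illustrative only):
  //   trunc i32 (zext i8 %x to i32) to i16 --> zext i8 %x to i16
  // when isEliminableCastPair says the A->B->C pair collapses to one cast.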
264 if (auto *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast 265 if (Instruction::CastOps NewOpc = isEliminableCastPair(CSrc, &CI)) { 266 // The first cast (CSrc) is eliminable so we need to fix up or replace 267 // the second cast (CI). CSrc will then have a good chance of being dead. 268 return CastInst::Create(NewOpc, CSrc->getOperand(0), CI.getType()); 269 } 270 } 271 272 // If we are casting a select, then fold the cast into the select. 273 if (auto *SI = dyn_cast<SelectInst>(Src)) 274 if (Instruction *NV = FoldOpIntoSelect(CI, SI)) 275 return NV; 276 277 // If we are casting a PHI, then fold the cast into the PHI. 278 if (auto *PN = dyn_cast<PHINode>(Src)) { 279 // Don't do this if it would create a PHI node with an illegal type from a 280 // legal type. 281 if (!Src->getType()->isIntegerTy() || !CI.getType()->isIntegerTy() || 282 shouldChangeType(CI.getType(), Src->getType())) 283 if (Instruction *NV = foldOpIntoPhi(CI, PN)) 284 return NV; 285 } 286 287 return nullptr; 288 } 289 290 /// Return true if we can evaluate the specified expression tree as type Ty 291 /// instead of its larger type, and arrive with the same value. 292 /// This is used by code that tries to eliminate truncates. 293 /// 294 /// Ty will always be a type smaller than V. We should return true if trunc(V) 295 /// can be computed by computing V in the smaller type. If V is an instruction, 296 /// then trunc(inst(x,y)) can be computed as inst(trunc(x),trunc(y)), which only 297 /// makes sense if x and y can be efficiently truncated. 298 /// 299 /// This function works on both vectors and scalars. 300 /// 301 static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombiner &IC, 302 Instruction *CxtI) { 303 // We can always evaluate constants in another type. 304 if (isa<Constant>(V)) 305 return true; 306 307 Instruction *I = dyn_cast<Instruction>(V); 308 if (!I) return false; 309 310 Type *OrigTy = V->getType(); 311 312 // If this is an extension from the dest type, we can eliminate it, even if it 313 // has multiple uses. 314 if ((isa<ZExtInst>(I) || isa<SExtInst>(I)) && 315 I->getOperand(0)->getType() == Ty) 316 return true; 317 318 // We can't extend or shrink something that has multiple uses: doing so would 319 // require duplicating the instruction in general, which isn't profitable. 320 if (!I->hasOneUse()) return false; 321 322 unsigned Opc = I->getOpcode(); 323 switch (Opc) { 324 case Instruction::Add: 325 case Instruction::Sub: 326 case Instruction::Mul: 327 case Instruction::And: 328 case Instruction::Or: 329 case Instruction::Xor: 330 // These operators can all arbitrarily be extended or truncated. 331 return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) && 332 canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI); 333 334 case Instruction::UDiv: 335 case Instruction::URem: { 336 // UDiv and URem can be truncated if all the truncated bits are zero. 
337 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits(); 338 uint32_t BitWidth = Ty->getScalarSizeInBits(); 339 if (BitWidth < OrigBitWidth) { 340 APInt Mask = APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth); 341 if (IC.MaskedValueIsZero(I->getOperand(0), Mask, 0, CxtI) && 342 IC.MaskedValueIsZero(I->getOperand(1), Mask, 0, CxtI)) { 343 return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) && 344 canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI); 345 } 346 } 347 break; 348 } 349 case Instruction::Shl: 350 // If we are truncating the result of this SHL, and if it's a shift of a 351 // constant amount, we can always perform a SHL in a smaller type. 352 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) { 353 uint32_t BitWidth = Ty->getScalarSizeInBits(); 354 if (CI->getLimitedValue(BitWidth) < BitWidth) 355 return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI); 356 } 357 break; 358 case Instruction::LShr: 359 // If this is a truncate of a logical shr, we can truncate it to a smaller 360 // lshr iff we know that the bits we would otherwise be shifting in are 361 // already zeros. 362 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) { 363 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits(); 364 uint32_t BitWidth = Ty->getScalarSizeInBits(); 365 if (IC.MaskedValueIsZero(I->getOperand(0), 366 APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth), 0, CxtI) && 367 CI->getLimitedValue(BitWidth) < BitWidth) { 368 return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI); 369 } 370 } 371 break; 372 case Instruction::Trunc: 373 // trunc(trunc(x)) -> trunc(x) 374 return true; 375 case Instruction::ZExt: 376 case Instruction::SExt: 377 // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest 378 // trunc(ext(x)) -> trunc(x) if the source type is larger than the new dest 379 return true; 380 case Instruction::Select: { 381 SelectInst *SI = cast<SelectInst>(I); 382 return canEvaluateTruncated(SI->getTrueValue(), Ty, IC, CxtI) && 383 canEvaluateTruncated(SI->getFalseValue(), Ty, IC, CxtI); 384 } 385 case Instruction::PHI: { 386 // We can change a phi if we can change all operands. Note that we never 387 // get into trouble with cyclic PHIs here because we only consider 388 // instructions with a single use. 389 PHINode *PN = cast<PHINode>(I); 390 for (Value *IncValue : PN->incoming_values()) 391 if (!canEvaluateTruncated(IncValue, Ty, IC, CxtI)) 392 return false; 393 return true; 394 } 395 default: 396 // TODO: Can handle more cases here. 397 break; 398 } 399 400 return false; 401 } 402 403 /// Given a vector that is bitcast to an integer, optionally logically 404 /// right-shifted, and truncated, convert it to an extractelement. 
405 /// Example (big endian): 406 /// trunc (lshr (bitcast <4 x i32> %X to i128), 32) to i32 407 /// ---> 408 /// extractelement <4 x i32> %X, 1 409 static Instruction *foldVecTruncToExtElt(TruncInst &Trunc, InstCombiner &IC) { 410 Value *TruncOp = Trunc.getOperand(0); 411 Type *DestType = Trunc.getType(); 412 if (!TruncOp->hasOneUse() || !isa<IntegerType>(DestType)) 413 return nullptr; 414 415 Value *VecInput = nullptr; 416 ConstantInt *ShiftVal = nullptr; 417 if (!match(TruncOp, m_CombineOr(m_BitCast(m_Value(VecInput)), 418 m_LShr(m_BitCast(m_Value(VecInput)), 419 m_ConstantInt(ShiftVal)))) || 420 !isa<VectorType>(VecInput->getType())) 421 return nullptr; 422 423 VectorType *VecType = cast<VectorType>(VecInput->getType()); 424 unsigned VecWidth = VecType->getPrimitiveSizeInBits(); 425 unsigned DestWidth = DestType->getPrimitiveSizeInBits(); 426 unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0; 427 428 if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0)) 429 return nullptr; 430 431 // If the element type of the vector doesn't match the result type, 432 // bitcast it to a vector type that we can extract from. 433 unsigned NumVecElts = VecWidth / DestWidth; 434 if (VecType->getElementType() != DestType) { 435 VecType = VectorType::get(DestType, NumVecElts); 436 VecInput = IC.Builder.CreateBitCast(VecInput, VecType, "bc"); 437 } 438 439 unsigned Elt = ShiftAmount / DestWidth; 440 if (IC.getDataLayout().isBigEndian()) 441 Elt = NumVecElts - 1 - Elt; 442 443 return ExtractElementInst::Create(VecInput, IC.Builder.getInt32(Elt)); 444 } 445 446 /// Try to narrow the width of bitwise logic instructions with constants. 447 Instruction *InstCombiner::shrinkBitwiseLogic(TruncInst &Trunc) { 448 Type *SrcTy = Trunc.getSrcTy(); 449 Type *DestTy = Trunc.getType(); 450 if (isa<IntegerType>(SrcTy) && !shouldChangeType(SrcTy, DestTy)) 451 return nullptr; 452 453 BinaryOperator *LogicOp; 454 Constant *C; 455 if (!match(Trunc.getOperand(0), m_OneUse(m_BinOp(LogicOp))) || 456 !LogicOp->isBitwiseLogicOp() || 457 !match(LogicOp->getOperand(1), m_Constant(C))) 458 return nullptr; 459 460 // trunc (logic X, C) --> logic (trunc X, C') 461 Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy); 462 Value *NarrowOp0 = Builder.CreateTrunc(LogicOp->getOperand(0), DestTy); 463 return BinaryOperator::Create(LogicOp->getOpcode(), NarrowOp0, NarrowC); 464 } 465 466 /// Try to narrow the width of a splat shuffle. This could be generalized to any 467 /// shuffle with a constant operand, but we limit the transform to avoid 468 /// creating a shuffle type that targets may not be able to lower effectively. 469 static Instruction *shrinkSplatShuffle(TruncInst &Trunc, 470 InstCombiner::BuilderTy &Builder) { 471 auto *Shuf = dyn_cast<ShuffleVectorInst>(Trunc.getOperand(0)); 472 if (Shuf && Shuf->hasOneUse() && isa<UndefValue>(Shuf->getOperand(1)) && 473 Shuf->getMask()->getSplatValue() && 474 Shuf->getType() == Shuf->getOperand(0)->getType()) { 475 // trunc (shuf X, Undef, SplatMask) --> shuf (trunc X), Undef, SplatMask 476 Constant *NarrowUndef = UndefValue::get(Trunc.getType()); 477 Value *NarrowOp = Builder.CreateTrunc(Shuf->getOperand(0), Trunc.getType()); 478 return new ShuffleVectorInst(NarrowOp, NarrowUndef, Shuf->getMask()); 479 } 480 481 return nullptr; 482 } 483 484 /// Try to narrow the width of an insert element. 
This could be generalized for
/// any vector constant, but we limit the transform to insertion into undef to
/// avoid potential backend problems from unsupported insertion widths. This
/// could also be extended to handle the case of inserting a scalar constant
/// into a vector variable.
static Instruction *shrinkInsertElt(CastInst &Trunc,
                                    InstCombiner::BuilderTy &Builder) {
  Instruction::CastOps Opcode = Trunc.getOpcode();
  assert((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
         "Unexpected instruction for shrinking");

  auto *InsElt = dyn_cast<InsertElementInst>(Trunc.getOperand(0));
  if (!InsElt || !InsElt->hasOneUse())
    return nullptr;

  Type *DestTy = Trunc.getType();
  Type *DestScalarTy = DestTy->getScalarType();
  Value *VecOp = InsElt->getOperand(0);
  Value *ScalarOp = InsElt->getOperand(1);
  Value *Index = InsElt->getOperand(2);

  if (isa<UndefValue>(VecOp)) {
    // trunc   (inselt undef, X, Index) --> inselt undef,   (trunc X), Index
    // fptrunc (inselt undef, X, Index) --> inselt undef, (fptrunc X), Index
    UndefValue *NarrowUndef = UndefValue::get(DestTy);
    Value *NarrowOp = Builder.CreateCast(Opcode, ScalarOp, DestScalarTy);
    return InsertElementInst::Create(NarrowUndef, NarrowOp, Index);
  }

  return nullptr;
}

Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
  if (Instruction *Result = commonCastTransforms(CI))
    return Result;

  // Test if the trunc is the user of a select which is part of a
  // minimum or maximum operation. If so, don't do any more simplification.
  // Even simplifying demanded bits can break the canonical form of a
  // min/max.
  Value *LHS, *RHS;
  if (SelectInst *SI = dyn_cast<SelectInst>(CI.getOperand(0)))
    if (matchSelectPattern(SI, LHS, RHS).Flavor != SPF_UNKNOWN)
      return nullptr;

  // See if we can simplify any instructions used by the input whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(CI))
    return &CI;

  Value *Src = CI.getOperand(0);
  Type *DestTy = CI.getType(), *SrcTy = Src->getType();

  // Attempt to truncate the entire input expression tree to the destination
  // type. Only do this if the dest type is a simple type, don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
  if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
      canEvaluateTruncated(Src, DestTy, *this, &CI)) {

    // If this cast is a truncate, evaluating in a different type always
    // eliminates the cast, so it is always a win.
    DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                    " to avoid cast: " << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);
    return replaceInstUsesWith(CI, Res);
  }

  // Canonicalize trunc x to i1 -> (icmp ne (and x, 1), 0), likewise for vector.
  if (DestTy->getScalarSizeInBits() == 1) {
    Constant *One = ConstantInt::get(SrcTy, 1);
    Src = Builder.CreateAnd(Src, One);
    Value *Zero = Constant::getNullValue(Src->getType());
    return new ICmpInst(ICmpInst::ICMP_NE, Src, Zero);
  }

  // FIXME: Maybe combine the next two transforms to handle the no cast case
  // more efficiently. Support vector types. Cleanup code by using m_OneUse.

  // Transform trunc(lshr (zext A), Cst) to eliminate one type conversion.
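  // For example (illustrative): trunc (lshr (zext i8 %a to i32), 2) to i16
  // becomes zext (lshr i8 %a, 2) to i16: the shift is done in i8 and the
  // result is then extended to the destination type.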
565 Value *A = nullptr; ConstantInt *Cst = nullptr; 566 if (Src->hasOneUse() && 567 match(Src, m_LShr(m_ZExt(m_Value(A)), m_ConstantInt(Cst)))) { 568 // We have three types to worry about here, the type of A, the source of 569 // the truncate (MidSize), and the destination of the truncate. We know that 570 // ASize < MidSize and MidSize > ResultSize, but don't know the relation 571 // between ASize and ResultSize. 572 unsigned ASize = A->getType()->getPrimitiveSizeInBits(); 573 574 // If the shift amount is larger than the size of A, then the result is 575 // known to be zero because all the input bits got shifted out. 576 if (Cst->getZExtValue() >= ASize) 577 return replaceInstUsesWith(CI, Constant::getNullValue(DestTy)); 578 579 // Since we're doing an lshr and a zero extend, and know that the shift 580 // amount is smaller than ASize, it is always safe to do the shift in A's 581 // type, then zero extend or truncate to the result. 582 Value *Shift = Builder.CreateLShr(A, Cst->getZExtValue()); 583 Shift->takeName(Src); 584 return CastInst::CreateIntegerCast(Shift, DestTy, false); 585 } 586 587 // FIXME: We should canonicalize to zext/trunc and remove this transform. 588 // Transform trunc(lshr (sext A), Cst) to ashr A, Cst to eliminate type 589 // conversion. 590 // It works because bits coming from sign extension have the same value as 591 // the sign bit of the original value; performing ashr instead of lshr 592 // generates bits of the same value as the sign bit. 593 if (Src->hasOneUse() && 594 match(Src, m_LShr(m_SExt(m_Value(A)), m_ConstantInt(Cst)))) { 595 Value *SExt = cast<Instruction>(Src)->getOperand(0); 596 const unsigned SExtSize = SExt->getType()->getPrimitiveSizeInBits(); 597 const unsigned ASize = A->getType()->getPrimitiveSizeInBits(); 598 const unsigned CISize = CI.getType()->getPrimitiveSizeInBits(); 599 const unsigned MaxAmt = SExtSize - std::max(CISize, ASize); 600 unsigned ShiftAmt = Cst->getZExtValue(); 601 602 // This optimization can be only performed when zero bits generated by 603 // the original lshr aren't pulled into the value after truncation, so we 604 // can only shift by values no larger than the number of extension bits. 605 // FIXME: Instead of bailing when the shift is too large, use and to clear 606 // the extra bits. 607 if (ShiftAmt <= MaxAmt) { 608 if (CISize == ASize) 609 return BinaryOperator::CreateAShr(A, ConstantInt::get(CI.getType(), 610 std::min(ShiftAmt, ASize - 1))); 611 if (SExt->hasOneUse()) { 612 Value *Shift = Builder.CreateAShr(A, std::min(ShiftAmt, ASize - 1)); 613 Shift->takeName(Src); 614 return CastInst::CreateIntegerCast(Shift, CI.getType(), true); 615 } 616 } 617 } 618 619 if (Instruction *I = shrinkBitwiseLogic(CI)) 620 return I; 621 622 if (Instruction *I = shrinkSplatShuffle(CI, Builder)) 623 return I; 624 625 if (Instruction *I = shrinkInsertElt(CI, Builder)) 626 return I; 627 628 if (Src->hasOneUse() && isa<IntegerType>(SrcTy) && 629 shouldChangeType(SrcTy, DestTy)) { 630 // Transform "trunc (shl X, cst)" -> "shl (trunc X), cst" so long as the 631 // dest type is native and cst < dest size. 632 if (match(Src, m_Shl(m_Value(A), m_ConstantInt(Cst))) && 633 !match(A, m_Shr(m_Value(), m_Constant()))) { 634 // Skip shifts of shift by constants. It undoes a combine in 635 // FoldShiftByConstant and is the extend in reg pattern. 
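      // For example (illustrative): trunc (shl i64 %X, 5) to i32 becomes
      // shl (trunc i64 %X to i32), 5 below, as long as the shift amount is
      // smaller than the destination width.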
636 const unsigned DestSize = DestTy->getScalarSizeInBits(); 637 if (Cst->getValue().ult(DestSize)) { 638 Value *NewTrunc = Builder.CreateTrunc(A, DestTy, A->getName() + ".tr"); 639 640 return BinaryOperator::Create( 641 Instruction::Shl, NewTrunc, 642 ConstantInt::get(DestTy, Cst->getValue().trunc(DestSize))); 643 } 644 } 645 } 646 647 if (Instruction *I = foldVecTruncToExtElt(CI, *this)) 648 return I; 649 650 return nullptr; 651 } 652 653 Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, ZExtInst &CI, 654 bool DoTransform) { 655 // If we are just checking for a icmp eq of a single bit and zext'ing it 656 // to an integer, then shift the bit to the appropriate place and then 657 // cast to integer to avoid the comparison. 658 if (ConstantInt *Op1C = dyn_cast<ConstantInt>(ICI->getOperand(1))) { 659 const APInt &Op1CV = Op1C->getValue(); 660 661 // zext (x <s 0) to i32 --> x>>u31 true if signbit set. 662 // zext (x >s -1) to i32 --> (x>>u31)^1 true if signbit clear. 663 if ((ICI->getPredicate() == ICmpInst::ICMP_SLT && Op1CV.isNullValue()) || 664 (ICI->getPredicate() == ICmpInst::ICMP_SGT && Op1CV.isAllOnesValue())) { 665 if (!DoTransform) return ICI; 666 667 Value *In = ICI->getOperand(0); 668 Value *Sh = ConstantInt::get(In->getType(), 669 In->getType()->getScalarSizeInBits() - 1); 670 In = Builder.CreateLShr(In, Sh, In->getName() + ".lobit"); 671 if (In->getType() != CI.getType()) 672 In = Builder.CreateIntCast(In, CI.getType(), false /*ZExt*/); 673 674 if (ICI->getPredicate() == ICmpInst::ICMP_SGT) { 675 Constant *One = ConstantInt::get(In->getType(), 1); 676 In = Builder.CreateXor(In, One, In->getName() + ".not"); 677 } 678 679 return replaceInstUsesWith(CI, In); 680 } 681 682 // zext (X == 0) to i32 --> X^1 iff X has only the low bit set. 683 // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set. 684 // zext (X == 1) to i32 --> X iff X has only the low bit set. 685 // zext (X == 2) to i32 --> X>>1 iff X has only the 2nd bit set. 686 // zext (X != 0) to i32 --> X iff X has only the low bit set. 687 // zext (X != 0) to i32 --> X>>1 iff X has only the 2nd bit set. 688 // zext (X != 1) to i32 --> X^1 iff X has only the low bit set. 689 // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set. 690 if ((Op1CV.isNullValue() || Op1CV.isPowerOf2()) && 691 // This only works for EQ and NE 692 ICI->isEquality()) { 693 // If Op1C some other power of two, convert: 694 KnownBits Known = computeKnownBits(ICI->getOperand(0), 0, &CI); 695 696 APInt KnownZeroMask(~Known.Zero); 697 if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1? 698 if (!DoTransform) return ICI; 699 700 bool isNE = ICI->getPredicate() == ICmpInst::ICMP_NE; 701 if (!Op1CV.isNullValue() && (Op1CV != KnownZeroMask)) { 702 // (X&4) == 2 --> false 703 // (X&4) != 2 --> true 704 Constant *Res = ConstantInt::get(Type::getInt1Ty(CI.getContext()), 705 isNE); 706 Res = ConstantExpr::getZExt(Res, CI.getType()); 707 return replaceInstUsesWith(CI, Res); 708 } 709 710 uint32_t ShAmt = KnownZeroMask.logBase2(); 711 Value *In = ICI->getOperand(0); 712 if (ShAmt) { 713 // Perform a logical shr by shiftamt. 714 // Insert the shift to put the result in the low bit. 715 In = Builder.CreateLShr(In, ConstantInt::get(In->getType(), ShAmt), 716 In->getName() + ".lobit"); 717 } 718 719 if (!Op1CV.isNullValue() == isNE) { // Toggle the low bit. 
720 Constant *One = ConstantInt::get(In->getType(), 1); 721 In = Builder.CreateXor(In, One); 722 } 723 724 if (CI.getType() == In->getType()) 725 return replaceInstUsesWith(CI, In); 726 727 Value *IntCast = Builder.CreateIntCast(In, CI.getType(), false); 728 return replaceInstUsesWith(CI, IntCast); 729 } 730 } 731 } 732 733 // icmp ne A, B is equal to xor A, B when A and B only really have one bit. 734 // It is also profitable to transform icmp eq into not(xor(A, B)) because that 735 // may lead to additional simplifications. 736 if (ICI->isEquality() && CI.getType() == ICI->getOperand(0)->getType()) { 737 if (IntegerType *ITy = dyn_cast<IntegerType>(CI.getType())) { 738 Value *LHS = ICI->getOperand(0); 739 Value *RHS = ICI->getOperand(1); 740 741 KnownBits KnownLHS = computeKnownBits(LHS, 0, &CI); 742 KnownBits KnownRHS = computeKnownBits(RHS, 0, &CI); 743 744 if (KnownLHS.Zero == KnownRHS.Zero && KnownLHS.One == KnownRHS.One) { 745 APInt KnownBits = KnownLHS.Zero | KnownLHS.One; 746 APInt UnknownBit = ~KnownBits; 747 if (UnknownBit.countPopulation() == 1) { 748 if (!DoTransform) return ICI; 749 750 Value *Result = Builder.CreateXor(LHS, RHS); 751 752 // Mask off any bits that are set and won't be shifted away. 753 if (KnownLHS.One.uge(UnknownBit)) 754 Result = Builder.CreateAnd(Result, 755 ConstantInt::get(ITy, UnknownBit)); 756 757 // Shift the bit we're testing down to the lsb. 758 Result = Builder.CreateLShr( 759 Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros())); 760 761 if (ICI->getPredicate() == ICmpInst::ICMP_EQ) 762 Result = Builder.CreateXor(Result, ConstantInt::get(ITy, 1)); 763 Result->takeName(ICI); 764 return replaceInstUsesWith(CI, Result); 765 } 766 } 767 } 768 } 769 770 return nullptr; 771 } 772 773 /// Determine if the specified value can be computed in the specified wider type 774 /// and produce the same low bits. If not, return false. 775 /// 776 /// If this function returns true, it can also return a non-zero number of bits 777 /// (in BitsToClear) which indicates that the value it computes is correct for 778 /// the zero extend, but that the additional BitsToClear bits need to be zero'd 779 /// out. For example, to promote something like: 780 /// 781 /// %B = trunc i64 %A to i32 782 /// %C = lshr i32 %B, 8 783 /// %E = zext i32 %C to i64 784 /// 785 /// CanEvaluateZExtd for the 'lshr' will return true, and BitsToClear will be 786 /// set to 8 to indicate that the promoted value needs to have bits 24-31 787 /// cleared in addition to bits 32-63. Since an 'and' will be generated to 788 /// clear the top bits anyway, doing this has no extra cost. 789 /// 790 /// This function works on both vectors and scalars. 791 static bool canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear, 792 InstCombiner &IC, Instruction *CxtI) { 793 BitsToClear = 0; 794 if (isa<Constant>(V)) 795 return true; 796 797 Instruction *I = dyn_cast<Instruction>(V); 798 if (!I) return false; 799 800 // If the input is a truncate from the destination type, we can trivially 801 // eliminate it. 802 if (isa<TruncInst>(I) && I->getOperand(0)->getType() == Ty) 803 return true; 804 805 // We can't extend or shrink something that has multiple uses: doing so would 806 // require duplicating the instruction in general, which isn't profitable. 807 if (!I->hasOneUse()) return false; 808 809 unsigned Opc = I->getOpcode(), Tmp; 810 switch (Opc) { 811 case Instruction::ZExt: // zext(zext(x)) -> zext(x). 812 case Instruction::SExt: // zext(sext(x)) -> sext(x). 
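  // (Illustrative: for zext i16 (sext i8 %x to i16) to i32, the sext can be
  //  rebuilt directly to i32; visitZExt later masks off the copied sign bits
  //  if they are not already known to be zero.)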
813 case Instruction::Trunc: // zext(trunc(x)) -> trunc(x) or zext(x) 814 return true; 815 case Instruction::And: 816 case Instruction::Or: 817 case Instruction::Xor: 818 case Instruction::Add: 819 case Instruction::Sub: 820 case Instruction::Mul: 821 if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI) || 822 !canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI)) 823 return false; 824 // These can all be promoted if neither operand has 'bits to clear'. 825 if (BitsToClear == 0 && Tmp == 0) 826 return true; 827 828 // If the operation is an AND/OR/XOR and the bits to clear are zero in the 829 // other side, BitsToClear is ok. 830 if (Tmp == 0 && I->isBitwiseLogicOp()) { 831 // We use MaskedValueIsZero here for generality, but the case we care 832 // about the most is constant RHS. 833 unsigned VSize = V->getType()->getScalarSizeInBits(); 834 if (IC.MaskedValueIsZero(I->getOperand(1), 835 APInt::getHighBitsSet(VSize, BitsToClear), 836 0, CxtI)) 837 return true; 838 } 839 840 // Otherwise, we don't know how to analyze this BitsToClear case yet. 841 return false; 842 843 case Instruction::Shl: 844 // We can promote shl(x, cst) if we can promote x. Since shl overwrites the 845 // upper bits we can reduce BitsToClear by the shift amount. 846 if (ConstantInt *Amt = dyn_cast<ConstantInt>(I->getOperand(1))) { 847 if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI)) 848 return false; 849 uint64_t ShiftAmt = Amt->getZExtValue(); 850 BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0; 851 return true; 852 } 853 return false; 854 case Instruction::LShr: 855 // We can promote lshr(x, cst) if we can promote x. This requires the 856 // ultimate 'and' to clear out the high zero bits we're clearing out though. 857 if (ConstantInt *Amt = dyn_cast<ConstantInt>(I->getOperand(1))) { 858 if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI)) 859 return false; 860 BitsToClear += Amt->getZExtValue(); 861 if (BitsToClear > V->getType()->getScalarSizeInBits()) 862 BitsToClear = V->getType()->getScalarSizeInBits(); 863 return true; 864 } 865 // Cannot promote variable LSHR. 866 return false; 867 case Instruction::Select: 868 if (!canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI) || 869 !canEvaluateZExtd(I->getOperand(2), Ty, BitsToClear, IC, CxtI) || 870 // TODO: If important, we could handle the case when the BitsToClear are 871 // known zero in the disagreeing side. 872 Tmp != BitsToClear) 873 return false; 874 return true; 875 876 case Instruction::PHI: { 877 // We can change a phi if we can change all operands. Note that we never 878 // get into trouble with cyclic PHIs here because we only consider 879 // instructions with a single use. 880 PHINode *PN = cast<PHINode>(I); 881 if (!canEvaluateZExtd(PN->getIncomingValue(0), Ty, BitsToClear, IC, CxtI)) 882 return false; 883 for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) 884 if (!canEvaluateZExtd(PN->getIncomingValue(i), Ty, Tmp, IC, CxtI) || 885 // TODO: If important, we could handle the case when the BitsToClear 886 // are known zero in the disagreeing input. 887 Tmp != BitsToClear) 888 return false; 889 return true; 890 } 891 default: 892 // TODO: Can handle more cases here. 893 return false; 894 } 895 } 896 897 Instruction *InstCombiner::visitZExt(ZExtInst &CI) { 898 // If this zero extend is only used by a truncate, let the truncate be 899 // eliminated before we try to optimize this zext. 
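  // (Illustrative: for "%z = zext i8 %x to i32" whose only user is
  //  "trunc i32 %z to i16", visitTrunc will collapse the pair to a single
  //  "zext i8 %x to i16", so there is nothing useful to do here yet.)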
900 if (CI.hasOneUse() && isa<TruncInst>(CI.user_back())) 901 return nullptr; 902 903 // If one of the common conversion will work, do it. 904 if (Instruction *Result = commonCastTransforms(CI)) 905 return Result; 906 907 Value *Src = CI.getOperand(0); 908 Type *SrcTy = Src->getType(), *DestTy = CI.getType(); 909 910 // Attempt to extend the entire input expression tree to the destination 911 // type. Only do this if the dest type is a simple type, don't convert the 912 // expression tree to something weird like i93 unless the source is also 913 // strange. 914 unsigned BitsToClear; 915 if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) && 916 canEvaluateZExtd(Src, DestTy, BitsToClear, *this, &CI)) { 917 assert(BitsToClear <= SrcTy->getScalarSizeInBits() && 918 "Can't clear more bits than in SrcTy"); 919 920 // Okay, we can transform this! Insert the new expression now. 921 DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type" 922 " to avoid zero extend: " << CI << '\n'); 923 Value *Res = EvaluateInDifferentType(Src, DestTy, false); 924 assert(Res->getType() == DestTy); 925 926 uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits()-BitsToClear; 927 uint32_t DestBitSize = DestTy->getScalarSizeInBits(); 928 929 // If the high bits are already filled with zeros, just replace this 930 // cast with the result. 931 if (MaskedValueIsZero(Res, 932 APInt::getHighBitsSet(DestBitSize, 933 DestBitSize-SrcBitsKept), 934 0, &CI)) 935 return replaceInstUsesWith(CI, Res); 936 937 // We need to emit an AND to clear the high bits. 938 Constant *C = ConstantInt::get(Res->getType(), 939 APInt::getLowBitsSet(DestBitSize, SrcBitsKept)); 940 return BinaryOperator::CreateAnd(Res, C); 941 } 942 943 // If this is a TRUNC followed by a ZEXT then we are dealing with integral 944 // types and if the sizes are just right we can convert this into a logical 945 // 'and' which will be much cheaper than the pair of casts. 946 if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) { // A->B->C cast 947 // TODO: Subsume this into EvaluateInDifferentType. 948 949 // Get the sizes of the types involved. We know that the intermediate type 950 // will be smaller than A or C, but don't know the relation between A and C. 
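    // For example (illustrative): zext i16 (trunc i32 %a to i16) to i64
    // becomes zext (and i32 %a, 65535) to i64 via the SrcSize < DstSize case
    // below.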
951 Value *A = CSrc->getOperand(0); 952 unsigned SrcSize = A->getType()->getScalarSizeInBits(); 953 unsigned MidSize = CSrc->getType()->getScalarSizeInBits(); 954 unsigned DstSize = CI.getType()->getScalarSizeInBits(); 955 // If we're actually extending zero bits, then if 956 // SrcSize < DstSize: zext(a & mask) 957 // SrcSize == DstSize: a & mask 958 // SrcSize > DstSize: trunc(a) & mask 959 if (SrcSize < DstSize) { 960 APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize)); 961 Constant *AndConst = ConstantInt::get(A->getType(), AndValue); 962 Value *And = Builder.CreateAnd(A, AndConst, CSrc->getName() + ".mask"); 963 return new ZExtInst(And, CI.getType()); 964 } 965 966 if (SrcSize == DstSize) { 967 APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize)); 968 return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(), 969 AndValue)); 970 } 971 if (SrcSize > DstSize) { 972 Value *Trunc = Builder.CreateTrunc(A, CI.getType()); 973 APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize)); 974 return BinaryOperator::CreateAnd(Trunc, 975 ConstantInt::get(Trunc->getType(), 976 AndValue)); 977 } 978 } 979 980 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src)) 981 return transformZExtICmp(ICI, CI); 982 983 BinaryOperator *SrcI = dyn_cast<BinaryOperator>(Src); 984 if (SrcI && SrcI->getOpcode() == Instruction::Or) { 985 // zext (or icmp, icmp) -> or (zext icmp), (zext icmp) if at least one 986 // of the (zext icmp) can be eliminated. If so, immediately perform the 987 // according elimination. 988 ICmpInst *LHS = dyn_cast<ICmpInst>(SrcI->getOperand(0)); 989 ICmpInst *RHS = dyn_cast<ICmpInst>(SrcI->getOperand(1)); 990 if (LHS && RHS && LHS->hasOneUse() && RHS->hasOneUse() && 991 (transformZExtICmp(LHS, CI, false) || 992 transformZExtICmp(RHS, CI, false))) { 993 // zext (or icmp, icmp) -> or (zext icmp), (zext icmp) 994 Value *LCast = Builder.CreateZExt(LHS, CI.getType(), LHS->getName()); 995 Value *RCast = Builder.CreateZExt(RHS, CI.getType(), RHS->getName()); 996 BinaryOperator *Or = BinaryOperator::Create(Instruction::Or, LCast, RCast); 997 998 // Perform the elimination. 999 if (auto *LZExt = dyn_cast<ZExtInst>(LCast)) 1000 transformZExtICmp(LHS, *LZExt); 1001 if (auto *RZExt = dyn_cast<ZExtInst>(RCast)) 1002 transformZExtICmp(RHS, *RZExt); 1003 1004 return Or; 1005 } 1006 } 1007 1008 // zext(trunc(X) & C) -> (X & zext(C)). 1009 Constant *C; 1010 Value *X; 1011 if (SrcI && 1012 match(SrcI, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Constant(C)))) && 1013 X->getType() == CI.getType()) 1014 return BinaryOperator::CreateAnd(X, ConstantExpr::getZExt(C, CI.getType())); 1015 1016 // zext((trunc(X) & C) ^ C) -> ((X & zext(C)) ^ zext(C)). 1017 Value *And; 1018 if (SrcI && match(SrcI, m_OneUse(m_Xor(m_Value(And), m_Constant(C)))) && 1019 match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) && 1020 X->getType() == CI.getType()) { 1021 Constant *ZC = ConstantExpr::getZExt(C, CI.getType()); 1022 return BinaryOperator::CreateXor(Builder.CreateAnd(X, ZC), ZC); 1023 } 1024 1025 return nullptr; 1026 } 1027 1028 /// Transform (sext icmp) to bitwise / integer operations to eliminate the icmp. 1029 Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) { 1030 Value *Op0 = ICI->getOperand(0), *Op1 = ICI->getOperand(1); 1031 ICmpInst::Predicate Pred = ICI->getPredicate(); 1032 1033 // Don't bother if Op1 isn't of vector or integer type. 1034 if (!Op1->getType()->isIntOrIntVectorTy()) 1035 return nullptr; 1036 1037 if (Constant *Op1C = dyn_cast<Constant>(Op1)) { 1038 // (x <s 0) ? 
-1 : 0 -> ashr x, 31 -> all ones if negative
    // (x >s -1) ? -1 : 0 -> not (ashr x, 31) -> all ones if positive
    if ((Pred == ICmpInst::ICMP_SLT && Op1C->isNullValue()) ||
        (Pred == ICmpInst::ICMP_SGT && Op1C->isAllOnesValue())) {

      Value *Sh = ConstantInt::get(Op0->getType(),
                                   Op0->getType()->getScalarSizeInBits()-1);
      Value *In = Builder.CreateAShr(Op0, Sh, Op0->getName() + ".lobit");
      if (In->getType() != CI.getType())
        In = Builder.CreateIntCast(In, CI.getType(), true /*SExt*/);

      if (Pred == ICmpInst::ICMP_SGT)
        In = Builder.CreateNot(In, In->getName() + ".not");
      return replaceInstUsesWith(CI, In);
    }
  }

  if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
    // If we know that only one bit of the LHS of the icmp can be set and we
    // have an equality comparison with zero or a power of 2, we can transform
    // the icmp and sext into bitwise/integer operations.
    if (ICI->hasOneUse() &&
        ICI->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){
      KnownBits Known = computeKnownBits(Op0, 0, &CI);

      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2()) {
        Value *In = ICI->getOperand(0);

        // If the icmp tests for a known zero bit we can constant fold it.
        if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) {
          Value *V = Pred == ICmpInst::ICMP_NE ?
                       ConstantInt::getAllOnesValue(CI.getType()) :
                       ConstantInt::getNullValue(CI.getType());
          return replaceInstUsesWith(CI, V);
        }

        if (!Op1C->isZero() == (Pred == ICmpInst::ICMP_NE)) {
          // sext ((x & 2^n) == 0)   -> (x >> n) - 1
          // sext ((x & 2^n) != 2^n) -> (x >> n) - 1
          unsigned ShiftAmt = KnownZeroMask.countTrailingZeros();
          // Perform a right shift to place the desired bit in the LSB.
          if (ShiftAmt)
            In = Builder.CreateLShr(In,
                                    ConstantInt::get(In->getType(), ShiftAmt));

          // At this point "In" is either 1 or 0. Subtract 1 to turn
          // {1, 0} -> {0, -1}.
          In = Builder.CreateAdd(In,
                                 ConstantInt::getAllOnesValue(In->getType()),
                                 "sext");
        } else {
          // sext ((x & 2^n) != 0)   -> (x << bitwidth-n) a>> bitwidth-1
          // sext ((x & 2^n) == 2^n) -> (x << bitwidth-n) a>> bitwidth-1
          unsigned ShiftAmt = KnownZeroMask.countLeadingZeros();
          // Perform a left shift to place the desired bit in the MSB.
          if (ShiftAmt)
            In = Builder.CreateShl(In,
                                   ConstantInt::get(In->getType(), ShiftAmt));

          // Distribute the bit over the whole bit width.
          In = Builder.CreateAShr(In, ConstantInt::get(In->getType(),
                                  KnownZeroMask.getBitWidth() - 1), "sext");
        }

        if (CI.getType() == In->getType())
          return replaceInstUsesWith(CI, In);
        return CastInst::CreateIntegerCast(In, CI.getType(), true/*SExt*/);
      }
    }
  }

  return nullptr;
}

/// Return true if we can take the specified value and return it as type Ty
/// without inserting any new casts and without changing the value of the
/// common low bits. This is used by code that tries to promote integer
/// operations to a wider type, which will allow us to eliminate the extension.
///
/// This function works on both vectors and scalars.
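///
/// For example (illustrative only), the operand of
///   %s = sext i16 %t to i32
/// where %t = add i16 (trunc i32 %x to i16), 7 can be evaluated directly in
/// i32 as 'add i32 %x, 7'; visitSExt then proves or restores the sign bits.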
1119 /// 1120 static bool canEvaluateSExtd(Value *V, Type *Ty) { 1121 assert(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() && 1122 "Can't sign extend type to a smaller type"); 1123 // If this is a constant, it can be trivially promoted. 1124 if (isa<Constant>(V)) 1125 return true; 1126 1127 Instruction *I = dyn_cast<Instruction>(V); 1128 if (!I) return false; 1129 1130 // If this is a truncate from the dest type, we can trivially eliminate it. 1131 if (isa<TruncInst>(I) && I->getOperand(0)->getType() == Ty) 1132 return true; 1133 1134 // We can't extend or shrink something that has multiple uses: doing so would 1135 // require duplicating the instruction in general, which isn't profitable. 1136 if (!I->hasOneUse()) return false; 1137 1138 switch (I->getOpcode()) { 1139 case Instruction::SExt: // sext(sext(x)) -> sext(x) 1140 case Instruction::ZExt: // sext(zext(x)) -> zext(x) 1141 case Instruction::Trunc: // sext(trunc(x)) -> trunc(x) or sext(x) 1142 return true; 1143 case Instruction::And: 1144 case Instruction::Or: 1145 case Instruction::Xor: 1146 case Instruction::Add: 1147 case Instruction::Sub: 1148 case Instruction::Mul: 1149 // These operators can all arbitrarily be extended if their inputs can. 1150 return canEvaluateSExtd(I->getOperand(0), Ty) && 1151 canEvaluateSExtd(I->getOperand(1), Ty); 1152 1153 //case Instruction::Shl: TODO 1154 //case Instruction::LShr: TODO 1155 1156 case Instruction::Select: 1157 return canEvaluateSExtd(I->getOperand(1), Ty) && 1158 canEvaluateSExtd(I->getOperand(2), Ty); 1159 1160 case Instruction::PHI: { 1161 // We can change a phi if we can change all operands. Note that we never 1162 // get into trouble with cyclic PHIs here because we only consider 1163 // instructions with a single use. 1164 PHINode *PN = cast<PHINode>(I); 1165 for (Value *IncValue : PN->incoming_values()) 1166 if (!canEvaluateSExtd(IncValue, Ty)) return false; 1167 return true; 1168 } 1169 default: 1170 // TODO: Can handle more cases here. 1171 break; 1172 } 1173 1174 return false; 1175 } 1176 1177 Instruction *InstCombiner::visitSExt(SExtInst &CI) { 1178 // If this sign extend is only used by a truncate, let the truncate be 1179 // eliminated before we try to optimize this sext. 1180 if (CI.hasOneUse() && isa<TruncInst>(CI.user_back())) 1181 return nullptr; 1182 1183 if (Instruction *I = commonCastTransforms(CI)) 1184 return I; 1185 1186 Value *Src = CI.getOperand(0); 1187 Type *SrcTy = Src->getType(), *DestTy = CI.getType(); 1188 1189 // If we know that the value being extended is positive, we can use a zext 1190 // instead. 1191 KnownBits Known = computeKnownBits(Src, 0, &CI); 1192 if (Known.isNonNegative()) { 1193 Value *ZExt = Builder.CreateZExt(Src, DestTy); 1194 return replaceInstUsesWith(CI, ZExt); 1195 } 1196 1197 // Attempt to extend the entire input expression tree to the destination 1198 // type. Only do this if the dest type is a simple type, don't convert the 1199 // expression tree to something weird like i93 unless the source is also 1200 // strange. 1201 if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) && 1202 canEvaluateSExtd(Src, DestTy)) { 1203 // Okay, we can transform this! Insert the new expression now. 
1204 DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type" 1205 " to avoid sign extend: " << CI << '\n'); 1206 Value *Res = EvaluateInDifferentType(Src, DestTy, true); 1207 assert(Res->getType() == DestTy); 1208 1209 uint32_t SrcBitSize = SrcTy->getScalarSizeInBits(); 1210 uint32_t DestBitSize = DestTy->getScalarSizeInBits(); 1211 1212 // If the high bits are already filled with sign bit, just replace this 1213 // cast with the result. 1214 if (ComputeNumSignBits(Res, 0, &CI) > DestBitSize - SrcBitSize) 1215 return replaceInstUsesWith(CI, Res); 1216 1217 // We need to emit a shl + ashr to do the sign extend. 1218 Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize); 1219 return BinaryOperator::CreateAShr(Builder.CreateShl(Res, ShAmt, "sext"), 1220 ShAmt); 1221 } 1222 1223 // If the input is a trunc from the destination type, then turn sext(trunc(x)) 1224 // into shifts. 1225 Value *X; 1226 if (match(Src, m_OneUse(m_Trunc(m_Value(X)))) && X->getType() == DestTy) { 1227 // sext(trunc(X)) --> ashr(shl(X, C), C) 1228 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 1229 unsigned DestBitSize = DestTy->getScalarSizeInBits(); 1230 Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize); 1231 return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShAmt), ShAmt); 1232 } 1233 1234 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src)) 1235 return transformSExtICmp(ICI, CI); 1236 1237 // If the input is a shl/ashr pair of a same constant, then this is a sign 1238 // extension from a smaller value. If we could trust arbitrary bitwidth 1239 // integers, we could turn this into a truncate to the smaller bit and then 1240 // use a sext for the whole extension. Since we don't, look deeper and check 1241 // for a truncate. If the source and dest are the same type, eliminate the 1242 // trunc and extend and just do shifts. For example, turn: 1243 // %a = trunc i32 %i to i8 1244 // %b = shl i8 %a, 6 1245 // %c = ashr i8 %b, 6 1246 // %d = sext i8 %c to i32 1247 // into: 1248 // %a = shl i32 %i, 30 1249 // %d = ashr i32 %a, 30 1250 Value *A = nullptr; 1251 // TODO: Eventually this could be subsumed by EvaluateInDifferentType. 1252 ConstantInt *BA = nullptr, *CA = nullptr; 1253 if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_ConstantInt(BA)), 1254 m_ConstantInt(CA))) && 1255 BA == CA && A->getType() == CI.getType()) { 1256 unsigned MidSize = Src->getType()->getScalarSizeInBits(); 1257 unsigned SrcDstSize = CI.getType()->getScalarSizeInBits(); 1258 unsigned ShAmt = CA->getZExtValue()+SrcDstSize-MidSize; 1259 Constant *ShAmtV = ConstantInt::get(CI.getType(), ShAmt); 1260 A = Builder.CreateShl(A, ShAmtV, CI.getName()); 1261 return BinaryOperator::CreateAShr(A, ShAmtV); 1262 } 1263 1264 return nullptr; 1265 } 1266 1267 1268 /// Return a Constant* for the specified floating-point constant if it fits 1269 /// in the specified FP type without changing its value. 1270 static Constant *fitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) { 1271 bool losesInfo; 1272 APFloat F = CFP->getValueAPF(); 1273 (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo); 1274 if (!losesInfo) 1275 return ConstantFP::get(CFP->getContext(), F); 1276 return nullptr; 1277 } 1278 1279 /// Look through floating-point extensions until we get the source value. 
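/// For example (illustrative): given fpext float (fpext half %h to float) to
/// double, this returns %h; a ConstantFP source is instead returned in the
/// smallest of half/float/double that represents it exactly.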
static Value *lookThroughFPExtensions(Value *V) {
  while (auto *FPExt = dyn_cast<FPExtInst>(V))
    V = FPExt->getOperand(0);

  // If this value is a constant, return the constant in the smallest FP type
  // that can accurately represent it. This allows us to turn
  // (float)((double)X+2.0) into X+2.0f.
  if (auto *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType() == Type::getPPC_FP128Ty(V->getContext()))
      return V; // No constant folding of this.
    // See if the value can be truncated to half and then reextended.
    if (Value *V = fitsInFPType(CFP, APFloat::IEEEhalf()))
      return V;
    // See if the value can be truncated to float and then reextended.
    if (Value *V = fitsInFPType(CFP, APFloat::IEEEsingle()))
      return V;
    if (CFP->getType()->isDoubleTy())
      return V; // Won't shrink.
    if (Value *V = fitsInFPType(CFP, APFloat::IEEEdouble()))
      return V;
    // Don't try to shrink to various long double types.
  }

  return V;
}

Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
  if (Instruction *I = commonCastTransforms(CI))
    return I;
  // If we have fptrunc(OpI (fpextend x), (fpextend y)), we would like to
  // simplify this expression to avoid one or more of the trunc/extend
  // operations if we can do so without changing the numerical results.
  //
  // The exact manner in which the widths of the operands interact to limit
  // what we can and cannot do safely varies from operation to operation, and
  // is explained below in the various case statements.
  BinaryOperator *OpI = dyn_cast<BinaryOperator>(CI.getOperand(0));
  if (OpI && OpI->hasOneUse()) {
    Value *LHSOrig = lookThroughFPExtensions(OpI->getOperand(0));
    Value *RHSOrig = lookThroughFPExtensions(OpI->getOperand(1));
    unsigned OpWidth = OpI->getType()->getFPMantissaWidth();
    unsigned LHSWidth = LHSOrig->getType()->getFPMantissaWidth();
    unsigned RHSWidth = RHSOrig->getType()->getFPMantissaWidth();
    unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
    unsigned DstWidth = CI.getType()->getFPMantissaWidth();
    switch (OpI->getOpcode()) {
      default: break;
      case Instruction::FAdd:
      case Instruction::FSub:
        // For addition and subtraction, the infinitely precise result can
        // essentially be arbitrarily wide; proving that double rounding
        // will not occur because the result of OpI is exact (as we will for
        // FMul, for example) is hopeless. However, we *can* nonetheless
        // frequently know that double rounding cannot occur (or that it is
        // innocuous) by taking advantage of the specific structure of
        // infinitely-precise results that admit double rounding.
        //
        // Specifically, if OpWidth >= 2*DstWidth+1 and DstWidth is sufficient
        // to represent both sources, we can guarantee that the double
        // rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
        // "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
        // for proof of this fact).
        //
        // Note: Figueroa does not consider the case where DstFormat !=
        // SrcFormat. It's possible (likely even!) that this analysis
        // could be tightened for those cases, but they are rare (the main
        // case of interest here is (float)((double)float + float)).
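        //
        // For example (illustrative):
        //   fptrunc (fadd double (fpext float %a), (fpext float %b)) to float
        //   --> fadd float %a, %b
        // since double's 53-bit significand satisfies 53 >= 2*24+1.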
        if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
          if (LHSOrig->getType() != CI.getType())
            LHSOrig = Builder.CreateFPExt(LHSOrig, CI.getType());
          if (RHSOrig->getType() != CI.getType())
            RHSOrig = Builder.CreateFPExt(RHSOrig, CI.getType());
          Instruction *RI =
            BinaryOperator::Create(OpI->getOpcode(), LHSOrig, RHSOrig);
          RI->copyFastMathFlags(OpI);
          return RI;
        }
        break;
      case Instruction::FMul:
        // For multiplication, the infinitely precise result has at most
        // LHSWidth + RHSWidth significant bits; if OpWidth is sufficient
        // that such a value can be exactly represented, then no double
        // rounding can possibly occur; we can safely perform the operation
        // in the destination format if it can represent both sources.
        if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
          if (LHSOrig->getType() != CI.getType())
            LHSOrig = Builder.CreateFPExt(LHSOrig, CI.getType());
          if (RHSOrig->getType() != CI.getType())
            RHSOrig = Builder.CreateFPExt(RHSOrig, CI.getType());
          Instruction *RI =
            BinaryOperator::CreateFMul(LHSOrig, RHSOrig);
          RI->copyFastMathFlags(OpI);
          return RI;
        }
        break;
      case Instruction::FDiv:
        // For division, we again use the bound from Figueroa's
        // dissertation. I am entirely certain that this bound can be
        // tightened in the unbalanced operand case by an analysis based on
        // the diophantine rational approximation bound, but the well-known
        // condition used here is a good conservative first pass.
        // TODO: Tighten bound via rigorous analysis of the unbalanced case.
        if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) {
          if (LHSOrig->getType() != CI.getType())
            LHSOrig = Builder.CreateFPExt(LHSOrig, CI.getType());
          if (RHSOrig->getType() != CI.getType())
            RHSOrig = Builder.CreateFPExt(RHSOrig, CI.getType());
          Instruction *RI =
            BinaryOperator::CreateFDiv(LHSOrig, RHSOrig);
          RI->copyFastMathFlags(OpI);
          return RI;
        }
        break;
      case Instruction::FRem:
        // Remainder is straightforward. Remainder is always exact, so the
        // type of OpI doesn't enter into things at all. We simply evaluate
        // in whichever source type is larger, then convert to the
        // destination type.
        if (SrcWidth == OpWidth)
          break;
        if (LHSWidth < SrcWidth)
          LHSOrig = Builder.CreateFPExt(LHSOrig, RHSOrig->getType());
        else if (RHSWidth <= SrcWidth)
          RHSOrig = Builder.CreateFPExt(RHSOrig, LHSOrig->getType());
        if (LHSOrig != OpI->getOperand(0) || RHSOrig != OpI->getOperand(1)) {
          Value *ExactResult = Builder.CreateFRem(LHSOrig, RHSOrig);
          if (Instruction *RI = dyn_cast<Instruction>(ExactResult))
            RI->copyFastMathFlags(OpI);
          return CastInst::CreateFPCast(ExactResult, CI.getType());
        }
    }

    // (fptrunc (fneg x)) -> (fneg (fptrunc x))
    if (BinaryOperator::isFNeg(OpI)) {
      Value *InnerTrunc = Builder.CreateFPTrunc(OpI->getOperand(1),
                                                CI.getType());
      Instruction *RI = BinaryOperator::CreateFNeg(InnerTrunc);
      RI->copyFastMathFlags(OpI);
      return RI;
    }
  }

  // (fptrunc (select cond, R1, Cst)) -->
  // (select cond, (fptrunc R1), (fptrunc Cst))
  //
  //  - but only if this isn't part of a min/max operation, else we'll
  // ruin min/max canonical form which is to have the select and
  // compare's operands be of the same type with no casts to look through.
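  //
  // For example (illustrative):
  //   fptrunc (select i1 %c, double %x, double 1.0) to float
  //   --> select i1 %c, (fptrunc double %x to float), float 1.0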
  Value *LHS, *RHS;
  SelectInst *SI = dyn_cast<SelectInst>(CI.getOperand(0));
  if (SI &&
      (isa<ConstantFP>(SI->getOperand(1)) ||
       isa<ConstantFP>(SI->getOperand(2))) &&
      matchSelectPattern(SI, LHS, RHS).Flavor == SPF_UNKNOWN) {
    Value *LHSTrunc = Builder.CreateFPTrunc(SI->getOperand(1), CI.getType());
    Value *RHSTrunc = Builder.CreateFPTrunc(SI->getOperand(2), CI.getType());
    return SelectInst::Create(SI->getOperand(0), LHSTrunc, RHSTrunc);
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI.getOperand(0));
  if (II) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::fabs:
    case Intrinsic::ceil:
    case Intrinsic::floor:
    case Intrinsic::rint:
    case Intrinsic::round:
    case Intrinsic::nearbyint:
    case Intrinsic::trunc: {
      Value *Src = II->getArgOperand(0);
      if (!Src->hasOneUse())
        break;

      // Except for fabs, this transformation requires the input of the unary
      // FP operation to be itself an fpext from the type to which we're
      // truncating.
      if (II->getIntrinsicID() != Intrinsic::fabs) {
        FPExtInst *FPExtSrc = dyn_cast<FPExtInst>(Src);
        if (!FPExtSrc || FPExtSrc->getOperand(0)->getType() != CI.getType())
          break;
      }

      // Do unary FP operation on smaller type.
      // (fptrunc (fabs x)) -> (fabs (fptrunc x))
      Value *InnerTrunc = Builder.CreateFPTrunc(Src, CI.getType());
      Type *IntrinsicType[] = { CI.getType() };
      Function *Overload = Intrinsic::getDeclaration(
          CI.getModule(), II->getIntrinsicID(), IntrinsicType);

      SmallVector<OperandBundleDef, 1> OpBundles;
      II->getOperandBundlesAsDefs(OpBundles);

      Value *Args[] = { InnerTrunc };
      CallInst *NewCI = CallInst::Create(Overload, Args,
                                         OpBundles, II->getName());
      NewCI->copyFastMathFlags(II);
      return NewCI;
    }
    }
  }

  if (Instruction *I = shrinkInsertElt(CI, Builder))
    return I;

  return nullptr;
}

Instruction *InstCombiner::visitFPExt(CastInst &CI) {
  return commonCastTransforms(CI);
}

// fpto{s/u}i({u/s}itofp(X)) --> X or zext(X) or sext(X) or trunc(X)
// This is safe if the intermediate type has enough bits in its mantissa to
// accurately represent all values of X. For example, this won't work with
// i64 -> float -> i64.
Instruction *InstCombiner::FoldItoFPtoI(Instruction &FI) {
  if (!isa<UIToFPInst>(FI.getOperand(0)) && !isa<SIToFPInst>(FI.getOperand(0)))
    return nullptr;
  Instruction *OpI = cast<Instruction>(FI.getOperand(0));

  Value *SrcI = OpI->getOperand(0);
  Type *FITy = FI.getType();
  Type *OpITy = OpI->getType();
  Type *SrcTy = SrcI->getType();
  bool IsInputSigned = isa<SIToFPInst>(OpI);
  bool IsOutputSigned = isa<FPToSIInst>(FI);

  // We can safely assume the conversion won't overflow the output range,
  // because (for example) (uint8_t)18293.f is undefined behavior.

  // Since we can assume the conversion won't overflow, our decision as to
  // whether the input will fit in the float should depend on the minimum
  // of the input range and output range.

  // This means this is also safe for a signed input and unsigned output, since
  // a negative input would lead to undefined behavior.
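  //
  // An illustrative instance (operand name invented): for
  //   (fptoui (sitofp i16 %x to float) to i8)
  // InputSize is 16-1 = 15 and OutputSize is 8-0 = 8, so ActualSize is 8,
  // which fits in float's 24-bit mantissa; the sequence may simply become
  // "trunc i16 %x to i8".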
  int InputSize = (int)SrcTy->getScalarSizeInBits() - IsInputSigned;
  int OutputSize = (int)FITy->getScalarSizeInBits() - IsOutputSigned;
  int ActualSize = std::min(InputSize, OutputSize);

  if (ActualSize <= OpITy->getFPMantissaWidth()) {
    if (FITy->getScalarSizeInBits() > SrcTy->getScalarSizeInBits()) {
      if (IsInputSigned && IsOutputSigned)
        return new SExtInst(SrcI, FITy);
      return new ZExtInst(SrcI, FITy);
    }
    if (FITy->getScalarSizeInBits() < SrcTy->getScalarSizeInBits())
      return new TruncInst(SrcI, FITy);
    if (SrcTy == FITy)
      return replaceInstUsesWith(FI, SrcI);
    return new BitCastInst(SrcI, FITy);
  }
  return nullptr;
}

Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
  Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
  if (!OpI)
    return commonCastTransforms(FI);

  if (Instruction *I = FoldItoFPtoI(FI))
    return I;

  return commonCastTransforms(FI);
}

Instruction *InstCombiner::visitFPToSI(FPToSIInst &FI) {
  Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
  if (!OpI)
    return commonCastTransforms(FI);

  if (Instruction *I = FoldItoFPtoI(FI))
    return I;

  return commonCastTransforms(FI);
}

Instruction *InstCombiner::visitUIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}

Instruction *InstCombiner::visitSIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}

Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
  // If the source integer type is not the intptr_t type for this target, do a
  // trunc or zext to the intptr_t type, then inttoptr of it. This allows the
  // cast to be exposed to other transforms.
  unsigned AS = CI.getAddressSpace();
  if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
      DL.getPointerSizeInBits(AS)) {
    Type *Ty = DL.getIntPtrType(CI.getContext(), AS);
    if (CI.getType()->isVectorTy()) // Handle vectors of pointers.
      Ty = VectorType::get(Ty, CI.getType()->getVectorNumElements());

    Value *P = Builder.CreateZExtOrTrunc(CI.getOperand(0), Ty);
    return new IntToPtrInst(P, CI.getType());
  }

  if (Instruction *I = commonCastTransforms(CI))
    return I;

  return nullptr;
}

/// @brief Implement the transforms for cast of pointer (bitcast/ptrtoint)
Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
    // If casting the result of a getelementptr instruction with no offset,
    // turn this into a cast of the original pointer!
    if (GEP->hasAllZeroIndices() &&
        // If CI is an addrspacecast and GEP changes the pointer type, merging
        // GEP into CI would undo canonicalizing addrspacecast with different
        // pointer types, causing infinite loops.
        (!isa<AddrSpaceCastInst>(CI) ||
         GEP->getType() == GEP->getPointerOperandType())) {
      // Changing the cast operand is usually not a good idea but it is safe
      // here because the pointer operand is being replaced with another
      // pointer operand, so the opcode doesn't need to change.
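      //
      // An illustrative instance (operand names invented): for
      //   %g = getelementptr [4 x i8], [4 x i8]* %p, i64 0, i64 0
      //   %i = ptrtoint i8* %g to i64
      // the cast is rewritten to operate on %p directly, leaving %g to be
      // cleaned up if it has no other uses.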
      Worklist.Add(GEP);
      CI.setOperand(0, GEP->getOperand(0));
      return &CI;
    }
  }

  return commonCastTransforms(CI);
}

Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
  // If the destination integer type is not the intptr_t type for this target,
  // do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast
  // to be exposed to other transforms.

  Type *Ty = CI.getType();
  unsigned AS = CI.getPointerAddressSpace();

  if (Ty->getScalarSizeInBits() == DL.getPointerSizeInBits(AS))
    return commonPointerCastTransforms(CI);

  Type *PtrTy = DL.getIntPtrType(CI.getContext(), AS);
  if (Ty->isVectorTy()) // Handle vectors of pointers.
    PtrTy = VectorType::get(PtrTy, Ty->getVectorNumElements());

  Value *P = Builder.CreatePtrToInt(CI.getOperand(0), PtrTy);
  return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false);
}

/// This input value (which is known to have vector type) is being zero
/// extended or truncated to the specified vector type.
/// Try to replace it with a shuffle (and vector/vector bitcast) if possible.
///
/// The source and destination vector types may have different element types.
static Instruction *optimizeVectorResize(Value *InVal, VectorType *DestTy,
                                         InstCombiner &IC) {
  // We can only do this optimization if the output is a multiple of the input
  // element size, or the input is a multiple of the output element size.
  // Convert the input type to have the same element type as the output.
  VectorType *SrcTy = cast<VectorType>(InVal->getType());

  if (SrcTy->getElementType() != DestTy->getElementType()) {
    // The input types don't need to be identical, but for now they must be the
    // same size. There is no specific reason we couldn't handle things like
    // <4 x i16> -> <4 x i32> by bitcasting to <2 x i32>, but haven't gotten
    // there yet.
    if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
        DestTy->getElementType()->getPrimitiveSizeInBits())
      return nullptr;

    SrcTy = VectorType::get(DestTy->getElementType(), SrcTy->getNumElements());
    InVal = IC.Builder.CreateBitCast(InVal, SrcTy);
  }

  // Now that the element types match, get the shuffle mask and RHS of the
  // shuffle to use, which depends on whether we're increasing or decreasing
  // the size of the input.
  SmallVector<uint32_t, 16> ShuffleMask;
  Value *V2;

  if (SrcTy->getNumElements() > DestTy->getNumElements()) {
    // If we're shrinking the number of elements, just shuffle in the low
    // elements from the input and use undef as the second shuffle input.
    V2 = UndefValue::get(SrcTy);
    for (unsigned i = 0, e = DestTy->getNumElements(); i != e; ++i)
      ShuffleMask.push_back(i);

  } else {
    // If we're increasing the number of elements, shuffle in all of the
    // elements from InVal and fill the rest of the result elements with zeros
    // from a constant zero.
    V2 = Constant::getNullValue(SrcTy);
    unsigned SrcElts = SrcTy->getNumElements();
    for (unsigned i = 0, e = SrcElts; i != e; ++i)
      ShuffleMask.push_back(i);

    // The excess elements reference the first element of the zero input.
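    //
    // An illustrative instance: widening <2 x i32> to <4 x i32> produces the
    // mask <0, 1, 2, 2>; result elements 2 and 3 both select index 2, i.e.
    // element 0 of the zero vector V2.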
    for (unsigned i = 0, e = DestTy->getNumElements()-SrcElts; i != e; ++i)
      ShuffleMask.push_back(SrcElts);
  }

  return new ShuffleVectorInst(InVal, V2,
                               ConstantDataVector::get(V2->getContext(),
                                                       ShuffleMask));
}

static bool isMultipleOfTypeSize(unsigned Value, Type *Ty) {
  return Value % Ty->getPrimitiveSizeInBits() == 0;
}

static unsigned getTypeSizeIndex(unsigned Value, Type *Ty) {
  return Value / Ty->getPrimitiveSizeInBits();
}

/// V is a value which is inserted into a vector of VecEltTy.
/// Look through the value to see if we can decompose it into
/// insertions into the vector. See the example in the comment for
/// OptimizeIntegerToVectorInsertions for the pattern this handles.
/// The type of V is always a non-zero multiple of VecEltTy's size.
/// Shift is the number of bits between the lsb of V and the lsb of
/// the vector.
///
/// This returns false if the pattern can't be matched or true if it can,
/// filling in Elements with the elements found here.
static bool collectInsertionElements(Value *V, unsigned Shift,
                                     SmallVectorImpl<Value *> &Elements,
                                     Type *VecEltTy, bool isBigEndian) {
  assert(isMultipleOfTypeSize(Shift, VecEltTy) &&
         "Shift should be a multiple of the element type size");

  // Undef values never contribute useful bits to the result.
  if (isa<UndefValue>(V)) return true;

  // If we got down to a value of the right type, we win, try inserting into
  // the right element.
  if (V->getType() == VecEltTy) {
    // Inserting null doesn't actually insert any elements.
    if (Constant *C = dyn_cast<Constant>(V))
      if (C->isNullValue())
        return true;

    unsigned ElementIndex = getTypeSizeIndex(Shift, VecEltTy);
    if (isBigEndian)
      ElementIndex = Elements.size() - ElementIndex - 1;

    // Fail if multiple elements are inserted into this slot.
    if (Elements[ElementIndex])
      return false;

    Elements[ElementIndex] = V;
    return true;
  }

  if (Constant *C = dyn_cast<Constant>(V)) {
    // Figure out the # elements this provides, and bitcast it or slice it up
    // as required.
    unsigned NumElts = getTypeSizeIndex(C->getType()->getPrimitiveSizeInBits(),
                                        VecEltTy);
    // If the constant is the size of a vector element, we just need to bitcast
    // it to the right type so it gets properly inserted.
    if (NumElts == 1)
      return collectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy),
                                      Shift, Elements, VecEltTy, isBigEndian);

    // Okay, this is a constant that covers multiple elements. Slice it up
    // into pieces and insert each element-sized piece into the vector.
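    //
    // An illustrative instance: on a little-endian target, slicing the i64
    // constant 0x0000000100000002 into i32 pieces yields 0x2 for element 0
    // (shift 0) and 0x1 for element 1 (shift 32).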
    if (!isa<IntegerType>(C->getType()))
      C = ConstantExpr::getBitCast(C, IntegerType::get(V->getContext(),
                                       C->getType()->getPrimitiveSizeInBits()));
    unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
    Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);

    for (unsigned i = 0; i != NumElts; ++i) {
      unsigned ShiftI = Shift+i*ElementSize;
      Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
                                                                  ShiftI));
      Piece = ConstantExpr::getTrunc(Piece, ElementIntTy);
      if (!collectInsertionElements(Piece, ShiftI, Elements, VecEltTy,
                                    isBigEndian))
        return false;
    }
    return true;
  }

  if (!V->hasOneUse()) return false;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;
  switch (I->getOpcode()) {
  default: return false; // Unhandled case.
  case Instruction::BitCast:
    return collectInsertionElements(I->getOperand(0), Shift, Elements,
                                    VecEltTy, isBigEndian);
  case Instruction::ZExt:
    if (!isMultipleOfTypeSize(
            I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
            VecEltTy))
      return false;
    return collectInsertionElements(I->getOperand(0), Shift, Elements,
                                    VecEltTy, isBigEndian);
  case Instruction::Or:
    return collectInsertionElements(I->getOperand(0), Shift, Elements,
                                    VecEltTy, isBigEndian) &&
           collectInsertionElements(I->getOperand(1), Shift, Elements,
                                    VecEltTy, isBigEndian);
  case Instruction::Shl: {
    // Must be shifting by a constant that is a multiple of the element size.
    ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
    if (!CI) return false;
    Shift += CI->getZExtValue();
    if (!isMultipleOfTypeSize(Shift, VecEltTy)) return false;
    return collectInsertionElements(I->getOperand(0), Shift, Elements,
                                    VecEltTy, isBigEndian);
  }
  }
}

/// If the input is an 'or' instruction, we may be doing shifts and ors to
/// assemble the elements of the vector manually.
/// Try to rip the code out and replace it with insertelements. This is to
/// optimize code like this:
///
///    %tmp37 = bitcast float %inc to i32
///    %tmp38 = zext i32 %tmp37 to i64
///    %tmp31 = bitcast float %inc5 to i32
///    %tmp32 = zext i32 %tmp31 to i64
///    %tmp33 = shl i64 %tmp32, 32
///    %ins35 = or i64 %tmp33, %tmp38
///    %tmp43 = bitcast i64 %ins35 to <2 x float>
///
/// Into two insertelements that do "buildvector{%inc, %inc5}".
static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI,
                                                InstCombiner &IC) {
  VectorType *DestVecTy = cast<VectorType>(CI.getType());
  Value *IntInput = CI.getOperand(0);

  SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
  if (!collectInsertionElements(IntInput, 0, Elements,
                                DestVecTy->getElementType(),
                                IC.getDataLayout().isBigEndian()))
    return nullptr;

  // If we succeeded, we know that all of the elements are specified by
  // Elements or are zero if Elements has a null entry. Recast this as a set
  // of insertions.
  Value *Result = Constant::getNullValue(CI.getType());
  for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
    if (!Elements[i]) continue; // Unset element.
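
    // Insert the recovered scalar value into slot i of the result vector.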
    Result = IC.Builder.CreateInsertElement(Result, Elements[i],
                                            IC.Builder.getInt32(i));
  }

  return Result;
}

/// Canonicalize scalar bitcasts of extracted elements into a bitcast of the
/// vector followed by extract element. The backend tends to handle bitcasts of
/// vectors better than bitcasts of scalars because vector registers are
/// usually not type-specific like scalar integer or scalar floating-point.
static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast,
                                              InstCombiner &IC) {
  // TODO: Create and use a pattern matcher for ExtractElementInst.
  auto *ExtElt = dyn_cast<ExtractElementInst>(BitCast.getOperand(0));
  if (!ExtElt || !ExtElt->hasOneUse())
    return nullptr;

  // The bitcast must be to a vectorizable type, otherwise we can't make a new
  // type to extract from.
  Type *DestType = BitCast.getType();
  if (!VectorType::isValidElementType(DestType))
    return nullptr;

  unsigned NumElts = ExtElt->getVectorOperandType()->getNumElements();
  auto *NewVecType = VectorType::get(DestType, NumElts);
  auto *NewBC = IC.Builder.CreateBitCast(ExtElt->getVectorOperand(),
                                         NewVecType, "bc");
  return ExtractElementInst::Create(NewBC, ExtElt->getIndexOperand());
}

/// Change the type of a bitwise logic operation if we can eliminate a bitcast.
static Instruction *foldBitCastBitwiseLogic(BitCastInst &BitCast,
                                            InstCombiner::BuilderTy &Builder) {
  Type *DestTy = BitCast.getType();
  BinaryOperator *BO;
  if (!DestTy->isIntOrIntVectorTy() ||
      !match(BitCast.getOperand(0), m_OneUse(m_BinOp(BO))) ||
      !BO->isBitwiseLogicOp())
    return nullptr;

  // FIXME: This transform is restricted to vector types to avoid backend
  // problems caused by creating potentially illegal operations. If a fix-up is
  // added to handle that situation, we can remove this check.
  if (!DestTy->isVectorTy() || !BO->getType()->isVectorTy())
    return nullptr;

  Value *X;
  if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) &&
      X->getType() == DestTy && !isa<Constant>(X)) {
    // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
    Value *CastedOp1 = Builder.CreateBitCast(BO->getOperand(1), DestTy);
    return BinaryOperator::Create(BO->getOpcode(), X, CastedOp1);
  }

  if (match(BO->getOperand(1), m_OneUse(m_BitCast(m_Value(X)))) &&
      X->getType() == DestTy && !isa<Constant>(X)) {
    // bitcast(logic(Y, bitcast(X))) --> logic'(bitcast(Y), X)
    Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
    return BinaryOperator::Create(BO->getOpcode(), CastedOp0, X);
  }

  // Canonicalize vector bitcasts to come before vector bitwise logic with a
  // constant. This eases recognition of special constants for later ops.
  // Example:
  // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
  Constant *C;
  if (match(BO->getOperand(1), m_Constant(C))) {
    // bitcast (logic X, C) --> logic (bitcast X, C')
    Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
    Value *CastedC = ConstantExpr::getBitCast(C, DestTy);
    return BinaryOperator::Create(BO->getOpcode(), CastedOp0, CastedC);
  }

  return nullptr;
}

/// Change the type of a select if we can eliminate a bitcast.
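/// An illustrative instance (operand names invented): for
///   %t = bitcast <4 x float> %x to <4 x i32>
///   %s = select i1 %c, <4 x i32> %t, <4 x i32> %y
///   %r = bitcast <4 x i32> %s to <4 x float>
/// the select may be rewritten as
///   select i1 %c, <4 x float> %x, <4 x float> (bitcast %y)
/// eliminating one of the bitcasts.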
static Instruction *foldBitCastSelect(BitCastInst &BitCast,
                                      InstCombiner::BuilderTy &Builder) {
  Value *Cond, *TVal, *FVal;
  if (!match(BitCast.getOperand(0),
             m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
    return nullptr;

  // A vector select must maintain the same number of elements in its operands.
  Type *CondTy = Cond->getType();
  Type *DestTy = BitCast.getType();
  if (CondTy->isVectorTy()) {
    if (!DestTy->isVectorTy())
      return nullptr;
    if (DestTy->getVectorNumElements() != CondTy->getVectorNumElements())
      return nullptr;
  }

  // FIXME: This transform is restricted from changing the select between
  // scalars and vectors to avoid backend problems caused by creating
  // potentially illegal operations. If a fix-up is added to handle that
  // situation, we can remove this check.
  if (DestTy->isVectorTy() != TVal->getType()->isVectorTy())
    return nullptr;

  auto *Sel = cast<Instruction>(BitCast.getOperand(0));
  Value *X;
  if (match(TVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
      !isa<Constant>(X)) {
    // bitcast(select(Cond, bitcast(X), Y)) --> select'(Cond, X, bitcast(Y))
    Value *CastedVal = Builder.CreateBitCast(FVal, DestTy);
    return SelectInst::Create(Cond, X, CastedVal, "", nullptr, Sel);
  }

  if (match(FVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
      !isa<Constant>(X)) {
    // bitcast(select(Cond, Y, bitcast(X))) --> select'(Cond, bitcast(Y), X)
    Value *CastedVal = Builder.CreateBitCast(TVal, DestTy);
    return SelectInst::Create(Cond, CastedVal, X, "", nullptr, Sel);
  }

  return nullptr;
}

/// Check if all users of CI are StoreInsts.
static bool hasStoreUsersOnly(CastInst &CI) {
  for (User *U : CI.users()) {
    if (!isa<StoreInst>(U))
      return false;
  }
  return true;
}

/// This function handles the following case:
///
///     A -> B cast
///     PHI
///     B -> A cast
///
/// All the related PHI nodes can be replaced by new PHI nodes with type A.
/// The uses of \p CI can be changed to the new PHI node corresponding to \p PN.
Instruction *InstCombiner::optimizeBitCastFromPhi(CastInst &CI, PHINode *PN) {
  // BitCast used by Store can be handled in InstCombineLoadStoreAlloca.cpp.
  if (hasStoreUsersOnly(CI))
    return nullptr;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType();   // Type B
  Type *DestTy = CI.getType();    // Type A

  SmallVector<PHINode *, 4> PhiWorklist;
  SmallSetVector<PHINode *, 4> OldPhiNodes;

  // Find all of the A->B casts and PHI nodes.
  // We need to inspect all related PHI nodes, but PHIs can be cyclic, so
  // OldPhiNodes is used to track all known PHI nodes; before adding a new
  // PHI to PhiWorklist, it is checked against and added to OldPhiNodes first.
  PhiWorklist.push_back(PN);
  OldPhiNodes.insert(PN);
  while (!PhiWorklist.empty()) {
    auto *OldPN = PhiWorklist.pop_back_val();
    for (Value *IncValue : OldPN->incoming_values()) {
      if (isa<Constant>(IncValue))
        continue;

      if (auto *LI = dyn_cast<LoadInst>(IncValue)) {
        // If there is a sequence of one or more loads, where each loaded value
        // is used as the address of a later load, then a bitcast is necessary
        // to change the value type; don't optimize that case.
        // For simplicity we give up if the load address comes from another
        // load.
        Value *Addr = LI->getOperand(0);
        if (Addr == &CI || isa<LoadInst>(Addr))
          return nullptr;
        if (LI->hasOneUse() && LI->isSimple())
          continue;
        // If a LoadInst has more than one use, changing the type of the loaded
        // value may create another bitcast.
        return nullptr;
      }

      if (auto *PNode = dyn_cast<PHINode>(IncValue)) {
        if (OldPhiNodes.insert(PNode))
          PhiWorklist.push_back(PNode);
        continue;
      }

      auto *BCI = dyn_cast<BitCastInst>(IncValue);
      // We can't handle other instructions.
      if (!BCI)
        return nullptr;

      // Verify it's an A->B cast.
      Type *TyA = BCI->getOperand(0)->getType();
      Type *TyB = BCI->getType();
      if (TyA != DestTy || TyB != SrcTy)
        return nullptr;
    }
  }

  // For each old PHI node, create a corresponding new PHI node with type A.
  SmallDenseMap<PHINode *, PHINode *> NewPNodes;
  for (auto *OldPN : OldPhiNodes) {
    Builder.SetInsertPoint(OldPN);
    PHINode *NewPN = Builder.CreatePHI(DestTy, OldPN->getNumOperands());
    NewPNodes[OldPN] = NewPN;
  }

  // Fill in the operands of new PHI nodes.
  for (auto *OldPN : OldPhiNodes) {
    PHINode *NewPN = NewPNodes[OldPN];
    for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
      Value *V = OldPN->getOperand(j);
      Value *NewV = nullptr;
      if (auto *C = dyn_cast<Constant>(V)) {
        NewV = ConstantExpr::getBitCast(C, DestTy);
      } else if (auto *LI = dyn_cast<LoadInst>(V)) {
        Builder.SetInsertPoint(LI->getNextNode());
        NewV = Builder.CreateBitCast(LI, DestTy);
        Worklist.Add(LI);
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        NewV = BCI->getOperand(0);
      } else if (auto *PrevPN = dyn_cast<PHINode>(V)) {
        NewV = NewPNodes[PrevPN];
      }
      assert(NewV);
      NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
    }
  }

  // If there is a store with type B, change it to type A.
  for (User *U : PN->users()) {
    auto *SI = dyn_cast<StoreInst>(U);
    if (SI && SI->isSimple() && SI->getOperand(0) == PN) {
      Builder.SetInsertPoint(SI);
      auto *NewBC =
          cast<BitCastInst>(Builder.CreateBitCast(NewPNodes[PN], SrcTy));
      SI->setOperand(0, NewBC);
      Worklist.Add(SI);
      assert(hasStoreUsersOnly(*NewBC));
    }
  }

  return replaceInstUsesWith(CI, NewPNodes[PN]);
}

Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
  // If the operands are integer typed then apply the integer transforms,
  // otherwise just apply the common ones.
  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType();
  Type *DestTy = CI.getType();

  // Get rid of casts from one type to the same type. These are useless and can
  // be replaced by the operand.
  if (DestTy == Src->getType())
    return replaceInstUsesWith(CI, Src);

  if (PointerType *DstPTy = dyn_cast<PointerType>(DestTy)) {
    PointerType *SrcPTy = cast<PointerType>(SrcTy);
    Type *DstElTy = DstPTy->getElementType();
    Type *SrcElTy = SrcPTy->getElementType();

    // If we are casting an alloca to a pointer to a type of the same
    // size, rewrite the allocation instruction to allocate the "right" type.
    // There is no need to modify malloc calls because it is their bitcast that
    // needs to be cleaned up.
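    //
    // An illustrative instance (names invented), subject to the size and
    // alignment checks in PromoteCastOfAllocation:
    //   %a = alloca i32
    //   %b = bitcast i32* %a to float*
    // may be rewritten so that %a becomes "alloca float" and the bitcast
    // disappears.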
    if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
      if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
        return V;

    // When the type pointed to is not sized, the cast cannot be
    // turned into a gep.
    Type *PointeeType =
        cast<PointerType>(Src->getType()->getScalarType())->getElementType();
    if (!PointeeType->isSized())
      return nullptr;

    // If the source and destination are pointers, and this cast is equivalent
    // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
    // This can enhance SROA and other transforms that want type-safe pointers.
    unsigned NumZeros = 0;
    while (SrcElTy != DstElTy &&
           isa<CompositeType>(SrcElTy) && !SrcElTy->isPointerTy() &&
           SrcElTy->getNumContainedTypes() /* not "{}" */) {
      SrcElTy = cast<CompositeType>(SrcElTy)->getTypeAtIndex(0U);
      ++NumZeros;
    }

    // If we found a path from the src to dest, create the getelementptr now.
    if (SrcElTy == DstElTy) {
      SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder.getInt32(0));
      return GetElementPtrInst::CreateInBounds(Src, Idxs);
    }
  }

  if (VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
    if (DestVTy->getNumElements() == 1 && !SrcTy->isVectorTy()) {
      Value *Elem = Builder.CreateBitCast(Src, DestVTy->getElementType());
      return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
                     Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
      // FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
    }

    if (isa<IntegerType>(SrcTy)) {
      // If this is a cast from an integer to vector, check to see if the input
      // is a trunc or zext of a bitcast from vector. If so, we can replace all
      // the casts with a shuffle and (potentially) a bitcast.
      if (isa<TruncInst>(Src) || isa<ZExtInst>(Src)) {
        CastInst *SrcCast = cast<CastInst>(Src);
        if (BitCastInst *BCIn = dyn_cast<BitCastInst>(SrcCast->getOperand(0)))
          if (isa<VectorType>(BCIn->getOperand(0)->getType()))
            if (Instruction *I = optimizeVectorResize(BCIn->getOperand(0),
                                              cast<VectorType>(DestTy), *this))
              return I;
      }

      // If the input is an 'or' instruction, we may be doing shifts and ors to
      // assemble the elements of the vector manually. Try to rip the code out
      // and replace it with insertelements.
      if (Value *V = optimizeIntegerToVectorInsertions(CI, *this))
        return replaceInstUsesWith(CI, V);
    }
  }

  if (VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
    if (SrcVTy->getNumElements() == 1) {
      // If our destination is not a vector, then make this a straight
      // scalar-scalar cast.
      if (!DestTy->isVectorTy()) {
        Value *Elem =
            Builder.CreateExtractElement(Src,
                     Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
        return CastInst::Create(Instruction::BitCast, Elem, DestTy);
      }

      // Otherwise, see if our source is an insert. If so, then use the scalar
      // component directly.
      if (InsertElementInst *IEI =
              dyn_cast<InsertElementInst>(CI.getOperand(0)))
        return CastInst::Create(Instruction::BitCast, IEI->getOperand(1),
                                DestTy);
    }
  }

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(Src)) {
    // Okay, we have (bitcast (shuffle ..)). Check to see if this is
    // a bitcast to a vector with the same # elts.
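    //
    // An illustrative instance (operand names invented): for
    //   %bc = bitcast <2 x double> %x to <2 x i64>
    //   %sv = shufflevector <2 x i64> %bc, <2 x i64> undef,
    //                       <2 x i32> <i32 1, i32 0>
    //   %r  = bitcast <2 x i64> %sv to <2 x double>
    // the shuffle may instead be performed directly on <2 x double> values,
    // removing both bitcasts around it.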
    if (SVI->hasOneUse() && DestTy->isVectorTy() &&
        DestTy->getVectorNumElements() == SVI->getType()->getNumElements() &&
        SVI->getType()->getNumElements() ==
            SVI->getOperand(0)->getType()->getVectorNumElements()) {
      BitCastInst *Tmp;
      // If either of the operands is a cast from CI.getType(), then
      // evaluating the shuffle in the casted destination's type will allow
      // us to eliminate at least one cast.
      if (((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(0))) &&
           Tmp->getOperand(0)->getType() == DestTy) ||
          ((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(1))) &&
           Tmp->getOperand(0)->getType() == DestTy)) {
        Value *LHS = Builder.CreateBitCast(SVI->getOperand(0), DestTy);
        Value *RHS = Builder.CreateBitCast(SVI->getOperand(1), DestTy);
        // Return a new shuffle vector. Use the same element IDs, as we
        // know the vector types match #elts.
        return new ShuffleVectorInst(LHS, RHS, SVI->getOperand(2));
      }
    }
  }

  // Handle the A->B->A cast where there is an intervening PHI node.
  if (PHINode *PN = dyn_cast<PHINode>(Src))
    if (Instruction *I = optimizeBitCastFromPhi(CI, PN))
      return I;

  if (Instruction *I = canonicalizeBitCastExtElt(CI, *this))
    return I;

  if (Instruction *I = foldBitCastBitwiseLogic(CI, Builder))
    return I;

  if (Instruction *I = foldBitCastSelect(CI, Builder))
    return I;

  if (SrcTy->isPointerTy())
    return commonPointerCastTransforms(CI);
  return commonCastTransforms(CI);
}

Instruction *InstCombiner::visitAddrSpaceCast(AddrSpaceCastInst &CI) {
  // If the destination pointer element type is not the same as the source's,
  // first do a bitcast to the destination type, and then the addrspacecast.
  // This allows the cast to be exposed to other transforms.
  Value *Src = CI.getOperand(0);
  PointerType *SrcTy = cast<PointerType>(Src->getType()->getScalarType());
  PointerType *DestTy = cast<PointerType>(CI.getType()->getScalarType());

  Type *DestElemTy = DestTy->getElementType();
  if (SrcTy->getElementType() != DestElemTy) {
    Type *MidTy = PointerType::get(DestElemTy, SrcTy->getAddressSpace());
    if (VectorType *VT = dyn_cast<VectorType>(CI.getType())) {
      // Handle vectors of pointers.
      MidTy = VectorType::get(MidTy, VT->getNumElements());
    }

    Value *NewBitCast = Builder.CreateBitCast(Src, MidTy);
    return new AddrSpaceCastInst(NewBitCast, CI.getType());
  }

  return commonPointerCastTransforms(CI);
}