//===- InstCombineCasts.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for cast operations.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// Analyze 'Val', seeing if it is a simple linear expression.
/// If so, decompose it, returning some value X, such that Val is
/// X*Scale+Offset.
///
static Value *decomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
                                        uint64_t &Offset) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
    Offset = CI->getZExtValue();
    Scale = 0;
    return ConstantInt::get(Val->getType(), 0);
  }

  if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
    // Cannot look past anything that might overflow.
    OverflowingBinaryOperator *OBI = dyn_cast<OverflowingBinaryOperator>(Val);
    if (OBI && !OBI->hasNoUnsignedWrap() && !OBI->hasNoSignedWrap()) {
      Scale = 1;
      Offset = 0;
      return Val;
    }

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
      if (I->getOpcode() == Instruction::Shl) {
        // This is a value scaled by '1 << the shift amt'.
        Scale = UINT64_C(1) << RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Mul) {
        // This value is scaled by 'RHS'.
        Scale = RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Add) {
        // We have X+C. Check to see if we really have (X*C2)+C1,
        // where C1 is divisible by C2.
        unsigned SubScale;
        Value *SubVal =
          decomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
        Offset += RHS->getZExtValue();
        Scale = SubScale;
        return SubVal;
      }
    }
  }

  // Otherwise, we can't look past this.
  Scale = 1;
  Offset = 0;
  return Val;
}

/// If we find a cast of an allocation instruction, try to eliminate the cast by
/// moving the type information into the alloc.
Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
                                                   AllocaInst &AI) {
  PointerType *PTy = cast<PointerType>(CI.getType());

  BuilderTy AllocaBuilder(*Builder);
  AllocaBuilder.SetInsertPoint(&AI);

  // Get the type really allocated and the type casted to.
  Type *AllocElTy = AI.getAllocatedType();
  Type *CastElTy = PTy->getElementType();
  if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr;

  unsigned AllocElTyAlign = DL.getABITypeAlignment(AllocElTy);
  unsigned CastElTyAlign = DL.getABITypeAlignment(CastElTy);
  if (CastElTyAlign < AllocElTyAlign) return nullptr;

  // If the allocation has multiple uses, only promote it if we are strictly
  // increasing the alignment of the resultant allocation. If we keep it the
  // same, we open the door to infinite loops of various kinds.
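  // For example (illustrative, assuming all of the checks below succeed):
  // a cast of
  //   %p = alloca i16, i32 %n      ; where %n = mul nuw i32 %m, 2
  // to i32* can be folded by rewriting the allocation itself as
  //   %p = alloca i32, i32 %m
  // after which the original bitcast (and, if needed, a cast back for other
  // users) is cheap or dead.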
  if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr;

  uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy);
  uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy);
  if (CastElTySize == 0 || AllocElTySize == 0) return nullptr;

  // If the allocation has multiple uses, only promote it if we're not
  // shrinking the amount of memory being allocated.
  uint64_t AllocElTyStoreSize = DL.getTypeStoreSize(AllocElTy);
  uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy);
  if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr;

  // See if we can satisfy the modulus by pulling a scale out of the array
  // size argument.
  unsigned ArraySizeScale;
  uint64_t ArrayOffset;
  Value *NumElements = // See if the array size is a decomposable linear expr.
    decomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);

  // If we can now satisfy the modulus, by using a non-1 scale, we really can
  // do the xform.
  if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
      (AllocElTySize*ArrayOffset   ) % CastElTySize != 0) return nullptr;

  unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
  Value *Amt = nullptr;
  if (Scale == 1) {
    Amt = NumElements;
  } else {
    Amt = ConstantInt::get(AI.getArraySize()->getType(), Scale);
    // Insert before the alloca, not before the cast.
    Amt = AllocaBuilder.CreateMul(Amt, NumElements);
  }

  if (uint64_t Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
    Value *Off = ConstantInt::get(AI.getArraySize()->getType(),
                                  Offset, true);
    Amt = AllocaBuilder.CreateAdd(Amt, Off);
  }

  AllocaInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
  New->setAlignment(AI.getAlignment());
  New->takeName(&AI);
  New->setUsedWithInAlloca(AI.isUsedWithInAlloca());

  // If the allocation has multiple real uses, insert a cast and change all
  // things that used it to use the new cast. This will also hack on CI, but it
  // will die soon.
  if (!AI.hasOneUse()) {
    // New is the allocation instruction, pointer typed. AI is the original
    // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
    Value *NewCast = AllocaBuilder.CreateBitCast(New, AI.getType(), "tmpcast");
    replaceInstUsesWith(AI, NewCast);
  }
  return replaceInstUsesWith(CI, New);
}

/// Given an expression that CanEvaluateTruncated or CanEvaluateSExtd returns
/// true for, actually insert the code to evaluate the expression.
Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
                                             bool isSigned) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
    // If we got a constantexpr back, try to simplify it with DL info.
    if (Constant *FoldedC = ConstantFoldConstant(C, DL, &TLI))
      C = FoldedC;
    return C;
  }

  // Otherwise, it must be an instruction.
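  // For example (illustrative): asked to evaluate
  //   add i32 (zext i16 %a to i32), 42
  // in type i16, the recursion below rebuilds it as
  //   add i16 %a, 42
  // (the zext collapses to its source operand and the constant is recast).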
  Instruction *I = cast<Instruction>(V);
  Instruction *Res = nullptr;
  unsigned Opc = I->getOpcode();
  switch (Opc) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::AShr:
  case Instruction::LShr:
  case Instruction::Shl:
  case Instruction::UDiv:
  case Instruction::URem: {
    Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
    Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
    break;
  }
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
    // If the source type of the cast is the type we're trying for then we can
    // just return the source. There's no need to insert it because it is not
    // new.
    if (I->getOperand(0)->getType() == Ty)
      return I->getOperand(0);

    // Otherwise, must be the same type of cast, so just reinsert a new one.
    // This also handles the case of zext(trunc(x)) -> zext(x).
    Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty,
                                      Opc == Instruction::SExt);
    break;
  case Instruction::Select: {
    Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
    Res = SelectInst::Create(I->getOperand(0), True, False);
    break;
  }
  case Instruction::PHI: {
    PHINode *OPN = cast<PHINode>(I);
    PHINode *NPN = PHINode::Create(Ty, OPN->getNumIncomingValues());
    for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
      Value *V =
          EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
      NPN->addIncoming(V, OPN->getIncomingBlock(i));
    }
    Res = NPN;
    break;
  }
  default:
    // TODO: Can handle more cases here.
    llvm_unreachable("Unreachable!");
  }

  Res->takeName(I);
  return InsertNewInstWith(Res, *I);
}

Instruction::CastOps InstCombiner::isEliminableCastPair(const CastInst *CI1,
                                                        const CastInst *CI2) {
  Type *SrcTy = CI1->getSrcTy();
  Type *MidTy = CI1->getDestTy();
  Type *DstTy = CI2->getDestTy();

  Instruction::CastOps firstOp = Instruction::CastOps(CI1->getOpcode());
  Instruction::CastOps secondOp = Instruction::CastOps(CI2->getOpcode());
  Type *SrcIntPtrTy =
      SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr;
  Type *MidIntPtrTy =
      MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr;
  Type *DstIntPtrTy =
      DstTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(DstTy) : nullptr;
  unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
                                                DstTy, SrcIntPtrTy, MidIntPtrTy,
                                                DstIntPtrTy);

  // We don't want to form an inttoptr or ptrtoint that converts to an integer
  // type that differs from the pointer size.
  if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
      (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
    Res = 0;

  return Instruction::CastOps(Res);
}

/// @brief Implement the transforms common to all CastInst visitors.
Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);

  // Try to eliminate a cast of a cast.
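  // E.g. (illustrative): in trunc (zext i8 %x to i32) to i16, the pair is
  // eliminable and is replaced by a single zext i8 %x to i16.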
  if (auto *CSrc = dyn_cast<CastInst>(Src)) {   // A->B->C cast
    if (Instruction::CastOps NewOpc = isEliminableCastPair(CSrc, &CI)) {
      // The first cast (CSrc) is eliminable so we need to fix up or replace
      // the second cast (CI). CSrc will then have a good chance of being dead.
      return CastInst::Create(NewOpc, CSrc->getOperand(0), CI.getType());
    }
  }

  // If we are casting a select, then fold the cast into the select.
  if (auto *SI = dyn_cast<SelectInst>(Src))
    if (Instruction *NV = FoldOpIntoSelect(CI, SI))
      return NV;

  // If we are casting a PHI, then fold the cast into the PHI.
  if (auto *PN = dyn_cast<PHINode>(Src)) {
    // Don't do this if it would create a PHI node with an illegal type from a
    // legal type.
    if (!Src->getType()->isIntegerTy() || !CI.getType()->isIntegerTy() ||
        shouldChangeType(CI.getType(), Src->getType()))
      if (Instruction *NV = foldOpIntoPhi(CI, PN))
        return NV;
  }

  return nullptr;
}

/// Return true if we can evaluate the specified expression tree as type Ty
/// instead of its larger type, and arrive with the same value.
/// This is used by code that tries to eliminate truncates.
///
/// Ty will always be a type smaller than V. We should return true if trunc(V)
/// can be computed by computing V in the smaller type. If V is an instruction,
/// then trunc(inst(x,y)) can be computed as inst(trunc(x),trunc(y)), which only
/// makes sense if x and y can be efficiently truncated.
///
/// This function works on both vectors and scalars.
///
static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombiner &IC,
                                 Instruction *CxtI) {
  // We can always evaluate constants in another type.
  if (isa<Constant>(V))
    return true;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  Type *OrigTy = V->getType();

  // If this is an extension from the dest type, we can eliminate it, even if it
  // has multiple uses.
  if ((isa<ZExtInst>(I) || isa<SExtInst>(I)) &&
      I->getOperand(0)->getType() == Ty)
    return true;

  // We can't extend or shrink something that has multiple uses: doing so would
  // require duplicating the instruction in general, which isn't profitable.
  if (!I->hasOneUse()) return false;

  unsigned Opc = I->getOpcode();
  switch (Opc) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // These operators can all arbitrarily be extended or truncated.
    return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
           canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);

  case Instruction::UDiv:
  case Instruction::URem: {
    // UDiv and URem can be truncated if all the truncated bits are zero.
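    // E.g. (illustrative): a udiv of two i32 values whose upper 16 bits are
    // known zero produces the same low 16 bits when performed as an i16 udiv.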
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    if (BitWidth < OrigBitWidth) {
      APInt Mask = APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth);
      if (IC.MaskedValueIsZero(I->getOperand(0), Mask, 0, CxtI) &&
          IC.MaskedValueIsZero(I->getOperand(1), Mask, 0, CxtI)) {
        return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
               canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
      }
    }
    break;
  }
  case Instruction::Shl:
    // If we are truncating the result of this SHL, and if it's a shift of a
    // constant amount, we can always perform a SHL in a smaller type.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint32_t BitWidth = Ty->getScalarSizeInBits();
      if (CI->getLimitedValue(BitWidth) < BitWidth)
        return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI);
    }
    break;
  case Instruction::LShr:
    // If this is a truncate of a logical shr, we can truncate it to a smaller
    // lshr iff we know that the bits we would otherwise be shifting in are
    // already zeros.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
      uint32_t BitWidth = Ty->getScalarSizeInBits();
      if (IC.MaskedValueIsZero(I->getOperand(0),
            APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth), 0, CxtI) &&
          CI->getLimitedValue(BitWidth) < BitWidth) {
        return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI);
      }
    }
    break;
  case Instruction::Trunc:
    // trunc(trunc(x)) -> trunc(x)
    return true;
  case Instruction::ZExt:
  case Instruction::SExt:
    // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest
    // trunc(ext(x)) -> trunc(x) if the source type is larger than the new dest
    return true;
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    return canEvaluateTruncated(SI->getTrueValue(), Ty, IC, CxtI) &&
           canEvaluateTruncated(SI->getFalseValue(), Ty, IC, CxtI);
  }
  case Instruction::PHI: {
    // We can change a phi if we can change all operands. Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateTruncated(IncValue, Ty, IC, CxtI))
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}

/// Given a vector that is bitcast to an integer, optionally logically
/// right-shifted, and truncated, convert it to an extractelement.
/// Example (big endian):
///   trunc (lshr (bitcast <4 x i32> %X to i128), 32) to i32
///   --->
///   extractelement <4 x i32> %X, 1
static Instruction *foldVecTruncToExtElt(TruncInst &Trunc, InstCombiner &IC,
                                         const DataLayout &DL) {
  Value *TruncOp = Trunc.getOperand(0);
  Type *DestType = Trunc.getType();
  if (!TruncOp->hasOneUse() || !isa<IntegerType>(DestType))
    return nullptr;

  Value *VecInput = nullptr;
  ConstantInt *ShiftVal = nullptr;
  if (!match(TruncOp, m_CombineOr(m_BitCast(m_Value(VecInput)),
                                  m_LShr(m_BitCast(m_Value(VecInput)),
                                         m_ConstantInt(ShiftVal)))) ||
      !isa<VectorType>(VecInput->getType()))
    return nullptr;

  VectorType *VecType = cast<VectorType>(VecInput->getType());
  unsigned VecWidth = VecType->getPrimitiveSizeInBits();
  unsigned DestWidth = DestType->getPrimitiveSizeInBits();
  unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0;

  if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0))
    return nullptr;

  // If the element type of the vector doesn't match the result type,
  // bitcast it to a vector type that we can extract from.
  unsigned NumVecElts = VecWidth / DestWidth;
  if (VecType->getElementType() != DestType) {
    VecType = VectorType::get(DestType, NumVecElts);
    VecInput = IC.Builder->CreateBitCast(VecInput, VecType, "bc");
  }

  unsigned Elt = ShiftAmount / DestWidth;
  if (DL.isBigEndian())
    Elt = NumVecElts - 1 - Elt;

  return ExtractElementInst::Create(VecInput, IC.Builder->getInt32(Elt));
}

/// Try to narrow the width of bitwise logic instructions with constants.
Instruction *InstCombiner::shrinkBitwiseLogic(TruncInst &Trunc) {
  Type *SrcTy = Trunc.getSrcTy();
  Type *DestTy = Trunc.getType();
  if (isa<IntegerType>(SrcTy) && !shouldChangeType(SrcTy, DestTy))
    return nullptr;

  BinaryOperator *LogicOp;
  Constant *C;
  if (!match(Trunc.getOperand(0), m_OneUse(m_BinOp(LogicOp))) ||
      !LogicOp->isBitwiseLogicOp() ||
      !match(LogicOp->getOperand(1), m_Constant(C)))
    return nullptr;

  // trunc (logic X, C) --> logic (trunc X, C')
  Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
  Value *NarrowOp0 = Builder->CreateTrunc(LogicOp->getOperand(0), DestTy);
  return BinaryOperator::Create(LogicOp->getOpcode(), NarrowOp0, NarrowC);
}

/// Try to narrow the width of a splat shuffle. This could be generalized to any
/// shuffle with a constant operand, but we limit the transform to avoid
/// creating a shuffle type that targets may not be able to lower effectively.
static Instruction *shrinkSplatShuffle(TruncInst &Trunc,
                                       InstCombiner::BuilderTy &Builder) {
  auto *Shuf = dyn_cast<ShuffleVectorInst>(Trunc.getOperand(0));
  if (Shuf && Shuf->hasOneUse() && isa<UndefValue>(Shuf->getOperand(1)) &&
      Shuf->getMask()->getSplatValue() &&
      Shuf->getType() == Shuf->getOperand(0)->getType()) {
    // trunc (shuf X, Undef, SplatMask) --> shuf (trunc X), Undef, SplatMask
    Constant *NarrowUndef = UndefValue::get(Trunc.getType());
    Value *NarrowOp = Builder.CreateTrunc(Shuf->getOperand(0), Trunc.getType());
    return new ShuffleVectorInst(NarrowOp, NarrowUndef, Shuf->getMask());
  }

  return nullptr;
}

/// Try to narrow the width of an insert element. This could be generalized for
/// any vector constant, but we limit the transform to insertion into undef to
/// avoid potential backend problems from unsupported insertion widths. This
/// could also be extended to handle the case of inserting a scalar constant
/// into a vector variable.
static Instruction *shrinkInsertElt(CastInst &Trunc,
                                    InstCombiner::BuilderTy &Builder) {
  Instruction::CastOps Opcode = Trunc.getOpcode();
  assert((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
         "Unexpected instruction for shrinking");

  auto *InsElt = dyn_cast<InsertElementInst>(Trunc.getOperand(0));
  if (!InsElt || !InsElt->hasOneUse())
    return nullptr;

  Type *DestTy = Trunc.getType();
  Type *DestScalarTy = DestTy->getScalarType();
  Value *VecOp = InsElt->getOperand(0);
  Value *ScalarOp = InsElt->getOperand(1);
  Value *Index = InsElt->getOperand(2);

  if (isa<UndefValue>(VecOp)) {
    // trunc   (inselt undef, X, Index) --> inselt undef,   (trunc X), Index
    // fptrunc (inselt undef, X, Index) --> inselt undef, (fptrunc X), Index
    UndefValue *NarrowUndef = UndefValue::get(DestTy);
    Value *NarrowOp = Builder.CreateCast(Opcode, ScalarOp, DestScalarTy);
    return InsertElementInst::Create(NarrowUndef, NarrowOp, Index);
  }

  return nullptr;
}

Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
  if (Instruction *Result = commonCastTransforms(CI))
    return Result;

  // Test if the trunc is the user of a select which is part of a
  // minimum or maximum operation. If so, don't do any more simplification.
  // Even simplifying demanded bits can break the canonical form of a
  // min/max.
  Value *LHS, *RHS;
  if (SelectInst *SI = dyn_cast<SelectInst>(CI.getOperand(0)))
    if (matchSelectPattern(SI, LHS, RHS).Flavor != SPF_UNKNOWN)
      return nullptr;

  // See if we can simplify any instructions used by the input whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(CI))
    return &CI;

  Value *Src = CI.getOperand(0);
  Type *DestTy = CI.getType(), *SrcTy = Src->getType();

  // Attempt to truncate the entire input expression tree to the destination
  // type. Only do this if the dest type is a simple type, don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
  if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
      canEvaluateTruncated(Src, DestTy, *this, &CI)) {

    // If this cast is a truncate, evaluating in a different type always
    // eliminates the cast, so it is always a win.
    DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                    " to avoid cast: " << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);
    return replaceInstUsesWith(CI, Res);
  }

  // Canonicalize trunc x to i1 -> (icmp ne (and x, 1), 0), likewise for vector.
  if (DestTy->getScalarSizeInBits() == 1) {
    Constant *One = ConstantInt::get(SrcTy, 1);
    Src = Builder->CreateAnd(Src, One);
    Value *Zero = Constant::getNullValue(Src->getType());
    return new ICmpInst(ICmpInst::ICMP_NE, Src, Zero);
  }

  // Transform trunc(lshr (zext A), Cst) to eliminate one type conversion.
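  // E.g. (illustrative): trunc (lshr (zext i8 %A to i32), 2) to i8 becomes
  // lshr i8 %A, 2, and a shift amount of 8 or more folds to zero.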
  Value *A = nullptr; ConstantInt *Cst = nullptr;
  if (Src->hasOneUse() &&
      match(Src, m_LShr(m_ZExt(m_Value(A)), m_ConstantInt(Cst)))) {
    // We have three types to worry about here, the type of A, the source of
    // the truncate (MidSize), and the destination of the truncate. We know that
    // ASize < MidSize and MidSize > ResultSize, but don't know the relation
    // between ASize and ResultSize.
    unsigned ASize = A->getType()->getPrimitiveSizeInBits();

    // If the shift amount is larger than the size of A, then the result is
    // known to be zero because all the input bits got shifted out.
    if (Cst->getZExtValue() >= ASize)
      return replaceInstUsesWith(CI, Constant::getNullValue(DestTy));

    // Since we're doing an lshr and a zero extend, and know that the shift
    // amount is smaller than ASize, it is always safe to do the shift in A's
    // type, then zero extend or truncate to the result.
    Value *Shift = Builder->CreateLShr(A, Cst->getZExtValue());
    Shift->takeName(Src);
    return CastInst::CreateIntegerCast(Shift, DestTy, false);
  }

  // Transform trunc(lshr (sext A), Cst) to ashr A, Cst to eliminate type
  // conversion.
  // It works because bits coming from sign extension have the same value as
  // the sign bit of the original value; performing ashr instead of lshr
  // generates bits of the same value as the sign bit.
  if (Src->hasOneUse() &&
      match(Src, m_LShr(m_SExt(m_Value(A)), m_ConstantInt(Cst))) &&
      cast<Instruction>(Src)->getOperand(0)->hasOneUse()) {
    const unsigned ASize = A->getType()->getPrimitiveSizeInBits();
    // This optimization can be only performed when zero bits generated by
    // the original lshr aren't pulled into the value after truncation, so we
    // can only shift by values smaller than the size of destination type (in
    // bits).
    if (Cst->getValue().ult(ASize)) {
      Value *Shift = Builder->CreateAShr(A, Cst->getZExtValue());
      Shift->takeName(Src);
      return CastInst::CreateIntegerCast(Shift, CI.getType(), true);
    }
  }

  if (Instruction *I = shrinkBitwiseLogic(CI))
    return I;

  if (Instruction *I = shrinkSplatShuffle(CI, *Builder))
    return I;

  if (Instruction *I = shrinkInsertElt(CI, *Builder))
    return I;

  if (Src->hasOneUse() && isa<IntegerType>(SrcTy) &&
      shouldChangeType(SrcTy, DestTy)) {
    // Transform "trunc (shl X, cst)" -> "shl (trunc X), cst" so long as the
    // dest type is native and cst < dest size.
    if (match(Src, m_Shl(m_Value(A), m_ConstantInt(Cst))) &&
        !match(A, m_Shr(m_Value(), m_Constant()))) {
      // Skip shifts of shift by constants. It undoes a combine in
      // FoldShiftByConstant and is the extend in reg pattern.
      const unsigned DestSize = DestTy->getScalarSizeInBits();
      if (Cst->getValue().ult(DestSize)) {
        Value *NewTrunc = Builder->CreateTrunc(A, DestTy, A->getName() + ".tr");

        return BinaryOperator::Create(
          Instruction::Shl, NewTrunc,
          ConstantInt::get(DestTy, Cst->getValue().trunc(DestSize)));
      }
    }
  }

  if (Instruction *I = foldVecTruncToExtElt(CI, *this, DL))
    return I;

  return nullptr;
}

Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, ZExtInst &CI,
                                             bool DoTransform) {
  // If we are just checking for an icmp eq of a single bit and zext'ing it
  // to an integer, then shift the bit to the appropriate place and then
  // cast to integer to avoid the comparison.
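  // E.g. (illustrative): zext (icmp ne (and i32 %x, 8), 0) to i32 can become
  // lshr (and i32 %x, 8), 3.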
  if (ConstantInt *Op1C = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
    const APInt &Op1CV = Op1C->getValue();

    // zext (x <s  0) to i32 --> x>>u31      true if signbit set.
    // zext (x >s -1) to i32 --> (x>>u31)^1  true if signbit clear.
    if ((ICI->getPredicate() == ICmpInst::ICMP_SLT && Op1CV == 0) ||
        (ICI->getPredicate() == ICmpInst::ICMP_SGT && Op1CV.isAllOnesValue())) {
      if (!DoTransform) return ICI;

      Value *In = ICI->getOperand(0);
      Value *Sh = ConstantInt::get(In->getType(),
                                   In->getType()->getScalarSizeInBits() - 1);
      In = Builder->CreateLShr(In, Sh, In->getName() + ".lobit");
      if (In->getType() != CI.getType())
        In = Builder->CreateIntCast(In, CI.getType(), false /*ZExt*/);

      if (ICI->getPredicate() == ICmpInst::ICMP_SGT) {
        Constant *One = ConstantInt::get(In->getType(), 1);
        In = Builder->CreateXor(In, One, In->getName() + ".not");
      }

      return replaceInstUsesWith(CI, In);
    }

    // zext (X == 0) to i32 --> X^1      iff X has only the low bit set.
    // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    // zext (X == 1) to i32 --> X        iff X has only the low bit set.
    // zext (X == 2) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 0) to i32 --> X        iff X has only the low bit set.
    // zext (X != 0) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 1) to i32 --> X^1      iff X has only the low bit set.
    // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    if ((Op1CV == 0 || Op1CV.isPowerOf2()) &&
        // This only works for EQ and NE
        ICI->isEquality()) {
      // If Op1C is some other power of two, convert:
      KnownBits Known(Op1C->getType()->getBitWidth());
      computeKnownBits(ICI->getOperand(0), Known, 0, &CI);

      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
        if (!DoTransform) return ICI;

        bool isNE = ICI->getPredicate() == ICmpInst::ICMP_NE;
        if (Op1CV != 0 && (Op1CV != KnownZeroMask)) {
          // (X&4) == 2 --> false
          // (X&4) != 2 --> true
          Constant *Res = ConstantInt::get(Type::getInt1Ty(CI.getContext()),
                                           isNE);
          Res = ConstantExpr::getZExt(Res, CI.getType());
          return replaceInstUsesWith(CI, Res);
        }

        uint32_t ShAmt = KnownZeroMask.logBase2();
        Value *In = ICI->getOperand(0);
        if (ShAmt) {
          // Perform a logical shr by shiftamt.
          // Insert the shift to put the result in the low bit.
          In = Builder->CreateLShr(In, ConstantInt::get(In->getType(), ShAmt),
                                   In->getName() + ".lobit");
        }

        if ((Op1CV != 0) == isNE) { // Toggle the low bit.
          Constant *One = ConstantInt::get(In->getType(), 1);
          In = Builder->CreateXor(In, One);
        }

        if (CI.getType() == In->getType())
          return replaceInstUsesWith(CI, In);

        Value *IntCast = Builder->CreateIntCast(In, CI.getType(), false);
        return replaceInstUsesWith(CI, IntCast);
      }
    }
  }

  // icmp ne A, B is equal to xor A, B when A and B only really have one bit.
  // It is also profitable to transform icmp eq into not(xor(A, B)) because that
  // may lead to additional simplifications.
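  // E.g. (illustrative): if %a and %b are known to agree in every bit except
  // bit 4, then zext (icmp ne i32 %a, %b) to i32 can be computed as
  // lshr (xor %a, %b), 4 (after masking off known-one bits, if needed).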
  if (ICI->isEquality() && CI.getType() == ICI->getOperand(0)->getType()) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(CI.getType())) {
      uint32_t BitWidth = ITy->getBitWidth();
      Value *LHS = ICI->getOperand(0);
      Value *RHS = ICI->getOperand(1);

      KnownBits KnownLHS(BitWidth);
      KnownBits KnownRHS(BitWidth);
      computeKnownBits(LHS, KnownLHS, 0, &CI);
      computeKnownBits(RHS, KnownRHS, 0, &CI);

      if (KnownLHS.Zero == KnownRHS.Zero && KnownLHS.One == KnownRHS.One) {
        APInt KnownBits = KnownLHS.Zero | KnownLHS.One;
        APInt UnknownBit = ~KnownBits;
        if (UnknownBit.countPopulation() == 1) {
          if (!DoTransform) return ICI;

          Value *Result = Builder->CreateXor(LHS, RHS);

          // Mask off any bits that are set and won't be shifted away.
          if (KnownLHS.One.uge(UnknownBit))
            Result = Builder->CreateAnd(Result,
                                        ConstantInt::get(ITy, UnknownBit));

          // Shift the bit we're testing down to the lsb.
          Result = Builder->CreateLShr(
               Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros()));

          if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
            Result = Builder->CreateXor(Result, ConstantInt::get(ITy, 1));
          Result->takeName(ICI);
          return replaceInstUsesWith(CI, Result);
        }
      }
    }
  }

  return nullptr;
}

/// Determine if the specified value can be computed in the specified wider type
/// and produce the same low bits. If not, return false.
///
/// If this function returns true, it can also return a non-zero number of bits
/// (in BitsToClear) which indicates that the value it computes is correct for
/// the zero extend, but that the additional BitsToClear bits need to be zero'd
/// out. For example, to promote something like:
///
///   %B = trunc i64 %A to i32
///   %C = lshr i32 %B, 8
///   %E = zext i32 %C to i64
///
/// CanEvaluateZExtd for the 'lshr' will return true, and BitsToClear will be
/// set to 8 to indicate that the promoted value needs to have bits 24-31
/// cleared in addition to bits 32-63. Since an 'and' will be generated to
/// clear the top bits anyway, doing this has no extra cost.
///
/// This function works on both vectors and scalars.
static bool canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear,
                             InstCombiner &IC, Instruction *CxtI) {
  BitsToClear = 0;
  if (isa<Constant>(V))
    return true;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // If the input is a truncate from the destination type, we can trivially
  // eliminate it.
  if (isa<TruncInst>(I) && I->getOperand(0)->getType() == Ty)
    return true;

  // We can't extend or shrink something that has multiple uses: doing so would
  // require duplicating the instruction in general, which isn't profitable.
  if (!I->hasOneUse()) return false;

  unsigned Opc = I->getOpcode(), Tmp;
  switch (Opc) {
  case Instruction::ZExt:  // zext(zext(x)) -> zext(x).
  case Instruction::SExt:  // zext(sext(x)) -> sext(x).
  case Instruction::Trunc: // zext(trunc(x)) -> trunc(x) or zext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI))
      return false;
    // These can all be promoted if neither operand has 'bits to clear'.
    if (BitsToClear == 0 && Tmp == 0)
      return true;

    // If the operation is an AND/OR/XOR and the bits to clear are zero in the
    // other side, BitsToClear is ok.
    if (Tmp == 0 && I->isBitwiseLogicOp()) {
      // We use MaskedValueIsZero here for generality, but the case we care
      // about the most is constant RHS.
      unsigned VSize = V->getType()->getScalarSizeInBits();
      if (IC.MaskedValueIsZero(I->getOperand(1),
                               APInt::getHighBitsSet(VSize, BitsToClear),
                               0, CxtI))
        return true;
    }

    // Otherwise, we don't know how to analyze this BitsToClear case yet.
    return false;

  case Instruction::Shl:
    // We can promote shl(x, cst) if we can promote x. Since shl overwrites the
    // upper bits we can reduce BitsToClear by the shift amount.
    if (ConstantInt *Amt = dyn_cast<ConstantInt>(I->getOperand(1))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      uint64_t ShiftAmt = Amt->getZExtValue();
      BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0;
      return true;
    }
    return false;
  case Instruction::LShr:
    // We can promote lshr(x, cst) if we can promote x. This requires the
    // ultimate 'and' to clear out the high zero bits we're clearing out though.
    if (ConstantInt *Amt = dyn_cast<ConstantInt>(I->getOperand(1))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      BitsToClear += Amt->getZExtValue();
      if (BitsToClear > V->getType()->getScalarSizeInBits())
        BitsToClear = V->getType()->getScalarSizeInBits();
      return true;
    }
    // Cannot promote variable LSHR.
    return false;
  case Instruction::Select:
    if (!canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(2), Ty, BitsToClear, IC, CxtI) ||
        // TODO: If important, we could handle the case when the BitsToClear are
        // known zero in the disagreeing side.
        Tmp != BitsToClear)
      return false;
    return true;

  case Instruction::PHI: {
    // We can change a phi if we can change all operands. Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    if (!canEvaluateZExtd(PN->getIncomingValue(0), Ty, BitsToClear, IC, CxtI))
      return false;
    for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i)
      if (!canEvaluateZExtd(PN->getIncomingValue(i), Ty, Tmp, IC, CxtI) ||
          // TODO: If important, we could handle the case when the BitsToClear
          // are known zero in the disagreeing input.
          Tmp != BitsToClear)
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    return false;
  }
}

Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
  // If this zero extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this zext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  // If one of the common conversions will work, do it.
  if (Instruction *Result = commonCastTransforms(CI))
    return Result;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

  // Attempt to extend the entire input expression tree to the destination
  // type. Only do this if the dest type is a simple type, don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
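  // E.g. (illustrative): zext (and i32 (trunc i64 %x to i32), 15) to i64 can
  // be evaluated directly in i64 as: and i64 %x, 15.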
  unsigned BitsToClear;
  if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
      canEvaluateZExtd(Src, DestTy, BitsToClear, *this, &CI)) {
    assert(BitsToClear <= SrcTy->getScalarSizeInBits() &&
           "Can't clear more bits than in SrcTy");

    // Okay, we can transform this!  Insert the new expression now.
    DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                    " to avoid zero extend: " << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);

    uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits()-BitsToClear;
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with zeros, just replace this
    // cast with the result.
    if (MaskedValueIsZero(Res,
                          APInt::getHighBitsSet(DestBitSize,
                                                DestBitSize-SrcBitsKept),
                          0, &CI))
      return replaceInstUsesWith(CI, Res);

    // We need to emit an AND to clear the high bits.
    Constant *C = ConstantInt::get(Res->getType(),
                               APInt::getLowBitsSet(DestBitSize, SrcBitsKept));
    return BinaryOperator::CreateAnd(Res, C);
  }

  // If this is a TRUNC followed by a ZEXT then we are dealing with integral
  // types and if the sizes are just right we can convert this into a logical
  // 'and' which will be much cheaper than the pair of casts.
  if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) {   // A->B->C cast
    // TODO: Subsume this into EvaluateInDifferentType.

    // Get the sizes of the types involved. We know that the intermediate type
    // will be smaller than A or C, but don't know the relation between A and C.
    Value *A = CSrc->getOperand(0);
    unsigned SrcSize = A->getType()->getScalarSizeInBits();
    unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
    unsigned DstSize = CI.getType()->getScalarSizeInBits();
    // If we're actually extending zero bits, then if
    // SrcSize <  DstSize: zext(a & mask)
    // SrcSize == DstSize: a & mask
    // SrcSize  > DstSize: trunc(a) & mask
    if (SrcSize < DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
      Value *And = Builder->CreateAnd(A, AndConst, CSrc->getName()+".mask");
      return new ZExtInst(And, CI.getType());
    }

    if (SrcSize == DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
                                                           AndValue));
    }
    if (SrcSize > DstSize) {
      Value *Trunc = Builder->CreateTrunc(A, CI.getType());
      APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
      return BinaryOperator::CreateAnd(Trunc,
                                       ConstantInt::get(Trunc->getType(),
                                                        AndValue));
    }
  }

  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
    return transformZExtICmp(ICI, CI);

  BinaryOperator *SrcI = dyn_cast<BinaryOperator>(Src);
  if (SrcI && SrcI->getOpcode() == Instruction::Or) {
    // zext (or icmp, icmp) -> or (zext icmp), (zext icmp) if at least one
    // of the (zext icmp) can be eliminated. If so, immediately perform the
    // according elimination.
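    // E.g. (illustrative): if one of the compares is icmp slt i32 %x, 0, its
    // zext folds to a sign-bit extraction (lshr %x, 31), so splitting the 'or'
    // into two zexts pays for itself.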
    ICmpInst *LHS = dyn_cast<ICmpInst>(SrcI->getOperand(0));
    ICmpInst *RHS = dyn_cast<ICmpInst>(SrcI->getOperand(1));
    if (LHS && RHS && LHS->hasOneUse() && RHS->hasOneUse() &&
        (transformZExtICmp(LHS, CI, false) ||
         transformZExtICmp(RHS, CI, false))) {
      // zext (or icmp, icmp) -> or (zext icmp), (zext icmp)
      Value *LCast = Builder->CreateZExt(LHS, CI.getType(), LHS->getName());
      Value *RCast = Builder->CreateZExt(RHS, CI.getType(), RHS->getName());
      BinaryOperator *Or = BinaryOperator::Create(Instruction::Or, LCast, RCast);

      // Perform the elimination.
      if (auto *LZExt = dyn_cast<ZExtInst>(LCast))
        transformZExtICmp(LHS, *LZExt);
      if (auto *RZExt = dyn_cast<ZExtInst>(RCast))
        transformZExtICmp(RHS, *RZExt);

      return Or;
    }
  }

  // zext(trunc(X) & C) -> (X & zext(C)).
  Constant *C;
  Value *X;
  if (SrcI &&
      match(SrcI, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Constant(C)))) &&
      X->getType() == CI.getType())
    return BinaryOperator::CreateAnd(X, ConstantExpr::getZExt(C, CI.getType()));

  // zext((trunc(X) & C) ^ C) -> ((X & zext(C)) ^ zext(C)).
  Value *And;
  if (SrcI && match(SrcI, m_OneUse(m_Xor(m_Value(And), m_Constant(C)))) &&
      match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) &&
      X->getType() == CI.getType()) {
    Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
    return BinaryOperator::CreateXor(Builder->CreateAnd(X, ZC), ZC);
  }

  return nullptr;
}

/// Transform (sext icmp) to bitwise / integer operations to eliminate the icmp.
Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
  Value *Op0 = ICI->getOperand(0), *Op1 = ICI->getOperand(1);
  ICmpInst::Predicate Pred = ICI->getPredicate();

  // Don't bother if Op1 isn't of vector or integer type.
  if (!Op1->getType()->isIntOrIntVectorTy())
    return nullptr;

  if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
    // (x <s  0) ? -1 : 0 -> ashr x, 31        -> all ones if negative
    // (x >s -1) ? -1 : 0 -> not (ashr x, 31)  -> all ones if positive
    if ((Pred == ICmpInst::ICMP_SLT && Op1C->isNullValue()) ||
        (Pred == ICmpInst::ICMP_SGT && Op1C->isAllOnesValue())) {

      Value *Sh = ConstantInt::get(Op0->getType(),
                                   Op0->getType()->getScalarSizeInBits()-1);
      Value *In = Builder->CreateAShr(Op0, Sh, Op0->getName()+".lobit");
      if (In->getType() != CI.getType())
        In = Builder->CreateIntCast(In, CI.getType(), true /*SExt*/);

      if (Pred == ICmpInst::ICMP_SGT)
        In = Builder->CreateNot(In, In->getName()+".not");
      return replaceInstUsesWith(CI, In);
    }
  }

  if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
    // If we know that only one bit of the LHS of the icmp can be set and we
    // have an equality comparison with zero or a power of 2, we can transform
    // the icmp and sext into bitwise/integer operations.
    if (ICI->hasOneUse() &&
        ICI->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){
      unsigned BitWidth = Op1C->getType()->getBitWidth();
      KnownBits Known(BitWidth);
      computeKnownBits(Op0, Known, 0, &CI);

      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2()) {
        Value *In = ICI->getOperand(0);

        // If the icmp tests for a known zero bit we can constant fold it.
        if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) {
          Value *V = Pred == ICmpInst::ICMP_NE ?
                       ConstantInt::getAllOnesValue(CI.getType()) :
                       ConstantInt::getNullValue(CI.getType());
          return replaceInstUsesWith(CI, V);
        }

        if (!Op1C->isZero() == (Pred == ICmpInst::ICMP_NE)) {
          // sext ((x & 2^n) == 0)   -> (x >> n) - 1
          // sext ((x & 2^n) != 2^n) -> (x >> n) - 1
          unsigned ShiftAmt = KnownZeroMask.countTrailingZeros();
          // Perform a right shift to place the desired bit in the LSB.
          if (ShiftAmt)
            In = Builder->CreateLShr(In,
                                     ConstantInt::get(In->getType(), ShiftAmt));

          // At this point "In" is either 1 or 0. Subtract 1 to turn
          // {1, 0} -> {0, -1}.
          In = Builder->CreateAdd(In,
                                  ConstantInt::getAllOnesValue(In->getType()),
                                  "sext");
        } else {
          // sext ((x & 2^n) != 0)   -> (x << bitwidth-n) a>> bitwidth-1
          // sext ((x & 2^n) == 2^n) -> (x << bitwidth-n) a>> bitwidth-1
          unsigned ShiftAmt = KnownZeroMask.countLeadingZeros();
          // Perform a left shift to place the desired bit in the MSB.
          if (ShiftAmt)
            In = Builder->CreateShl(In,
                                    ConstantInt::get(In->getType(), ShiftAmt));

          // Distribute the bit over the whole bit width.
          In = Builder->CreateAShr(In, ConstantInt::get(In->getType(),
                                                        BitWidth - 1), "sext");
        }

        if (CI.getType() == In->getType())
          return replaceInstUsesWith(CI, In);
        return CastInst::CreateIntegerCast(In, CI.getType(), true /*SExt*/);
      }
    }
  }

  return nullptr;
}

/// Return true if we can take the specified value and return it as type Ty
/// without inserting any new casts and without changing the value of the common
/// low bits. This is used by code that tries to promote integer operations to
/// a wider type, which will allow us to eliminate the extension.
///
/// This function works on both vectors and scalars.
///
static bool canEvaluateSExtd(Value *V, Type *Ty) {
  assert(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
         "Can't sign extend type to a smaller type");
  // If this is a constant, it can be trivially promoted.
  if (isa<Constant>(V))
    return true;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // If this is a truncate from the dest type, we can trivially eliminate it.
  if (isa<TruncInst>(I) && I->getOperand(0)->getType() == Ty)
    return true;

  // We can't extend or shrink something that has multiple uses: doing so would
  // require duplicating the instruction in general, which isn't profitable.
  if (!I->hasOneUse()) return false;

  switch (I->getOpcode()) {
  case Instruction::SExt:  // sext(sext(x)) -> sext(x)
  case Instruction::ZExt:  // sext(zext(x)) -> zext(x)
  case Instruction::Trunc: // sext(trunc(x)) -> trunc(x) or sext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    // These operators can all arbitrarily be extended if their inputs can.
    return canEvaluateSExtd(I->getOperand(0), Ty) &&
           canEvaluateSExtd(I->getOperand(1), Ty);

  //case Instruction::Shl:   TODO
  //case Instruction::LShr:  TODO

  case Instruction::Select:
    return canEvaluateSExtd(I->getOperand(1), Ty) &&
           canEvaluateSExtd(I->getOperand(2), Ty);

  case Instruction::PHI: {
    // We can change a phi if we can change all operands. Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateSExtd(IncValue, Ty)) return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}

Instruction *InstCombiner::visitSExt(SExtInst &CI) {
  // If this sign extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this sext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  if (Instruction *I = commonCastTransforms(CI))
    return I;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

  // If we know that the value being extended is positive, we can use a zext
  // instead.
  bool KnownZero, KnownOne;
  ComputeSignBit(Src, KnownZero, KnownOne, 0, &CI);
  if (KnownZero) {
    Value *ZExt = Builder->CreateZExt(Src, DestTy);
    return replaceInstUsesWith(CI, ZExt);
  }

  // Attempt to extend the entire input expression tree to the destination
  // type. Only do this if the dest type is a simple type, don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
  if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
      canEvaluateSExtd(Src, DestTy)) {
    // Okay, we can transform this!  Insert the new expression now.
    DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                    " to avoid sign extend: " << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, true);
    assert(Res->getType() == DestTy);

    uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with sign bit, just replace this
    // cast with the result.
    if (ComputeNumSignBits(Res, 0, &CI) > DestBitSize - SrcBitSize)
      return replaceInstUsesWith(CI, Res);

    // We need to emit a shl + ashr to do the sign extend.
    Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
    return BinaryOperator::CreateAShr(Builder->CreateShl(Res, ShAmt, "sext"),
                                      ShAmt);
  }

  // If the input is a trunc from the destination type, then turn sext(trunc(x))
  // into shifts.
  Value *X;
  if (match(Src, m_OneUse(m_Trunc(m_Value(X)))) && X->getType() == DestTy) {
    // sext(trunc(X)) --> ashr(shl(X, C), C)
    unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
    unsigned DestBitSize = DestTy->getScalarSizeInBits();
    Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
    return BinaryOperator::CreateAShr(Builder->CreateShl(X, ShAmt), ShAmt);
  }

  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
    return transformSExtICmp(ICI, CI);

  // If the input is a shl/ashr pair of the same constant, then this is a sign
  // extension from a smaller value. If we could trust arbitrary bitwidth
  // integers, we could turn this into a truncate to the smaller bit and then
  // use a sext for the whole extension. Since we don't, look deeper and check
  // for a truncate. If the source and dest are the same type, eliminate the
  // trunc and extend and just do shifts. For example, turn:
  //   %a = trunc i32 %i to i8
  //   %b = shl i8 %a, 6
  //   %c = ashr i8 %b, 6
  //   %d = sext i8 %c to i32
  // into:
  //   %a = shl i32 %i, 30
  //   %d = ashr i32 %a, 30
  Value *A = nullptr;
  // TODO: Eventually this could be subsumed by EvaluateInDifferentType.
  ConstantInt *BA = nullptr, *CA = nullptr;
  if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_ConstantInt(BA)),
                        m_ConstantInt(CA))) &&
      BA == CA && A->getType() == CI.getType()) {
    unsigned MidSize = Src->getType()->getScalarSizeInBits();
    unsigned SrcDstSize = CI.getType()->getScalarSizeInBits();
    unsigned ShAmt = CA->getZExtValue()+SrcDstSize-MidSize;
    Constant *ShAmtV = ConstantInt::get(CI.getType(), ShAmt);
    A = Builder->CreateShl(A, ShAmtV, CI.getName());
    return BinaryOperator::CreateAShr(A, ShAmtV);
  }

  return nullptr;
}


/// Return a Constant* for the specified floating-point constant if it fits
/// in the specified FP type without changing its value.
static Constant *fitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
  bool losesInfo;
  APFloat F = CFP->getValueAPF();
  (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
  if (!losesInfo)
    return ConstantFP::get(CFP->getContext(), F);
  return nullptr;
}

/// Look through floating-point extensions until we get the source value.
static Value *lookThroughFPExtensions(Value *V) {
  while (auto *FPExt = dyn_cast<FPExtInst>(V))
    V = FPExt->getOperand(0);

  // If this value is a constant, return the constant in the smallest FP type
  // that can accurately represent it. This allows us to turn
  // (float)((double)X+2.0) into x+2.0f.
  if (auto *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType() == Type::getPPC_FP128Ty(V->getContext()))
      return V; // No constant folding of this.
    // See if the value can be truncated to half and then reextended.
    if (Value *V = fitsInFPType(CFP, APFloat::IEEEhalf()))
      return V;
    // See if the value can be truncated to float and then reextended.
    if (Value *V = fitsInFPType(CFP, APFloat::IEEEsingle()))
      return V;
    if (CFP->getType()->isDoubleTy())
      return V; // Won't shrink.
    if (Value *V = fitsInFPType(CFP, APFloat::IEEEdouble()))
      return V;
    // Don't try to shrink to various long double types.
  }

  return V;
}

Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
  if (Instruction *I = commonCastTransforms(CI))
    return I;
  // If we have fptrunc(OpI (fpextend x), (fpextend y)), we would like to
  // simplify this expression to avoid one or more of the trunc/extend
  // operations if we can do so without changing the numerical results.
  //
  // The exact manner in which the widths of the operands interact to limit
  // what we can and cannot do safely varies from operation to operation, and
  // is explained below in the various case statements.
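  //
  // E.g. (illustrative):
  //   fptrunc (fadd (fpext float %a to double), (fpext float %b to double))
  //           to float
  // can normally be simplified to fadd float %a, %b, because double's 53-bit
  // significand satisfies the OpWidth >= 2*DstWidth+1 bound for float's
  // 24-bit significand.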
  BinaryOperator *OpI = dyn_cast<BinaryOperator>(CI.getOperand(0));
  if (OpI && OpI->hasOneUse()) {
    Value *LHSOrig = lookThroughFPExtensions(OpI->getOperand(0));
    Value *RHSOrig = lookThroughFPExtensions(OpI->getOperand(1));
    unsigned OpWidth = OpI->getType()->getFPMantissaWidth();
    unsigned LHSWidth = LHSOrig->getType()->getFPMantissaWidth();
    unsigned RHSWidth = RHSOrig->getType()->getFPMantissaWidth();
    unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
    unsigned DstWidth = CI.getType()->getFPMantissaWidth();
    switch (OpI->getOpcode()) {
      default: break;
      case Instruction::FAdd:
      case Instruction::FSub:
        // For addition and subtraction, the infinitely precise result can
        // essentially be arbitrarily wide; proving that double rounding
        // will not occur because the result of OpI is exact (as we will for
        // FMul, for example) is hopeless. However, we *can* nonetheless
        // frequently know that double rounding cannot occur (or that it is
        // innocuous) by taking advantage of the specific structure of
        // infinitely-precise results that admit double rounding.
        //
        // Specifically, if OpWidth >= 2*DstWidth+1 and DstWidth is sufficient
        // to represent both sources, we can guarantee that the double
        // rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
        // "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
        // for proof of this fact).
        //
        // Note: Figueroa does not consider the case where DstFormat !=
        // SrcFormat. It's possible (likely even!) that this analysis
        // could be tightened for those cases, but they are rare (the main
        // case of interest here is (float)((double)float + float)).
        if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
          if (LHSOrig->getType() != CI.getType())
            LHSOrig = Builder->CreateFPExt(LHSOrig, CI.getType());
          if (RHSOrig->getType() != CI.getType())
            RHSOrig = Builder->CreateFPExt(RHSOrig, CI.getType());
          Instruction *RI =
            BinaryOperator::Create(OpI->getOpcode(), LHSOrig, RHSOrig);
          RI->copyFastMathFlags(OpI);
          return RI;
        }
        break;
      case Instruction::FMul:
        // For multiplication, the infinitely precise result has at most
        // LHSWidth + RHSWidth significant bits; if OpWidth is sufficient
        // that such a value can be exactly represented, then no double
        // rounding can possibly occur; we can safely perform the operation
        // in the destination format if it can represent both sources.
        if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
          if (LHSOrig->getType() != CI.getType())
            LHSOrig = Builder->CreateFPExt(LHSOrig, CI.getType());
          if (RHSOrig->getType() != CI.getType())
            RHSOrig = Builder->CreateFPExt(RHSOrig, CI.getType());
          Instruction *RI =
            BinaryOperator::CreateFMul(LHSOrig, RHSOrig);
          RI->copyFastMathFlags(OpI);
          return RI;
        }
        break;
      case Instruction::FDiv:
        // For division, we again use the bound from Figueroa's
        // dissertation. I am entirely certain that this bound can be
        // tightened in the unbalanced operand case by an analysis based on
        // the diophantine rational approximation bound, but the well-known
        // condition used here is a good conservative first pass.
        // TODO: Tighten bound via rigorous analysis of the unbalanced case.
        if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) {
          if (LHSOrig->getType() != CI.getType())
            LHSOrig = Builder->CreateFPExt(LHSOrig, CI.getType());
          if (RHSOrig->getType() != CI.getType())
            RHSOrig = Builder->CreateFPExt(RHSOrig, CI.getType());
          Instruction *RI =
            BinaryOperator::CreateFDiv(LHSOrig, RHSOrig);
          RI->copyFastMathFlags(OpI);
          return RI;
        }
        break;
      case Instruction::FRem:
        // Remainder is straightforward. Remainder is always exact, so the
        // type of OpI doesn't enter into things at all. We simply evaluate
        // in whichever source type is larger, then convert to the
        // destination type.
        if (SrcWidth == OpWidth)
          break;
        if (LHSWidth < SrcWidth)
          LHSOrig = Builder->CreateFPExt(LHSOrig, RHSOrig->getType());
        else if (RHSWidth <= SrcWidth)
          RHSOrig = Builder->CreateFPExt(RHSOrig, LHSOrig->getType());
        if (LHSOrig != OpI->getOperand(0) || RHSOrig != OpI->getOperand(1)) {
          Value *ExactResult = Builder->CreateFRem(LHSOrig, RHSOrig);
          if (Instruction *RI = dyn_cast<Instruction>(ExactResult))
            RI->copyFastMathFlags(OpI);
          return CastInst::CreateFPCast(ExactResult, CI.getType());
        }
    }

    // (fptrunc (fneg x)) -> (fneg (fptrunc x))
    if (BinaryOperator::isFNeg(OpI)) {
      Value *InnerTrunc = Builder->CreateFPTrunc(OpI->getOperand(1),
                                                 CI.getType());
      Instruction *RI = BinaryOperator::CreateFNeg(InnerTrunc);
      RI->copyFastMathFlags(OpI);
      return RI;
    }
  }

  // (fptrunc (select cond, R1, Cst)) -->
  // (select cond, (fptrunc R1), (fptrunc Cst))
  //
  //   - but only if this isn't part of a min/max operation, else we'll
  // ruin min/max canonical form which is to have the select and
  // compare's operands be of the same type with no casts to look through.
  Value *LHS, *RHS;
  SelectInst *SI = dyn_cast<SelectInst>(CI.getOperand(0));
  if (SI &&
      (isa<ConstantFP>(SI->getOperand(1)) ||
       isa<ConstantFP>(SI->getOperand(2))) &&
      matchSelectPattern(SI, LHS, RHS).Flavor == SPF_UNKNOWN) {
    Value *LHSTrunc = Builder->CreateFPTrunc(SI->getOperand(1),
                                             CI.getType());
    Value *RHSTrunc = Builder->CreateFPTrunc(SI->getOperand(2),
                                             CI.getType());
    return SelectInst::Create(SI->getOperand(0), LHSTrunc, RHSTrunc);
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI.getOperand(0));
  if (II) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::fabs:
    case Intrinsic::ceil:
    case Intrinsic::floor:
    case Intrinsic::rint:
    case Intrinsic::round:
    case Intrinsic::nearbyint:
    case Intrinsic::trunc: {
      Value *Src = II->getArgOperand(0);
      if (!Src->hasOneUse())
        break;

      // Except for fabs, this transformation requires the input of the unary FP
      // operation to be itself an fpext from the type to which we're
      // truncating.
      if (II->getIntrinsicID() != Intrinsic::fabs) {
        FPExtInst *FPExtSrc = dyn_cast<FPExtInst>(Src);
        if (!FPExtSrc || FPExtSrc->getOperand(0)->getType() != CI.getType())
          break;
      }

      // Do unary FP operation on smaller type.
      // (fptrunc (fabs x)) -> (fabs (fptrunc x))
      Value *InnerTrunc = Builder->CreateFPTrunc(Src, CI.getType());
      Type *IntrinsicType[] = { CI.getType() };
      Function *Overload = Intrinsic::getDeclaration(
          CI.getModule(), II->getIntrinsicID(), IntrinsicType);

      SmallVector<OperandBundleDef, 1> OpBundles;
      II->getOperandBundlesAsDefs(OpBundles);

      Value *Args[] = { InnerTrunc };
      CallInst *NewCI = CallInst::Create(Overload, Args,
                                         OpBundles, II->getName());
      NewCI->copyFastMathFlags(II);
      return NewCI;
    }
    }
  }

  if (Instruction *I = shrinkInsertElt(CI, *Builder))
    return I;

  return nullptr;
}

Instruction *InstCombiner::visitFPExt(CastInst &CI) {
  return commonCastTransforms(CI);
}

// fpto{s/u}i({u/s}itofp(X)) --> X or zext(X) or sext(X) or trunc(X)
// This is safe if the intermediate type has enough bits in its mantissa to
// accurately represent all values of X. For example, this won't work with
// i64 -> float -> i64.
Instruction *InstCombiner::FoldItoFPtoI(Instruction &FI) {
  if (!isa<UIToFPInst>(FI.getOperand(0)) && !isa<SIToFPInst>(FI.getOperand(0)))
    return nullptr;
  Instruction *OpI = cast<Instruction>(FI.getOperand(0));

  Value *SrcI = OpI->getOperand(0);
  Type *FITy = FI.getType();
  Type *OpITy = OpI->getType();
  Type *SrcTy = SrcI->getType();
  bool IsInputSigned = isa<SIToFPInst>(OpI);
  bool IsOutputSigned = isa<FPToSIInst>(FI);

  // We can safely assume the conversion won't overflow the output range,
  // because (for example) (uint8_t)18293.f is undefined behavior.

  // Since we can assume the conversion won't overflow, our decision as to
  // whether the input will fit in the float should depend on the minimum
  // of the input range and output range.

  // This means this is also safe for a signed input and unsigned output,
  // since a negative input would lead to undefined behavior.
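  // A worked example of the bound computed below (a sketch, assuming a
  // 24-bit float mantissa): for fptoui (uitofp i16 %x to float) to i32,
  // InputSize = 16 and OutputSize = 32, so ActualSize = 16 <= 24 and the
  // cast pair can be replaced by zext i16 %x to i32. With an i64 source
  // instead, ActualSize = min(64, 32) = 32 > 24 and the fold is rejected.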
  int InputSize = (int)SrcTy->getScalarSizeInBits() - IsInputSigned;
  int OutputSize = (int)FITy->getScalarSizeInBits() - IsOutputSigned;
  int ActualSize = std::min(InputSize, OutputSize);

  if (ActualSize <= OpITy->getFPMantissaWidth()) {
    if (FITy->getScalarSizeInBits() > SrcTy->getScalarSizeInBits()) {
      if (IsInputSigned && IsOutputSigned)
        return new SExtInst(SrcI, FITy);
      return new ZExtInst(SrcI, FITy);
    }
    if (FITy->getScalarSizeInBits() < SrcTy->getScalarSizeInBits())
      return new TruncInst(SrcI, FITy);
    if (SrcTy == FITy)
      return replaceInstUsesWith(FI, SrcI);
    return new BitCastInst(SrcI, FITy);
  }
  return nullptr;
}

Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
  Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
  if (!OpI)
    return commonCastTransforms(FI);

  if (Instruction *I = FoldItoFPtoI(FI))
    return I;

  return commonCastTransforms(FI);
}

Instruction *InstCombiner::visitFPToSI(FPToSIInst &FI) {
  Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
  if (!OpI)
    return commonCastTransforms(FI);

  if (Instruction *I = FoldItoFPtoI(FI))
    return I;

  return commonCastTransforms(FI);
}

Instruction *InstCombiner::visitUIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}

Instruction *InstCombiner::visitSIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}

Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
  // If the source integer type is not the intptr_t type for this target, do a
  // trunc or zext to the intptr_t type, then inttoptr of it. This allows the
  // cast to be exposed to other transforms.
  unsigned AS = CI.getAddressSpace();
  if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
      DL.getPointerSizeInBits(AS)) {
    Type *Ty = DL.getIntPtrType(CI.getContext(), AS);
    if (CI.getType()->isVectorTy()) // Handle vectors of pointers.
      Ty = VectorType::get(Ty, CI.getType()->getVectorNumElements());

    Value *P = Builder->CreateZExtOrTrunc(CI.getOperand(0), Ty);
    return new IntToPtrInst(P, CI.getType());
  }

  if (Instruction *I = commonCastTransforms(CI))
    return I;

  return nullptr;
}

/// @brief Implement the transforms for cast of pointer (bitcast/ptrtoint)
Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
    // If casting the result of a getelementptr instruction with no offset,
    // turn this into a cast of the original pointer!
    if (GEP->hasAllZeroIndices() &&
        // If CI is an addrspacecast and GEP changes the pointer type, merging
        // GEP into CI would undo canonicalizing addrspacecast with different
        // pointer types, causing infinite loops.
        (!isa<AddrSpaceCastInst>(CI) ||
         GEP->getType() == GEP->getPointerOperandType())) {
      // Changing the cast operand is usually not a good idea but it is safe
      // here because the pointer operand is being replaced with another
      // pointer operand so the opcode doesn't need to change.
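      // A sketch of the intended effect: bitcast (getelementptr %p, 0, 0)
      // to i8* simply becomes bitcast %p to i8*; the now-dead GEP is left
      // on the worklist for cleanup.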
      Worklist.Add(GEP);
      CI.setOperand(0, GEP->getOperand(0));
      return &CI;
    }
  }

  return commonCastTransforms(CI);
}

Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
  // If the destination integer type is not the intptr_t type for this target,
  // do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast
  // to be exposed to other transforms.

  Type *Ty = CI.getType();
  unsigned AS = CI.getPointerAddressSpace();

  if (Ty->getScalarSizeInBits() == DL.getPointerSizeInBits(AS))
    return commonPointerCastTransforms(CI);

  Type *PtrTy = DL.getIntPtrType(CI.getContext(), AS);
  if (Ty->isVectorTy()) // Handle vectors of pointers.
    PtrTy = VectorType::get(PtrTy, Ty->getVectorNumElements());

  Value *P = Builder->CreatePtrToInt(CI.getOperand(0), PtrTy);
  return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false);
}

/// This input value (which is known to have vector type) is being zero
/// extended or truncated to the specified vector type.
/// Try to replace it with a shuffle (and vector/vector bitcast) if possible.
///
/// The source and destination vector types may have different element types.
static Instruction *optimizeVectorResize(Value *InVal, VectorType *DestTy,
                                         InstCombiner &IC) {
  // We can only do this optimization if the output is a multiple of the input
  // element size, or the input is a multiple of the output element size.
  // Convert the input type to have the same element type as the output.
  VectorType *SrcTy = cast<VectorType>(InVal->getType());

  if (SrcTy->getElementType() != DestTy->getElementType()) {
    // The input types don't need to be identical, but for now they must be the
    // same size. There is no specific reason we couldn't handle things like
    // <4 x i16> -> <4 x i32> by bitcasting to <2 x i32> but haven't gotten
    // there yet.
    if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
        DestTy->getElementType()->getPrimitiveSizeInBits())
      return nullptr;

    SrcTy = VectorType::get(DestTy->getElementType(), SrcTy->getNumElements());
    InVal = IC.Builder->CreateBitCast(InVal, SrcTy);
  }

  // Now that the element types match, get the shuffle mask and RHS of the
  // shuffle to use, which depends on whether we're increasing or decreasing
  // the size of the input.
  SmallVector<uint32_t, 16> ShuffleMask;
  Value *V2;

  if (SrcTy->getNumElements() > DestTy->getNumElements()) {
    // If we're shrinking the number of elements, just shuffle in the low
    // elements from the input and use undef as the second shuffle input.
    V2 = UndefValue::get(SrcTy);
    for (unsigned i = 0, e = DestTy->getNumElements(); i != e; ++i)
      ShuffleMask.push_back(i);

  } else {
    // If we're increasing the number of elements, shuffle in all of the
    // elements from InVal and fill the rest of the result elements with zeros
    // from a constant zero.
    V2 = Constant::getNullValue(SrcTy);
    unsigned SrcElts = SrcTy->getNumElements();
    for (unsigned i = 0, e = SrcElts; i != e; ++i)
      ShuffleMask.push_back(i);

    // The excess elements reference the first element of the zero input.
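    // A worked example (a sketch): widening <2 x i32> to <4 x i32> builds
    // the mask <0, 1, 2, 2>; index 2 selects the first element of the
    // all-zero second operand, so both excess result elements are zero.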
    for (unsigned i = 0, e = DestTy->getNumElements()-SrcElts; i != e; ++i)
      ShuffleMask.push_back(SrcElts);
  }

  return new ShuffleVectorInst(InVal, V2,
                               ConstantDataVector::get(V2->getContext(),
                                                       ShuffleMask));
}

static bool isMultipleOfTypeSize(unsigned Value, Type *Ty) {
  return Value % Ty->getPrimitiveSizeInBits() == 0;
}

static unsigned getTypeSizeIndex(unsigned Value, Type *Ty) {
  return Value / Ty->getPrimitiveSizeInBits();
}

/// V is a value which is inserted into a vector of VecEltTy.
/// Look through the value to see if we can decompose it into
/// insertions into the vector. See the example in the comment for
/// optimizeIntegerToVectorInsertions for the pattern this handles.
/// The bit width of V is always a non-zero multiple of VecEltTy's size.
/// Shift is the number of bits between the lsb of V and the lsb of
/// the vector.
///
/// This returns false if the pattern can't be matched or true if it can,
/// filling in Elements with the elements found here.
static bool collectInsertionElements(Value *V, unsigned Shift,
                                     SmallVectorImpl<Value *> &Elements,
                                     Type *VecEltTy, bool isBigEndian) {
  assert(isMultipleOfTypeSize(Shift, VecEltTy) &&
         "Shift should be a multiple of the element type size");

  // Undef values never contribute useful bits to the result.
  if (isa<UndefValue>(V)) return true;

  // If we got down to a value of the right type, we win: try inserting it
  // into the right element.
  if (V->getType() == VecEltTy) {
    // Inserting null doesn't actually insert any elements.
    if (Constant *C = dyn_cast<Constant>(V))
      if (C->isNullValue())
        return true;

    unsigned ElementIndex = getTypeSizeIndex(Shift, VecEltTy);
    if (isBigEndian)
      ElementIndex = Elements.size() - ElementIndex - 1;

    // Fail if multiple elements are inserted into this slot.
    if (Elements[ElementIndex])
      return false;

    Elements[ElementIndex] = V;
    return true;
  }

  if (Constant *C = dyn_cast<Constant>(V)) {
    // Figure out the # elements this provides, and bitcast it or slice it up
    // as required.
    unsigned NumElts = getTypeSizeIndex(C->getType()->getPrimitiveSizeInBits(),
                                        VecEltTy);
    // If the constant is the size of a vector element, we just need to bitcast
    // it to the right type so it gets properly inserted.
    if (NumElts == 1)
      return collectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy),
                                      Shift, Elements, VecEltTy, isBigEndian);

    // Okay, this is a constant that covers multiple elements. Slice it up into
    // pieces and insert each element-sized piece into the vector.
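    // For example (a sketch, little-endian): slicing the i64 constant
    // 0x0000000200000001 into i32 elements yields the pieces 1 and 2 via
    // lshr and trunc, which fill slots 0 and 1 respectively.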
    if (!isa<IntegerType>(C->getType()))
      C = ConstantExpr::getBitCast(C, IntegerType::get(V->getContext(),
                                   C->getType()->getPrimitiveSizeInBits()));
    unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
    Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);

    for (unsigned i = 0; i != NumElts; ++i) {
      unsigned ShiftI = Shift+i*ElementSize;
      Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
                                                                  ShiftI));
      Piece = ConstantExpr::getTrunc(Piece, ElementIntTy);
      if (!collectInsertionElements(Piece, ShiftI, Elements, VecEltTy,
                                    isBigEndian))
        return false;
    }
    return true;
  }

  if (!V->hasOneUse()) return false;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;
  switch (I->getOpcode()) {
  default: return false; // Unhandled case.
  case Instruction::BitCast:
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian);
  case Instruction::ZExt:
    if (!isMultipleOfTypeSize(
            I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
            VecEltTy))
      return false;
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian);
  case Instruction::Or:
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian) &&
           collectInsertionElements(I->getOperand(1), Shift, Elements, VecEltTy,
                                    isBigEndian);
  case Instruction::Shl: {
    // Must be shifting by a constant that is a multiple of the element size.
    ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
    if (!CI) return false;
    Shift += CI->getZExtValue();
    if (!isMultipleOfTypeSize(Shift, VecEltTy)) return false;
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian);
  }
  }
}

/// If the input is an 'or' instruction, we may be doing shifts and ors to
/// assemble the elements of the vector manually.
/// Try to rip the code out and replace it with insertelements. This is to
/// optimize code like this:
///
///    %tmp37 = bitcast float %inc to i32
///    %tmp38 = zext i32 %tmp37 to i64
///    %tmp31 = bitcast float %inc5 to i32
///    %tmp32 = zext i32 %tmp31 to i64
///    %tmp33 = shl i64 %tmp32, 32
///    %ins35 = or i64 %tmp33, %tmp38
///    %tmp43 = bitcast i64 %ins35 to <2 x float>
///
/// Into two insertelements that do "buildvector{%inc, %inc5}".
static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI,
                                                InstCombiner &IC) {
  VectorType *DestVecTy = cast<VectorType>(CI.getType());
  Value *IntInput = CI.getOperand(0);

  SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
  if (!collectInsertionElements(IntInput, 0, Elements,
                                DestVecTy->getElementType(),
                                IC.getDataLayout().isBigEndian()))
    return nullptr;

  // If we succeeded, we know that all of the elements are specified by
  // Elements or are zero if Elements has a null entry. Recast this as a set
  // of insertions.
  Value *Result = Constant::getNullValue(CI.getType());
  for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
    if (!Elements[i]) continue; // Unset element.
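    // Chain an insertelement for each known element onto the accumulating
    // result, which starts as the all-zero vector; null entries in Elements
    // therefore correctly stay zero.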
    Result = IC.Builder->CreateInsertElement(Result, Elements[i],
                                             IC.Builder->getInt32(i));
  }

  return Result;
}

/// Canonicalize scalar bitcasts of extracted elements into a bitcast of the
/// vector followed by extract element. The backend tends to handle bitcasts of
/// vectors better than bitcasts of scalars because vector registers are
/// usually not type-specific like scalar integer or scalar floating-point.
static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast,
                                              InstCombiner &IC,
                                              const DataLayout &DL) {
  // TODO: Create and use a pattern matcher for ExtractElementInst.
  auto *ExtElt = dyn_cast<ExtractElementInst>(BitCast.getOperand(0));
  if (!ExtElt || !ExtElt->hasOneUse())
    return nullptr;

  // The bitcast must be to a vectorizable type, otherwise we can't make a new
  // type to extract from.
  Type *DestType = BitCast.getType();
  if (!VectorType::isValidElementType(DestType))
    return nullptr;

  unsigned NumElts = ExtElt->getVectorOperandType()->getNumElements();
  auto *NewVecType = VectorType::get(DestType, NumElts);
  auto *NewBC = IC.Builder->CreateBitCast(ExtElt->getVectorOperand(),
                                          NewVecType, "bc");
  return ExtractElementInst::Create(NewBC, ExtElt->getIndexOperand());
}

/// Change the type of a bitwise logic operation if we can eliminate a bitcast.
static Instruction *foldBitCastBitwiseLogic(BitCastInst &BitCast,
                                            InstCombiner::BuilderTy &Builder) {
  Type *DestTy = BitCast.getType();
  BinaryOperator *BO;
  if (!DestTy->getScalarType()->isIntegerTy() ||
      !match(BitCast.getOperand(0), m_OneUse(m_BinOp(BO))) ||
      !BO->isBitwiseLogicOp())
    return nullptr;

  // FIXME: This transform is restricted to vector types to avoid backend
  // problems caused by creating potentially illegal operations. If a fix-up is
  // added to handle that situation, we can remove this check.
  if (!DestTy->isVectorTy() || !BO->getType()->isVectorTy())
    return nullptr;

  Value *X;
  if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) &&
      X->getType() == DestTy && !isa<Constant>(X)) {
    // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
    Value *CastedOp1 = Builder.CreateBitCast(BO->getOperand(1), DestTy);
    return BinaryOperator::Create(BO->getOpcode(), X, CastedOp1);
  }

  if (match(BO->getOperand(1), m_OneUse(m_BitCast(m_Value(X)))) &&
      X->getType() == DestTy && !isa<Constant>(X)) {
    // bitcast(logic(Y, bitcast(X))) --> logic'(bitcast(Y), X)
    Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
    return BinaryOperator::Create(BO->getOpcode(), CastedOp0, X);
  }

  return nullptr;
}

/// Change the type of a select if we can eliminate a bitcast.
static Instruction *foldBitCastSelect(BitCastInst &BitCast,
                                      InstCombiner::BuilderTy &Builder) {
  Value *Cond, *TVal, *FVal;
  if (!match(BitCast.getOperand(0),
             m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
    return nullptr;

  // A vector select must maintain the same number of elements in its operands.
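  // For instance (a sketch): with a vector condition such as
  //   select <2 x i1> %c, <2 x i32> %x, <2 x i32> %y
  // the result must keep two elements, so a bitcast to a type with a
  // different element count cannot be hoisted through the select.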
  Type *CondTy = Cond->getType();
  Type *DestTy = BitCast.getType();
  if (CondTy->isVectorTy()) {
    if (!DestTy->isVectorTy())
      return nullptr;
    if (DestTy->getVectorNumElements() != CondTy->getVectorNumElements())
      return nullptr;
  }

  // FIXME: This transform is restricted from changing the select between
  // scalars and vectors to avoid backend problems caused by creating
  // potentially illegal operations. If a fix-up is added to handle that
  // situation, we can remove this check.
  if (DestTy->isVectorTy() != TVal->getType()->isVectorTy())
    return nullptr;

  auto *Sel = cast<Instruction>(BitCast.getOperand(0));
  Value *X;
  if (match(TVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
      !isa<Constant>(X)) {
    // bitcast(select(Cond, bitcast(X), Y)) --> select'(Cond, X, bitcast(Y))
    Value *CastedVal = Builder.CreateBitCast(FVal, DestTy);
    return SelectInst::Create(Cond, X, CastedVal, "", nullptr, Sel);
  }

  if (match(FVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
      !isa<Constant>(X)) {
    // bitcast(select(Cond, Y, bitcast(X))) --> select'(Cond, bitcast(Y), X)
    Value *CastedVal = Builder.CreateBitCast(TVal, DestTy);
    return SelectInst::Create(Cond, CastedVal, X, "", nullptr, Sel);
  }

  return nullptr;
}

/// Check if all users of CI are StoreInsts.
static bool hasStoreUsersOnly(CastInst &CI) {
  for (User *U : CI.users()) {
    if (!isa<StoreInst>(U))
      return false;
  }
  return true;
}

/// This function handles the following case:
///
///   A -> B cast
///   PHI
///   B -> A cast
///
/// All the related PHI nodes can be replaced by new PHI nodes with type A.
/// The uses of \p CI can be changed to the new PHI node corresponding to \p PN.
Instruction *InstCombiner::optimizeBitCastFromPhi(CastInst &CI, PHINode *PN) {
  // BitCast used by Store can be handled in InstCombineLoadStoreAlloca.cpp.
  if (hasStoreUsersOnly(CI))
    return nullptr;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType();   // Type B
  Type *DestTy = CI.getType();    // Type A

  SmallVector<PHINode *, 4> PhiWorklist;
  SmallSetVector<PHINode *, 4> OldPhiNodes;

  // Find all of the A->B casts and PHI nodes.
  // We need to inspect all related PHI nodes, but PHIs can be cyclic, so
  // OldPhiNodes is used to track all known PHI nodes; before a new PHI is
  // added to PhiWorklist, it is first checked against and added to
  // OldPhiNodes.
  PhiWorklist.push_back(PN);
  OldPhiNodes.insert(PN);
  while (!PhiWorklist.empty()) {
    auto *OldPN = PhiWorklist.pop_back_val();
    for (Value *IncValue : OldPN->incoming_values()) {
      if (isa<Constant>(IncValue))
        continue;

      if (auto *LI = dyn_cast<LoadInst>(IncValue)) {
        // If there is a sequence of one or more load instructions, where each
        // loaded value is used as the address of a later load, a bitcast is
        // necessary to change the value type; don't optimize it. For
        // simplicity we give up if the load address comes from another load.
        Value *Addr = LI->getOperand(0);
        if (Addr == &CI || isa<LoadInst>(Addr))
          return nullptr;
        if (LI->hasOneUse() && LI->isSimple())
          continue;
        // If a LoadInst has more than one use, changing the type of loaded
        // value may create another bitcast.
        return nullptr;
      }

      if (auto *PNode = dyn_cast<PHINode>(IncValue)) {
        if (OldPhiNodes.insert(PNode))
          PhiWorklist.push_back(PNode);
        continue;
      }

      auto *BCI = dyn_cast<BitCastInst>(IncValue);
      // We can't handle other instructions.
      if (!BCI)
        return nullptr;

      // Verify it's an A->B cast.
      Type *TyA = BCI->getOperand(0)->getType();
      Type *TyB = BCI->getType();
      if (TyA != DestTy || TyB != SrcTy)
        return nullptr;
    }
  }

  // For each old PHI node, create a corresponding new PHI node with type A.
  SmallDenseMap<PHINode *, PHINode *> NewPNodes;
  for (auto *OldPN : OldPhiNodes) {
    Builder->SetInsertPoint(OldPN);
    PHINode *NewPN = Builder->CreatePHI(DestTy, OldPN->getNumOperands());
    NewPNodes[OldPN] = NewPN;
  }

  // Fill in the operands of new PHI nodes.
  for (auto *OldPN : OldPhiNodes) {
    PHINode *NewPN = NewPNodes[OldPN];
    for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
      Value *V = OldPN->getOperand(j);
      Value *NewV = nullptr;
      if (auto *C = dyn_cast<Constant>(V)) {
        NewV = ConstantExpr::getBitCast(C, DestTy);
      } else if (auto *LI = dyn_cast<LoadInst>(V)) {
        Builder->SetInsertPoint(LI->getNextNode());
        NewV = Builder->CreateBitCast(LI, DestTy);
        Worklist.Add(LI);
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        NewV = BCI->getOperand(0);
      } else if (auto *PrevPN = dyn_cast<PHINode>(V)) {
        NewV = NewPNodes[PrevPN];
      }
      assert(NewV);
      NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
    }
  }

  // If there is a store with type B, change it to type A.
  for (User *U : PN->users()) {
    auto *SI = dyn_cast<StoreInst>(U);
    if (SI && SI->isSimple() && SI->getOperand(0) == PN) {
      Builder->SetInsertPoint(SI);
      auto *NewBC =
          cast<BitCastInst>(Builder->CreateBitCast(NewPNodes[PN], SrcTy));
      SI->setOperand(0, NewBC);
      Worklist.Add(SI);
      assert(hasStoreUsersOnly(*NewBC));
    }
  }

  return replaceInstUsesWith(CI, NewPNodes[PN]);
}

Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
  // If the operands are integer typed then apply the integer transforms,
  // otherwise just apply the common ones.
  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType();
  Type *DestTy = CI.getType();

  // Get rid of casts from one type to the same type. These are useless and can
  // be replaced by the operand.
  if (DestTy == Src->getType())
    return replaceInstUsesWith(CI, Src);

  if (PointerType *DstPTy = dyn_cast<PointerType>(DestTy)) {
    PointerType *SrcPTy = cast<PointerType>(SrcTy);
    Type *DstElTy = DstPTy->getElementType();
    Type *SrcElTy = SrcPTy->getElementType();

    // If we are casting an alloca to a pointer to a type of the same
    // size, rewrite the allocation instruction to allocate the "right" type.
    // There is no need to modify malloc calls because it is their bitcast that
    // needs to be cleaned up.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
      if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
        return V;

    // When the type pointed to is not sized the cast cannot be
    // turned into a gep.
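    // (For example, an opaque struct type, "%t = type opaque", is a pointee
    // with no size.)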
    Type *PointeeType =
        cast<PointerType>(Src->getType()->getScalarType())->getElementType();
    if (!PointeeType->isSized())
      return nullptr;

    // If the source and destination are pointers, and this cast is equivalent
    // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
    // This can enhance SROA and other transforms that want type-safe pointers.
    unsigned NumZeros = 0;
    while (SrcElTy != DstElTy &&
           isa<CompositeType>(SrcElTy) && !SrcElTy->isPointerTy() &&
           SrcElTy->getNumContainedTypes() /* not "{}" */) {
      SrcElTy = cast<CompositeType>(SrcElTy)->getTypeAtIndex(0U);
      ++NumZeros;
    }

    // If we found a path from the src to dest, create the getelementptr now.
    if (SrcElTy == DstElTy) {
      SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder->getInt32(0));
      return GetElementPtrInst::CreateInBounds(Src, Idxs);
    }
  }

  if (VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
    if (DestVTy->getNumElements() == 1 && !SrcTy->isVectorTy()) {
      Value *Elem = Builder->CreateBitCast(Src, DestVTy->getElementType());
      return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
                   Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
      // FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
    }

    if (isa<IntegerType>(SrcTy)) {
      // If this is a cast from an integer to vector, check to see if the input
      // is a trunc or zext of a bitcast from vector. If so, we can replace all
      // the casts with a shuffle and (potentially) a bitcast.
      if (isa<TruncInst>(Src) || isa<ZExtInst>(Src)) {
        CastInst *SrcCast = cast<CastInst>(Src);
        if (BitCastInst *BCIn = dyn_cast<BitCastInst>(SrcCast->getOperand(0)))
          if (isa<VectorType>(BCIn->getOperand(0)->getType()))
            if (Instruction *I = optimizeVectorResize(BCIn->getOperand(0),
                                                      cast<VectorType>(DestTy),
                                                      *this))
              return I;
      }

      // If the input is an 'or' instruction, we may be doing shifts and ors to
      // assemble the elements of the vector manually. Try to rip the code out
      // and replace it with insertelements.
      if (Value *V = optimizeIntegerToVectorInsertions(CI, *this))
        return replaceInstUsesWith(CI, V);
    }
  }

  if (VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
    if (SrcVTy->getNumElements() == 1) {
      // If our destination is not a vector, then make this a straight
      // scalar-scalar cast.
      if (!DestTy->isVectorTy()) {
        Value *Elem =
            Builder->CreateExtractElement(Src,
                   Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
        return CastInst::Create(Instruction::BitCast, Elem, DestTy);
      }

      // Otherwise, see if our source is an insert. If so, then use the scalar
      // component directly.
      if (InsertElementInst *IEI =
              dyn_cast<InsertElementInst>(CI.getOperand(0)))
        return CastInst::Create(Instruction::BitCast, IEI->getOperand(1),
                                DestTy);
    }
  }

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(Src)) {
    // Okay, we have (bitcast (shuffle ..)). Check to see if this is
    // a bitcast to a vector with the same # elts.
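    // The shape being matched, sketched in IR: bitcast (shufflevector
    // <4 x i32> %a, <4 x i32> %b, <mask>) to <4 x float>, where %a or %b is
    // itself a bitcast from <4 x float>; the shuffle can then be done on
    // <4 x float> values directly.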
    if (SVI->hasOneUse() && DestTy->isVectorTy() &&
        DestTy->getVectorNumElements() == SVI->getType()->getNumElements() &&
        SVI->getType()->getNumElements() ==
            SVI->getOperand(0)->getType()->getVectorNumElements()) {
      BitCastInst *Tmp;
      // If either of the operands is a cast from CI.getType(), then
      // evaluating the shuffle in the casted destination's type will allow
      // us to eliminate at least one cast.
      if (((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(0))) &&
           Tmp->getOperand(0)->getType() == DestTy) ||
          ((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(1))) &&
           Tmp->getOperand(0)->getType() == DestTy)) {
        Value *LHS = Builder->CreateBitCast(SVI->getOperand(0), DestTy);
        Value *RHS = Builder->CreateBitCast(SVI->getOperand(1), DestTy);
        // Return a new shuffle vector. Use the same element IDs, as we
        // know the vector types match #elts.
        return new ShuffleVectorInst(LHS, RHS, SVI->getOperand(2));
      }
    }
  }

  // Handle the A->B->A cast where there is an intervening PHI node.
  if (PHINode *PN = dyn_cast<PHINode>(Src))
    if (Instruction *I = optimizeBitCastFromPhi(CI, PN))
      return I;

  if (Instruction *I = canonicalizeBitCastExtElt(CI, *this, DL))
    return I;

  if (Instruction *I = foldBitCastBitwiseLogic(CI, *Builder))
    return I;

  if (Instruction *I = foldBitCastSelect(CI, *Builder))
    return I;

  if (SrcTy->isPointerTy())
    return commonPointerCastTransforms(CI);
  return commonCastTransforms(CI);
}

Instruction *InstCombiner::visitAddrSpaceCast(AddrSpaceCastInst &CI) {
  // If the destination pointer element type is not the same as the source's,
  // first do a bitcast to the destination type, and then the addrspacecast.
  // This allows the cast to be exposed to other transforms.
  Value *Src = CI.getOperand(0);
  PointerType *SrcTy = cast<PointerType>(Src->getType()->getScalarType());
  PointerType *DestTy = cast<PointerType>(CI.getType()->getScalarType());

  Type *DestElemTy = DestTy->getElementType();
  if (SrcTy->getElementType() != DestElemTy) {
    Type *MidTy = PointerType::get(DestElemTy, SrcTy->getAddressSpace());
    if (VectorType *VT = dyn_cast<VectorType>(CI.getType())) {
      // Handle vectors of pointers.
      MidTy = VectorType::get(MidTy, VT->getNumElements());
    }

    Value *NewBitCast = Builder->CreateBitCast(Src, MidTy);
    return new AddrSpaceCastInst(NewBitCast, CI.getType());
  }

  return commonPointerCastTransforms(CI);
}