//===- InstCombineCasts.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for cast operations.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <optional>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// Given an expression that CanEvaluateTruncated or CanEvaluateSExtd returns
/// true for, actually insert the code to evaluate the expression.
Value *InstCombinerImpl::EvaluateInDifferentType(Value *V, Type *Ty,
                                                 bool isSigned) {
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantFoldIntegerCast(C, Ty, isSigned, DL);

  // Otherwise, it must be an instruction.
  Instruction *I = cast<Instruction>(V);
  Instruction *Res = nullptr;
  unsigned Opc = I->getOpcode();
  switch (Opc) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::AShr:
  case Instruction::LShr:
  case Instruction::Shl:
  case Instruction::UDiv:
  case Instruction::URem: {
    Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
    Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
    break;
  }
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
    // If the source type of the cast is the type we're trying for then we can
    // just return the source. There's no need to insert it because it is not
    // new.
    if (I->getOperand(0)->getType() == Ty)
      return I->getOperand(0);

    // Otherwise, must be the same type of cast, so just reinsert a new one.
    // This also handles the case of zext(trunc(x)) -> zext(x).
    Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty,
                                      Opc == Instruction::SExt);
    break;
  case Instruction::Select: {
    Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
    Res = SelectInst::Create(I->getOperand(0), True, False);
    break;
  }
  case Instruction::PHI: {
    PHINode *OPN = cast<PHINode>(I);
    PHINode *NPN = PHINode::Create(Ty, OPN->getNumIncomingValues());
    for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
      Value *V =
          EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
      NPN->addIncoming(V, OPN->getIncomingBlock(i));
    }
    Res = NPN;
    break;
  }
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    Res = CastInst::Create(static_cast<Instruction::CastOps>(Opc),
                           I->getOperand(0), Ty);
    break;
  case Instruction::Call:
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default:
        llvm_unreachable("Unsupported call!");
      case Intrinsic::vscale: {
        Function *Fn =
            Intrinsic::getDeclaration(I->getModule(), Intrinsic::vscale, {Ty});
        Res = CallInst::Create(Fn->getFunctionType(), Fn);
        break;
      }
      }
    }
    break;
  case Instruction::ShuffleVector: {
    auto *ScalarTy = cast<VectorType>(Ty)->getElementType();
    auto *VTy = cast<VectorType>(I->getOperand(0)->getType());
    auto *FixedTy = VectorType::get(ScalarTy, VTy->getElementCount());
    Value *Op0 = EvaluateInDifferentType(I->getOperand(0), FixedTy, isSigned);
    Value *Op1 = EvaluateInDifferentType(I->getOperand(1), FixedTy, isSigned);
    Res = new ShuffleVectorInst(Op0, Op1,
                                cast<ShuffleVectorInst>(I)->getShuffleMask());
    break;
  }
  default:
    // TODO: Can handle more cases here.
    llvm_unreachable("Unreachable!");
  }

  Res->takeName(I);
  return InsertNewInstWith(Res, I->getIterator());
}
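
// Illustrative example (not part of the original comments): evaluating
//   %x = zext i8 %p to i32
//   %a = add i32 %x, 42
// in i8 for a user like 'trunc i32 %a to i8' recursively rebuilds the
// expression in the narrow type, yielding
//   %a = add i8 %p, 42
// so both the trunc and the zext become dead.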

Instruction::CastOps
InstCombinerImpl::isEliminableCastPair(const CastInst *CI1,
                                       const CastInst *CI2) {
  Type *SrcTy = CI1->getSrcTy();
  Type *MidTy = CI1->getDestTy();
  Type *DstTy = CI2->getDestTy();

  Instruction::CastOps firstOp = CI1->getOpcode();
  Instruction::CastOps secondOp = CI2->getOpcode();
  Type *SrcIntPtrTy =
      SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr;
  Type *MidIntPtrTy =
      MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr;
  Type *DstIntPtrTy =
      DstTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(DstTy) : nullptr;
  unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
                                                DstTy, SrcIntPtrTy, MidIntPtrTy,
                                                DstIntPtrTy);

  // We don't want to form an inttoptr or ptrtoint that converts to an integer
  // type that differs from the pointer size.
  if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
      (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
    Res = 0;

  return Instruction::CastOps(Res);
}
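
// For example (illustrative): the pair
//   %m = zext i8 %x to i32
//   %d = trunc i32 %m to i16
// is eliminable; this returns ZExt so the caller can form the single cast
//   %d = zext i8 %x to i16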

/// Implement the transforms common to all CastInst visitors.
Instruction *InstCombinerImpl::commonCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);
  Type *Ty = CI.getType();

  if (auto *SrcC = dyn_cast<Constant>(Src))
    if (Constant *Res = ConstantFoldCastOperand(CI.getOpcode(), SrcC, Ty, DL))
      return replaceInstUsesWith(CI, Res);

  // Try to eliminate a cast of a cast.
  if (auto *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast
    if (Instruction::CastOps NewOpc = isEliminableCastPair(CSrc, &CI)) {
      // The first cast (CSrc) is eliminable so we need to fix up or replace
      // the second cast (CI). CSrc will then have a good chance of being dead.
      auto *Res = CastInst::Create(NewOpc, CSrc->getOperand(0), Ty);
      // Point debug users of the dying cast to the new one.
      if (CSrc->hasOneUse())
        replaceAllDbgUsesWith(*CSrc, *Res, CI, DT);
      return Res;
    }
  }

  if (auto *Sel = dyn_cast<SelectInst>(Src)) {
    // We are casting a select. Try to fold the cast into the select if the
    // select does not have a compare instruction with matching operand types
    // or the select is likely better done in a narrow type.
    // Creating a select with operands that are different sizes than its
    // condition may inhibit other folds and lead to worse codegen.
    auto *Cmp = dyn_cast<CmpInst>(Sel->getCondition());
    if (!Cmp || Cmp->getOperand(0)->getType() != Sel->getType() ||
        (CI.getOpcode() == Instruction::Trunc &&
         shouldChangeType(CI.getSrcTy(), CI.getType()))) {

      // If it's a bitcast involving vectors, make sure it has the same number
      // of elements on both sides.
      if (CI.getOpcode() != Instruction::BitCast ||
          match(&CI, m_ElementWiseBitCast(m_Value()))) {
        if (Instruction *NV = FoldOpIntoSelect(CI, Sel)) {
          replaceAllDbgUsesWith(*Sel, *NV, CI, DT);
          return NV;
        }
      }
    }
  }

  // If we are casting a PHI, then fold the cast into the PHI.
  if (auto *PN = dyn_cast<PHINode>(Src)) {
    // Don't do this if it would create a PHI node with an illegal type from a
    // legal type.
    if (!Src->getType()->isIntegerTy() || !CI.getType()->isIntegerTy() ||
        shouldChangeType(CI.getSrcTy(), CI.getType()))
      if (Instruction *NV = foldOpIntoPhi(CI, PN))
        return NV;
  }

  // Canonicalize a unary shuffle after the cast if neither operation changes
  // the size or element size of the input vector.
  // TODO: We could allow size-changing ops if that doesn't harm codegen.
  // cast (shuffle X, Mask) --> shuffle (cast X), Mask
  Value *X;
  ArrayRef<int> Mask;
  if (match(Src, m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(Mask))))) {
    // TODO: Allow scalable vectors?
    auto *SrcTy = dyn_cast<FixedVectorType>(X->getType());
    auto *DestTy = dyn_cast<FixedVectorType>(Ty);
    if (SrcTy && DestTy &&
        SrcTy->getNumElements() == DestTy->getNumElements() &&
        SrcTy->getPrimitiveSizeInBits() == DestTy->getPrimitiveSizeInBits()) {
      Value *CastX = Builder.CreateCast(CI.getOpcode(), X, DestTy);
      return new ShuffleVectorInst(CastX, Mask);
    }
  }

  return nullptr;
}
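
// Illustrative example of the shuffle canonicalization above: element count
// and total bit width are both preserved by a cast like sitofp, so
//   %s = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 0, i32 0, i32 1, i32 1>
//   %c = sitofp <4 x i32> %s to <4 x float>
// becomes
//   %c.x = sitofp <4 x i32> %x to <4 x float>
//   %s = shufflevector <4 x float> %c.x, <4 x float> poison, <4 x i32> <i32 0, i32 0, i32 1, i32 1>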

/// Constants and extensions/truncates from the destination type are always
/// free to be evaluated in that type. This is a helper for canEvaluate*.
static bool canAlwaysEvaluateInType(Value *V, Type *Ty) {
  if (isa<Constant>(V))
    return match(V, m_ImmConstant());

  Value *X;
  if ((match(V, m_ZExtOrSExt(m_Value(X))) || match(V, m_Trunc(m_Value(X)))) &&
      X->getType() == Ty)
    return true;

  return false;
}

/// Filter out values that we can not evaluate in the destination type for free.
/// This is a helper for canEvaluate*.
static bool canNotEvaluateInType(Value *V, Type *Ty) {
  if (!isa<Instruction>(V))
    return true;
  // We don't extend or shrink something that has multiple uses -- doing so
  // would require duplicating the instruction which isn't profitable.
  if (!V->hasOneUse())
    return true;

  return false;
}

/// Return true if we can evaluate the specified expression tree as type Ty
/// instead of its larger type, and arrive with the same value.
/// This is used by code that tries to eliminate truncates.
///
/// Ty will always be a type smaller than V. We should return true if trunc(V)
/// can be computed by computing V in the smaller type. If V is an instruction,
/// then trunc(inst(x,y)) can be computed as inst(trunc(x),trunc(y)), which only
/// makes sense if x and y can be efficiently truncated.
///
/// This function works on both vectors and scalars.
///
static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombinerImpl &IC,
                                 Instruction *CxtI) {
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  Type *OrigTy = V->getType();
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // These operators can all arbitrarily be extended or truncated.
    return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
           canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);

  case Instruction::UDiv:
  case Instruction::URem: {
    // UDiv and URem can be truncated if all the truncated bits are zero.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    assert(BitWidth < OrigBitWidth && "Unexpected bitwidths!");
    APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
    if (IC.MaskedValueIsZero(I->getOperand(0), Mask, 0, CxtI) &&
        IC.MaskedValueIsZero(I->getOperand(1), Mask, 0, CxtI)) {
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::Shl: {
    // If we are truncating the result of this SHL, and if it's a shift of an
    // inrange amount, we can always perform a SHL in a smaller type.
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    if (AmtKnownBits.getMaxValue().ult(BitWidth))
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    break;
  }
  case Instruction::LShr: {
    // If this is a truncate of a logical shr, we can truncate it to a smaller
    // lshr iff we know that the bits we would otherwise be shifting in are
    // already zeros.
    // TODO: It is enough to check that the bits we would be shifting in are
    // zero - use AmtKnownBits.getMaxValue().
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    APInt ShiftedBits = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
    if (AmtKnownBits.getMaxValue().ult(BitWidth) &&
        IC.MaskedValueIsZero(I->getOperand(0), ShiftedBits, 0, CxtI)) {
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::AShr: {
    // If this is a truncate of an arithmetic shr, we can truncate it to a
    // smaller ashr iff we know that all the bits from the sign bit of the
    // original type and the sign bit of the truncate type are similar.
    // TODO: It is enough to check that the bits we would be shifting in are
    // similar to sign bit of the truncate type.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    unsigned ShiftedBits = OrigBitWidth - BitWidth;
    if (AmtKnownBits.getMaxValue().ult(BitWidth) &&
        ShiftedBits < IC.ComputeNumSignBits(I->getOperand(0), 0, CxtI))
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    break;
  }
  case Instruction::Trunc:
    // trunc(trunc(x)) -> trunc(x)
    return true;
  case Instruction::ZExt:
  case Instruction::SExt:
    // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest
    // trunc(ext(x)) -> trunc(x) if the source type is larger than the new dest
    return true;
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    return canEvaluateTruncated(SI->getTrueValue(), Ty, IC, CxtI) &&
           canEvaluateTruncated(SI->getFalseValue(), Ty, IC, CxtI);
  }
  case Instruction::PHI: {
    // We can change a phi if we can change all operands. Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateTruncated(IncValue, Ty, IC, CxtI))
        return false;
    return true;
  }
  case Instruction::FPToUI:
  case Instruction::FPToSI: {
    // If the integer type can hold the max FP value, it is safe to cast
    // directly to that type. Otherwise, we may create poison via overflow
    // that did not exist in the original code.
    Type *InputTy = I->getOperand(0)->getType()->getScalarType();
    const fltSemantics &Semantics = InputTy->getFltSemantics();
    uint32_t MinBitWidth = APFloatBase::semanticsIntSizeInBits(
        Semantics, I->getOpcode() == Instruction::FPToSI);
    return Ty->getScalarSizeInBits() >= MinBitWidth;
  }
  case Instruction::ShuffleVector:
    return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
           canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}
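
// Illustrative example for the Shl case above: in
//   %s = shl i32 %x, 3
//   %t = trunc i32 %s to i16
// the shift amount (3) is known to be less than 16, so (provided %x itself
// can also be evaluated in i16) the tree can be rebuilt as
// 'shl i16 (trunc %x), 3'.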

/// Given a vector that is bitcast to an integer, optionally logically
/// right-shifted, and truncated, convert it to an extractelement.
/// Example (big endian):
///   trunc (lshr (bitcast <4 x i32> %X to i128), 32) to i32
///   --->
///   extractelement <4 x i32> %X, 1
static Instruction *foldVecTruncToExtElt(TruncInst &Trunc,
                                         InstCombinerImpl &IC) {
  Value *TruncOp = Trunc.getOperand(0);
  Type *DestType = Trunc.getType();
  if (!TruncOp->hasOneUse() || !isa<IntegerType>(DestType))
    return nullptr;

  Value *VecInput = nullptr;
  ConstantInt *ShiftVal = nullptr;
  if (!match(TruncOp, m_CombineOr(m_BitCast(m_Value(VecInput)),
                                  m_LShr(m_BitCast(m_Value(VecInput)),
                                         m_ConstantInt(ShiftVal)))) ||
      !isa<VectorType>(VecInput->getType()))
    return nullptr;

  VectorType *VecType = cast<VectorType>(VecInput->getType());
  unsigned VecWidth = VecType->getPrimitiveSizeInBits();
  unsigned DestWidth = DestType->getPrimitiveSizeInBits();
  unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0;

  if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0))
    return nullptr;

  // If the element type of the vector doesn't match the result type,
  // bitcast it to a vector type that we can extract from.
  unsigned NumVecElts = VecWidth / DestWidth;
  if (VecType->getElementType() != DestType) {
    VecType = FixedVectorType::get(DestType, NumVecElts);
    VecInput = IC.Builder.CreateBitCast(VecInput, VecType, "bc");
  }

  unsigned Elt = ShiftAmount / DestWidth;
  if (IC.getDataLayout().isBigEndian())
    Elt = NumVecElts - 1 - Elt;

  return ExtractElementInst::Create(VecInput, IC.Builder.getInt32(Elt));
}

/// Funnel/Rotate left/right may occur in a wider type than necessary because of
/// type promotion rules. Try to narrow the inputs and convert to funnel shift.
Instruction *InstCombinerImpl::narrowFunnelShift(TruncInst &Trunc) {
  assert((isa<VectorType>(Trunc.getSrcTy()) ||
          shouldChangeType(Trunc.getSrcTy(), Trunc.getType())) &&
         "Don't narrow to an illegal scalar type");

  // Bail out on strange types. It is possible to handle some of these patterns
  // even with non-power-of-2 sizes, but it is not a likely scenario.
  Type *DestTy = Trunc.getType();
  unsigned NarrowWidth = DestTy->getScalarSizeInBits();
  unsigned WideWidth = Trunc.getSrcTy()->getScalarSizeInBits();
  if (!isPowerOf2_32(NarrowWidth))
    return nullptr;

  // First, find an or'd pair of opposite shifts:
  // trunc (or (lshr ShVal0, ShAmt0), (shl ShVal1, ShAmt1))
  BinaryOperator *Or0, *Or1;
  if (!match(Trunc.getOperand(0), m_OneUse(m_Or(m_BinOp(Or0), m_BinOp(Or1)))))
    return nullptr;

  Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1;
  if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal0), m_Value(ShAmt0)))) ||
      !match(Or1, m_OneUse(m_LogicalShift(m_Value(ShVal1), m_Value(ShAmt1)))) ||
      Or0->getOpcode() == Or1->getOpcode())
    return nullptr;

  // Canonicalize to or(shl(ShVal0, ShAmt0), lshr(ShVal1, ShAmt1)).
  if (Or0->getOpcode() == BinaryOperator::LShr) {
    std::swap(Or0, Or1);
    std::swap(ShVal0, ShVal1);
    std::swap(ShAmt0, ShAmt1);
  }
  assert(Or0->getOpcode() == BinaryOperator::Shl &&
         Or1->getOpcode() == BinaryOperator::LShr &&
         "Illegal or(shift,shift) pair");

  // Match the shift amount operands for a funnel/rotate pattern. This always
  // matches a subtraction on the R operand.
  auto matchShiftAmount = [&](Value *L, Value *R, unsigned Width) -> Value * {
    // The shift amounts may add up to the narrow bit width:
    // (shl ShVal0, L) | (lshr ShVal1, Width - L)
    // If this is a funnel shift (different operands are shifted), then the
    // shift amount can not over-shift (create poison) in the narrow type.
    unsigned MaxShiftAmountWidth = Log2_32(NarrowWidth);
    APInt HiBitMask = ~APInt::getLowBitsSet(WideWidth, MaxShiftAmountWidth);
    if (ShVal0 == ShVal1 || MaskedValueIsZero(L, HiBitMask))
      if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L)))))
        return L;

    // The following patterns currently only work for rotation patterns.
    // TODO: Add more general funnel-shift compatible patterns.
    if (ShVal0 != ShVal1)
      return nullptr;

    // The shift amount may be masked with negation:
    // (shl ShVal0, (X & (Width - 1))) | (lshr ShVal1, ((-X) & (Width - 1)))
    Value *X;
    unsigned Mask = Width - 1;
    if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
        match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
      return X;

    // Same as above, but the shift amount may be extended after masking:
    if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
        match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))))
      return X;

    return nullptr;
  };

  Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, NarrowWidth);
  bool IsFshl = true; // Sub on LSHR.
  if (!ShAmt) {
    ShAmt = matchShiftAmount(ShAmt1, ShAmt0, NarrowWidth);
    IsFshl = false; // Sub on SHL.
  }
  if (!ShAmt)
    return nullptr;

  // The right-shifted value must have high zeros in the wide type (for example
  // from 'zext', 'and' or 'shift'). High bits of the left-shifted value are
  // truncated, so those do not matter.
  APInt HiBitMask = APInt::getHighBitsSet(WideWidth, WideWidth - NarrowWidth);
  if (!MaskedValueIsZero(ShVal1, HiBitMask, 0, &Trunc))
    return nullptr;

  // Adjust the width of ShAmt for narrowed funnel shift operation:
  // - Zero-extend if ShAmt is narrower than the destination type.
  // - Truncate if ShAmt is wider, discarding non-significant high-order bits.
  // This prepares ShAmt for llvm.fshl.i8(trunc(ShVal), trunc(ShVal),
  // zext/trunc(ShAmt)).
  Value *NarrowShAmt = Builder.CreateZExtOrTrunc(ShAmt, DestTy);

  Value *X, *Y;
  X = Y = Builder.CreateTrunc(ShVal0, DestTy);
  if (ShVal0 != ShVal1)
    Y = Builder.CreateTrunc(ShVal1, DestTy);
  Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
  Function *F = Intrinsic::getDeclaration(Trunc.getModule(), IID, DestTy);
  return CallInst::Create(F, {X, Y, NarrowShAmt});
}
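
// Illustrative rotate example for the fold above (assuming the shift amount
// stays in range): with the right-shifted value known to have zero high bits,
//   %x = zext i16 %v to i32
//   %sub = sub i32 16, %amt
//   %shl = shl i32 %x, %amt
//   %shr = lshr i32 %x, %sub
//   %or = or i32 %shl, %shr
//   %t = trunc i32 %or to i16
// can become
//   %amt.n = trunc i32 %amt to i16
//   %t = call i16 @llvm.fshl.i16(i16 %v, i16 %v, i16 %amt.n)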

/// Try to narrow the width of math or bitwise logic instructions by pulling a
/// truncate ahead of binary operators.
Instruction *InstCombinerImpl::narrowBinOp(TruncInst &Trunc) {
  Type *SrcTy = Trunc.getSrcTy();
  Type *DestTy = Trunc.getType();
  unsigned SrcWidth = SrcTy->getScalarSizeInBits();
  unsigned DestWidth = DestTy->getScalarSizeInBits();

  if (!isa<VectorType>(SrcTy) && !shouldChangeType(SrcTy, DestTy))
    return nullptr;

  BinaryOperator *BinOp;
  if (!match(Trunc.getOperand(0), m_OneUse(m_BinOp(BinOp))))
    return nullptr;

  Value *BinOp0 = BinOp->getOperand(0);
  Value *BinOp1 = BinOp->getOperand(1);
  switch (BinOp->getOpcode()) {
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul: {
    Constant *C;
    if (match(BinOp0, m_Constant(C))) {
      // trunc (binop C, X) --> binop (trunc C', X)
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowC, TruncX);
    }
    if (match(BinOp1, m_Constant(C))) {
      // trunc (binop X, C) --> binop (trunc X, C')
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), TruncX, NarrowC);
    }
    Value *X;
    if (match(BinOp0, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop (ext X), Y) --> binop X, (trunc Y)
      Value *NarrowOp1 = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), X, NarrowOp1);
    }
    if (match(BinOp1, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop Y, (ext X)) --> binop (trunc Y), X
      Value *NarrowOp0 = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowOp0, X);
    }
    break;
  }
  case Instruction::LShr:
  case Instruction::AShr: {
    // trunc (*shr (trunc A), C) --> trunc(*shr A, C)
    Value *A;
    Constant *C;
    if (match(BinOp0, m_Trunc(m_Value(A))) && match(BinOp1, m_Constant(C))) {
      unsigned MaxShiftAmt = SrcWidth - DestWidth;
      // If the shift is small enough, all zero/sign bits created by the shift
      // are removed by the trunc.
      if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULE,
                                      APInt(SrcWidth, MaxShiftAmt)))) {
        auto *OldShift = cast<Instruction>(Trunc.getOperand(0));
        bool IsExact = OldShift->isExact();
        if (Constant *ShAmt = ConstantFoldIntegerCast(C, A->getType(),
                                                      /*IsSigned*/ true, DL)) {
          ShAmt = Constant::mergeUndefsWith(ShAmt, C);
          Value *Shift =
              OldShift->getOpcode() == Instruction::AShr
                  ? Builder.CreateAShr(A, ShAmt, OldShift->getName(), IsExact)
                  : Builder.CreateLShr(A, ShAmt, OldShift->getName(), IsExact);
          return CastInst::CreateTruncOrBitCast(Shift, DestTy);
        }
      }
    }
    break;
  }
  default: break;
  }

  if (Instruction *NarrowOr = narrowFunnelShift(Trunc))
    return NarrowOr;

  return nullptr;
}
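
// Illustrative example of the constant-operand case above:
//   %a = add i64 %x, 15
//   %t = trunc i64 %a to i32
// becomes
//   %x.tr = trunc i64 %x to i32
//   %t = add i32 %x.tr, 15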

/// Try to narrow the width of a splat shuffle. This could be generalized to any
/// shuffle with a constant operand, but we limit the transform to avoid
/// creating a shuffle type that targets may not be able to lower effectively.
static Instruction *shrinkSplatShuffle(TruncInst &Trunc,
                                       InstCombiner::BuilderTy &Builder) {
  auto *Shuf = dyn_cast<ShuffleVectorInst>(Trunc.getOperand(0));
  if (Shuf && Shuf->hasOneUse() && match(Shuf->getOperand(1), m_Undef()) &&
      all_equal(Shuf->getShuffleMask()) &&
      Shuf->getType() == Shuf->getOperand(0)->getType()) {
    // trunc (shuf X, Undef, SplatMask) --> shuf (trunc X), Poison, SplatMask
    // trunc (shuf X, Poison, SplatMask) --> shuf (trunc X), Poison, SplatMask
    Value *NarrowOp = Builder.CreateTrunc(Shuf->getOperand(0), Trunc.getType());
    return new ShuffleVectorInst(NarrowOp, Shuf->getShuffleMask());
  }

  return nullptr;
}

/// Try to narrow the width of an insert element. This could be generalized for
/// any vector constant, but we limit the transform to insertion into undef to
/// avoid potential backend problems from unsupported insertion widths. This
/// could also be extended to handle the case of inserting a scalar constant
/// into a vector variable.
static Instruction *shrinkInsertElt(CastInst &Trunc,
                                    InstCombiner::BuilderTy &Builder) {
  Instruction::CastOps Opcode = Trunc.getOpcode();
  assert((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
         "Unexpected instruction for shrinking");

  auto *InsElt = dyn_cast<InsertElementInst>(Trunc.getOperand(0));
  if (!InsElt || !InsElt->hasOneUse())
    return nullptr;

  Type *DestTy = Trunc.getType();
  Type *DestScalarTy = DestTy->getScalarType();
  Value *VecOp = InsElt->getOperand(0);
  Value *ScalarOp = InsElt->getOperand(1);
  Value *Index = InsElt->getOperand(2);

  if (match(VecOp, m_Undef())) {
    // trunc (inselt undef, X, Index) --> inselt undef, (trunc X), Index
    // fptrunc (inselt undef, X, Index) --> inselt undef, (fptrunc X), Index
    UndefValue *NarrowUndef = UndefValue::get(DestTy);
    Value *NarrowOp = Builder.CreateCast(Opcode, ScalarOp, DestScalarTy);
    return InsertElementInst::Create(NarrowUndef, NarrowOp, Index);
  }

  return nullptr;
}
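
// Illustrative example for shrinkInsertElt:
//   %v = insertelement <4 x i32> undef, i32 %x, i64 0
//   %t = trunc <4 x i32> %v to <4 x i16>
// becomes
//   %x.tr = trunc i32 %x to i16
//   %t = insertelement <4 x i16> undef, i16 %x.tr, i64 0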

Instruction *InstCombinerImpl::visitTrunc(TruncInst &Trunc) {
  if (Instruction *Result = commonCastTransforms(Trunc))
    return Result;

  Value *Src = Trunc.getOperand(0);
  Type *DestTy = Trunc.getType(), *SrcTy = Src->getType();
  unsigned DestWidth = DestTy->getScalarSizeInBits();
  unsigned SrcWidth = SrcTy->getScalarSizeInBits();

  // Attempt to truncate the entire input expression tree to the destination
  // type. Only do this if the dest type is a simple type, don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
  if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
      canEvaluateTruncated(Src, DestTy, *this, &Trunc)) {

    // If this cast is a truncate, evaluating in a different type always
    // eliminates the cast, so it is always a win.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid cast: "
               << Trunc << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);
    return replaceInstUsesWith(Trunc, Res);
  }

  // For integer types, check if we can shorten the entire input expression to
  // DestWidth * 2, which won't allow removing the truncate, but reducing the
  // width may enable further optimizations, e.g. allowing for larger
  // vectorization factors.
  if (auto *DestITy = dyn_cast<IntegerType>(DestTy)) {
    if (DestWidth * 2 < SrcWidth) {
      auto *NewDestTy = DestITy->getExtendedType();
      if (shouldChangeType(SrcTy, NewDestTy) &&
          canEvaluateTruncated(Src, NewDestTy, *this, &Trunc)) {
        LLVM_DEBUG(
            dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                      " to reduce the width of operand of"
                   << Trunc << '\n');
        Value *Res = EvaluateInDifferentType(Src, NewDestTy, false);
        return new TruncInst(Res, DestTy);
      }
    }
  }

  // Test if the trunc is the user of a select which is part of a
  // minimum or maximum operation. If so, don't do any more simplification.
  // Even simplifying demanded bits can break the canonical form of a
  // min/max.
  Value *LHS, *RHS;
  if (SelectInst *Sel = dyn_cast<SelectInst>(Src))
    if (matchSelectPattern(Sel, LHS, RHS).Flavor != SPF_UNKNOWN)
      return nullptr;

  // See if we can simplify any instructions used by the input whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(Trunc))
    return &Trunc;

  if (DestWidth == 1) {
    Value *Zero = Constant::getNullValue(SrcTy);
    if (DestTy->isIntegerTy()) {
      // Canonicalize trunc x to i1 -> icmp ne (and x, 1), 0 (scalar only).
      // TODO: We canonicalize to more instructions here because we are
      // probably lacking equivalent analysis for trunc relative to icmp.
      // There may also be codegen concerns. If those trunc limitations were
      // removed, we could remove this transform.
      Value *And = Builder.CreateAnd(Src, ConstantInt::get(SrcTy, 1));
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }

    // For vectors, we do not canonicalize all truncs to icmp, so optimize
    // patterns that would be covered within visitICmpInst.
    Value *X;
    Constant *C;
    if (match(Src, m_OneUse(m_LShr(m_Value(X), m_Constant(C))))) {
      // trunc (lshr X, C) to i1 --> icmp ne (and X, C'), 0
      Constant *One = ConstantInt::get(SrcTy, APInt(SrcWidth, 1));
      Constant *MaskC = ConstantExpr::getShl(One, C);
      Value *And = Builder.CreateAnd(X, MaskC);
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
    if (match(Src, m_OneUse(m_c_Or(m_LShr(m_Value(X), m_ImmConstant(C)),
                                   m_Deferred(X))))) {
      // trunc (or (lshr X, C), X) to i1 --> icmp ne (and X, C'), 0
      Constant *One = ConstantInt::get(SrcTy, APInt(SrcWidth, 1));
      Constant *MaskC = ConstantExpr::getShl(One, C);
      Value *And = Builder.CreateAnd(X, Builder.CreateOr(MaskC, One));
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
  }

  Value *A, *B;
  Constant *C;
  if (match(Src, m_LShr(m_SExt(m_Value(A)), m_Constant(C)))) {
    unsigned AWidth = A->getType()->getScalarSizeInBits();
    unsigned MaxShiftAmt = SrcWidth - std::max(DestWidth, AWidth);
    auto *OldSh = cast<Instruction>(Src);
    bool IsExact = OldSh->isExact();

    // If the shift is small enough, all zero bits created by the shift are
    // removed by the trunc.
    if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULE,
                                    APInt(SrcWidth, MaxShiftAmt)))) {
      auto GetNewShAmt = [&](unsigned Width) {
        Constant *MaxAmt = ConstantInt::get(SrcTy, Width - 1, false);
        Constant *Cmp =
            ConstantFoldCompareInstOperands(ICmpInst::ICMP_ULT, C, MaxAmt, DL);
        Constant *ShAmt = ConstantFoldSelectInstruction(Cmp, C, MaxAmt);
        return ConstantFoldCastOperand(Instruction::Trunc, ShAmt, A->getType(),
                                       DL);
      };

      // trunc (lshr (sext A), C) --> ashr A, C
      if (A->getType() == DestTy) {
        Constant *ShAmt = GetNewShAmt(DestWidth);
        ShAmt = Constant::mergeUndefsWith(ShAmt, C);
        return IsExact ? BinaryOperator::CreateExactAShr(A, ShAmt)
                       : BinaryOperator::CreateAShr(A, ShAmt);
      }
      // The types are mismatched, so create a cast after shifting:
      // trunc (lshr (sext A), C) --> sext/trunc (ashr A, C)
      if (Src->hasOneUse()) {
        Constant *ShAmt = GetNewShAmt(AWidth);
        Value *Shift = Builder.CreateAShr(A, ShAmt, "", IsExact);
        return CastInst::CreateIntegerCast(Shift, DestTy, true);
      }
    }
    // TODO: Mask high bits with 'and'.
  }

  if (Instruction *I = narrowBinOp(Trunc))
    return I;

  if (Instruction *I = shrinkSplatShuffle(Trunc, Builder))
    return I;

  if (Instruction *I = shrinkInsertElt(Trunc, Builder))
    return I;

  if (Src->hasOneUse() &&
      (isa<VectorType>(SrcTy) || shouldChangeType(SrcTy, DestTy))) {
    // Transform "trunc (shl X, cst)" -> "shl (trunc X), cst" so long as the
    // dest type is native and cst < dest size.
    if (match(Src, m_Shl(m_Value(A), m_Constant(C))) &&
        !match(A, m_Shr(m_Value(), m_Constant()))) {
      // Skip shifts of shift by constants. It undoes a combine in
      // FoldShiftByConstant and is the extend in reg pattern.
      APInt Threshold = APInt(C->getType()->getScalarSizeInBits(), DestWidth);
      if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, Threshold))) {
        Value *NewTrunc = Builder.CreateTrunc(A, DestTy, A->getName() + ".tr");
        return BinaryOperator::Create(Instruction::Shl, NewTrunc,
                                      ConstantExpr::getTrunc(C, DestTy));
      }
    }
  }

  if (Instruction *I = foldVecTruncToExtElt(Trunc, *this))
    return I;

  // Whenever an element is extracted from a vector, and then truncated,
  // canonicalize by converting it to a bitcast followed by an
  // extractelement.
  //
  // Example (little endian):
  //   trunc (extractelement <4 x i64> %X, 0) to i32
  //   --->
  //   extractelement <8 x i32> (bitcast <4 x i64> %X to <8 x i32>), i32 0
  Value *VecOp;
  ConstantInt *Cst;
  if (match(Src, m_OneUse(m_ExtractElt(m_Value(VecOp), m_ConstantInt(Cst))))) {
    auto *VecOpTy = cast<VectorType>(VecOp->getType());
    auto VecElts = VecOpTy->getElementCount();

    // A badly fit destination size would result in an invalid cast.
    if (SrcWidth % DestWidth == 0) {
      uint64_t TruncRatio = SrcWidth / DestWidth;
      uint64_t BitCastNumElts = VecElts.getKnownMinValue() * TruncRatio;
      uint64_t VecOpIdx = Cst->getZExtValue();
      uint64_t NewIdx = DL.isBigEndian() ? (VecOpIdx + 1) * TruncRatio - 1
                                         : VecOpIdx * TruncRatio;
      assert(BitCastNumElts <= std::numeric_limits<uint32_t>::max() &&
             "overflow 32-bits");

      auto *BitCastTo =
          VectorType::get(DestTy, BitCastNumElts, VecElts.isScalable());
      Value *BitCast = Builder.CreateBitCast(VecOp, BitCastTo);
      return ExtractElementInst::Create(BitCast, Builder.getInt32(NewIdx));
    }
  }

  // trunc (ctlz_i32(zext(A), B)) --> add(ctlz_i16(A, B), C)
  if (match(Src, m_OneUse(m_Intrinsic<Intrinsic::ctlz>(m_ZExt(m_Value(A)),
                                                       m_Value(B))))) {
    unsigned AWidth = A->getType()->getScalarSizeInBits();
    if (AWidth == DestWidth && AWidth > Log2_32(SrcWidth)) {
      Value *WidthDiff = ConstantInt::get(A->getType(), SrcWidth - AWidth);
      Value *NarrowCtlz =
          Builder.CreateIntrinsic(Intrinsic::ctlz, {Trunc.getType()}, {A, B});
      return BinaryOperator::CreateAdd(NarrowCtlz, WidthDiff);
    }
  }

  if (match(Src, m_VScale())) {
    if (Trunc.getFunction() &&
        Trunc.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
      Attribute Attr =
          Trunc.getFunction()->getFnAttribute(Attribute::VScaleRange);
      if (std::optional<unsigned> MaxVScale = Attr.getVScaleRangeMax()) {
        if (Log2_32(*MaxVScale) < DestWidth) {
          Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1));
          return replaceInstUsesWith(Trunc, VScale);
        }
      }
    }
  }

  return nullptr;
}
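
// Illustrative example of the ctlz narrowing above: counting leading zeros of
// 'zext i16 %a to i32' always includes the 16 extended zero bits, so
//   %z = zext i16 %a to i32
//   %c = call i32 @llvm.ctlz.i32(i32 %z, i1 %b)
//   %t = trunc i32 %c to i16
// becomes
//   %c.n = call i16 @llvm.ctlz.i16(i16 %a, i1 %b)
//   %t = add i16 %c.n, 16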

Instruction *InstCombinerImpl::transformZExtICmp(ICmpInst *Cmp,
                                                 ZExtInst &Zext) {
  // If we are just checking for a icmp eq of a single bit and zext'ing it
  // to an integer, then shift the bit to the appropriate place and then
  // cast to integer to avoid the comparison.

  // FIXME: This set of transforms does not check for extra uses and/or creates
  //        an extra instruction (an optional final cast is not included
  //        in the transform comments). We may also want to favor icmp over
  //        shifts in cases of equal instructions because icmp has better
  //        analysis in general (invert the transform).

  const APInt *Op1CV;
  if (match(Cmp->getOperand(1), m_APInt(Op1CV))) {

    // zext (x <s 0) to i32 --> x>>u31 true if signbit set.
    if (Cmp->getPredicate() == ICmpInst::ICMP_SLT && Op1CV->isZero()) {
      Value *In = Cmp->getOperand(0);
      Value *Sh = ConstantInt::get(In->getType(),
                                   In->getType()->getScalarSizeInBits() - 1);
      In = Builder.CreateLShr(In, Sh, In->getName() + ".lobit");
      if (In->getType() != Zext.getType())
        In = Builder.CreateIntCast(In, Zext.getType(), false /*ZExt*/);

      return replaceInstUsesWith(Zext, In);
    }

    // zext (X == 0) to i32 --> X^1 iff X has only the low bit set.
    // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    // zext (X != 0) to i32 --> X iff X has only the low bit set.
    // zext (X != 0) to i32 --> X>>1 iff X has only the 2nd bit set.

    if (Op1CV->isZero() && Cmp->isEquality()) {
      // Exactly 1 possible 1? But not the high-bit because that is
      // canonicalized to this form.
      KnownBits Known = computeKnownBits(Cmp->getOperand(0), 0, &Zext);
      APInt KnownZeroMask(~Known.Zero);
      uint32_t ShAmt = KnownZeroMask.logBase2();
      bool IsExpectShAmt = KnownZeroMask.isPowerOf2() &&
                           (Zext.getType()->getScalarSizeInBits() != ShAmt + 1);
      if (IsExpectShAmt &&
          (Cmp->getOperand(0)->getType() == Zext.getType() ||
           Cmp->getPredicate() == ICmpInst::ICMP_NE || ShAmt == 0)) {
        Value *In = Cmp->getOperand(0);
        if (ShAmt) {
          // Perform a logical shr by shiftamt.
          // Insert the shift to put the result in the low bit.
          In = Builder.CreateLShr(In, ConstantInt::get(In->getType(), ShAmt),
                                  In->getName() + ".lobit");
        }

        // Toggle the low bit for "X == 0".
        if (Cmp->getPredicate() == ICmpInst::ICMP_EQ)
          In = Builder.CreateXor(In, ConstantInt::get(In->getType(), 1));

        if (Zext.getType() == In->getType())
          return replaceInstUsesWith(Zext, In);

        Value *IntCast = Builder.CreateIntCast(In, Zext.getType(), false);
        return replaceInstUsesWith(Zext, IntCast);
      }
    }
  }

  if (Cmp->isEquality() && Zext.getType() == Cmp->getOperand(0)->getType()) {
    // Test if a bit is clear/set using a shifted-one mask:
    // zext (icmp eq (and X, (1 << ShAmt)), 0) --> and (lshr (not X), ShAmt), 1
    // zext (icmp ne (and X, (1 << ShAmt)), 0) --> and (lshr X, ShAmt), 1
    Value *X, *ShAmt;
    if (Cmp->hasOneUse() && match(Cmp->getOperand(1), m_ZeroInt()) &&
        match(Cmp->getOperand(0),
              m_OneUse(m_c_And(m_Shl(m_One(), m_Value(ShAmt)), m_Value(X))))) {
      if (Cmp->getPredicate() == ICmpInst::ICMP_EQ)
        X = Builder.CreateNot(X);
      Value *Lshr = Builder.CreateLShr(X, ShAmt);
      Value *And1 = Builder.CreateAnd(Lshr, ConstantInt::get(X->getType(), 1));
      return replaceInstUsesWith(Zext, And1);
    }
  }

  return nullptr;
}
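
// Illustrative example of the shifted-one-mask transform above:
//   %m = shl i32 1, %n
//   %a = and i32 %x, %m
//   %c = icmp ne i32 %a, 0
//   %z = zext i1 %c to i32
// becomes
//   %lshr = lshr i32 %x, %n
//   %z = and i32 %lshr, 1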

/// Determine if the specified value can be computed in the specified wider type
/// and produce the same low bits. If not, return false.
///
/// If this function returns true, it can also return a non-zero number of bits
/// (in BitsToClear) which indicates that the value it computes is correct for
/// the zero extend, but that the additional BitsToClear bits need to be zero'd
/// out. For example, to promote something like:
///
///   %B = trunc i64 %A to i32
///   %C = lshr i32 %B, 8
///   %E = zext i32 %C to i64
///
/// CanEvaluateZExtd for the 'lshr' will return true, and BitsToClear will be
/// set to 8 to indicate that the promoted value needs to have bits 24-31
/// cleared in addition to bits 32-63. Since an 'and' will be generated to
/// clear the top bits anyway, doing this has no extra cost.
///
/// This function works on both vectors and scalars.
static bool canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear,
                             InstCombinerImpl &IC, Instruction *CxtI) {
  BitsToClear = 0;
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  unsigned Tmp;
  switch (I->getOpcode()) {
  case Instruction::ZExt:  // zext(zext(x)) -> zext(x).
  case Instruction::SExt:  // zext(sext(x)) -> sext(x).
  case Instruction::Trunc: // zext(trunc(x)) -> trunc(x) or zext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI))
      return false;
    // These can all be promoted if neither operand has 'bits to clear'.
    if (BitsToClear == 0 && Tmp == 0)
      return true;

    // If the operation is an AND/OR/XOR and the bits to clear are zero in the
    // other side, BitsToClear is ok.
    if (Tmp == 0 && I->isBitwiseLogicOp()) {
      // We use MaskedValueIsZero here for generality, but the case we care
      // about the most is constant RHS.
      unsigned VSize = V->getType()->getScalarSizeInBits();
      if (IC.MaskedValueIsZero(I->getOperand(1),
                               APInt::getHighBitsSet(VSize, BitsToClear),
                               0, CxtI)) {
        // If this is an And instruction and all of the BitsToClear are
        // known to be zero we can reset BitsToClear.
        if (I->getOpcode() == Instruction::And)
          BitsToClear = 0;
        return true;
      }
    }

    // Otherwise, we don't know how to analyze this BitsToClear case yet.
    return false;

  case Instruction::Shl: {
    // We can promote shl(x, cst) if we can promote x. Since shl overwrites the
    // upper bits we can reduce BitsToClear by the shift amount.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      uint64_t ShiftAmt = Amt->getZExtValue();
      BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0;
      return true;
    }
    return false;
  }
  case Instruction::LShr: {
    // We can promote lshr(x, cst) if we can promote x. This requires the
    // ultimate 'and' to clear out the high zero bits we're clearing out though.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      BitsToClear += Amt->getZExtValue();
      if (BitsToClear > V->getType()->getScalarSizeInBits())
        BitsToClear = V->getType()->getScalarSizeInBits();
      return true;
    }
    // Cannot promote variable LSHR.
    return false;
  }
  case Instruction::Select:
    if (!canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(2), Ty, BitsToClear, IC, CxtI) ||
        // TODO: If important, we could handle the case when the BitsToClear
        // are known zero in the disagreeing side.
        Tmp != BitsToClear)
      return false;
    return true;

  case Instruction::PHI: {
    // We can change a phi if we can change all operands. Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    if (!canEvaluateZExtd(PN->getIncomingValue(0), Ty, BitsToClear, IC, CxtI))
      return false;
    for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i)
      if (!canEvaluateZExtd(PN->getIncomingValue(i), Ty, Tmp, IC, CxtI) ||
          // TODO: If important, we could handle the case when the BitsToClear
          // are known zero in the disagreeing input.
          Tmp != BitsToClear)
        return false;
    return true;
  }
  case Instruction::Call:
    // llvm.vscale() can always be executed in larger type, because the
    // value is automatically zero-extended.
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
      if (II->getIntrinsicID() == Intrinsic::vscale)
        return true;
    return false;
  default:
    // TODO: Can handle more cases here.
    return false;
  }
}

Instruction *InstCombinerImpl::visitZExt(ZExtInst &Zext) {
  // If this zero extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this zext.
  if (Zext.hasOneUse() && isa<TruncInst>(Zext.user_back()) &&
      !isa<Constant>(Zext.getOperand(0)))
    return nullptr;

  // If one of the common conversions will work, do it.
  if (Instruction *Result = commonCastTransforms(Zext))
    return Result;

  Value *Src = Zext.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = Zext.getType();

  // Try to extend the entire expression tree to the wide destination type.
  unsigned BitsToClear;
  if (shouldChangeType(SrcTy, DestTy) &&
      canEvaluateZExtd(Src, DestTy, BitsToClear, *this, &Zext)) {
    assert(BitsToClear <= SrcTy->getScalarSizeInBits() &&
           "Can't clear more bits than in SrcTy");

    // Okay, we can transform this! Insert the new expression now.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid zero extend: "
               << Zext << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);

    // Preserve debug values referring to Src if the zext is its last use.
    if (auto *SrcOp = dyn_cast<Instruction>(Src))
      if (SrcOp->hasOneUse())
        replaceAllDbgUsesWith(*SrcOp, *Res, Zext, DT);

    uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits() - BitsToClear;
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with zeros, just replace this
    // cast with the result.
    if (MaskedValueIsZero(Res,
                          APInt::getHighBitsSet(DestBitSize,
                                                DestBitSize - SrcBitsKept),
                          0, &Zext))
      return replaceInstUsesWith(Zext, Res);

    // We need to emit an AND to clear the high bits.
    Constant *C =
        ConstantInt::get(Res->getType(),
                         APInt::getLowBitsSet(DestBitSize, SrcBitsKept));
    return BinaryOperator::CreateAnd(Res, C);
  }

  // If this is a TRUNC followed by a ZEXT then we are dealing with integral
  // types and if the sizes are just right we can convert this into a logical
  // 'and' which will be much cheaper than the pair of casts.
  if (auto *CSrc = dyn_cast<TruncInst>(Src)) { // A->B->C cast
    // TODO: Subsume this into EvaluateInDifferentType.

    // Get the sizes of the types involved. We know that the intermediate type
    // will be smaller than A or C, but don't know the relation between A and C.
    Value *A = CSrc->getOperand(0);
    unsigned SrcSize = A->getType()->getScalarSizeInBits();
    unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
    unsigned DstSize = DestTy->getScalarSizeInBits();
    // If we're actually extending zero bits, then if
    //   SrcSize <  DstSize: zext(a & mask)
    //   SrcSize == DstSize: a & mask
    //   SrcSize  > DstSize: trunc(a) & mask
    if (SrcSize < DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
      Value *And = Builder.CreateAnd(A, AndConst, CSrc->getName() + ".mask");
      return new ZExtInst(And, DestTy);
    }

    if (SrcSize == DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
                                                           AndValue));
    }
    if (SrcSize > DstSize) {
      Value *Trunc = Builder.CreateTrunc(A, DestTy);
      APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
      return BinaryOperator::CreateAnd(Trunc,
                                       ConstantInt::get(Trunc->getType(),
                                                        AndValue));
    }
  }

  if (auto *Cmp = dyn_cast<ICmpInst>(Src))
    return transformZExtICmp(Cmp, Zext);

  // zext(trunc(X) & C) -> (X & zext(C)).
  Constant *C;
  Value *X;
  if (match(Src, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Constant(C)))) &&
      X->getType() == DestTy)
    return BinaryOperator::CreateAnd(X, Builder.CreateZExt(C, DestTy));

  // zext((trunc(X) & C) ^ C) -> ((X & zext(C)) ^ zext(C)).
  Value *And;
  if (match(Src, m_OneUse(m_Xor(m_Value(And), m_Constant(C)))) &&
      match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) &&
      X->getType() == DestTy) {
    Value *ZC = Builder.CreateZExt(C, DestTy);
    return BinaryOperator::CreateXor(Builder.CreateAnd(X, ZC), ZC);
  }

  // If we are truncating, masking, and then zexting back to the original type,
  // that's just a mask. This is not handled by canEvaluateZExtd if the
  // intermediate values have extra uses. This could be generalized further for
  // a non-constant mask operand.
  // zext (and (trunc X), C) --> and X, (zext C)
  if (match(Src, m_And(m_Trunc(m_Value(X)), m_Constant(C))) &&
      X->getType() == DestTy) {
    Value *ZextC = Builder.CreateZExt(C, DestTy);
    return BinaryOperator::CreateAnd(X, ZextC);
  }

  if (match(Src, m_VScale())) {
    if (Zext.getFunction() &&
        Zext.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
      Attribute Attr =
          Zext.getFunction()->getFnAttribute(Attribute::VScaleRange);
      if (std::optional<unsigned> MaxVScale = Attr.getVScaleRangeMax()) {
        unsigned TypeWidth = Src->getType()->getScalarSizeInBits();
        if (Log2_32(*MaxVScale) < TypeWidth) {
          Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1));
          return replaceInstUsesWith(Zext, VScale);
        }
      }
    }
  }

  if (!Zext.hasNonNeg()) {
    // If this zero extend is only used by a shift, add nneg flag.
    if (Zext.hasOneUse() &&
        SrcTy->getScalarSizeInBits() >
            Log2_64_Ceil(DestTy->getScalarSizeInBits()) &&
        match(Zext.user_back(), m_Shift(m_Value(), m_Specific(&Zext)))) {
      Zext.setNonNeg();
      return &Zext;
    }

    if (isKnownNonNegative(Src, SQ.getWithInstruction(&Zext))) {
      Zext.setNonNeg();
      return &Zext;
    }
  }

  return nullptr;
}
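
// Illustrative example of the trunc-then-zext fold above (the
// SrcSize == DstSize case):
//   %t = trunc i32 %a to i16
//   %z = zext i16 %t to i32
// becomes
//   %z = and i32 %a, 65535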

/// Transform (sext icmp) to bitwise / integer operations to eliminate the icmp.
Instruction *InstCombinerImpl::transformSExtICmp(ICmpInst *Cmp,
                                                 SExtInst &Sext) {
  Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);
  ICmpInst::Predicate Pred = Cmp->getPredicate();

  // Don't bother if Op1 isn't of vector or integer type.
  if (!Op1->getType()->isIntOrIntVectorTy())
    return nullptr;

  if (Pred == ICmpInst::ICMP_SLT && match(Op1, m_ZeroInt())) {
    // sext (x <s 0) --> ashr x, 31 (all ones if negative)
    Value *Sh = ConstantInt::get(Op0->getType(),
                                 Op0->getType()->getScalarSizeInBits() - 1);
    Value *In = Builder.CreateAShr(Op0, Sh, Op0->getName() + ".lobit");
    if (In->getType() != Sext.getType())
      In = Builder.CreateIntCast(In, Sext.getType(), true /*SExt*/);

    return replaceInstUsesWith(Sext, In);
  }

  if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
    // If we know that only one bit of the LHS of the icmp can be set and we
    // have an equality comparison with zero or a power of 2, we can transform
    // the icmp and sext into bitwise/integer operations.
    if (Cmp->hasOneUse() && Cmp->isEquality() &&
        (Op1C->isZero() || Op1C->getValue().isPowerOf2())) {
      KnownBits Known = computeKnownBits(Op0, 0, &Sext);

      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2()) {
        Value *In = Cmp->getOperand(0);

        // If the icmp tests for a known zero bit we can constant fold it.
        if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) {
          Value *V = Pred == ICmpInst::ICMP_NE
                         ? ConstantInt::getAllOnesValue(Sext.getType())
                         : ConstantInt::getNullValue(Sext.getType());
          return replaceInstUsesWith(Sext, V);
        }

        if (!Op1C->isZero() == (Pred == ICmpInst::ICMP_NE)) {
          // sext ((x & 2^n) == 0)   -> (x >> n) - 1
          // sext ((x & 2^n) != 2^n) -> (x >> n) - 1
          unsigned ShiftAmt = KnownZeroMask.countr_zero();
          // Perform a right shift to place the desired bit in the LSB.
          if (ShiftAmt)
            In = Builder.CreateLShr(In,
                                    ConstantInt::get(In->getType(), ShiftAmt));

          // At this point "In" is either 1 or 0. Subtract 1 to turn
          // {1, 0} -> {0, -1}.
          In = Builder.CreateAdd(In,
                                 ConstantInt::getAllOnesValue(In->getType()),
                                 "sext");
        } else {
          // sext ((x & 2^n) != 0)   -> (x << bitwidth-n) a>> bitwidth-1
          // sext ((x & 2^n) == 2^n) -> (x << bitwidth-n) a>> bitwidth-1
          unsigned ShiftAmt = KnownZeroMask.countl_zero();
          // Perform a left shift to place the desired bit in the MSB.
          if (ShiftAmt)
            In = Builder.CreateShl(In,
                                   ConstantInt::get(In->getType(), ShiftAmt));

          // Distribute the bit over the whole bit width.
          In = Builder.CreateAShr(In,
                                  ConstantInt::get(In->getType(),
                                                   KnownZeroMask.getBitWidth() - 1),
                                  "sext");
        }

        if (Sext.getType() == In->getType())
          return replaceInstUsesWith(Sext, In);
        return CastInst::CreateIntegerCast(In, Sext.getType(), true /*SExt*/);
      }
    }
  }

  return nullptr;
}
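
// Worked example for the MSB-splat case above: if only bit 2 of %x can be set,
//   %a = and i32 %x, 4
//   %c = icmp ne i32 %a, 0
//   %s = sext i1 %c to i32
// becomes
//   %shl = shl i32 %x, 29   ; move bit 2 to the sign bit (31 - 2 = 29)
//   %s = ashr i32 %shl, 31  ; splat it across all 32 bits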

/// Return true if we can take the specified value and return it as type Ty
/// without inserting any new casts and without changing the value of the
/// common low bits. This is used by code that tries to promote integer
/// operations to a wider type, which will allow us to eliminate the extension.
///
/// This function works on both vectors and scalars.
///
static bool canEvaluateSExtd(Value *V, Type *Ty) {
  assert(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
         "Can't sign extend type to a smaller type");
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  switch (I->getOpcode()) {
  case Instruction::SExt:  // sext(sext(x)) -> sext(x)
  case Instruction::ZExt:  // sext(zext(x)) -> zext(x)
  case Instruction::Trunc: // sext(trunc(x)) -> trunc(x) or sext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    // These operators can all arbitrarily be extended if their inputs can.
    return canEvaluateSExtd(I->getOperand(0), Ty) &&
           canEvaluateSExtd(I->getOperand(1), Ty);

  // case Instruction::Shl:   TODO
  // case Instruction::LShr:  TODO

  case Instruction::Select:
    return canEvaluateSExtd(I->getOperand(1), Ty) &&
           canEvaluateSExtd(I->getOperand(2), Ty);

  case Instruction::PHI: {
    // We can change a phi if we can change all operands. Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateSExtd(IncValue, Ty))
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}

Instruction *InstCombinerImpl::visitSExt(SExtInst &Sext) {
  // If this sign extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this sext.
  if (Sext.hasOneUse() && isa<TruncInst>(Sext.user_back()))
    return nullptr;

  if (Instruction *I = commonCastTransforms(Sext))
    return I;

  Value *Src = Sext.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = Sext.getType();
  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
  unsigned DestBitSize = DestTy->getScalarSizeInBits();

  // If the value being extended is zero or positive, use a zext instead.
  if (isKnownNonNegative(Src, SQ.getWithInstruction(&Sext))) {
    auto CI = CastInst::Create(Instruction::ZExt, Src, DestTy);
    CI->setNonNeg(true);
    return CI;
  }

  // Try to extend the entire expression tree to the wide destination type.
  if (shouldChangeType(SrcTy, DestTy) && canEvaluateSExtd(Src, DestTy)) {
    // Okay, we can transform this! Insert the new expression now.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid sign extend: "
               << Sext << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, true);
    assert(Res->getType() == DestTy);

    // If the high bits are already filled with sign bit, just replace this
    // cast with the result.
    if (ComputeNumSignBits(Res, 0, &Sext) > DestBitSize - SrcBitSize)
      return replaceInstUsesWith(Sext, Res);

    // We need to emit a shl + ashr to do the sign extend.
    Value *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
    return BinaryOperator::CreateAShr(Builder.CreateShl(Res, ShAmt, "sext"),
                                      ShAmt);
  }

  Value *X;
  if (match(Src, m_Trunc(m_Value(X)))) {
    // If the input has more sign bits than bits truncated, then convert
    // directly to final type.
    unsigned XBitSize = X->getType()->getScalarSizeInBits();
    if (ComputeNumSignBits(X, 0, &Sext) > XBitSize - SrcBitSize)
      return CastInst::CreateIntegerCast(X, DestTy, /* isSigned */ true);

    // If input is a trunc from the destination type, then convert into shifts.
    if (Src->hasOneUse() && X->getType() == DestTy) {
      // sext (trunc X) --> ashr (shl X, C), C
      Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
      return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShAmt), ShAmt);
    }

    // If we are replacing shifted-in high zero bits with sign bits, convert
    // the logical shift to an arithmetic shift and eliminate the cast to the
    // intermediate type:
    // sext (trunc (lshr Y, C)) --> sext/trunc (ashr Y, C)
    Value *Y;
    if (Src->hasOneUse() &&
        match(X, m_LShr(m_Value(Y),
                        m_SpecificIntAllowUndef(XBitSize - SrcBitSize)))) {
      Value *Ashr = Builder.CreateAShr(Y, XBitSize - SrcBitSize);
      return CastInst::CreateIntegerCast(Ashr, DestTy, /* isSigned */ true);
    }
  }

  if (auto *Cmp = dyn_cast<ICmpInst>(Src))
    return transformSExtICmp(Cmp, Sext);

  // If the input is a shl/ashr pair by the same constant, then this is a sign
  // extension from a smaller value. If we could trust arbitrary bitwidth
  // integers, we could turn this into a truncate to the smaller bit width and
  // then use a sext for the whole extension. Since we don't, look deeper and
  // check for a truncate. If the source and dest are the same type, eliminate
  // the trunc and extend and just do shifts. For example, turn:
  //   %a = trunc i32 %i to i8
  //   %b = shl i8 %a, C
  //   %c = ashr i8 %b, C
  //   %d = sext i8 %c to i32
  // into:
  //   %a = shl i32 %i, 32-(8-C)
  //   %d = ashr i32 %a, 32-(8-C)
  Value *A = nullptr;
  // TODO: Eventually this could be subsumed by EvaluateInDifferentType.
  Constant *BA = nullptr, *CA = nullptr;
  if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_Constant(BA)),
                        m_ImmConstant(CA))) &&
      BA->isElementWiseEqual(CA) && A->getType() == DestTy) {
    Constant *WideCurrShAmt =
        ConstantFoldCastOperand(Instruction::SExt, CA, DestTy, DL);
    assert(WideCurrShAmt && "Constant folding of ImmConstant cannot fail");
    Constant *NumLowbitsLeft = ConstantExpr::getSub(
        ConstantInt::get(DestTy, SrcTy->getScalarSizeInBits()), WideCurrShAmt);
    Constant *NewShAmt = ConstantExpr::getSub(
        ConstantInt::get(DestTy, DestTy->getScalarSizeInBits()),
        NumLowbitsLeft);
    NewShAmt =
        Constant::mergeUndefsWith(Constant::mergeUndefsWith(NewShAmt, BA), CA);
    A = Builder.CreateShl(A, NewShAmt, Sext.getName());
    return BinaryOperator::CreateAShr(A, NewShAmt);
  }

  // Splatting a bit of constant-index across a value:
  // sext (ashr (trunc iN X to iM), M-1) to iN --> ashr (shl X, N-M), N-1
  // If the dest type is different, use a cast (adjust use check).
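  // For instance (illustrative, N = 32, M = 8):
  //   sext (ashr (trunc i32 %X to i8), 7) to i32 --> ashr (shl %X, 24), 31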
  if (match(Src, m_OneUse(m_AShr(m_Trunc(m_Value(X)),
                                 m_SpecificInt(SrcBitSize - 1))))) {
    Type *XTy = X->getType();
    unsigned XBitSize = XTy->getScalarSizeInBits();
    Constant *ShlAmtC = ConstantInt::get(XTy, XBitSize - SrcBitSize);
    Constant *AshrAmtC = ConstantInt::get(XTy, XBitSize - 1);
    if (XTy == DestTy)
      return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShlAmtC),
                                        AshrAmtC);
    if (cast<BinaryOperator>(Src)->getOperand(0)->hasOneUse()) {
      Value *Ashr = Builder.CreateAShr(Builder.CreateShl(X, ShlAmtC), AshrAmtC);
      return CastInst::CreateIntegerCast(Ashr, DestTy, /* isSigned */ true);
    }
  }

  if (match(Src, m_VScale())) {
    if (Sext.getFunction() &&
        Sext.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
      Attribute Attr =
          Sext.getFunction()->getFnAttribute(Attribute::VScaleRange);
      if (std::optional<unsigned> MaxVScale = Attr.getVScaleRangeMax()) {
        if (Log2_32(*MaxVScale) < (SrcBitSize - 1)) {
          Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1));
          return replaceInstUsesWith(Sext, VScale);
        }
      }
    }
  }

  return nullptr;
}

/// Return true if the specified floating-point constant fits in the specified
/// FP type without changing its value.
static bool fitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
  bool losesInfo;
  APFloat F = CFP->getValueAPF();
  (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
  return !losesInfo;
}

static Type *shrinkFPConstant(ConstantFP *CFP, bool PreferBFloat) {
  if (CFP->getType() == Type::getPPC_FP128Ty(CFP->getContext()))
    return nullptr; // No constant folding of this.
  // See if the value can be truncated to bfloat and then reextended.
  if (PreferBFloat && fitsInFPType(CFP, APFloat::BFloat()))
    return Type::getBFloatTy(CFP->getContext());
  // See if the value can be truncated to half and then reextended.
  if (!PreferBFloat && fitsInFPType(CFP, APFloat::IEEEhalf()))
    return Type::getHalfTy(CFP->getContext());
  // See if the value can be truncated to float and then reextended.
  if (fitsInFPType(CFP, APFloat::IEEEsingle()))
    return Type::getFloatTy(CFP->getContext());
  if (CFP->getType()->isDoubleTy())
    return nullptr; // Won't shrink.
  if (fitsInFPType(CFP, APFloat::IEEEdouble()))
    return Type::getDoubleTy(CFP->getContext());
  // Don't try to shrink to various long double types.
  return nullptr;
}

// Determine if this is a vector of ConstantFPs and if so, return the minimal
// type we can safely truncate all elements to.
static Type *shrinkFPConstantVector(Value *V, bool PreferBFloat) {
  auto *CV = dyn_cast<Constant>(V);
  auto *CVVTy = dyn_cast<FixedVectorType>(V->getType());
  if (!CV || !CVVTy)
    return nullptr;

  Type *MinType = nullptr;

  unsigned NumElts = CVVTy->getNumElements();

  // For fixed-width vectors we find the minimal type by looking
  // through the constant values of the vector.
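  // E.g. (illustrative): every element of <2 x double> <double 2.0, double
  // 0.5> round-trips through half, so the minimal type is half; an element
  // such as 1.0 + 2^-52 cannot be shrunk past double, so we give up.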
  for (unsigned i = 0; i != NumElts; ++i) {
    if (isa<UndefValue>(CV->getAggregateElement(i)))
      continue;

    auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
    if (!CFP)
      return nullptr;

    Type *T = shrinkFPConstant(CFP, PreferBFloat);
    if (!T)
      return nullptr;

    // If we haven't found a type yet or this type has a larger mantissa than
    // our previous type, this is our new minimal type.
    if (!MinType || T->getFPMantissaWidth() > MinType->getFPMantissaWidth())
      MinType = T;
  }

  // Make a vector type from the minimal type.
  return MinType ? FixedVectorType::get(MinType, NumElts) : nullptr;
}

/// Find the minimum FP type we can safely truncate to.
static Type *getMinimumFPType(Value *V, bool PreferBFloat) {
  if (auto *FPExt = dyn_cast<FPExtInst>(V))
    return FPExt->getOperand(0)->getType();

  // If this value is a constant, return the constant in the smallest FP type
  // that can accurately represent it. This allows us to turn
  // (float)((double)X+2.0) into X+2.0f.
  if (auto *CFP = dyn_cast<ConstantFP>(V))
    if (Type *T = shrinkFPConstant(CFP, PreferBFloat))
      return T;

  // We can only correctly find a minimum type for a scalable vector when it is
  // a splat. For splats of constant values the fpext is wrapped up as a
  // ConstantExpr.
  if (auto *FPCExt = dyn_cast<ConstantExpr>(V))
    if (FPCExt->getOpcode() == Instruction::FPExt)
      return FPCExt->getOperand(0)->getType();

  // Try to shrink a vector of FP constants. This returns nullptr on scalable
  // vectors.
  if (Type *T = shrinkFPConstantVector(V, PreferBFloat))
    return T;

  return V->getType();
}

/// Return true if the cast from integer to FP can be proven to be exact for
/// all possible inputs (the conversion does not lose any precision).
static bool isKnownExactCastIntToFP(CastInst &I, InstCombinerImpl &IC) {
  CastInst::CastOps Opcode = I.getOpcode();
  assert((Opcode == CastInst::SIToFP || Opcode == CastInst::UIToFP) &&
         "Unexpected cast");
  Value *Src = I.getOperand(0);
  Type *SrcTy = Src->getType();
  Type *FPTy = I.getType();
  bool IsSigned = Opcode == Instruction::SIToFP;
  int SrcSize = (int)SrcTy->getScalarSizeInBits() - IsSigned;

  // Easy case - if the source integer type has fewer bits than the FP
  // mantissa, then the cast must be exact.
  int DestNumSigBits = FPTy->getFPMantissaWidth();
  if (SrcSize <= DestNumSigBits)
    return true;

  // Cast from FP to integer and back to FP is independent of the intermediate
  // integer width because of poison on overflow.
  Value *F;
  if (match(Src, m_FPToSI(m_Value(F))) || match(Src, m_FPToUI(m_Value(F)))) {
    // If this is uitofp (fptosi F), the source needs an extra bit to avoid
    // potential rounding of negative FP input values.
    int SrcNumSigBits = F->getType()->getFPMantissaWidth();
    if (!IsSigned && match(Src, m_FPToSI(m_Value())))
      SrcNumSigBits++;

    // [su]itofp (fpto[su]i F) --> exact if the source type has no more
    // significant bits than the destination (and make sure neither type is
    // weird -- ppc_fp128).
    if (SrcNumSigBits > 0 && DestNumSigBits > 0 &&
        SrcNumSigBits <= DestNumSigBits)
      return true;
  }

  // TODO:
  // Try harder to find if the source integer type has fewer significant bits.
  // For example, compute number of sign bits.
  KnownBits SrcKnown = IC.computeKnownBits(Src, 0, &I);
  int SigBits = (int)SrcTy->getScalarSizeInBits() -
                SrcKnown.countMinLeadingZeros() -
                SrcKnown.countMinTrailingZeros();
  if (SigBits <= DestNumSigBits)
    return true;

  return false;
}

Instruction *InstCombinerImpl::visitFPTrunc(FPTruncInst &FPT) {
  if (Instruction *I = commonCastTransforms(FPT))
    return I;

  // If we have fptrunc(OpI (fpextend x), (fpextend y)), we would like to
  // simplify this expression to avoid one or more of the trunc/extend
  // operations if we can do so without changing the numerical results.
  //
  // The exact manner in which the widths of the operands interact to limit
  // what we can and cannot do safely varies from operation to operation, and
  // is explained below in the various case statements.
  Type *Ty = FPT.getType();
  auto *BO = dyn_cast<BinaryOperator>(FPT.getOperand(0));
  if (BO && BO->hasOneUse()) {
    Type *LHSMinType =
        getMinimumFPType(BO->getOperand(0), /*PreferBFloat=*/Ty->isBFloatTy());
    Type *RHSMinType =
        getMinimumFPType(BO->getOperand(1), /*PreferBFloat=*/Ty->isBFloatTy());
    unsigned OpWidth = BO->getType()->getFPMantissaWidth();
    unsigned LHSWidth = LHSMinType->getFPMantissaWidth();
    unsigned RHSWidth = RHSMinType->getFPMantissaWidth();
    unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
    unsigned DstWidth = Ty->getFPMantissaWidth();
    switch (BO->getOpcode()) {
    default: break;
    case Instruction::FAdd:
    case Instruction::FSub:
      // For addition and subtraction, the infinitely precise result can
      // essentially be arbitrarily wide; proving that double rounding
      // will not occur because the result of OpI is exact (as we will for
      // FMul, for example) is hopeless. However, we *can* nonetheless
      // frequently know that double rounding cannot occur (or that it is
      // innocuous) by taking advantage of the specific structure of
      // infinitely-precise results that admit double rounding.
      //
      // Specifically, if OpWidth >= 2*DstWidth+1 and DstWidth is sufficient
      // to represent both sources, we can guarantee that the double
      // rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
      // "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
      // for proof of this fact).
      //
      // Note: Figueroa does not consider the case where DstFormat !=
      // SrcFormat. It's possible (likely even!) that this analysis
      // could be tightened for those cases, but they are rare (the main
      // case of interest here is (float)((double)float + float)).
      if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
        Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
        Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
        Instruction *RI = BinaryOperator::Create(BO->getOpcode(), LHS, RHS);
        RI->copyFastMathFlags(BO);
        return RI;
      }
      break;
    case Instruction::FMul:
      // For multiplication, the infinitely precise result has at most
      // LHSWidth + RHSWidth significant bits; if OpWidth is sufficient
      // that such a value can be exactly represented, then no double
      // rounding can possibly occur; we can safely perform the operation
      // in the destination format if it can represent both sources.
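      // Illustrative instance: (float)((double)a * (double)b) with float
      // operands has OpWidth = 53 >= LHSWidth + RHSWidth = 24 + 24 and
      // DstWidth = 24 >= SrcWidth = 24, so the product is exact in double
      // and can be evaluated directly as fmul float.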
      if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
        Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
        Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
        return BinaryOperator::CreateFMulFMF(LHS, RHS, BO);
      }
      break;
    case Instruction::FDiv:
      // For division, we again use the bound from Figueroa's
      // dissertation. I am entirely certain that this bound can be
      // tightened in the unbalanced operand case by an analysis based on
      // the diophantine rational approximation bound, but the well-known
      // condition used here is a good conservative first pass.
      // TODO: Tighten bound via rigorous analysis of the unbalanced case.
      if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) {
        Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
        Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
        return BinaryOperator::CreateFDivFMF(LHS, RHS, BO);
      }
      break;
    case Instruction::FRem: {
      // Remainder is straightforward: it is always exact, so the type of
      // OpI doesn't enter into things at all. We simply evaluate in
      // whichever source type is larger, then convert to the destination
      // type.
      if (SrcWidth == OpWidth)
        break;
      Value *LHS, *RHS;
      if (LHSWidth == SrcWidth) {
        LHS = Builder.CreateFPTrunc(BO->getOperand(0), LHSMinType);
        RHS = Builder.CreateFPTrunc(BO->getOperand(1), LHSMinType);
      } else {
        LHS = Builder.CreateFPTrunc(BO->getOperand(0), RHSMinType);
        RHS = Builder.CreateFPTrunc(BO->getOperand(1), RHSMinType);
      }

      Value *ExactResult = Builder.CreateFRemFMF(LHS, RHS, BO);
      return CastInst::CreateFPCast(ExactResult, Ty);
    }
    }
  }

  // (fptrunc (fneg x)) -> (fneg (fptrunc x))
  Value *X;
  Instruction *Op = dyn_cast<Instruction>(FPT.getOperand(0));
  if (Op && Op->hasOneUse()) {
    // FIXME: The FMF should propagate from the fptrunc, not the source op.
    IRBuilder<>::FastMathFlagGuard FMFG(Builder);
    if (isa<FPMathOperator>(Op))
      Builder.setFastMathFlags(Op->getFastMathFlags());

    if (match(Op, m_FNeg(m_Value(X)))) {
      Value *InnerTrunc = Builder.CreateFPTrunc(X, Ty);

      return UnaryOperator::CreateFNegFMF(InnerTrunc, Op);
    }

    // If we are truncating a select that has an extended operand, we can
    // narrow the other operand and do the select as a narrow op.
    Value *Cond, *X, *Y;
    if (match(Op, m_Select(m_Value(Cond), m_FPExt(m_Value(X)), m_Value(Y))) &&
        X->getType() == Ty) {
      // fptrunc (select Cond, (fpext X), Y) --> select Cond, X, (fptrunc Y)
      Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
      Value *Sel = Builder.CreateSelect(Cond, X, NarrowY, "narrow.sel", Op);
      return replaceInstUsesWith(FPT, Sel);
    }
    if (match(Op, m_Select(m_Value(Cond), m_Value(Y), m_FPExt(m_Value(X)))) &&
        X->getType() == Ty) {
      // fptrunc (select Cond, Y, (fpext X)) --> select Cond, (fptrunc Y), X
      Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
      Value *Sel = Builder.CreateSelect(Cond, NarrowY, X, "narrow.sel", Op);
      return replaceInstUsesWith(FPT, Sel);
    }
  }

  if (auto *II = dyn_cast<IntrinsicInst>(FPT.getOperand(0))) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::ceil:
    case Intrinsic::fabs:
    case Intrinsic::floor:
    case Intrinsic::nearbyint:
    case Intrinsic::rint:
    case Intrinsic::round:
    case Intrinsic::roundeven:
    case Intrinsic::trunc: {
      Value *Src = II->getArgOperand(0);
      if (!Src->hasOneUse())
        break;

      // Except for fabs, this transformation requires the input of the unary
      // FP operation to be itself an fpext from the type to which we're
      // truncating.
      if (II->getIntrinsicID() != Intrinsic::fabs) {
        FPExtInst *FPExtSrc = dyn_cast<FPExtInst>(Src);
        if (!FPExtSrc || FPExtSrc->getSrcTy() != Ty)
          break;
      }

      // Do unary FP operation on smaller type.
      // (fptrunc (fabs x)) -> (fabs (fptrunc x))
      Value *InnerTrunc = Builder.CreateFPTrunc(Src, Ty);
      Function *Overload = Intrinsic::getDeclaration(FPT.getModule(),
                                                     II->getIntrinsicID(), Ty);
      SmallVector<OperandBundleDef, 1> OpBundles;
      II->getOperandBundlesAsDefs(OpBundles);
      CallInst *NewCI =
          CallInst::Create(Overload, {InnerTrunc}, OpBundles, II->getName());
      NewCI->copyFastMathFlags(II);
      return NewCI;
    }
    }
  }

  if (Instruction *I = shrinkInsertElt(FPT, Builder))
    return I;

  Value *Src = FPT.getOperand(0);
  if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
    auto *FPCast = cast<CastInst>(Src);
    if (isKnownExactCastIntToFP(*FPCast, *this))
      return CastInst::Create(FPCast->getOpcode(), FPCast->getOperand(0), Ty);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitFPExt(CastInst &FPExt) {
  // If the source operand is a cast from integer to FP and known exact, then
  // cast the integer operand directly to the destination type.
  Type *Ty = FPExt.getType();
  Value *Src = FPExt.getOperand(0);
  if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
    auto *FPCast = cast<CastInst>(Src);
    if (isKnownExactCastIntToFP(*FPCast, *this))
      return CastInst::Create(FPCast->getOpcode(), FPCast->getOperand(0), Ty);
  }

  return commonCastTransforms(FPExt);
}

/// fpto{s/u}i({u/s}itofp(X)) --> X or zext(X) or sext(X) or trunc(X)
/// This is safe if the intermediate type has enough bits in its mantissa to
/// accurately represent all values of X. For example, this won't work with
/// i64 -> float -> i64.
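///
/// An illustrative positive case: fptosi (sitofp i16 %x to float) to i32
/// becomes sext i16 %x to i32, since float's 24-bit significand represents
/// every i16 value exactly.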
Instruction *InstCombinerImpl::foldItoFPtoI(CastInst &FI) {
  if (!isa<UIToFPInst>(FI.getOperand(0)) && !isa<SIToFPInst>(FI.getOperand(0)))
    return nullptr;

  auto *OpI = cast<CastInst>(FI.getOperand(0));
  Value *X = OpI->getOperand(0);
  Type *XType = X->getType();
  Type *DestType = FI.getType();
  bool IsOutputSigned = isa<FPToSIInst>(FI);

  // Since we can assume the conversion won't overflow, our decision as to
  // whether the input will fit in the float should depend on the minimum
  // of the input range and output range.

  // This means this is also safe for a signed input and unsigned output, since
  // a negative input would lead to undefined behavior.
  if (!isKnownExactCastIntToFP(*OpI, *this)) {
    // The first cast may not round exactly based on the source integer width
    // and FP width, but the overflow UB rules can still allow this to fold.
    // If the destination type is narrow, that means the intermediate FP value
    // must be large enough to hold the source value exactly.
    // For example, (uint8_t)(float)(uint32_t)16777217 is undefined behavior.
    int OutputSize = (int)DestType->getScalarSizeInBits();
    if (OutputSize > OpI->getType()->getFPMantissaWidth())
      return nullptr;
  }

  if (DestType->getScalarSizeInBits() > XType->getScalarSizeInBits()) {
    bool IsInputSigned = isa<SIToFPInst>(OpI);
    if (IsInputSigned && IsOutputSigned)
      return new SExtInst(X, DestType);
    return new ZExtInst(X, DestType);
  }
  if (DestType->getScalarSizeInBits() < XType->getScalarSizeInBits())
    return new TruncInst(X, DestType);

  assert(XType == DestType && "Unexpected types for int to FP to int casts");
  return replaceInstUsesWith(FI, X);
}

Instruction *InstCombinerImpl::visitFPToUI(FPToUIInst &FI) {
  if (Instruction *I = foldItoFPtoI(FI))
    return I;

  return commonCastTransforms(FI);
}

Instruction *InstCombinerImpl::visitFPToSI(FPToSIInst &FI) {
  if (Instruction *I = foldItoFPtoI(FI))
    return I;

  return commonCastTransforms(FI);
}

Instruction *InstCombinerImpl::visitUIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitSIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitIntToPtr(IntToPtrInst &CI) {
  // If the source integer type is not the intptr_t type for this target, do a
  // trunc or zext to the intptr_t type, then inttoptr of it. This allows the
  // cast to be exposed to other transforms.
  unsigned AS = CI.getAddressSpace();
  if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
      DL.getPointerSizeInBits(AS)) {
    Type *Ty = CI.getOperand(0)->getType()->getWithNewType(
        DL.getIntPtrType(CI.getContext(), AS));
    Value *P = Builder.CreateZExtOrTrunc(CI.getOperand(0), Ty);
    return new IntToPtrInst(P, CI.getType());
  }

  if (Instruction *I = commonCastTransforms(CI))
    return I;

  return nullptr;
}

Instruction *InstCombinerImpl::visitPtrToInt(PtrToIntInst &CI) {
  // If the destination integer type is not the intptr_t type for this target,
  // do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast
  // to be exposed to other transforms.
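  // E.g. (illustrative, on a target with 64-bit pointers):
  //   ptrtoint ptr %p to i32
  //     --> %w = ptrtoint ptr %p to i64
  //         %r = trunc i64 %w to i32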
  Value *SrcOp = CI.getPointerOperand();
  Type *SrcTy = SrcOp->getType();
  Type *Ty = CI.getType();
  unsigned AS = CI.getPointerAddressSpace();
  unsigned TySize = Ty->getScalarSizeInBits();
  unsigned PtrSize = DL.getPointerSizeInBits(AS);
  if (TySize != PtrSize) {
    Type *IntPtrTy =
        SrcTy->getWithNewType(DL.getIntPtrType(CI.getContext(), AS));
    Value *P = Builder.CreatePtrToInt(SrcOp, IntPtrTy);
    return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false);
  }

  // (ptrtoint (ptrmask P, M))
  //    -> (and (ptrtoint P), M)
  // This is generally beneficial as `and` is better supported than `ptrmask`.
  Value *Ptr, *Mask;
  if (match(SrcOp, m_OneUse(m_Intrinsic<Intrinsic::ptrmask>(m_Value(Ptr),
                                                            m_Value(Mask)))) &&
      Mask->getType() == Ty)
    return BinaryOperator::CreateAnd(Builder.CreatePtrToInt(Ptr, Ty), Mask);

  if (auto *GEP = dyn_cast<GetElementPtrInst>(SrcOp)) {
    // Fold ptrtoint(gep null, x) to multiply + constant if the GEP has one use.
    // While this can increase the number of instructions it doesn't actually
    // increase the overall complexity since the arithmetic is just part of
    // the GEP otherwise.
    if (GEP->hasOneUse() &&
        isa<ConstantPointerNull>(GEP->getPointerOperand())) {
      return replaceInstUsesWith(CI,
                                 Builder.CreateIntCast(EmitGEPOffset(GEP), Ty,
                                                       /*isSigned=*/false));
    }
  }

  Value *Vec, *Scalar, *Index;
  if (match(SrcOp, m_OneUse(m_InsertElt(m_IntToPtr(m_Value(Vec)),
                                        m_Value(Scalar), m_Value(Index)))) &&
      Vec->getType() == Ty) {
    assert(Vec->getType()->getScalarSizeInBits() == PtrSize && "Wrong type");
    // Convert the scalar to int followed by insert to eliminate one cast:
    // p2i (ins (i2p Vec), Scalar, Index) --> ins Vec, (p2i Scalar), Index
    Value *NewCast = Builder.CreatePtrToInt(Scalar, Ty->getScalarType());
    return InsertElementInst::Create(Vec, NewCast, Index);
  }

  return commonCastTransforms(CI);
}

/// This input value (which is known to have vector type) is being zero
/// extended or truncated to the specified vector type. Since the zext/trunc
/// is done using an integer type, we have a (bitcast(cast(bitcast))) pattern;
/// endianness will impact which end of the vector is extended or truncated.
///
/// A vector is always stored with index 0 at the lowest address, which
/// corresponds to the most significant bits for a big endian stored integer
/// and the least significant bits for little endian. A trunc/zext of an
/// integer impacts the big end of the integer. Thus, we need to add/remove
/// elements at the front of the vector for big endian targets, and the back
/// of the vector for little endian targets.
///
/// Try to replace it with a shuffle (and vector/vector bitcast) if possible.
///
/// The source and destination vector types may have different element types.
static Instruction *
optimizeVectorResizeWithIntegerBitCasts(Value *InVal, VectorType *DestTy,
                                        InstCombinerImpl &IC) {
  // We can only do this optimization if the output is a multiple of the input
  // element size, or the input is a multiple of the output element size.
  // Convert the input type to have the same element type as the output.
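  // Illustrative shapes (little endian): <4 x i32> -> <2 x i32> keeps the
  // front (least significant) elements, while <2 x i32> -> <4 x i32> appends
  // zero elements at the back.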
  VectorType *SrcTy = cast<VectorType>(InVal->getType());

  if (SrcTy->getElementType() != DestTy->getElementType()) {
    // The input types don't need to be identical, but for now they must be the
    // same size. There is no specific reason we couldn't handle things like
    // <4 x i16> -> <4 x i32> by bitcasting to <2 x i32> but haven't gotten
    // there yet.
    if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
        DestTy->getElementType()->getPrimitiveSizeInBits())
      return nullptr;

    SrcTy =
        FixedVectorType::get(DestTy->getElementType(),
                             cast<FixedVectorType>(SrcTy)->getNumElements());
    InVal = IC.Builder.CreateBitCast(InVal, SrcTy);
  }

  bool IsBigEndian = IC.getDataLayout().isBigEndian();
  unsigned SrcElts = cast<FixedVectorType>(SrcTy)->getNumElements();
  unsigned DestElts = cast<FixedVectorType>(DestTy)->getNumElements();

  assert(SrcElts != DestElts && "Element counts should be different.");

  // Now that the element types match, get the shuffle mask and RHS of the
  // shuffle to use, which depends on whether we're increasing or decreasing
  // the size of the input.
  auto ShuffleMaskStorage = llvm::to_vector<16>(llvm::seq<int>(0, SrcElts));
  ArrayRef<int> ShuffleMask;
  Value *V2;

  if (SrcElts > DestElts) {
    // If we're shrinking the number of elements (rewriting an integer
    // truncate), just shuffle in the elements corresponding to the least
    // significant bits from the input and use poison as the second shuffle
    // input.
    V2 = PoisonValue::get(SrcTy);
    // Make sure the shuffle mask selects the "least significant bits" by
    // keeping elements from the back of the src vector for big endian, and
    // from the front for little endian.
    ShuffleMask = ShuffleMaskStorage;
    if (IsBigEndian)
      ShuffleMask = ShuffleMask.take_back(DestElts);
    else
      ShuffleMask = ShuffleMask.take_front(DestElts);
  } else {
    // If we're increasing the number of elements (rewriting an integer zext),
    // shuffle in all of the elements from InVal. Fill the rest of the result
    // elements with zeros from a constant zero.
    V2 = Constant::getNullValue(SrcTy);
    // Use first elt from V2 when indicating zero in the shuffle mask.
    uint32_t NullElt = SrcElts;
    // Extend with null values in the "most significant bits" by adding
    // elements in front of the src vector for big endian, and at the back
    // for little endian.
    unsigned DeltaElts = DestElts - SrcElts;
    if (IsBigEndian)
      ShuffleMaskStorage.insert(ShuffleMaskStorage.begin(), DeltaElts, NullElt);
    else
      ShuffleMaskStorage.append(DeltaElts, NullElt);
    ShuffleMask = ShuffleMaskStorage;
  }

  return new ShuffleVectorInst(InVal, V2, ShuffleMask);
}

static bool isMultipleOfTypeSize(unsigned Value, Type *Ty) {
  return Value % Ty->getPrimitiveSizeInBits() == 0;
}

static unsigned getTypeSizeIndex(unsigned Value, Type *Ty) {
  return Value / Ty->getPrimitiveSizeInBits();
}

/// V is a value which is inserted into a vector of VecEltTy.
/// Look through the value to see if we can decompose it into
/// insertions into the vector. See the example in the comment for
/// optimizeIntegerToVectorInsertions for the pattern this handles.
/// The type of V is always a non-zero multiple of VecEltTy's size.
/// Shift is the number of bits between the lsb of V and the lsb of
/// the vector.
///
/// This returns false if the pattern can't be matched or true if it can,
/// filling in Elements with the elements found here.
static bool collectInsertionElements(Value *V, unsigned Shift,
                                     SmallVectorImpl<Value *> &Elements,
                                     Type *VecEltTy, bool isBigEndian) {
  assert(isMultipleOfTypeSize(Shift, VecEltTy) &&
         "Shift should be a multiple of the element type size");

  // Undef values never contribute useful bits to the result.
  if (isa<UndefValue>(V)) return true;

  // If we got down to a value of the right type, we win; try inserting it into
  // the right element.
  if (V->getType() == VecEltTy) {
    // Inserting null doesn't actually insert any elements.
    if (Constant *C = dyn_cast<Constant>(V))
      if (C->isNullValue())
        return true;

    unsigned ElementIndex = getTypeSizeIndex(Shift, VecEltTy);
    if (isBigEndian)
      ElementIndex = Elements.size() - ElementIndex - 1;

    // Fail if multiple elements are inserted into this slot.
    if (Elements[ElementIndex])
      return false;

    Elements[ElementIndex] = V;
    return true;
  }

  if (Constant *C = dyn_cast<Constant>(V)) {
    // Figure out the # elements this provides, and bitcast it or slice it up
    // as required.
    unsigned NumElts = getTypeSizeIndex(C->getType()->getPrimitiveSizeInBits(),
                                        VecEltTy);
    // If the constant is the size of a vector element, we just need to bitcast
    // it to the right type so it gets properly inserted.
    if (NumElts == 1)
      return collectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy),
                                      Shift, Elements, VecEltTy, isBigEndian);

    // Okay, this is a constant that covers multiple elements. Slice it up into
    // pieces and insert each element-sized piece into the vector.
    if (!isa<IntegerType>(C->getType()))
      C = ConstantExpr::getBitCast(C, IntegerType::get(V->getContext(),
                                   C->getType()->getPrimitiveSizeInBits()));
    unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
    Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);

    for (unsigned i = 0; i != NumElts; ++i) {
      unsigned ShiftI = Shift + i * ElementSize;
      Constant *Piece = ConstantFoldBinaryInstruction(
          Instruction::LShr, C, ConstantInt::get(C->getType(), ShiftI));
      if (!Piece)
        return false;

      Piece = ConstantExpr::getTrunc(Piece, ElementIntTy);
      if (!collectInsertionElements(Piece, ShiftI, Elements, VecEltTy,
                                    isBigEndian))
        return false;
    }
    return true;
  }

  if (!V->hasOneUse()) return false;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;
  switch (I->getOpcode()) {
  default: return false; // Unhandled case.
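  // The cases below are the building blocks of a manually assembled vector:
  // a scalar bitcast from the element type, a zext to widen it, a shl to
  // move it into position, and an or to merge the pieces.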
  case Instruction::BitCast:
    if (I->getOperand(0)->getType()->isVectorTy())
      return false;
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian);
  case Instruction::ZExt:
    if (!isMultipleOfTypeSize(
            I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
            VecEltTy))
      return false;
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian);
  case Instruction::Or:
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian) &&
           collectInsertionElements(I->getOperand(1), Shift, Elements, VecEltTy,
                                    isBigEndian);
  case Instruction::Shl: {
    // Must be shifting by a constant that is a multiple of the element size.
    ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
    if (!CI) return false;
    Shift += CI->getZExtValue();
    if (!isMultipleOfTypeSize(Shift, VecEltTy)) return false;
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian);
  }
  }
}

/// If the input is an 'or' instruction, we may be doing shifts and ors to
/// assemble the elements of the vector manually.
/// Try to rip the code out and replace it with insertelements. This is to
/// optimize code like this:
///
///    %tmp37 = bitcast float %inc to i32
///    %tmp38 = zext i32 %tmp37 to i64
///    %tmp31 = bitcast float %inc5 to i32
///    %tmp32 = zext i32 %tmp31 to i64
///    %tmp33 = shl i64 %tmp32, 32
///    %ins35 = or i64 %tmp33, %tmp38
///    %tmp43 = bitcast i64 %ins35 to <2 x float>
///
/// Into two insertelements that do "buildvector{%inc, %inc5}".
static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI,
                                                InstCombinerImpl &IC) {
  auto *DestVecTy = cast<FixedVectorType>(CI.getType());
  Value *IntInput = CI.getOperand(0);

  SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
  if (!collectInsertionElements(IntInput, 0, Elements,
                                DestVecTy->getElementType(),
                                IC.getDataLayout().isBigEndian()))
    return nullptr;

  // If we succeeded, we know that all of the elements are specified by
  // Elements or are zero if Elements has a null entry. Recast this as a set
  // of insertions.
  Value *Result = Constant::getNullValue(CI.getType());
  for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
    if (!Elements[i]) continue; // Unset element.

    Result = IC.Builder.CreateInsertElement(Result, Elements[i],
                                            IC.Builder.getInt32(i));
  }

  return Result;
}

/// Canonicalize scalar bitcasts of extracted elements into a bitcast of the
/// vector followed by extract element. The backend tends to handle bitcasts of
/// vectors better than bitcasts of scalars because vector registers are
/// usually not type-specific like scalar integer or scalar floating-point.
static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast,
                                              InstCombinerImpl &IC) {
  Value *VecOp, *Index;
  if (!match(BitCast.getOperand(0),
             m_OneUse(m_ExtractElt(m_Value(VecOp), m_Value(Index)))))
    return nullptr;

  // The bitcast must be to a vectorizable type, otherwise we can't make a new
  // type to extract from.
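  // E.g. (illustrative):
  //   bitcast (extractelement <2 x float> %V, i64 0) to i32
  //     --> extractelement (bitcast %V to <2 x i32>), i64 0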
  Type *DestType = BitCast.getType();
  VectorType *VecType = cast<VectorType>(VecOp->getType());
  if (VectorType::isValidElementType(DestType)) {
    auto *NewVecType = VectorType::get(DestType, VecType);
    auto *NewBC = IC.Builder.CreateBitCast(VecOp, NewVecType, "bc");
    return ExtractElementInst::Create(NewBC, Index);
  }

  // Only handle the case where DestType is a vector, to avoid the inverse
  // transform in visitBitCast:
  // bitcast (extractelement <1 x elt>, dest) -> bitcast(<1 x elt>, dest)
  auto *FixedVType = dyn_cast<FixedVectorType>(VecType);
  if (DestType->isVectorTy() && FixedVType && FixedVType->getNumElements() == 1)
    return CastInst::Create(Instruction::BitCast, VecOp, DestType);

  return nullptr;
}

/// Change the type of a bitwise logic operation if we can eliminate a bitcast.
static Instruction *foldBitCastBitwiseLogic(BitCastInst &BitCast,
                                            InstCombiner::BuilderTy &Builder) {
  Type *DestTy = BitCast.getType();
  BinaryOperator *BO;

  if (!match(BitCast.getOperand(0), m_OneUse(m_BinOp(BO))) ||
      !BO->isBitwiseLogicOp())
    return nullptr;

  // FIXME: This transform is restricted to vector types to avoid backend
  // problems caused by creating potentially illegal operations. If a fix-up is
  // added to handle that situation, we can remove this check.
  if (!DestTy->isVectorTy() || !BO->getType()->isVectorTy())
    return nullptr;

  if (DestTy->isFPOrFPVectorTy()) {
    Value *X, *Y;
    // bitcast(logic(bitcast(X), bitcast(Y))) -> bitcast'(logic(bitcast'(X), Y))
    if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) &&
        match(BO->getOperand(1), m_OneUse(m_BitCast(m_Value(Y))))) {
      if (X->getType()->isFPOrFPVectorTy() &&
          Y->getType()->isIntOrIntVectorTy()) {
        Value *CastedOp =
            Builder.CreateBitCast(BO->getOperand(0), Y->getType());
        Value *NewBO = Builder.CreateBinOp(BO->getOpcode(), CastedOp, Y);
        return CastInst::CreateBitOrPointerCast(NewBO, DestTy);
      }
      if (X->getType()->isIntOrIntVectorTy() &&
          Y->getType()->isFPOrFPVectorTy()) {
        Value *CastedOp =
            Builder.CreateBitCast(BO->getOperand(1), X->getType());
        Value *NewBO = Builder.CreateBinOp(BO->getOpcode(), CastedOp, X);
        return CastInst::CreateBitOrPointerCast(NewBO, DestTy);
      }
    }
    return nullptr;
  }

  if (!DestTy->isIntOrIntVectorTy())
    return nullptr;

  Value *X;
  if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) &&
      X->getType() == DestTy && !isa<Constant>(X)) {
    // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
    Value *CastedOp1 = Builder.CreateBitCast(BO->getOperand(1), DestTy);
    return BinaryOperator::Create(BO->getOpcode(), X, CastedOp1);
  }

  if (match(BO->getOperand(1), m_OneUse(m_BitCast(m_Value(X)))) &&
      X->getType() == DestTy && !isa<Constant>(X)) {
    // bitcast(logic(Y, bitcast(X))) --> logic'(bitcast(Y), X)
    Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
    return BinaryOperator::Create(BO->getOpcode(), CastedOp0, X);
  }

  // Canonicalize vector bitcasts to come before vector bitwise logic with a
  // constant. This eases recognition of special constants for later ops.
  // Example:
  // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
  Constant *C;
  if (match(BO->getOperand(1), m_Constant(C))) {
    // bitcast (logic X, C) --> logic (bitcast X, C')
    Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
    Value *CastedC = Builder.CreateBitCast(C, DestTy);
    return BinaryOperator::Create(BO->getOpcode(), CastedOp0, CastedC);
  }

  return nullptr;
}

/// Change the type of a select if we can eliminate a bitcast.
static Instruction *foldBitCastSelect(BitCastInst &BitCast,
                                      InstCombiner::BuilderTy &Builder) {
  Value *Cond, *TVal, *FVal;
  if (!match(BitCast.getOperand(0),
             m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
    return nullptr;

  // A vector select must maintain the same number of elements in its operands.
  Type *CondTy = Cond->getType();
  Type *DestTy = BitCast.getType();
  if (auto *CondVTy = dyn_cast<VectorType>(CondTy))
    if (!DestTy->isVectorTy() ||
        CondVTy->getElementCount() !=
            cast<VectorType>(DestTy)->getElementCount())
      return nullptr;

  // FIXME: This transform is restricted from changing the select between
  // scalars and vectors to avoid backend problems caused by creating
  // potentially illegal operations. If a fix-up is added to handle that
  // situation, we can remove this check.
  if (DestTy->isVectorTy() != TVal->getType()->isVectorTy())
    return nullptr;

  auto *Sel = cast<Instruction>(BitCast.getOperand(0));
  Value *X;
  if (match(TVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
      !isa<Constant>(X)) {
    // bitcast(select(Cond, bitcast(X), Y)) --> select'(Cond, X, bitcast(Y))
    Value *CastedVal = Builder.CreateBitCast(FVal, DestTy);
    return SelectInst::Create(Cond, X, CastedVal, "", nullptr, Sel);
  }

  if (match(FVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
      !isa<Constant>(X)) {
    // bitcast(select(Cond, Y, bitcast(X))) --> select'(Cond, bitcast(Y), X)
    Value *CastedVal = Builder.CreateBitCast(TVal, DestTy);
    return SelectInst::Create(Cond, CastedVal, X, "", nullptr, Sel);
  }

  return nullptr;
}

/// Check if all users of CI are StoreInsts.
static bool hasStoreUsersOnly(CastInst &CI) {
  for (User *U : CI.users()) {
    if (!isa<StoreInst>(U))
      return false;
  }
  return true;
}

/// This function handles the following case:
///
///     A -> B    cast
///     PHI
///     B -> A    cast
///
/// All the related PHI nodes can be replaced by new PHI nodes with type A.
/// The uses of \p CI can be changed to the new PHI node corresponding to \p PN.
Instruction *InstCombinerImpl::optimizeBitCastFromPhi(CastInst &CI,
                                                      PHINode *PN) {
  // BitCast used by Store can be handled in InstCombineLoadStoreAlloca.cpp.
  if (hasStoreUsersOnly(CI))
    return nullptr;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType();   // Type B
  Type *DestTy = CI.getType();    // Type A

  SmallVector<PHINode *, 4> PhiWorklist;
  SmallSetVector<PHINode *, 4> OldPhiNodes;

  // Find all of the A->B casts and PHI nodes.
  // We need to inspect all related PHI nodes, but PHIs can be cyclic, so
  // OldPhiNodes is used to track all known PHI nodes; before adding a new
  // PHI to PhiWorklist, it is checked against and added to OldPhiNodes first.
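  // Sketch of the accepted web (illustrative): constants, simple one-use
  // loads, and A->B bitcasts feed PHIs of type B, and each PHI is only used
  // by stores, B->A bitcasts, or other PHIs of the same web.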
  PhiWorklist.push_back(PN);
  OldPhiNodes.insert(PN);
  while (!PhiWorklist.empty()) {
    auto *OldPN = PhiWorklist.pop_back_val();
    for (Value *IncValue : OldPN->incoming_values()) {
      if (isa<Constant>(IncValue))
        continue;

      if (auto *LI = dyn_cast<LoadInst>(IncValue)) {
        // If there is a sequence of one or more load instructions, where each
        // loaded value is used as the address of a later load, a bitcast would
        // be necessary to change the value type; don't optimize that. For
        // simplicity we give up if the load address comes from another load.
        Value *Addr = LI->getOperand(0);
        if (Addr == &CI || isa<LoadInst>(Addr))
          return nullptr;
        // Don't transform "load <256 x i32>, <256 x i32>*" to
        // "load x86_amx, x86_amx*", because x86_amx* is invalid.
        // TODO: Remove this check when bitcast between vector and x86_amx
        // is replaced with a specific intrinsic.
        if (DestTy->isX86_AMXTy())
          return nullptr;
        if (LI->hasOneUse() && LI->isSimple())
          continue;
        // If a LoadInst has more than one use, changing the type of loaded
        // value may create another bitcast.
        return nullptr;
      }

      if (auto *PNode = dyn_cast<PHINode>(IncValue)) {
        if (OldPhiNodes.insert(PNode))
          PhiWorklist.push_back(PNode);
        continue;
      }

      auto *BCI = dyn_cast<BitCastInst>(IncValue);
      // We can't handle other instructions.
      if (!BCI)
        return nullptr;

      // Verify it's an A->B cast.
      Type *TyA = BCI->getOperand(0)->getType();
      Type *TyB = BCI->getType();
      if (TyA != DestTy || TyB != SrcTy)
        return nullptr;
    }
  }

  // Check that each user of each old PHI node is something that we can
  // rewrite, so that all of the old PHI nodes can be cleaned up afterwards.
  for (auto *OldPN : OldPhiNodes) {
    for (User *V : OldPN->users()) {
      if (auto *SI = dyn_cast<StoreInst>(V)) {
        if (!SI->isSimple() || SI->getOperand(0) != OldPN)
          return nullptr;
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        // Verify it's a B->A cast.
        Type *TyB = BCI->getOperand(0)->getType();
        Type *TyA = BCI->getType();
        if (TyA != DestTy || TyB != SrcTy)
          return nullptr;
      } else if (auto *PHI = dyn_cast<PHINode>(V)) {
        // As long as the user is another old PHI node, then even if we don't
        // rewrite it, the PHI web we're considering won't have any users
        // outside itself, so it'll be dead.
        if (!OldPhiNodes.contains(PHI))
          return nullptr;
      } else {
        return nullptr;
      }
    }
  }

  // For each old PHI node, create a corresponding new PHI node with type A.
  SmallDenseMap<PHINode *, PHINode *> NewPNodes;
  for (auto *OldPN : OldPhiNodes) {
    Builder.SetInsertPoint(OldPN);
    PHINode *NewPN = Builder.CreatePHI(DestTy, OldPN->getNumOperands());
    NewPNodes[OldPN] = NewPN;
  }

  // Fill in the operands of new PHI nodes.
  for (auto *OldPN : OldPhiNodes) {
    PHINode *NewPN = NewPNodes[OldPN];
    for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
      Value *V = OldPN->getOperand(j);
      Value *NewV = nullptr;
      if (auto *C = dyn_cast<Constant>(V)) {
        NewV = ConstantExpr::getBitCast(C, DestTy);
      } else if (auto *LI = dyn_cast<LoadInst>(V)) {
        // Explicitly perform load combine to make sure no opposing transform
        // can remove the bitcast in the meantime and trigger an infinite loop.
        Builder.SetInsertPoint(LI);
        NewV = combineLoadToNewType(*LI, DestTy);
        // Remove the old load and its use in the old phi, which itself becomes
        // dead once the whole transform finishes.
        replaceInstUsesWith(*LI, PoisonValue::get(LI->getType()));
        eraseInstFromFunction(*LI);
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        NewV = BCI->getOperand(0);
      } else if (auto *PrevPN = dyn_cast<PHINode>(V)) {
        NewV = NewPNodes[PrevPN];
      }
      assert(NewV);
      NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
    }
  }

  // Traverse all accumulated PHI nodes and process their users, which are
  // Stores and BitCasts. Without this processing, NewPHI nodes could be
  // replicated and could lead to extra moves generated after DeSSA.
  // If there is a store with type B, change it to type A.

  // Replace users of BitCast B->A with NewPHI. These will help
  // later to get rid of a closure formed by OldPHI nodes.
  Instruction *RetVal = nullptr;
  for (auto *OldPN : OldPhiNodes) {
    PHINode *NewPN = NewPNodes[OldPN];
    for (User *V : make_early_inc_range(OldPN->users())) {
      if (auto *SI = dyn_cast<StoreInst>(V)) {
        assert(SI->isSimple() && SI->getOperand(0) == OldPN);
        Builder.SetInsertPoint(SI);
        auto *NewBC =
            cast<BitCastInst>(Builder.CreateBitCast(NewPN, SrcTy));
        SI->setOperand(0, NewBC);
        Worklist.push(SI);
        assert(hasStoreUsersOnly(*NewBC));
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        Type *TyB = BCI->getOperand(0)->getType();
        Type *TyA = BCI->getType();
        assert(TyA == DestTy && TyB == SrcTy);
        (void) TyA;
        (void) TyB;
        Instruction *I = replaceInstUsesWith(*BCI, NewPN);
        if (BCI == &CI)
          RetVal = I;
      } else if (auto *PHI = dyn_cast<PHINode>(V)) {
        assert(OldPhiNodes.contains(PHI));
        (void) PHI;
      } else {
        llvm_unreachable("all uses should be handled");
      }
    }
  }

  return RetVal;
}

Instruction *InstCombinerImpl::visitBitCast(BitCastInst &CI) {
  // If the operands are integer typed then apply the integer transforms,
  // otherwise just apply the common ones.
  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType();
  Type *DestTy = CI.getType();

  // Get rid of casts from one type to the same type. These are useless and can
  // be replaced by the operand.
  if (DestTy == Src->getType())
    return replaceInstUsesWith(CI, Src);

  if (FixedVectorType *DestVTy = dyn_cast<FixedVectorType>(DestTy)) {
    // Beware: messing with this target-specific oddity may cause trouble.
    if (DestVTy->getNumElements() == 1 && SrcTy->isX86_MMXTy()) {
      Value *Elem = Builder.CreateBitCast(Src, DestVTy->getElementType());
      return InsertElementInst::Create(PoisonValue::get(DestTy), Elem,
                     Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
    }

    if (isa<IntegerType>(SrcTy)) {
      // If this is a cast from an integer to vector, check to see if the input
      // is a trunc or zext of a bitcast from vector. If so, we can replace all
      // the casts with a shuffle and (potentially) a bitcast.
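      // E.g. (illustrative, little endian):
      //   bitcast (trunc (bitcast <4 x i32> %V to i128) to i64) to <2 x i32>
      //     --> shufflevector %V, poison, <i32 0, i32 1>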
      if (isa<TruncInst>(Src) || isa<ZExtInst>(Src)) {
        CastInst *SrcCast = cast<CastInst>(Src);
        if (BitCastInst *BCIn = dyn_cast<BitCastInst>(SrcCast->getOperand(0)))
          if (isa<VectorType>(BCIn->getOperand(0)->getType()))
            if (Instruction *I = optimizeVectorResizeWithIntegerBitCasts(
                    BCIn->getOperand(0), cast<VectorType>(DestTy), *this))
              return I;
      }

      // If the input is an 'or' instruction, we may be doing shifts and ors to
      // assemble the elements of the vector manually. Try to rip the code out
      // and replace it with insertelements.
      if (Value *V = optimizeIntegerToVectorInsertions(CI, *this))
        return replaceInstUsesWith(CI, V);
    }
  }

  if (FixedVectorType *SrcVTy = dyn_cast<FixedVectorType>(SrcTy)) {
    if (SrcVTy->getNumElements() == 1) {
      // If our destination is not a vector, then make this a straight
      // scalar-scalar cast.
      if (!DestTy->isVectorTy()) {
        Value *Elem =
            Builder.CreateExtractElement(Src,
                     Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
        return CastInst::Create(Instruction::BitCast, Elem, DestTy);
      }

      // Otherwise, see if our source is an insert. If so, then use the scalar
      // component directly:
      // bitcast (inselt <1 x elt> V, X, 0) to <n x m> --> bitcast X to <n x m>
      if (auto *InsElt = dyn_cast<InsertElementInst>(Src))
        return new BitCastInst(InsElt->getOperand(1), DestTy);
    }

    // Convert an artificial vector insert into more analyzable bitwise logic.
    unsigned BitWidth = DestTy->getScalarSizeInBits();
    Value *X, *Y;
    uint64_t IndexC;
    if (match(Src, m_OneUse(m_InsertElt(m_OneUse(m_BitCast(m_Value(X))),
                                        m_Value(Y), m_ConstantInt(IndexC)))) &&
        DestTy->isIntegerTy() && X->getType() == DestTy &&
        Y->getType()->isIntegerTy() && isDesirableIntType(BitWidth)) {
      // Adjust for big endian - the LSBs are at the high index.
      if (DL.isBigEndian())
        IndexC = SrcVTy->getNumElements() - 1 - IndexC;

      // We only handle (endian-normalized) insert to index 0. Any other insert
      // would require a left-shift, so that is an extra instruction.
      if (IndexC == 0) {
        // bitcast (inselt (bitcast X), Y, 0) --> or (and X, MaskC), (zext Y)
        unsigned EltWidth = Y->getType()->getScalarSizeInBits();
        APInt MaskC = APInt::getHighBitsSet(BitWidth, BitWidth - EltWidth);
        Value *AndX = Builder.CreateAnd(X, MaskC);
        Value *ZextY = Builder.CreateZExt(Y, DestTy);
        return BinaryOperator::CreateOr(AndX, ZextY);
      }
    }
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(Src)) {
    // Okay, we have (bitcast (shuffle ..)). Check to see if this is
    // a bitcast to a vector with the same # elts.
    Value *ShufOp0 = Shuf->getOperand(0);
    Value *ShufOp1 = Shuf->getOperand(1);
    auto ShufElts = cast<VectorType>(Shuf->getType())->getElementCount();
    auto SrcVecElts = cast<VectorType>(ShufOp0->getType())->getElementCount();
    if (Shuf->hasOneUse() && DestTy->isVectorTy() &&
        cast<VectorType>(DestTy)->getElementCount() == ShufElts &&
        ShufElts == SrcVecElts) {
      BitCastInst *Tmp;
      // If either of the operands is a cast from CI.getType(), then
      // evaluating the shuffle in the casted destination's type will allow
      // us to eliminate at least one cast.
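      // E.g. (illustrative):
      //   bitcast (shuffle (bitcast <4 x float> %X to <4 x i32>),
      //                    (bitcast <4 x float> %Y to <4 x i32>), mask)
      //           to <4 x float>
      //     --> shuffle %X, %Y, mask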
      if (((Tmp = dyn_cast<BitCastInst>(ShufOp0)) &&
           Tmp->getOperand(0)->getType() == DestTy) ||
          ((Tmp = dyn_cast<BitCastInst>(ShufOp1)) &&
           Tmp->getOperand(0)->getType() == DestTy)) {
        Value *LHS = Builder.CreateBitCast(ShufOp0, DestTy);
        Value *RHS = Builder.CreateBitCast(ShufOp1, DestTy);
        // Return a new shuffle vector. Use the same element IDs, as we
        // know the vector types match #elts.
        return new ShuffleVectorInst(LHS, RHS, Shuf->getShuffleMask());
      }
    }

    // A bitcasted-to-scalar and byte/bit reversing shuffle is better
    // recognized as a byte/bit swap:
    //   bitcast <N x i8> (shuf X, undef, <N, N-1,...0>) -> bswap (bitcast X)
    //   bitcast <N x i1> (shuf X, undef, <N, N-1,...0>) -> bitreverse (bitcast X)
    if (DestTy->isIntegerTy() && ShufElts.getKnownMinValue() % 2 == 0 &&
        Shuf->hasOneUse() && Shuf->isReverse()) {
      unsigned IntrinsicNum = 0;
      if (DL.isLegalInteger(DestTy->getScalarSizeInBits()) &&
          SrcTy->getScalarSizeInBits() == 8) {
        IntrinsicNum = Intrinsic::bswap;
      } else if (SrcTy->getScalarSizeInBits() == 1) {
        IntrinsicNum = Intrinsic::bitreverse;
      }
      if (IntrinsicNum != 0) {
        assert(ShufOp0->getType() == SrcTy && "Unexpected shuffle mask");
        assert(match(ShufOp1, m_Undef()) && "Unexpected shuffle op");
        Function *BswapOrBitreverse =
            Intrinsic::getDeclaration(CI.getModule(), IntrinsicNum, DestTy);
        Value *ScalarX = Builder.CreateBitCast(ShufOp0, DestTy);
        return CallInst::Create(BswapOrBitreverse, {ScalarX});
      }
    }
  }

  // Handle the A->B->A cast when there is an intervening PHI node.
  if (PHINode *PN = dyn_cast<PHINode>(Src))
    if (Instruction *I = optimizeBitCastFromPhi(CI, PN))
      return I;

  if (Instruction *I = canonicalizeBitCastExtElt(CI, *this))
    return I;

  if (Instruction *I = foldBitCastBitwiseLogic(CI, Builder))
    return I;

  if (Instruction *I = foldBitCastSelect(CI, Builder))
    return I;

  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitAddrSpaceCast(AddrSpaceCastInst &CI) {
  return commonCastTransforms(CI);
}