//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cerrno>
#include <cfenv>
#include <cmath>
#include <cstddef>
#include <cstdint>

using namespace llvm;

namespace {

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
                                        Constant *C, Type *SrcEltTy,
                                        unsigned NumSrcElts,
                                        const DataLayout &DL) {
  // Now that we know that the input value is a vector of integers, just shift
  // and insert them into our result.
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (Element && isa<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
  }

  return nullptr;
}

/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
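/// Illustrative example (under a little-endian data layout):
///   bitcast (<2 x i32> <i32 1, i32 2> to i64)
/// folds to i64 0x0000000200000001, i.e. element 0 lands in the low bits.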
Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) &&
         "Invalid constantexpr bitcast!");

  // Catch the obvious splat cases.
  if (C->isNullValue() && !DestTy->isX86_MMXTy())
    return Constant::getNullValue(DestTy);
  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() &&
      !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
    return Constant::getAllOnesValue(DestTy);

  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
    // Handle a vector->scalar integer/fp cast.
    if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
      unsigned NumSrcElts = VTy->getNumElements();
      Type *SrcEltTy = VTy->getElementType();

      // If this is a vector of floating-point values, convert it to a vector
      // of integers to simplify things.
      if (SrcEltTy->isFloatingPointTy()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        auto *SrcIVTy = FixedVectorType::get(
            IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
        // Ask IR to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
      }

      APInt Result(DL.getTypeSizeInBits(DestTy), 0);
      if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                                SrcEltTy, NumSrcElts, DL))
        return CE;

      if (isa<IntegerType>(DestTy))
        return ConstantInt::get(DestTy, Result);

      APFloat FP(DestTy->getFltSemantics(), Result);
      return ConstantFP::get(DestTy->getContext(), FP);
    }
  }

  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = DestVTy->getNumElements();
  unsigned NumSrcElt = cast<VectorType>(C->getType())->getNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing. For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first: we only want to think about integers here, so if we
  // have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    auto *DestIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer; if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    auto *SrcIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same. Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(
              cast<VectorType>(C->getType())->getElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        if (!Src) // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);

    if (!Element) // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    if (!Src)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(Src,
                          ConstantInt::get(Src->getType(), ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate the element to an integer with the same pointer size and
      // convert the element back to a pointer using an inttoptr.
      if (DstEltTy->isPointerTy()) {
        IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
        Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
        Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
        continue;
      }

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}

} // end anonymous namespace

/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                      APInt &Offset, const DataLayout &DL) {
  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}

Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                               const DataLayout &DL) {
  do {
    Type *SrcTy = C->getType();
    uint64_t DestSize = DL.getTypeSizeInBits(DestTy);
    uint64_t SrcSize = DL.getTypeSizeInBits(SrcTy);
    if (SrcSize < DestSize)
      return nullptr;

    // Catch the obvious splat cases (since all-zeros can coerce non-integral
    // pointers legally).
    if (C->isNullValue() && !DestTy->isX86_MMXTy())
      return Constant::getNullValue(DestTy);
    if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() &&
        !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
      return Constant::getAllOnesValue(DestTy);

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    // But be careful not to coerce non-integral pointers illegally.
    if (SrcSize == DestSize &&
        DL.isNonIntegralPointerType(SrcTy->getScalarType()) ==
            DL.isNonIntegralPointerType(DestTy->getScalarType())) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
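      // (Illustrative: i64 -> i8* is spelled as an inttoptr and i8* -> i64 as
      // a ptrtoint, since a plain bitcast between pointers and integers is
      // not valid IR.)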
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantExpr::getCast(Cast, C, DestTy);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill
    // down and find a bitcastable constant.
    if (!SrcTy->isAggregateType())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    if (SrcTy->isStructTy()) {
      // Struct types might have leading zero-length elements like [0 x i32],
      // which are certainly not what we are looking for, so skip them.
      unsigned Elem = 0;
      Constant *ElemC;
      do {
        ElemC = C->getAggregateElement(Elem++);
      } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
      C = ElemC;
    } else {
      C = C->getAggregateElement(0u);
    }
  } while (C);

  return nullptr;
}

namespace {

/// Recursive helper to read bits out of a global. C is the constant being
/// copied out of. ByteOffset is an offset into C. CurPtr is the pointer to
/// copy results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. DL is the DataLayout.
bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
                        unsigned BytesLeft, const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      int n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail
      // padding, read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element; if so, we're
      // done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    uint64_t NumElts;
    Type *EltTy;
    if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
      NumElts = AT->getNumElements();
      EltTy = AT->getElementType();
    } else {
      NumElts = cast<VectorType>(C->getType())->getNumElements();
      EltTy = cast<VectorType>(C->getType())->getElementType();
    }
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}

Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
                                          const DataLayout &DL) {
  // Bail out early. We do not expect loads from scalable global variables.
  if (isa<ScalableVectorType>(LoadTy))
    return nullptr;

  auto *PTy = cast<PointerType>(C->getType());
  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    unsigned AS = PTy->getAddressSpace();

    // If this is a float/double load, we can try folding it as an int32/64
    // load and then bitcast the result. This can be useful for union cases.
    // Note that address spaces don't matter here since we're not going to
    // result in an actual new load.
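    // (Illustrative: a float load is handled by folding an i32 load of the
    // same bytes and bitcasting the resulting bits back to float.)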
    Type *MapTy;
    if (LoadTy->isHalfTy())
      MapTy = Type::getInt16Ty(C->getContext());
    else if (LoadTy->isFloatTy())
      MapTy = Type::getInt32Ty(C->getContext());
    else if (LoadTy->isDoubleTy())
      MapTy = Type::getInt64Ty(C->getContext());
    else if (LoadTy->isVectorTy()) {
      MapTy = Type::getIntNTy(
          C->getContext(), DL.getTypeSizeInBits(LoadTy).getFixedSize());
    } else
      return nullptr;

    C = FoldBitCast(C, MapTy->getPointerTo(AS), DL);
    if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, MapTy, DL)) {
      if (Res->isNullValue() && !LoadTy->isX86_MMXTy())
        // Materializing a zero can be done trivially without a bitcast.
        return Constant::getNullValue(LoadTy);
      Type *CastTy =
          LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy;
      Res = FoldBitCast(Res, CastTy, DL);
      if (LoadTy->isPtrOrPtrVectorTy()) {
        // For a vector of pointers, we first convert to a vector of integers,
        // then do a vector inttoptr.
        if (Res->isNullValue() && !LoadTy->isX86_MMXTy())
          return Constant::getNullValue(LoadTy);
        if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
          // Be careful not to replace a load of an addrspace value with an
          // inttoptr here.
          return nullptr;
        Res = ConstantExpr::getCast(Instruction::IntToPtr, Res, LoadTy);
      }
      return Res;
    }
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  GlobalValue *GVal;
  APInt OffsetAI;
  if (!IsConstantOffsetFromGlobal(C, GVal, OffsetAI, DL))
    return nullptr;

  auto *GV = dyn_cast<GlobalVariable>(GVal);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      !GV->getInitializer()->getType()->isSized())
    return nullptr;

  int64_t Offset = OffsetAI.getSExtValue();
  int64_t InitializerSize =
      DL.getTypeAllocSize(GV->getInitializer()->getType()).getFixedSize();

  // If the entire load ends before the start of the global, we're not
  // accessing anything in this constant and the result is undefined.
  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
    return UndefValue::get(IntType);

  // Likewise, if the load starts at or past the end of the global, the
  // result is undefined.
  if (Offset >= InitializerSize)
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;

  // If we're loading off the beginning of the global, some bytes may be
  // valid.
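  // (Illustrative: a 4-byte load at Offset == -1 overlaps the global's first
  // three bytes; CurPtr is advanced so those bytes land in RawBytes[1..3]
  // while RawBytes[0] stays zero.)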
  if (Offset < 0) {
    CurPtr += -Offset;
    BytesLeft += Offset;
    Offset = 0;
  }

  if (!ReadDataFromGlobal(GV->getInitializer(), Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}

Constant *ConstantFoldLoadThroughBitcastExpr(ConstantExpr *CE, Type *DestTy,
                                             const DataLayout &DL) {
  auto *SrcPtr = CE->getOperand(0);
  auto *SrcPtrTy = dyn_cast<PointerType>(SrcPtr->getType());
  if (!SrcPtrTy)
    return nullptr;
  Type *SrcTy = SrcPtrTy->getPointerElementType();

  Constant *C = ConstantFoldLoadFromConstPtr(SrcPtr, SrcTy, DL);
  if (!C)
    return nullptr;

  return llvm::ConstantFoldLoadThroughBitcast(C, DestTy, DL);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             const DataLayout &DL) {
  // First, try the easy cases:
  if (auto *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      return GV->getInitializer();

  if (auto *GA = dyn_cast<GlobalAlias>(C))
    if (GA->getAliasee() && !GA->isInterposable())
      return ConstantFoldLoadFromConstPtr(GA->getAliasee(), Ty, DL);

  // If the loaded value isn't a constant expr, we can't handle it.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
      if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
        if (Constant *V =
                ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(),
                                                       CE))
          return V;
      }
    }
  }

  if (CE->getOpcode() == Instruction::BitCast)
    if (Constant *LoadedC = ConstantFoldLoadThroughBitcastExpr(CE, Ty, DL))
      return LoadedC;

  // Instead of loading a constant C string, use the corresponding integer
  // value directly if the string length is small enough.
  StringRef Str;
  if (getConstantStringInfo(CE, Str) && !Str.empty()) {
    size_t StrLen = Str.size();
    unsigned NumBits = Ty->getPrimitiveSizeInBits();
    // Replace the load with an immediate integer if the result is an integer
    // or fp value.
    if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
        (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
      APInt StrVal(NumBits, 0);
      APInt SingleChar(NumBits, 0);
      if (DL.isLittleEndian()) {
        for (unsigned char C : reverse(Str.bytes())) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
      } else {
        for (unsigned char C : Str.bytes()) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
        // Append the terminating NUL at the end.
        SingleChar = 0;
        StrVal = (StrVal << 8) | SingleChar;
      }

      Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
      if (Ty->isFloatingPointTy())
        Res = ConstantExpr::getBitCast(Res, Ty);
      return Res;
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
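  // (Illustrative: any load based on @g = constant [16 x i8] zeroinitializer
  // folds to the zero value of the loaded type.)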
  if (auto *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      if (GV->getInitializer()->isNullValue())
        return Constant::getNullValue(Ty);
      if (isa<UndefValue>(GV->getInitializer()))
        return UndefValue::get(Ty);
    }
  }

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  return FoldReinterpretLoadFromConstPtr(CE, Ty, DL);
}

namespace {

Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout &DL) {
  if (LI->isVolatile()) return nullptr;

  if (auto *C = dyn_cast<Constant>(LI->getOperand(0)))
    return ConstantFoldLoadFromConstPtr(C, LI->getType(), DL);

  return nullptr;
}

/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together. If target data info is available, it is provided as DL,
/// otherwise DL is null.
Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
                                    const DataLayout &DL) {
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  if (Opc == Instruction::And) {
    KnownBits Known0 = computeKnownBits(Op0, DL);
    KnownBits Known1 = computeKnownBits(Op1, DL);
    if ((Known1.One | Known0.Zero).isAllOnesValue()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnesValue()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    Known0 &= Known1;
    if (Known0.isConstant())
      return ConstantInt::get(Op0->getType(), Known0.getConstant());
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2; pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth, so we have to convert to the
        // right size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}

/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly casted by the getelementptr.
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, Optional<unsigned> InRangeIndex,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntIdxTy = DL.getIndexType(ResultTy);
  Type *IntIdxScalarTy = IntIdxTy->getScalarType();

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
      Any = true;
      Type *NewType = Ops[i]->getType()->isVectorTy()
                          ? IntIdxTy
                          : IntIdxScalarTy;
      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                      true,
                                                                      NewType,
                                                                      true),
                                              Ops[i], NewType));
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(
      SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
  return ConstantFoldConstant(C, DL, TLI);
}

/// Strip the pointer casts, but preserve the address space information.
Constant *StripPtrCastKeepAS(Constant *Ptr, Type *&ElemTy) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  auto *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = cast<Constant>(Ptr->stripPointerCasts());
  auto *NewPtrTy = cast<PointerType>(Ptr->getType());

  ElemTy = NewPtrTy->getPointerElementType();

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    NewPtrTy = ElemTy->getPointerTo(OldPtrTy->getAddressSpace());
    Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
  }
  return Ptr;
}

/// If we can symbolically evaluate the GEP constant expression, do so.
Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
                                  ArrayRef<Constant *> Ops,
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  const GEPOperator *InnermostGEP = GEP;
  bool InBounds = GEP->isInBounds();

  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResElemTy = GEP->getResultElementType();
  Type *ResTy = GEP->getType();
  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->getInRangeIndex(), DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntIdxTy = DL.getIndexType(Ptr->getType());

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'.
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i])) {

      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
      // "inttoptr (sub (ptrtoint Ptr), V)"
      if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
        auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
        assert((!CE || CE->getType() == IntIdxTy) &&
               "CastGEPIndices didn't canonicalize index types!");
        if (CE && CE->getOpcode() == Instruction::Sub &&
            CE->getOperand(0)->isNullValue()) {
          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
          Res = ConstantExpr::getIntToPtr(Res, ResTy);
          return ConstantFoldConstant(Res, DL, TLI);
        }
      }
      return nullptr;
    }

  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
  APInt Offset =
      APInt(BitWidth,
            DL.getIndexedOffsetInType(
                SrcElemTy,
                makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    InnermostGEP = GEP;
    InBounds &= GEP->isInBounds();

    SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());

    // Do not try to incorporate the sub-GEP if some index is not a number.
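    // (Illustrative: this loop collapses chains such as a GEP whose base is
    // itself a constant GEP of @g into a single byte offset from @g.)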
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
  Type *Ty = PTy;
  SmallVector<Constant *, 32> NewIdxs;

  do {
    if (!Ty->isStructTy()) {
      if (Ty->isPointerTy()) {
        // The only pointer indexing we'll do is on the first index of the GEP.
        if (!NewIdxs.empty())
          break;

        Ty = SrcElemTy;

        // Only handle pointers to sized types, not pointers to functions.
        if (!Ty->isSized())
          return nullptr;
      } else {
        Type *NextTy = GetElementPtrInst::getTypeAtIndex(Ty, (uint64_t)0);
        if (!NextTy)
          break;
        Ty = NextTy;
      }

      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, DL.getTypeAllocSize(Ty));
      if (ElemSize == 0) {
        // The element size is 0. This may be [0 x Ty]*, so just use a zero
        // index for this level and proceed to the next level to see if it can
        // accommodate the offset.
        NewIdxs.push_back(ConstantInt::get(IntIdxTy, 0));
      } else {
        // The element size is non-zero; divide the offset by the element
        // size (rounding down) to compute the index at this level.
        bool Overflow;
        APInt NewIdx = Offset.sdiv_ov(ElemSize, Overflow);
        if (Overflow)
          break;
        Offset -= NewIdx * ElemSize;
        NewIdxs.push_back(ConstantInt::get(IntIdxTy, NewIdx));
      }
    } else {
      auto *STy = cast<StructType>(Ty);
      // If we end up with an offset that isn't valid for this struct type, we
      // can't re-form this GEP in a regular form, so bail out. The pointer
      // operand likely went through casts that are necessary to make the GEP
      // sensible.
      const StructLayout &SL = *DL.getStructLayout(STy);
      if (Offset.isNegative() || Offset.uge(SL.getSizeInBytes()))
        break;

      // Determine which field of the struct the offset points into. The
      // getZExtValue is fine as we've already ensured that the offset is
      // within the range representable by the StructLayout API.
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                         ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    }
  } while (Ty != ResElemTy);

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return nullptr;

  // Preserve the inrange index from the innermost GEP if possible. We must
  // have calculated the same indices up to and including the inrange index.
  Optional<unsigned> InRangeIndex;
  if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
    if (SrcElemTy == InnermostGEP->getSourceElementType() &&
        NewIdxs.size() > *LastIRIndex) {
      InRangeIndex = LastIRIndex;
      for (unsigned I = 0; I <= *LastIRIndex; ++I)
        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
          return nullptr;
    }

  // Create a GEP.
  Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
                                               InBounds, InRangeIndex);
  assert(C->getType()->getPointerElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (Ty != ResElemTy)
    C = FoldBitCast(C, ResTy, DL);

  return C;
}

/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned; if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI) {
  Type *DestTy = InstOrCE->getType();

  if (Instruction::isUnaryOp(Opcode))
    return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL);

  if (Instruction::isBinaryOp(Opcode))
    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);

  if (Instruction::isCast(Opcode))
    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
                                          Ops.slice(1), GEP->isInBounds(),
                                          GEP->getInRangeIndex());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    return CE->getWithOperands(Ops);

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      if (canConstantFoldCallTo(Call, F))
        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::ExtractValue:
    return ConstantExpr::getExtractValue(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(
        Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
  }
}

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

namespace {

Constant *
ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
                         const TargetLibraryInfo *TLI,
                         SmallDenseMap<Constant *, Constant *> &FoldedOps) {
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return const_cast<Constant *>(C);

  SmallVector<Constant *, 8> Ops;
  for (const Use &OldU : C->operands()) {
    Constant *OldC = cast<Constant>(&OldU);
    Constant *NewC = OldC;
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
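    // (FoldedOps memoizes already-folded operands, so shared subexpressions
    // are only processed once.)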
    if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
      auto It = FoldedOps.find(OldC);
      if (It == FoldedOps.end()) {
        NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
        FoldedOps.insert({OldC, NewC});
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->isCompare())
      return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
                                             DL, TLI);

    return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself we choose not to
      // because that would break the rule that constant folding only applies
      // if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
      // If the incoming value is a different constant to
      // the one we saw previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants; if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
    Ops.push_back(Op);
  }

  if (const auto *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI);

  if (const auto *LI = dyn_cast<LoadInst>(I))
    return ConstantFoldLoadInst(LI, DL);

  if (auto *IVI = dyn_cast<InsertValueInst>(I)) {
    return ConstantExpr::getInsertValue(
        cast<Constant>(IVI->getAggregateOperand()),
        cast<Constant>(IVI->getInsertedValueOperand()),
        IVI->getIndices());
  }

  if (auto *EVI = dyn_cast<ExtractValueInst>(I)) {
    return ConstantExpr::getExtractValue(
        cast<Constant>(EVI->getAggregateOperand()),
        EVI->getIndices());
  }

  return ConstantFoldInstOperands(I, Ops, DL, TLI);
}

Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}

Constant *llvm::ConstantFoldInstOperands(Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
}

Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *Ops0, Constant *Ops1,
                                                const DataLayout &DL,
                                                const TargetLibraryInfo *TLI) {
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp null, (inttoptr x)         -> icmp 0, x
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp 0, (ptrtoint x)            -> icmp null, x
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // FIXME: The following comment is out of date; the DataLayout is here now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
      }

      // Only do this transformation if the int is the same size as intptr_t;
      // otherwise there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // Only do this transformation if the int is the same size as
        // intptr_t; otherwise there is a truncation or extension that we
        // aren't modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(0), Ops1, DL, TLI);
      Constant *RHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(1), Ops1, DL, TLI);
      unsigned OpC =
          Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
    }
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression, but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}

Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
                                           const DataLayout &DL) {
  assert(Instruction::isUnaryOp(Opcode));

  return ConstantExpr::get(Opcode, Op);
}

Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                             Constant *RHS,
                                             const DataLayout &DL) {
  assert(Instruction::isBinaryOp(Opcode));
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;

  return ConstantExpr::get(Opcode, LHS, RHS);
}

Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
                                        Type *DestTy, const DataLayout &DL) {
  assert(Instruction::isCast(Opcode));
  switch (Opcode) {
  default:
    llvm_unreachable("Missing case");
  case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair. This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::IntToPtr) {
        Constant *Input = CE->getOperand(0);
        unsigned InWidth = Input->getType()->getScalarSizeInBits();
        unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
        if (PtrWidth < InWidth) {
          Constant *Mask =
              ConstantInt::get(CE->getContext(),
                               APInt::getLowBitsSet(InWidth, PtrWidth));
          Input = ConstantExpr::getAnd(Input, Mask);
        }
        // Do a zext or trunc to get to the dest size.
        return ConstantExpr::getIntegerCast(Input, DestTy, false);
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size and the address spaces are the same.
    // This requires knowing the width of a pointer, so it can't be done in
    // ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }

    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::BitCast:
    return FoldBitCast(C, DestTy, DL);
  }
}

Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE) {
  if (!CE->getOperand(1)->isNullValue())
    return nullptr; // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
    C = C->getAggregateElement(CE->getOperand(i));
    if (!C)
      return nullptr;
  }
  return C;
}

Constant *
llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
                                        ArrayRef<Constant *> Indices) {
  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (Constant *Index : Indices) {
    C = C->getAggregateElement(Index);
    if (!C)
      return nullptr;
  }
  return C;
}

//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//

bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
  if (Call->isNoBuiltin())
    return false;
  switch (F->getIntrinsicID()) {
  // Operations that do not operate on floating-point numbers and do not
  // depend on the FP environment can be folded even in strictfp functions.
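  // (Illustrative: llvm.bswap.i32(i32 0x12345678) folds to 0x78563412
  // regardless of FP state, so it is safe even in strictfp functions.)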
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::masked_load:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::bitreverse:
  case Intrinsic::is_constant:
  case Intrinsic::experimental_vector_reduce_add:
  case Intrinsic::experimental_vector_reduce_mul:
  case Intrinsic::experimental_vector_reduce_and:
  case Intrinsic::experimental_vector_reduce_or:
  case Intrinsic::experimental_vector_reduce_xor:
  case Intrinsic::experimental_vector_reduce_smin:
  case Intrinsic::experimental_vector_reduce_smax:
  case Intrinsic::experimental_vector_reduce_umin:
  case Intrinsic::experimental_vector_reduce_umax:
    return true;

  // Floating-point operations cannot be folded in strictfp functions in the
  // general case. They can be folded if the FP environment is known to the
  // compiler.
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::sqrt:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::amdgcn_cos:
  case Intrinsic::amdgcn_cubeid:
  case Intrinsic::amdgcn_cubema:
  case Intrinsic::amdgcn_cubesc:
  case Intrinsic::amdgcn_cubetc:
  case Intrinsic::amdgcn_fmul_legacy:
  case Intrinsic::amdgcn_fract:
  case Intrinsic::amdgcn_ldexp:
  case Intrinsic::amdgcn_sin:
  // The intrinsics below depend on the rounding mode in MXCSR.
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2si32:
  case Intrinsic::x86_avx512_vcvtss2si64:
  case Intrinsic::x86_avx512_cvttss2si:
  case Intrinsic::x86_avx512_cvttss2si64:
  case Intrinsic::x86_avx512_vcvtsd2si32:
  case Intrinsic::x86_avx512_vcvtsd2si64:
  case Intrinsic::x86_avx512_cvttsd2si:
  case Intrinsic::x86_avx512_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2usi32:
  case Intrinsic::x86_avx512_vcvtss2usi64:
  case Intrinsic::x86_avx512_cvttss2usi:
  case Intrinsic::x86_avx512_cvttss2usi64:
  case Intrinsic::x86_avx512_vcvtsd2usi32:
  case Intrinsic::x86_avx512_vcvtsd2usi64:
  case Intrinsic::x86_avx512_cvttsd2usi:
  case Intrinsic::x86_avx512_cvttsd2usi64:
    return !Call->isStrictFP();

  // Sign operations are actually bitwise operations; they do not raise
  // exceptions even for SNaNs.
  case Intrinsic::fabs:
  case Intrinsic::copysign:
  // Non-constrained variants of rounding operations imply the default FP
  // environment; they can be folded in any case.
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::trunc:
  case Intrinsic::nearbyint:
  case Intrinsic::rint:
  // Constrained intrinsics can be folded if the FP environment is known
  // to the compiler.
  case Intrinsic::experimental_constrained_ceil:
  case Intrinsic::experimental_constrained_floor:
  case Intrinsic::experimental_constrained_round:
  case Intrinsic::experimental_constrained_roundeven:
  case Intrinsic::experimental_constrained_trunc:
  case Intrinsic::experimental_constrained_nearbyint:
  case Intrinsic::experimental_constrained_rint:
    return true;
  default:
    return false;
  case Intrinsic::not_intrinsic: break;
  }

  if (!F->hasName() || Call->isStrictFP())
    return false;

  // In these cases, checking the length is required. We don't want to return
  // true for a name like "cos\0blah", which strcmp would compare equal to
  // "cos" but which has length 8.
  StringRef Name = F->getName();
  switch (Name[0]) {
  default:
    return false;
  case 'a':
    return Name == "acos" || Name == "acosf" ||
           Name == "asin" || Name == "asinf" ||
           Name == "atan" || Name == "atanf" ||
           Name == "atan2" || Name == "atan2f";
  case 'c':
    return Name == "ceil" || Name == "ceilf" ||
           Name == "cos" || Name == "cosf" ||
           Name == "cosh" || Name == "coshf";
  case 'e':
    return Name == "exp" || Name == "expf" ||
           Name == "exp2" || Name == "exp2f";
  case 'f':
    return Name == "fabs" || Name == "fabsf" ||
           Name == "floor" || Name == "floorf" ||
           Name == "fmod" || Name == "fmodf";
  case 'l':
    return Name == "log" || Name == "logf" ||
           Name == "log2" || Name == "log2f" ||
           Name == "log10" || Name == "log10f";
  case 'n':
    return Name == "nearbyint" || Name == "nearbyintf";
  case 'p':
    return Name == "pow" || Name == "powf";
  case 'r':
    return Name == "remainder" || Name == "remainderf" ||
           Name == "rint" || Name == "rintf" ||
           Name == "round" || Name == "roundf";
  case 's':
    return Name == "sin" || Name == "sinf" ||
           Name == "sinh" || Name == "sinhf" ||
           Name == "sqrt" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanf" ||
           Name == "tanh" || Name == "tanhf" ||
           Name == "trunc" || Name == "truncf";
  case '_':
    // Check for various function names that get used for the math functions
    // when the header files are preprocessed with the macro
    // __FINITE_MATH_ONLY__ enabled.
    // The '12' here is the length of the shortest name that can match.
    // We need to check the size before looking at Name[1] and Name[2]
    // so we may as well check a limit that will eliminate mismatches.
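    // (For example, "__pow_finite" is exactly 12 characters long.)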
    if (Name.size() < 12 || Name[1] != '_')
      return false;
    switch (Name[2]) {
    default:
      return false;
    case 'a':
      return Name == "__acos_finite" || Name == "__acosf_finite" ||
             Name == "__asin_finite" || Name == "__asinf_finite" ||
             Name == "__atan2_finite" || Name == "__atan2f_finite";
    case 'c':
      return Name == "__cosh_finite" || Name == "__coshf_finite";
    case 'e':
      return Name == "__exp_finite" || Name == "__expf_finite" ||
             Name == "__exp2_finite" || Name == "__exp2f_finite";
    case 'l':
      return Name == "__log_finite" || Name == "__logf_finite" ||
             Name == "__log10_finite" || Name == "__log10f_finite";
    case 'p':
      return Name == "__pow_finite" || Name == "__powf_finite";
    case 's':
      return Name == "__sinh_finite" || Name == "__sinhf_finite";
    }
  }
}

namespace {

Constant *GetConstantFoldFPValue(double V, Type *Ty) {
  if (Ty->isHalfTy() || Ty->isFloatTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}

/// Clear the floating-point exception state.
inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
#endif
  errno = 0;
}

/// Test if a floating-point exception was raised.
inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)
    return true;
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    return true;
#endif
  return false;
}

Constant *ConstantFoldFP(double (*NativeFP)(double), double V, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}

Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), double V,
                               double W, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V, W);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}

Constant *ConstantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
  FixedVectorType *VT = dyn_cast<FixedVectorType>(Op->getType());
  if (!VT)
    return nullptr;
  ConstantInt *CI = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
  if (!CI)
    return nullptr;
  APInt Acc = CI->getValue();

  for (unsigned I = 1; I < VT->getNumElements(); I++) {
    if (!(CI = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
      return nullptr;
    const APInt &X = CI->getValue();
    switch (IID) {
    case Intrinsic::experimental_vector_reduce_add:
      Acc = Acc + X;
      break;
    case Intrinsic::experimental_vector_reduce_mul:
      Acc = Acc * X;
      break;
    case Intrinsic::experimental_vector_reduce_and:
      Acc = Acc & X;
      break;
    case Intrinsic::experimental_vector_reduce_or:
      Acc = Acc | X;
      break;
    case Intrinsic::experimental_vector_reduce_xor:
      Acc = Acc ^ X;
      break;
    case Intrinsic::experimental_vector_reduce_smin:
      Acc = APIntOps::smin(Acc, X);
      break;
    case Intrinsic::experimental_vector_reduce_smax:
      Acc = APIntOps::smax(Acc, X);
      break;
    case Intrinsic::experimental_vector_reduce_umin:
      Acc = APIntOps::umin(Acc, X);
      break;
    case Intrinsic::experimental_vector_reduce_umax:
      Acc = APIntOps::umax(Acc, X);
      break;
    }
  }

  return ConstantInt::get(Op->getContext(), Acc);
}

/// Attempt to fold an SSE floating point to integer conversion of a constant
/// floating point. If roundTowardZero is false, the default IEEE rounding is
/// used (toward nearest, ties to even). This matches the behavior of the
/// non-truncating SSE instructions in the default rounding mode. The desired
/// integer type Ty is used to select how many bits are available for the
/// result. Returns null if the conversion cannot be performed, otherwise
/// returns the Constant value resulting from the conversion.
Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
                                      Type *Ty, bool IsSigned) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = Ty->getIntegerBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
                                               : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status =
      Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth,
                           IsSigned, mode, &isExact);
  if (status != APFloat::opOK &&
      (!roundTowardZero || status != APFloat::opInexact))
    return nullptr;
  return ConstantInt::get(Ty, UIntVal, IsSigned);
}

double getValueAsDouble(ConstantFP *Op) {
  Type *Ty = Op->getType();

  if (Ty->isFloatTy())
    return Op->getValueAPF().convertToFloat();

  if (Ty->isDoubleTy())
    return Op->getValueAPF().convertToDouble();

  bool unused;
  APFloat APF = Op->getValueAPF();
  APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
  return APF.convertToDouble();
}

static bool isManifestConstant(const Constant *c) {
  if (isa<ConstantData>(c)) {
    return true;
  } else if (isa<ConstantAggregate>(c) || isa<ConstantExpr>(c)) {
    for (const Value *subc : c->operand_values()) {
      if (!isManifestConstant(cast<Constant>(subc)))
        return false;
    }
    return true;
  }
  return false;
}

static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
    C = &CI->getValue();
    return true;
  }
  if (isa<UndefValue>(Op)) {
    C = nullptr;
    return true;
  }
  return false;
}

static Constant *ConstantFoldScalarCall1(StringRef Name,
                                         Intrinsic::ID IntrinsicID,
                                         Type *Ty,
                                         ArrayRef<Constant *> Operands,
                                         const TargetLibraryInfo *TLI,
                                         const CallBase *Call) {
  assert(Operands.size() == 1 && "Wrong number of operands.");

  if (IntrinsicID == Intrinsic::is_constant) {
    // We know we have a "Constant" argument. But we want to only
    // return true for manifest constants, not those that depend on
    // constants with unknowable values, e.g. GlobalValue or BlockAddress.
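    // For example, (i64 ptrtoint (i32* @g to i64)) is a Constant, but the
    // address of @g is not known until link time, so llvm.is.constant must
    // not be folded to true for it; returning nullptr here lets a later pass
    // lower the intrinsic to false instead.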
    if (isManifestConstant(Operands[0]))
      return ConstantInt::getTrue(Ty->getContext());
    return nullptr;
  }
  if (isa<UndefValue>(Operands[0])) {
    // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
    // ctpop() is between 0 and bitwidth; pick 0 for undef.
    if (IntrinsicID == Intrinsic::cos ||
        IntrinsicID == Intrinsic::ctpop)
      return Constant::getNullValue(Ty);
    if (IntrinsicID == Intrinsic::bswap ||
        IntrinsicID == Intrinsic::bitreverse ||
        IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group)
      return Operands[0];
  }

  if (isa<ConstantPointerNull>(Operands[0])) {
    // launder(null) == null == strip(null) iff in addrspace 0
    if (IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group) {
      // If the instruction is not yet placed in a basic block (e.g. when
      // cloning a function during inlining), Call's caller may not be
      // available. So check Call's BB first before querying Call->getCaller.
      const Function *Caller =
          Call->getParent() ? Call->getCaller() : nullptr;
      if (Caller &&
          !NullPointerIsDefined(
              Caller, Operands[0]->getType()->getPointerAddressSpace())) {
        return Operands[0];
      }
      return nullptr;
    }
  }

  if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
    if (IntrinsicID == Intrinsic::convert_to_fp16) {
      APFloat Val(Op->getValueAPF());

      bool lost = false;
      Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);

      return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
    }

    if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
      return nullptr;

    // Use internal versions of these intrinsics.
    APFloat U = Op->getValueAPF();

    if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::round) {
      U.roundToIntegral(APFloat::rmNearestTiesToAway);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::roundeven) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::ceil) {
      U.roundToIntegral(APFloat::rmTowardPositive);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::floor) {
      U.roundToIntegral(APFloat::rmTowardNegative);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::trunc) {
      U.roundToIntegral(APFloat::rmTowardZero);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::fabs) {
      U.clearSign();
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::amdgcn_fract) {
      // The v_fract instruction behaves like the OpenCL spec, which defines
      // fract(x) as fmin(x - floor(x), 0x1.fffffep-1f): "The min() operator
      // is there to prevent fract(-small) from returning 1.0. It returns the
      // largest positive floating-point number less than 1.0."
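      // For example, with x = -1.0e-9f the difference x - floor(x) rounds to
      // 1.0f in single precision, and the clamp below replaces it with
      // 0x1.fffffep-1f, the largest float below 1.0.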
      APFloat FloorU(U);
      FloorU.roundToIntegral(APFloat::rmTowardNegative);
      APFloat FractU(U - FloorU);
      APFloat AlmostOne(U.getSemantics(), 1);
      AlmostOne.next(/*nextDown*/ true);
      return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne));
    }

    // Rounding operations (floor, trunc, ceil, round and nearbyint) do not
    // raise FP exceptions, unless the argument is a signaling NaN.

    Optional<APFloat::roundingMode> RM;
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::experimental_constrained_nearbyint:
    case Intrinsic::experimental_constrained_rint: {
      auto CI = cast<ConstrainedFPIntrinsic>(Call);
      RM = CI->getRoundingMode();
      if (!RM || RM.getValue() == RoundingMode::Dynamic)
        return nullptr;
      break;
    }
    case Intrinsic::experimental_constrained_round:
      RM = APFloat::rmNearestTiesToAway;
      break;
    case Intrinsic::experimental_constrained_ceil:
      RM = APFloat::rmTowardPositive;
      break;
    case Intrinsic::experimental_constrained_floor:
      RM = APFloat::rmTowardNegative;
      break;
    case Intrinsic::experimental_constrained_trunc:
      RM = APFloat::rmTowardZero;
      break;
    }
    if (RM) {
      auto CI = cast<ConstrainedFPIntrinsic>(Call);
      if (U.isFinite()) {
        APFloat::opStatus St = U.roundToIntegral(*RM);
        if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
            St == APFloat::opInexact) {
          Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
          if (EB && *EB == fp::ebStrict)
            return nullptr;
        }
      } else if (U.isSignaling()) {
        Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
        if (EB && *EB != fp::ebIgnore)
          return nullptr;
        U = APFloat::getQNaN(U.getSemantics());
      }
      return ConstantFP::get(Ty->getContext(), U);
    }

    // We only fold functions with finite arguments. Folding NaN and inf is
    // likely to abort with an exception anyway, and some host libms have
    // known errors raising exceptions.
    if (!U.isFinite())
      return nullptr;

    // Currently APFloat versions of these functions do not exist, so we use
    // the host native double versions. Float versions are not called
    // directly, but for all of these it holds that
    // (float)(f((double)arg)) == f(arg). Long double is not supported yet.
    double V = getValueAsDouble(Op);

    switch (IntrinsicID) {
    default: break;
    case Intrinsic::log:
      return ConstantFoldFP(log, V, Ty);
    case Intrinsic::log2:
      // TODO: What about hosts that lack a C99 library?
      return ConstantFoldFP(Log2, V, Ty);
    case Intrinsic::log10:
      // TODO: What about hosts that lack a C99 library?
      return ConstantFoldFP(log10, V, Ty);
    case Intrinsic::exp:
      return ConstantFoldFP(exp, V, Ty);
    case Intrinsic::exp2:
      // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
      return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
    case Intrinsic::sin:
      return ConstantFoldFP(sin, V, Ty);
    case Intrinsic::cos:
      return ConstantFoldFP(cos, V, Ty);
    case Intrinsic::sqrt:
      return ConstantFoldFP(sqrt, V, Ty);
    case Intrinsic::amdgcn_cos:
    case Intrinsic::amdgcn_sin: {
      if (V < -256.0 || V > 256.0)
        // The gfx8 and gfx9 architectures handle arguments outside the range
        // [-256, 256] differently. This should be a rare case so bail out
        // rather than trying to handle the difference.
        return nullptr;
      bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
      double V4 = V * 4.0;
      if (V4 == floor(V4)) {
        // Force exact results for quarter-integer inputs.
        const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 };
        V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
      } else {
        if (IsCos)
          V = cos(V * 2.0 * numbers::pi);
        else
          V = sin(V * 2.0 * numbers::pi);
      }
      return GetConstantFoldFPValue(V, Ty);
    }
    }

    if (!TLI)
      return nullptr;

    LibFunc Func = NotLibFunc;
    TLI->getLibFunc(Name, Func);
    switch (Func) {
    default:
      break;
    case LibFunc_acos:
    case LibFunc_acosf:
    case LibFunc_acos_finite:
    case LibFunc_acosf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(acos, V, Ty);
      break;
    case LibFunc_asin:
    case LibFunc_asinf:
    case LibFunc_asin_finite:
    case LibFunc_asinf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(asin, V, Ty);
      break;
    case LibFunc_atan:
    case LibFunc_atanf:
      if (TLI->has(Func))
        return ConstantFoldFP(atan, V, Ty);
      break;
    case LibFunc_ceil:
    case LibFunc_ceilf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardPositive);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_cos:
    case LibFunc_cosf:
      if (TLI->has(Func))
        return ConstantFoldFP(cos, V, Ty);
      break;
    case LibFunc_cosh:
    case LibFunc_coshf:
    case LibFunc_cosh_finite:
    case LibFunc_coshf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(cosh, V, Ty);
      break;
    case LibFunc_exp:
    case LibFunc_expf:
    case LibFunc_exp_finite:
    case LibFunc_expf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(exp, V, Ty);
      break;
    case LibFunc_exp2:
    case LibFunc_exp2f:
    case LibFunc_exp2_finite:
    case LibFunc_exp2f_finite:
      if (TLI->has(Func))
        // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
        return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
      break;
    case LibFunc_fabs:
    case LibFunc_fabsf:
      if (TLI->has(Func)) {
        U.clearSign();
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_floor:
    case LibFunc_floorf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardNegative);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_log:
    case LibFunc_logf:
    case LibFunc_log_finite:
    case LibFunc_logf_finite:
      if (V > 0.0 && TLI->has(Func))
        return ConstantFoldFP(log, V, Ty);
      break;
    case LibFunc_log2:
    case LibFunc_log2f:
    case LibFunc_log2_finite:
    case LibFunc_log2f_finite:
      if (V > 0.0 && TLI->has(Func))
        // TODO: What about hosts that lack a C99 library?
        return ConstantFoldFP(Log2, V, Ty);
      break;
    case LibFunc_log10:
    case LibFunc_log10f:
    case LibFunc_log10_finite:
    case LibFunc_log10f_finite:
      if (V > 0.0 && TLI->has(Func))
        // TODO: What about hosts that lack a C99 library?
        return ConstantFoldFP(log10, V, Ty);
      break;
    case LibFunc_nearbyint:
    case LibFunc_nearbyintf:
    case LibFunc_rint:
    case LibFunc_rintf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_round:
    case LibFunc_roundf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToAway);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_sin:
    case LibFunc_sinf:
      if (TLI->has(Func))
        return ConstantFoldFP(sin, V, Ty);
      break;
    case LibFunc_sinh:
    case LibFunc_sinhf:
    case LibFunc_sinh_finite:
    case LibFunc_sinhf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(sinh, V, Ty);
      break;
    case LibFunc_sqrt:
    case LibFunc_sqrtf:
      if (V >= 0.0 && TLI->has(Func))
        return ConstantFoldFP(sqrt, V, Ty);
      break;
    case LibFunc_tan:
    case LibFunc_tanf:
      if (TLI->has(Func))
        return ConstantFoldFP(tan, V, Ty);
      break;
    case LibFunc_tanh:
    case LibFunc_tanhf:
      if (TLI->has(Func))
        return ConstantFoldFP(tanh, V, Ty);
      break;
    case LibFunc_trunc:
    case LibFunc_truncf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardZero);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    }
    return nullptr;
  }

  if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
    switch (IntrinsicID) {
    case Intrinsic::bswap:
      return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
    case Intrinsic::ctpop:
      return ConstantInt::get(Ty, Op->getValue().countPopulation());
    case Intrinsic::bitreverse:
      return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
    case Intrinsic::convert_from_fp16: {
      APFloat Val(APFloat::IEEEhalf(), Op->getValue());

      bool lost = false;
      APFloat::opStatus status = Val.convert(
          Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);

      // Conversion is always precise.
      (void)status;
      assert(status == APFloat::opOK && !lost &&
             "Precision lost during fp16 constfolding");

      return ConstantFP::get(Ty->getContext(), Val);
    }
    default:
      return nullptr;
    }
  }

  if (isa<ConstantAggregateZero>(Operands[0])) {
    switch (IntrinsicID) {
    default: break;
    case Intrinsic::experimental_vector_reduce_add:
    case Intrinsic::experimental_vector_reduce_mul:
    case Intrinsic::experimental_vector_reduce_and:
    case Intrinsic::experimental_vector_reduce_or:
    case Intrinsic::experimental_vector_reduce_xor:
    case Intrinsic::experimental_vector_reduce_smin:
    case Intrinsic::experimental_vector_reduce_smax:
    case Intrinsic::experimental_vector_reduce_umin:
    case Intrinsic::experimental_vector_reduce_umax:
      return ConstantInt::get(Ty, 0);
    }
  }

  // Support ConstantVector in case we have an Undef in the top.
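  // (A vector constant such as <2 x i32> <i32 7, i32 undef> is represented
  // as a ConstantVector, because ConstantDataVector cannot hold undef
  // elements.)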
  if (isa<ConstantVector>(Operands[0]) ||
      isa<ConstantDataVector>(Operands[0])) {
    auto *Op = cast<Constant>(Operands[0]);
    switch (IntrinsicID) {
    default: break;
    case Intrinsic::experimental_vector_reduce_add:
    case Intrinsic::experimental_vector_reduce_mul:
    case Intrinsic::experimental_vector_reduce_and:
    case Intrinsic::experimental_vector_reduce_or:
    case Intrinsic::experimental_vector_reduce_xor:
    case Intrinsic::experimental_vector_reduce_smin:
    case Intrinsic::experimental_vector_reduce_smax:
    case Intrinsic::experimental_vector_reduce_umin:
    case Intrinsic::experimental_vector_reduce_umax:
      if (Constant *C = ConstantFoldVectorReduce(IntrinsicID, Op))
        return C;
      break;
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/true);
      break;
    case Intrinsic::x86_sse_cvttss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse2_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/true);
      break;
    }
  }

  return nullptr;
}

static Constant *ConstantFoldScalarCall2(StringRef Name,
                                         Intrinsic::ID IntrinsicID,
                                         Type *Ty,
                                         ArrayRef<Constant *> Operands,
                                         const TargetLibraryInfo *TLI,
                                         const CallBase *Call) {
  assert(Operands.size() == 2 && "Wrong number of operands.");

  if (auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
      return nullptr;
    double Op1V = getValueAsDouble(Op1);

    if (auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (Op2->getType() != Op1->getType())
        return nullptr;

      double Op2V = getValueAsDouble(Op2);
      if (IntrinsicID == Intrinsic::pow) {
        return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
      }
      if (IntrinsicID == Intrinsic::copysign) {
        APFloat V1 = Op1->getValueAPF();
        const APFloat &V2 = Op2->getValueAPF();
        V1.copySign(V2);
        return ConstantFP::get(Ty->getContext(), V1);
      }

      if (IntrinsicID == Intrinsic::minnum) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        return ConstantFP::get(Ty->getContext(), minnum(C1, C2));
      }

      if (IntrinsicID == Intrinsic::maxnum) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        return ConstantFP::get(Ty->getContext(), maxnum(C1, C2));
      }

      if (IntrinsicID == Intrinsic::minimum) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        return ConstantFP::get(Ty->getContext(), minimum(C1, C2));
      }

      if (IntrinsicID == Intrinsic::maximum) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        return ConstantFP::get(Ty->getContext(), maximum(C1, C2));
      }

      if (IntrinsicID == Intrinsic::amdgcn_fmul_legacy) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        // The legacy behaviour is that multiplying zero by anything, even
        // NaN or infinity, gives +0.0.
        if (C1.isZero() || C2.isZero())
          return ConstantFP::getNullValue(Ty);
        return ConstantFP::get(Ty->getContext(), C1 * C2);
      }

      if (!TLI)
        return nullptr;

      LibFunc Func = NotLibFunc;
      TLI->getLibFunc(Name, Func);
      switch (Func) {
      default:
        break;
      case LibFunc_pow:
      case LibFunc_powf:
      case LibFunc_pow_finite:
      case LibFunc_powf_finite:
        if (TLI->has(Func))
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        break;
      case LibFunc_fmod:
      case LibFunc_fmodf:
        if (TLI->has(Func)) {
          APFloat V = Op1->getValueAPF();
          if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
            return ConstantFP::get(Ty->getContext(), V);
        }
        break;
      case LibFunc_remainder:
      case LibFunc_remainderf:
        if (TLI->has(Func)) {
          APFloat V = Op1->getValueAPF();
          if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
            return ConstantFP::get(Ty->getContext(), V);
        }
        break;
      case LibFunc_atan2:
      case LibFunc_atan2f:
      case LibFunc_atan2_finite:
      case LibFunc_atan2f_finite:
        if (TLI->has(Func))
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
        break;
      }
    } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
      if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((float)std::pow((float)Op1V, (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((float)std::pow((float)Op1V, (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((double)std::pow((double)Op1V,
                                     (int)Op2C->getZExtValue())));

      if (IntrinsicID == Intrinsic::amdgcn_ldexp) {
        // FIXME: Should flush denorms depending on FP mode, but that's
        // ignored everywhere else.
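        // ldexp(x, n) scales x by 2^n exactly (no rounding occurs unless the
        // result is denormal or overflows), e.g. ldexp(1.5, 3) == 12.0.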

        // scalbn is equivalent to ldexp with float radix 2.
        APFloat Result = scalbn(Op1->getValueAPF(), Op2C->getSExtValue(),
                                APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), Result);
      }
    }
    return nullptr;
  }

  if (Operands[0]->getType()->isIntegerTy() &&
      Operands[1]->getType()->isIntegerTy()) {
    const APInt *C0, *C1;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1))
      return nullptr;

    switch (IntrinsicID) {
    default: break;
    case Intrinsic::usub_with_overflow:
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::sadd_with_overflow:
      // X - undef -> { undef, false }
      // undef - X -> { undef, false }
      // X + undef -> { undef, false }
      // undef + X -> { undef, false }
      if (!C0 || !C1) {
        return ConstantStruct::get(
            cast<StructType>(Ty),
            {UndefValue::get(Ty->getStructElementType(0)),
             Constant::getNullValue(Ty->getStructElementType(1))});
      }
      LLVM_FALLTHROUGH;
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow: {
      // undef * X -> { 0, false }
      // X * undef -> { 0, false }
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);

      APInt Res;
      bool Overflow;
      switch (IntrinsicID) {
      default: llvm_unreachable("Invalid case");
      case Intrinsic::sadd_with_overflow:
        Res = C0->sadd_ov(*C1, Overflow);
        break;
      case Intrinsic::uadd_with_overflow:
        Res = C0->uadd_ov(*C1, Overflow);
        break;
      case Intrinsic::ssub_with_overflow:
        Res = C0->ssub_ov(*C1, Overflow);
        break;
      case Intrinsic::usub_with_overflow:
        Res = C0->usub_ov(*C1, Overflow);
        break;
      case Intrinsic::smul_with_overflow:
        Res = C0->smul_ov(*C1, Overflow);
        break;
      case Intrinsic::umul_with_overflow:
        Res = C0->umul_ov(*C1, Overflow);
        break;
      }
      Constant *Ops[] = {
        ConstantInt::get(Ty->getContext(), Res),
        ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
      };
      return ConstantStruct::get(cast<StructType>(Ty), Ops);
    }
    case Intrinsic::uadd_sat:
    case Intrinsic::sadd_sat:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return Constant::getAllOnesValue(Ty);
      if (IntrinsicID == Intrinsic::uadd_sat)
        return ConstantInt::get(Ty, C0->uadd_sat(*C1));
      else
        return ConstantInt::get(Ty, C0->sadd_sat(*C1));
    case Intrinsic::usub_sat:
    case Intrinsic::ssub_sat:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);
      if (IntrinsicID == Intrinsic::usub_sat)
        return ConstantInt::get(Ty, C0->usub_sat(*C1));
      else
        return ConstantInt::get(Ty, C0->ssub_sat(*C1));
    case Intrinsic::cttz:
    case Intrinsic::ctlz:
      assert(C1 && "Must be constant int");

      // cttz(0, 1) and ctlz(0, 1) are undef.
      if (C1->isOneValue() && (!C0 || C0->isNullValue()))
        return UndefValue::get(Ty);
      if (!C0)
        return Constant::getNullValue(Ty);
      if (IntrinsicID == Intrinsic::cttz)
        return ConstantInt::get(Ty, C0->countTrailingZeros());
      else
        return ConstantInt::get(Ty, C0->countLeadingZeros());
    }

    return nullptr;
  }

  // Support ConstantVector in case we have an Undef in the top.
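  // The second operand of these AVX512 intrinsics is a rounding-mode
  // immediate; the value 4 is _MM_FROUND_CUR_DIRECTION, i.e. "use MXCSR",
  // which at compile time is taken to be the default round-to-nearest mode.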
  if ((isa<ConstantVector>(Operands[0]) ||
       isa<ConstantDataVector>(Operands[0])) &&
      // Check for default rounding mode.
      // FIXME: Support other rounding modes?
      isa<ConstantInt>(Operands[1]) &&
      cast<ConstantInt>(Operands[1])->getValue() == 4) {
    auto *Op = cast<Constant>(Operands[0]);
    switch (IntrinsicID) {
    default: break;
    case Intrinsic::x86_avx512_vcvtss2si32:
    case Intrinsic::x86_avx512_vcvtss2si64:
    case Intrinsic::x86_avx512_vcvtsd2si32:
    case Intrinsic::x86_avx512_vcvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/true);
      break;
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/false);
      break;
    case Intrinsic::x86_avx512_cvttss2si:
    case Intrinsic::x86_avx512_cvttss2si64:
    case Intrinsic::x86_avx512_cvttsd2si:
    case Intrinsic::x86_avx512_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/true);
      break;
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/false);
      break;
    }
  }
  return nullptr;
}

static APFloat ConstantFoldAMDGCNCubeIntrinsic(Intrinsic::ID IntrinsicID,
                                               const APFloat &S0,
                                               const APFloat &S1,
                                               const APFloat &S2) {
  unsigned ID;
  const fltSemantics &Sem = S0.getSemantics();
  APFloat MA(Sem), SC(Sem), TC(Sem);
  if (abs(S2) >= abs(S0) && abs(S2) >= abs(S1)) {
    if (S2.isNegative() && S2.isNonZero() && !S2.isNaN()) {
      // S2 < 0
      ID = 5;
      SC = -S0;
    } else {
      ID = 4;
      SC = S0;
    }
    MA = S2;
    TC = -S1;
  } else if (abs(S1) >= abs(S0)) {
    if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
      // S1 < 0
      ID = 3;
      TC = -S2;
    } else {
      ID = 2;
      TC = S2;
    }
    MA = S1;
    SC = S0;
  } else {
    if (S0.isNegative() && S0.isNonZero() && !S0.isNaN()) {
      // S0 < 0
      ID = 1;
      SC = S2;
    } else {
      ID = 0;
      SC = -S2;
    }
    MA = S0;
    TC = -S1;
  }
  switch (IntrinsicID) {
  default:
    llvm_unreachable("unhandled amdgcn cube intrinsic");
  case Intrinsic::amdgcn_cubeid:
    return APFloat(Sem, ID);
  case Intrinsic::amdgcn_cubema:
    return MA + MA;
  case Intrinsic::amdgcn_cubesc:
    return SC;
  case Intrinsic::amdgcn_cubetc:
    return TC;
  }
}

static Constant *ConstantFoldScalarCall3(StringRef Name,
                                         Intrinsic::ID IntrinsicID,
                                         Type *Ty,
                                         ArrayRef<Constant *> Operands,
                                         const TargetLibraryInfo *TLI,
                                         const CallBase *Call) {
  assert(Operands.size() == 3 && "Wrong number of operands.");

  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::fma:
        case Intrinsic::fmuladd: {
          APFloat V = Op1->getValueAPF();
          V.fusedMultiplyAdd(Op2->getValueAPF(), Op3->getValueAPF(),
                             APFloat::rmNearestTiesToEven);
          return ConstantFP::get(Ty->getContext(), V);
        }
        case Intrinsic::amdgcn_cubeid:
        case Intrinsic::amdgcn_cubema:
        case Intrinsic::amdgcn_cubesc:
        case Intrinsic::amdgcn_cubetc: {
          APFloat V = ConstantFoldAMDGCNCubeIntrinsic(
              IntrinsicID, Op1->getValueAPF(), Op2->getValueAPF(),
              Op3->getValueAPF());
          return ConstantFP::get(Ty->getContext(), V);
        }
        }
      }
    }
  }

  if (const auto *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantInt>(Operands[2])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::smul_fix:
        case Intrinsic::smul_fix_sat: {
          // This code performs rounding towards negative infinity in case
          // the result cannot be represented exactly for the given scale.
          // Targets that do care about rounding should use a target hook
          // for specifying how rounding should be done, and provide their
          // own folding to be consistent with rounding. This is the same
          // approach as used by DAGTypeLegalizer::ExpandIntRes_MULFIX.
          const APInt &Lhs = Op1->getValue();
          const APInt &Rhs = Op2->getValue();
          unsigned Scale = Op3->getValue().getZExtValue();
          unsigned Width = Lhs.getBitWidth();
          assert(Scale < Width && "Illegal scale.");
          unsigned ExtendedWidth = Width * 2;
          APInt Product = (Lhs.sextOrSelf(ExtendedWidth) *
                           Rhs.sextOrSelf(ExtendedWidth)).ashr(Scale);
          if (IntrinsicID == Intrinsic::smul_fix_sat) {
            APInt MaxValue =
                APInt::getSignedMaxValue(Width).sextOrSelf(ExtendedWidth);
            APInt MinValue =
                APInt::getSignedMinValue(Width).sextOrSelf(ExtendedWidth);
            Product = APIntOps::smin(Product, MaxValue);
            Product = APIntOps::smax(Product, MinValue);
          }
          return ConstantInt::get(Ty->getContext(),
                                  Product.sextOrTrunc(Width));
        }
        }
      }
    }
  }

  if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
    const APInt *C0, *C1, *C2;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1) ||
        !getConstIntOrUndef(Operands[2], C2))
      return nullptr;

    bool IsRight = IntrinsicID == Intrinsic::fshr;
    if (!C2)
      return Operands[IsRight ? 1 : 0];
    if (!C0 && !C1)
      return UndefValue::get(Ty);

    // The shift amount is interpreted as modulo the bitwidth. If the shift
    // amount is effectively 0, avoid UB due to the oversized inverse shift
    // below.
    unsigned BitWidth = C2->getBitWidth();
    unsigned ShAmt = C2->urem(BitWidth);
    if (!ShAmt)
      return Operands[IsRight ? 1 : 0];

    // (C0 << ShlAmt) | (C1 >> LshrAmt)
    unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
    unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
    if (!C0)
      return ConstantInt::get(Ty, C1->lshr(LshrAmt));
    if (!C1)
      return ConstantInt::get(Ty, C0->shl(ShlAmt));
    return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
  }

  return nullptr;
}

static Constant *ConstantFoldScalarCall(StringRef Name,
                                        Intrinsic::ID IntrinsicID,
                                        Type *Ty,
                                        ArrayRef<Constant *> Operands,
                                        const TargetLibraryInfo *TLI,
                                        const CallBase *Call) {
  if (Operands.size() == 1)
    return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);

  if (Operands.size() == 2)
    return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call);

  if (Operands.size() == 3)
    return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);

  return nullptr;
}

static Constant *ConstantFoldVectorCall(StringRef Name,
                                        Intrinsic::ID IntrinsicID,
                                        VectorType *VTy,
                                        ArrayRef<Constant *> Operands,
                                        const DataLayout &DL,
                                        const TargetLibraryInfo *TLI,
                                        const CallBase *Call) {
  // Do not iterate on scalable vectors; the number of elements is unknown at
  // compile time.
  if (isa<ScalableVectorType>(VTy))
    return nullptr;

  auto *FVTy = cast<FixedVectorType>(VTy);

  SmallVector<Constant *, 4> Result(FVTy->getNumElements());
  SmallVector<Constant *, 4> Lane(Operands.size());
  Type *Ty = FVTy->getElementType();

  if (IntrinsicID == Intrinsic::masked_load) {
    auto *SrcPtr = Operands[0];
    auto *Mask = Operands[2];
    auto *Passthru = Operands[3];

    Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, FVTy, DL);

    SmallVector<Constant *, 32> NewElements;
    for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
      auto *MaskElt = Mask->getAggregateElement(I);
      if (!MaskElt)
        break;
      auto *PassthruElt = Passthru->getAggregateElement(I);
      auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
      if (isa<UndefValue>(MaskElt)) {
        if (PassthruElt)
          NewElements.push_back(PassthruElt);
        else if (VecElt)
          NewElements.push_back(VecElt);
        else
          return nullptr;
        // This lane has been handled; an undef mask element is neither a
        // zero nor a one, so it must not reach the checks below.
        continue;
      }
      if (MaskElt->isNullValue()) {
        if (!PassthruElt)
          return nullptr;
        NewElements.push_back(PassthruElt);
      } else if (MaskElt->isOneValue()) {
        if (!VecElt)
          return nullptr;
        NewElements.push_back(VecElt);
      } else {
        return nullptr;
      }
    }
    if (NewElements.size() != FVTy->getNumElements())
      return nullptr;
    return ConstantVector::get(NewElements);
  }

  for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
    // Gather a column of constants.
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      // Some intrinsics use a scalar type for certain arguments.
      if (hasVectorInstrinsicScalarOpd(IntrinsicID, J)) {
        Lane[J] = Operands[J];
        continue;
      }

      Constant *Agg = Operands[J]->getAggregateElement(I);
      if (!Agg)
        return nullptr;

      Lane[J] = Agg;
    }

    // Use the regular scalar folding to simplify this column.
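    // For example, ctpop(<2 x i32> <i32 3, i32 255>) is folded lane by lane
    // to <2 x i32> <i32 2, i32 8>.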
    Constant *Folded =
        ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
    if (!Folded)
      return nullptr;
    Result[I] = Folded;
  }

  return ConstantVector::get(Result);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
                                 ArrayRef<Constant *> Operands,
                                 const TargetLibraryInfo *TLI) {
  if (Call->isNoBuiltin())
    return nullptr;
  if (!F->hasName())
    return nullptr;
  StringRef Name = F->getName();

  Type *Ty = F->getReturnType();

  if (auto *VTy = dyn_cast<VectorType>(Ty))
    return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands,
                                  F->getParent()->getDataLayout(), TLI, Call);

  return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI,
                                Call);
}

bool llvm::isMathLibCallNoop(const CallBase *Call,
                             const TargetLibraryInfo *TLI) {
  // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
  // (and to some extent ConstantFoldScalarCall).
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return false;
  Function *F = Call->getCalledFunction();
  if (!F)
    return false;

  LibFunc Func;
  if (!TLI || !TLI->getLibFunc(*F, Func))
    return false;

  if (Call->getNumArgOperands() == 1) {
    if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
      const APFloat &Op = OpC->getValueAPF();
      switch (Func) {
      case LibFunc_logl:
      case LibFunc_log:
      case LibFunc_logf:
      case LibFunc_log2l:
      case LibFunc_log2:
      case LibFunc_log2f:
      case LibFunc_log10l:
      case LibFunc_log10:
      case LibFunc_log10f:
        return Op.isNaN() || (!Op.isZero() && !Op.isNegative());

      case LibFunc_expl:
      case LibFunc_exp:
      case LibFunc_expf:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-745.0) || Op > APFloat(709.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f));
        break;

      case LibFunc_exp2l:
      case LibFunc_exp2:
      case LibFunc_exp2f:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-1074.0) || Op > APFloat(1023.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-149.0f) || Op > APFloat(127.0f));
        break;

      case LibFunc_sinl:
      case LibFunc_sin:
      case LibFunc_sinf:
      case LibFunc_cosl:
      case LibFunc_cos:
      case LibFunc_cosf:
        return !Op.isInfinity();

      case LibFunc_tanl:
      case LibFunc_tan:
      case LibFunc_tanf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = OpC->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          double OpV = getValueAsDouble(OpC);
          return ConstantFoldFP(tan, OpV, Ty) != nullptr;
        }
        break;
      }

      case LibFunc_asinl:
      case LibFunc_asin:
      case LibFunc_asinf:
      case LibFunc_acosl:
      case LibFunc_acos:
      case LibFunc_acosf:
        return !(Op < APFloat(Op.getSemantics(), "-1") ||
                 Op > APFloat(Op.getSemantics(), "1"));

      case LibFunc_sinh:
      case LibFunc_cosh:
      case LibFunc_sinhf:
      case LibFunc_coshf:
      case LibFunc_sinhl:
      case LibFunc_coshl:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-710.0) || Op > APFloat(710.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-89.0f) || Op > APFloat(89.0f));
        break;

      case LibFunc_sqrtl:
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
        return Op.isNaN() || Op.isZero() || !Op.isNegative();

      // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
      // maybe others?
      default:
        break;
      }
    }
  }

  if (Call->getNumArgOperands() == 2) {
    ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
    ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
    if (Op0C && Op1C) {
      const APFloat &Op0 = Op0C->getValueAPF();
      const APFloat &Op1 = Op1C->getValueAPF();

      switch (Func) {
      case LibFunc_powl:
      case LibFunc_pow:
      case LibFunc_powf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = Op0C->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          if (Ty == Op1C->getType()) {
            double Op0V = getValueAsDouble(Op0C);
            double Op1V = getValueAsDouble(Op1C);
            return ConstantFoldBinaryFP(pow, Op0V, Op1V, Ty) != nullptr;
          }
        }
        break;
      }

      case LibFunc_fmodl:
      case LibFunc_fmod:
      case LibFunc_fmodf:
      case LibFunc_remainderl:
      case LibFunc_remainder:
      case LibFunc_remainderf:
        return Op0.isNaN() || Op1.isNaN() ||
               (!Op0.isInfinity() && !Op1.isZero());

      default:
        break;
      }
    }
  }

  return false;
}

void TargetFolder::anchor() {}