//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cerrno>
#include <cfenv>
#include <cmath>
#include <cstddef>
#include <cstdint>

using namespace llvm;

namespace {

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
                                        Constant *C, Type *SrcEltTy,
                                        unsigned NumSrcElts,
                                        const DataLayout &DL) {
  // Now that we know that the input value is a vector of integers, just shift
  // and insert them into our result.
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (Element && isa<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
  }

  return nullptr;
}

/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
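// For example (illustrative), on a little-endian target
//   bitcast (<2 x i32> <i32 1, i32 2> to i64)
// folds to i64 0x0000000200000001 (element 0 occupies the low bits).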
Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) &&
         "Invalid constantexpr bitcast!");

  // Catch the obvious splat cases.
  if (C->isNullValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy())
    return Constant::getNullValue(DestTy);
  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy() &&
      !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
    return Constant::getAllOnesValue(DestTy);

  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
    // Handle a vector->scalar integer/fp cast.
    if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
      unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements();
      Type *SrcEltTy = VTy->getElementType();

      // If the vector is a vector of floating point, convert it to vector of int
      // to simplify things.
      if (SrcEltTy->isFloatingPointTy()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        auto *SrcIVTy = FixedVectorType::get(
            IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
        // Ask IR to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
      }

      APInt Result(DL.getTypeSizeInBits(DestTy), 0);
      if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                                SrcEltTy, NumSrcElts, DL))
        return CE;

      if (isa<IntegerType>(DestTy))
        return ConstantInt::get(DestTy, Result);

      APFloat FP(DestTy->getFltSemantics(), Result);
      return ConstantFP::get(DestTy->getContext(), FP);
    }
  }

  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements();
  unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing.  For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first: we only want to think about integers here, so if
  // we have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    auto *DestIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer; if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    auto *SrcIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same.  Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(
              cast<VectorType>(C->getType())->getElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        if (!Src)  // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);

    if (!Element) // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    if (!Src)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(Src,
                  ConstantInt::get(Src->getType(), ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate the element to an integer with the same pointer size and
      // convert the element back to a pointer using an inttoptr.
      if (DstEltTy->isPointerTy()) {
        IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
        Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
        Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
        continue;
      }

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}

} // end anonymous namespace

/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
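// For example (illustrative),
//   i8* getelementptr (i8, i8* @g, i64 4)
// yields GV = @g and Offset = 4.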
bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                      APInt &Offset, const DataLayout &DL,
                                      DSOLocalEquivalent **DSOEquiv) {
  if (DSOEquiv)
    *DSOEquiv = nullptr;

  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) {
    if (DSOEquiv)
      *DSOEquiv = FoundDSOEquiv;
    GV = FoundDSOEquiv->getGlobalValue();
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL,
                                      DSOEquiv);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL,
                                  DSOEquiv))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}

Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                               const DataLayout &DL) {
  do {
    Type *SrcTy = C->getType();
    uint64_t DestSize = DL.getTypeSizeInBits(DestTy);
    uint64_t SrcSize = DL.getTypeSizeInBits(SrcTy);
    if (SrcSize < DestSize)
      return nullptr;

    // Catch the obvious splat cases (since all-zeros can coerce non-integral
    // pointers legally).
    if (C->isNullValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy())
      return Constant::getNullValue(DestTy);
    if (C->isAllOnesValue() &&
        (DestTy->isIntegerTy() || DestTy->isFloatingPointTy() ||
         DestTy->isVectorTy()) &&
        !DestTy->isX86_AMXTy() && !DestTy->isX86_MMXTy() &&
        !DestTy->isPtrOrPtrVectorTy())
      // Get ones when the input is trivial, but
      // only for supported types inside getAllOnesValue.
      return Constant::getAllOnesValue(DestTy);

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    // But be careful not to coerce non-integral pointers illegally.
    if (SrcSize == DestSize &&
        DL.isNonIntegralPointerType(SrcTy->getScalarType()) ==
            DL.isNonIntegralPointerType(DestTy->getScalarType())) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantExpr::getCast(Cast, C, DestTy);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill down
    // and find a bitcastable constant.
    if (!SrcTy->isAggregateType() && !SrcTy->isVectorTy())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    if (SrcTy->isStructTy()) {
      // Struct types might have leading zero-length elements like [0 x i32],
      // which are certainly not what we are looking for, so skip them.
      unsigned Elem = 0;
      Constant *ElemC;
      do {
        ElemC = C->getAggregateElement(Elem++);
      } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
      C = ElemC;
    } else {
      C = C->getAggregateElement(0u);
    }
  } while (C);

  return nullptr;
}

namespace {

/// Recursive helper to read bits out of global. C is the constant being copied
/// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
/// results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. DL is the DataLayout.
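// For example (illustrative), reading a ConstantInt i32 1 at ByteOffset 0 on
// a little-endian target fills CurPtr with the bytes {0x01, 0x00, 0x00, 0x00}.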
bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
                        unsigned BytesLeft, const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      int n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail padding,
      // read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element; if so, we're done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    uint64_t NumElts;
    Type *EltTy;
    if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
      NumElts = AT->getNumElements();
      EltTy = AT->getElementType();
    } else {
      NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
      EltTy = cast<FixedVectorType>(C->getType())->getElementType();
    }
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}
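// For example (illustrative), given a little-endian DataLayout and
//   @g = constant [2 x i8] [i8 1, i8 2]
// an i16 load from @g folds to i16 0x0201.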
Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
                                          const DataLayout &DL) {
  // Bail out early. We don't expect to load from a scalable global variable.
  if (isa<ScalableVectorType>(LoadTy))
    return nullptr;

  auto *PTy = cast<PointerType>(C->getType());
  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    unsigned AS = PTy->getAddressSpace();

    // If this is a float/double load, we can try folding it as an int32/64 load
    // and then bitcast the result. This can be useful for union cases. Note
    // that address spaces don't matter here since we're not going to result in
    // an actual new load.
    Type *MapTy;
    if (LoadTy->isHalfTy())
      MapTy = Type::getInt16Ty(C->getContext());
    else if (LoadTy->isFloatTy())
      MapTy = Type::getInt32Ty(C->getContext());
    else if (LoadTy->isDoubleTy())
      MapTy = Type::getInt64Ty(C->getContext());
    else if (LoadTy->isVectorTy()) {
      MapTy = PointerType::getIntNTy(
          C->getContext(), DL.getTypeSizeInBits(LoadTy).getFixedSize());
    } else
      return nullptr;

    C = FoldBitCast(C, MapTy->getPointerTo(AS), DL);
    if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, MapTy, DL)) {
      if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
          !LoadTy->isX86_AMXTy())
        // Materializing a zero can be done trivially without a bitcast.
        return Constant::getNullValue(LoadTy);
      Type *CastTy =
          LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy;
      Res = FoldBitCast(Res, CastTy, DL);
      if (LoadTy->isPtrOrPtrVectorTy()) {
        // For a vector of pointers, we need to first convert to a vector of
        // integers, then do a vector inttoptr.
        if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
            !LoadTy->isX86_AMXTy())
          return Constant::getNullValue(LoadTy);
        if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
          // Be careful not to replace a load of an addrspace value with an
          // inttoptr here.
          return nullptr;
        Res = ConstantExpr::getCast(Instruction::IntToPtr, Res, LoadTy);
      }
      return Res;
    }
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  GlobalValue *GVal;
  APInt OffsetAI;
  if (!IsConstantOffsetFromGlobal(C, GVal, OffsetAI, DL))
    return nullptr;

  auto *GV = dyn_cast<GlobalVariable>(GVal);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      !GV->getInitializer()->getType()->isSized())
    return nullptr;

  int64_t Offset = OffsetAI.getSExtValue();
  int64_t InitializerSize =
      DL.getTypeAllocSize(GV->getInitializer()->getType()).getFixedSize();

  // If the load ends before the start of the constant, we're not accessing
  // anything in it and the result is undefined.
  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
    return UndefValue::get(IntType);

  // If the load starts past the end of the constant, the result is likewise
  // undefined.
  if (Offset >= InitializerSize)
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;

  // If we're loading off the beginning of the global, some bytes may be valid.
  if (Offset < 0) {
    CurPtr += -Offset;
    BytesLeft += Offset;
    Offset = 0;
  }

  if (!ReadDataFromGlobal(GV->getInitializer(), Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}

Constant *ConstantFoldLoadThroughBitcastExpr(ConstantExpr *CE, Type *DestTy,
                                             const DataLayout &DL) {
  auto *SrcPtr = CE->getOperand(0);
  if (!SrcPtr->getType()->isPointerTy())
    return nullptr;

  return ConstantFoldLoadFromConstPtr(SrcPtr, DestTy, DL);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             const DataLayout &DL) {
  // First, try the easy cases:
  if (auto *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      return ConstantFoldLoadThroughBitcast(GV->getInitializer(), Ty, DL);

  if (auto *GA = dyn_cast<GlobalAlias>(C))
    if (GA->getAliasee() && !GA->isInterposable())
      return ConstantFoldLoadFromConstPtr(GA->getAliasee(), Ty, DL);

  // If the loaded value isn't a constant expr, we can't handle it.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
      if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
        if (Constant *V = ConstantFoldLoadThroughGEPConstantExpr(
                GV->getInitializer(), CE, Ty, DL))
          return V;
      }
    }
  }

  if (CE->getOpcode() == Instruction::BitCast)
    if (Constant *LoadedC = ConstantFoldLoadThroughBitcastExpr(CE, Ty, DL))
      return LoadedC;

  // Instead of loading a constant C string, use the corresponding integer
  // value directly if the string length is small enough.
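  // For example (illustrative), loading an i32 from a little-endian constant
  // string "abc" (with its implicit NUL terminator) folds to i32 0x00636261.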
  StringRef Str;
  if (getConstantStringInfo(CE, Str) && !Str.empty()) {
    size_t StrLen = Str.size();
    unsigned NumBits = Ty->getPrimitiveSizeInBits();
    // Replace load with immediate integer if the result is an integer or fp
    // value.
    if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
        (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
      APInt StrVal(NumBits, 0);
      APInt SingleChar(NumBits, 0);
      if (DL.isLittleEndian()) {
        for (unsigned char C : reverse(Str.bytes())) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
      } else {
        for (unsigned char C : Str.bytes()) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
        // Append the terminating NUL at the end.
        SingleChar = 0;
        StrVal = (StrVal << 8) | SingleChar;
      }

      Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
      if (Ty->isFloatingPointTy())
        Res = ConstantExpr::getBitCast(Res, Ty);
      return Res;
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(CE))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      if (GV->getInitializer()->isNullValue())
        return Constant::getNullValue(Ty);
      if (isa<UndefValue>(GV->getInitializer()))
        return UndefValue::get(Ty);
    }
  }

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  return FoldReinterpretLoadFromConstPtr(CE, Ty, DL);
}

namespace {

/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together. If target data info is available, it is provided as DL,
/// otherwise DL is null.
Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
                                    const DataLayout &DL) {
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  if (Opc == Instruction::And) {
    KnownBits Known0 = computeKnownBits(Op0, DL);
    KnownBits Known1 = computeKnownBits(Op1, DL);
    if ((Known1.One | Known0.Zero).isAllOnesValue()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnesValue()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    Known0 &= Known1;
    if (Known0.isConstant())
      return ConstantInt::get(Op0->getType(), Known0.getConstant());
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth, so we have to convert to the right
        // size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}

/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly casted by the getelementptr.
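// For example (illustrative), with a 64-bit index type,
//   getelementptr (i32, i32* @g, i16 1)
// gets its index sign-extended to i64 before the fold proceeds.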
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, Optional<unsigned> InRangeIndex,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntIdxTy = DL.getIndexType(ResultTy);
  Type *IntIdxScalarTy = IntIdxTy->getScalarType();

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
      Any = true;
      Type *NewType = Ops[i]->getType()->isVectorTy()
                          ? IntIdxTy
                          : IntIdxScalarTy;
      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                      true,
                                                                      NewType,
                                                                      true),
                                              Ops[i], NewType));
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(
      SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
  return ConstantFoldConstant(C, DL, TLI);
}

/// Strip the pointer casts, but preserve the address space information.
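// For example (illustrative),
//   bitcast (i8 addrspace(1)* @g to i32 addrspace(1)*)
// strips back to @g; ElemTy is set to the stripped pointer's element type,
// and a pointer cast is re-applied if stripping changed the address space.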
Constant *StripPtrCastKeepAS(Constant *Ptr, Type *&ElemTy) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  auto *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = cast<Constant>(Ptr->stripPointerCasts());
  auto *NewPtrTy = cast<PointerType>(Ptr->getType());

  ElemTy = NewPtrTy->getPointerElementType();

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    NewPtrTy = ElemTy->getPointerTo(OldPtrTy->getAddressSpace());
    Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
  }
  return Ptr;
}

/// If we can symbolically evaluate the GEP constant expression, do so.
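// For example (illustrative), nested byte GEPs such as
//   getelementptr (i8, i8* getelementptr (i8, i8* @g, i64 4), i64 2)
// fold to a single getelementptr (i8, i8* @g, i64 6).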
Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
                                  ArrayRef<Constant *> Ops,
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  const GEPOperator *InnermostGEP = GEP;
  bool InBounds = GEP->isInBounds();

  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResElemTy = GEP->getResultElementType();
  Type *ResTy = GEP->getType();
  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->getInRangeIndex(), DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntIdxTy = DL.getIndexType(Ptr->getType());

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'.
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i])) {

      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
      // "inttoptr (sub (ptrtoint Ptr), V)"
      if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
        auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
        assert((!CE || CE->getType() == IntIdxTy) &&
               "CastGEPIndices didn't canonicalize index types!");
        if (CE && CE->getOpcode() == Instruction::Sub &&
            CE->getOperand(0)->isNullValue()) {
          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
          Res = ConstantExpr::getIntToPtr(Res, ResTy);
          return ConstantFoldConstant(Res, DL, TLI);
        }
      }
      return nullptr;
    }

  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
  APInt Offset =
      APInt(BitWidth,
            DL.getIndexedOffsetInType(
                SrcElemTy,
                makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    InnermostGEP = GEP;
    InBounds &= GEP->isInBounds();

    SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
  Type *Ty = PTy;
  SmallVector<Constant *, 32> NewIdxs;

  do {
    if (!Ty->isStructTy()) {
      if (Ty->isPointerTy()) {
        // The only pointer indexing we'll do is on the first index of the GEP.
        if (!NewIdxs.empty())
          break;

        Ty = SrcElemTy;

        // Only handle pointers to sized types, not pointers to functions.
        if (!Ty->isSized())
          return nullptr;
      } else {
        Type *NextTy = GetElementPtrInst::getTypeAtIndex(Ty, (uint64_t)0);
        if (!NextTy)
          break;
        Ty = NextTy;
      }

      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, DL.getTypeAllocSize(Ty));
      if (ElemSize == 0) {
        // The element size is 0. This may be [0 x Ty]*, so just use a zero
        // index for this level and proceed to the next level to see if it can
        // accommodate the offset.
        NewIdxs.push_back(ConstantInt::get(IntIdxTy, 0));
      } else {
        // The element size is non-zero; divide the offset by the element
        // size (rounding down) to compute the index at this level.
        bool Overflow;
        APInt NewIdx = Offset.sdiv_ov(ElemSize, Overflow);
        if (Overflow)
          break;
        Offset -= NewIdx * ElemSize;
        NewIdxs.push_back(ConstantInt::get(IntIdxTy, NewIdx));
      }
    } else {
      auto *STy = cast<StructType>(Ty);
      // If we end up with an offset that isn't valid for this struct type, we
      // can't re-form this GEP in a regular form, so bail out. The pointer
      // operand likely went through casts that are necessary to make the GEP
      // sensible.
      const StructLayout &SL = *DL.getStructLayout(STy);
      if (Offset.isNegative() || Offset.uge(SL.getSizeInBytes()))
        break;

      // Determine which field of the struct the offset points into. The
      // getZExtValue is fine as we've already ensured that the offset is
      // within the range representable by the StructLayout API.
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                         ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    }
  } while (Ty != ResElemTy);

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return nullptr;

  // Preserve the inrange index from the innermost GEP if possible. We must
  // have calculated the same indices up to and including the inrange index.
  Optional<unsigned> InRangeIndex;
  if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
    if (SrcElemTy == InnermostGEP->getSourceElementType() &&
        NewIdxs.size() > *LastIRIndex) {
      InRangeIndex = LastIRIndex;
      for (unsigned I = 0; I <= *LastIRIndex; ++I)
        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
          return nullptr;
    }

  // Create a GEP.
  Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
                                               InBounds, InRangeIndex);
  assert(C->getType()->getPointerElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (Ty != ResElemTy)
    C = FoldBitCast(C, ResTy, DL);

  return C;
}

/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned, if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI) {
  Type *DestTy = InstOrCE->getType();

  if (Instruction::isUnaryOp(Opcode))
    return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL);

  if (Instruction::isBinaryOp(Opcode))
    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);

  if (Instruction::isCast(Opcode))
    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
                                          Ops.slice(1), GEP->isInBounds(),
                                          GEP->getInRangeIndex());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    return CE->getWithOperands(Ops);

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
  case Instruction::Freeze:
    return isGuaranteedNotToBeUndefOrPoison(Ops[0]) ? Ops[0] : nullptr;
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      if (canConstantFoldCallTo(Call, F))
        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::ExtractValue:
    return ConstantExpr::getExtractValue(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(
        Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
  }
}

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//
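// Typical usage (illustrative): given an Instruction *I whose operands are
// all Constants,
//   if (Constant *C = ConstantFoldInstruction(I, DL, &TLI)) {
//     I->replaceAllUsesWith(C);
//     I->eraseFromParent();
//   }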

namespace {

Constant *
ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
                         const TargetLibraryInfo *TLI,
                         SmallDenseMap<Constant *, Constant *> &FoldedOps) {
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return const_cast<Constant *>(C);

  SmallVector<Constant *, 8> Ops;
  for (const Use &OldU : C->operands()) {
    Constant *OldC = cast<Constant>(&OldU);
    Constant *NewC = OldC;
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
      auto It = FoldedOps.find(OldC);
      if (It == FoldedOps.end()) {
        NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
        FoldedOps.insert({OldC, NewC});
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->isCompare())
      return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
                                             DL, TLI);

    return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself, we choose not to
      // because that would break the rule that constant folding only applies
      // if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
      // If the incoming value is a different constant to
      // the one we saw previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants; if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
    Ops.push_back(Op);
  }

  if (const auto *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI);

  if (const auto *LI = dyn_cast<LoadInst>(I)) {
    if (LI->isVolatile())
      return nullptr;
    return ConstantFoldLoadFromConstPtr(Ops[0], LI->getType(), DL);
  }

  if (auto *IVI = dyn_cast<InsertValueInst>(I))
    return ConstantExpr::getInsertValue(Ops[0], Ops[1], IVI->getIndices());

  if (auto *EVI = dyn_cast<ExtractValueInst>(I))
    return ConstantExpr::getExtractValue(Ops[0], EVI->getIndices());

  return ConstantFoldInstOperands(I, Ops, DL, TLI);
}

Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}

Constant *llvm::ConstantFoldInstOperands(Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
}

Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *Ops0, Constant *Ops1,
                                                const DataLayout &DL,
                                                const TargetLibraryInfo *TLI) {
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp null, (inttoptr x)         -> icmp 0, x
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp 0, (ptrtoint x)            -> icmp null, x
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // FIXME: The following comment is out of date and the DataLayout is here now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
      }

      // Only do this transformation if the int is intptrty in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // Only do this transformation if the int is intptrty in size, otherwise
        // there is a truncation or extension that we aren't modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(0), Ops1, DL, TLI);
      Constant *RHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(1), Ops1, DL, TLI);
      unsigned OpC =
          Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
    }
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression, but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}

Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
                                           const DataLayout &DL) {
  assert(Instruction::isUnaryOp(Opcode));

  return ConstantExpr::get(Opcode, Op);
}

Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                             Constant *RHS,
                                             const DataLayout &DL) {
  assert(Instruction::isBinaryOp(Opcode));
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;

  return ConstantExpr::get(Opcode, LHS, RHS);
}

Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
                                        Type *DestTy, const DataLayout &DL) {
  assert(Instruction::isCast(Opcode));
  switch (Opcode) {
  default:
    llvm_unreachable("Missing case");
  case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair. This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::IntToPtr) {
        Constant *Input = CE->getOperand(0);
        unsigned InWidth = Input->getType()->getScalarSizeInBits();
        unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
        if (PtrWidth < InWidth) {
          Constant *Mask =
              ConstantInt::get(CE->getContext(),
                               APInt::getLowBitsSet(InWidth, PtrWidth));
          Input = ConstantExpr::getAnd(Input, Mask);
        }
        // Do a zext or trunc to get to the dest size.
        return ConstantExpr::getIntegerCast(Input, DestTy, false);
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size and the address spaces are the same.
    // This requires knowing the width of a pointer, so it can't be done in
    // ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }

    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::BitCast:
    return FoldBitCast(C, DestTy, DL);
  }
}

Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE,
                                                       Type *Ty,
                                                       const DataLayout &DL) {
  if (!CE->getOperand(1)->isNullValue())
    return nullptr;  // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
    C = C->getAggregateElement(CE->getOperand(i));
    if (!C)
      return nullptr;
  }
  return ConstantFoldLoadThroughBitcast(C, Ty, DL);
}

Constant *
llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
                                        ArrayRef<Constant *> Indices) {
  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (Constant *Index : Indices) {
    C = C->getAggregateElement(Index);
    if (!C)
      return nullptr;
  }
  return C;
}

//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//

bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
  if (Call->isNoBuiltin())
    return false;
  switch (F->getIntrinsicID()) {
  // Operations that do not operate on floating-point numbers and do not
  // depend on the FP environment can be folded even in strictfp functions.
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::masked_load:
  case Intrinsic::get_active_lane_mask:
  case Intrinsic::abs:
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::bitreverse:
  case Intrinsic::is_constant:
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax:
  // Target intrinsics
  case Intrinsic::amdgcn_perm:
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64:
  case Intrinsic::aarch64_sve_convert_from_svbool:
  // WebAssembly float semantics are always known
  case Intrinsic::wasm_trunc_signed:
  case Intrinsic::wasm_trunc_unsigned:
    return true;
1483
  // Floating-point operations cannot be folded in strictfp functions in the
  // general case. They can be folded if the FP environment is known to the
  // compiler.
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::sqrt:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::fptoui_sat:
  case Intrinsic::fptosi_sat:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::amdgcn_cos:
  case Intrinsic::amdgcn_cubeid:
  case Intrinsic::amdgcn_cubema:
  case Intrinsic::amdgcn_cubesc:
  case Intrinsic::amdgcn_cubetc:
  case Intrinsic::amdgcn_fmul_legacy:
  case Intrinsic::amdgcn_fma_legacy:
  case Intrinsic::amdgcn_fract:
  case Intrinsic::amdgcn_ldexp:
  case Intrinsic::amdgcn_sin:
  // The intrinsics below depend on the rounding mode in MXCSR.
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2si32:
  case Intrinsic::x86_avx512_vcvtss2si64:
  case Intrinsic::x86_avx512_cvttss2si:
  case Intrinsic::x86_avx512_cvttss2si64:
  case Intrinsic::x86_avx512_vcvtsd2si32:
  case Intrinsic::x86_avx512_vcvtsd2si64:
  case Intrinsic::x86_avx512_cvttsd2si:
  case Intrinsic::x86_avx512_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2usi32:
  case Intrinsic::x86_avx512_vcvtss2usi64:
  case Intrinsic::x86_avx512_cvttss2usi:
  case Intrinsic::x86_avx512_cvttss2usi64:
  case Intrinsic::x86_avx512_vcvtsd2usi32:
  case Intrinsic::x86_avx512_vcvtsd2usi64:
  case Intrinsic::x86_avx512_cvttsd2usi:
  case Intrinsic::x86_avx512_cvttsd2usi64:
    return !Call->isStrictFP();

  // Sign operations are actually bitwise operations; they do not raise
  // exceptions even for SNaNs.
  case Intrinsic::fabs:
  case Intrinsic::copysign:
  // Non-constrained variants of rounding operations imply the default FP
  // environment, so they can be folded in any case.
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::trunc:
  case Intrinsic::nearbyint:
  case Intrinsic::rint:
  // Constrained intrinsics can be folded if the FP environment is known to
  // the compiler.
  case Intrinsic::experimental_constrained_ceil:
  case Intrinsic::experimental_constrained_floor:
  case Intrinsic::experimental_constrained_round:
  case Intrinsic::experimental_constrained_roundeven:
  case Intrinsic::experimental_constrained_trunc:
  case Intrinsic::experimental_constrained_nearbyint:
  case Intrinsic::experimental_constrained_rint:
    return true;
  default:
    return false;
  case Intrinsic::not_intrinsic: break;
  }

  if (!F->hasName() || Call->isStrictFP())
    return false;

  // In these cases, checking the length is required. We don't want to return
  // true for a name like "cos\0blah", which strcmp would consider equal to
  // "cos" even though it has length 8.
  StringRef Name = F->getName();
  switch (Name[0]) {
  default:
    return false;
  case 'a':
    return Name == "acos" || Name == "acosf" ||
           Name == "asin" || Name == "asinf" ||
           Name == "atan" || Name == "atanf" ||
           Name == "atan2" || Name == "atan2f";
  case 'c':
    return Name == "ceil" || Name == "ceilf" ||
           Name == "cos" || Name == "cosf" ||
           Name == "cosh" || Name == "coshf";
  case 'e':
    return Name == "exp" || Name == "expf" ||
           Name == "exp2" || Name == "exp2f";
  case 'f':
    return Name == "fabs" || Name == "fabsf" ||
           Name == "floor" || Name == "floorf" ||
           Name == "fmod" || Name == "fmodf";
  case 'l':
    return Name == "log" || Name == "logf" ||
           Name == "log2" || Name == "log2f" ||
           Name == "log10" || Name == "log10f";
  case 'n':
    return Name == "nearbyint" || Name == "nearbyintf";
  case 'p':
    return Name == "pow" || Name == "powf";
  case 'r':
    return Name == "remainder" || Name == "remainderf" ||
           Name == "rint" || Name == "rintf" ||
           Name == "round" || Name == "roundf";
  case 's':
    return Name == "sin" || Name == "sinf" ||
           Name == "sinh" || Name == "sinhf" ||
           Name == "sqrt" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanf" ||
           Name == "tanh" || Name == "tanhf" ||
           Name == "trunc" || Name == "truncf";
  case '_':
    // Check for various function names that get used for the math functions
    // when the header files are preprocessed with the macro
    // __FINITE_MATH_ONLY__ enabled.
    // The '12' here is the length of the shortest name that can match.
    // We need to check the size before looking at Name[1] and Name[2]
    // so we may as well check a limit that will eliminate mismatches.
    if (Name.size() < 12 || Name[1] != '_')
      return false;
    switch (Name[2]) {
    default:
      return false;
    case 'a':
      return Name == "__acos_finite" || Name == "__acosf_finite" ||
             Name == "__asin_finite" || Name == "__asinf_finite" ||
             Name == "__atan2_finite" || Name == "__atan2f_finite";
    case 'c':
      return Name == "__cosh_finite" || Name == "__coshf_finite";
    case 'e':
      return Name == "__exp_finite" || Name == "__expf_finite" ||
             Name == "__exp2_finite" || Name == "__exp2f_finite";
    case 'l':
      return Name == "__log_finite" || Name == "__logf_finite" ||
             Name == "__log10_finite" || Name == "__log10f_finite";
    case 'p':
      return Name == "__pow_finite" || Name == "__powf_finite";
    case 's':
      return Name == "__sinh_finite" || Name == "__sinhf_finite";
    }
  }
}

namespace {

Constant *GetConstantFoldFPValue(double V, Type *Ty) {
  if (Ty->isHalfTy() || Ty->isFloatTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}

/// Clear the floating-point exception state.
inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
#endif
  errno = 0;
}

/// Test if a floating-point exception was raised.
inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)
    return true;
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    return true;
#endif
  return false;
}

Constant *ConstantFoldFP(double (*NativeFP)(double), const APFloat &V,
                         Type *Ty) {
  llvm_fenv_clearexcept();
  double Result = NativeFP(V.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(Result, Ty);
}

Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
                               const APFloat &V, const APFloat &W, Type *Ty) {
  llvm_fenv_clearexcept();
  double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(Result, Ty);
}

Constant *constantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
  FixedVectorType *VT = dyn_cast<FixedVectorType>(Op->getType());
  if (!VT)
    return nullptr;

  // This isn't strictly necessary, but handle the special/common case of
  // zero: all integer reductions of a zero input produce zero.
  if (isa<ConstantAggregateZero>(Op))
    return ConstantInt::get(VT->getElementType(), 0);

  // This is the same as the underlying binops - poison propagates.
  if (isa<PoisonValue>(Op))
    return PoisonValue::get(VT->getElementType());

  // TODO: Handle undef.
  if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op))
    return nullptr;

  auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
  if (!EltC)
    return nullptr;

  APInt Acc = EltC->getValue();
  for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) {
    if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
      return nullptr;
    const APInt &X = EltC->getValue();
    switch (IID) {
    case Intrinsic::vector_reduce_add:
      Acc = Acc + X;
      break;
    case Intrinsic::vector_reduce_mul:
      Acc = Acc * X;
      break;
    case Intrinsic::vector_reduce_and:
      Acc = Acc & X;
      break;
    case Intrinsic::vector_reduce_or:
      Acc = Acc | X;
      break;
    case Intrinsic::vector_reduce_xor:
      Acc = Acc ^ X;
      break;
    case Intrinsic::vector_reduce_smin:
      Acc = APIntOps::smin(Acc, X);
      break;
    case Intrinsic::vector_reduce_smax:
      Acc = APIntOps::smax(Acc, X);
      break;
    case Intrinsic::vector_reduce_umin:
      Acc = APIntOps::umin(Acc, X);
      break;
    case Intrinsic::vector_reduce_umax:
      Acc = APIntOps::umax(Acc, X);
      break;
    }
  }

  return ConstantInt::get(Op->getContext(), Acc);
}

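// For illustration only (hypothetical operand values): vector_reduce_add over
// <4 x i32> <i32 1, i32 2, i32 3, i32 4> accumulates 1 + 2 + 3 + 4 through
// the loop above and folds to i32 10, while vector_reduce_umax over the same
// vector folds to i32 4.
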
/// Attempt to fold an SSE floating-point to integer conversion of a constant
/// floating-point value. If roundTowardZero is false, the default IEEE
/// rounding is used (toward nearest, ties to even). This matches the behavior
/// of the non-truncating SSE instructions in the default rounding mode. The
/// desired integer type Ty is used to select how many bits are available for
/// the result. Returns null if the conversion cannot be performed, otherwise
/// returns the Constant value resulting from the conversion.
Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
                                      Type *Ty, bool IsSigned) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = Ty->getIntegerBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
                                               : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status =
      Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth,
                           IsSigned, mode, &isExact);
  if (status != APFloat::opOK &&
      (!roundTowardZero || status != APFloat::opInexact))
    return nullptr;
  return ConstantInt::get(Ty, UIntVal, IsSigned);
}

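// For illustration only (hypothetical values): with roundTowardZero == false,
// converting 2.5 uses round-to-nearest-even and yields i32 2; with
// roundTowardZero == true, 2.9 truncates to i32 2 and the opInexact status is
// tolerated, matching the truncating cvtt* instruction family.
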
double getValueAsDouble(ConstantFP *Op) {
  Type *Ty = Op->getType();

  if (Ty->isBFloatTy() || Ty->isHalfTy() || Ty->isFloatTy() ||
      Ty->isDoubleTy())
    return Op->getValueAPF().convertToDouble();

  bool unused;
  APFloat APF = Op->getValueAPF();
  APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
  return APF.convertToDouble();
}

static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
    C = &CI->getValue();
    return true;
  }
  if (isa<UndefValue>(Op)) {
    C = nullptr;
    return true;
  }
  return false;
}

static Constant *ConstantFoldScalarCall1(StringRef Name,
                                         Intrinsic::ID IntrinsicID,
                                         Type *Ty,
                                         ArrayRef<Constant *> Operands,
                                         const TargetLibraryInfo *TLI,
                                         const CallBase *Call) {
  assert(Operands.size() == 1 && "Wrong number of operands.");

  if (IntrinsicID == Intrinsic::is_constant) {
    // We know we have a "Constant" argument. But we want to only
    // return true for manifest constants, not those that depend on
    // constants with unknowable values, e.g. GlobalValue or BlockAddress.
    if (Operands[0]->isManifestConstant())
      return ConstantInt::getTrue(Ty->getContext());
    return nullptr;
  }
  if (isa<UndefValue>(Operands[0])) {
    // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
    // ctpop() is between 0 and bitwidth, pick 0 for undef.
    // fptoui.sat and fptosi.sat can always fold to zero (for a zero input).
    if (IntrinsicID == Intrinsic::cos ||
        IntrinsicID == Intrinsic::ctpop ||
        IntrinsicID == Intrinsic::fptoui_sat ||
        IntrinsicID == Intrinsic::fptosi_sat)
      return Constant::getNullValue(Ty);
    if (IntrinsicID == Intrinsic::bswap ||
        IntrinsicID == Intrinsic::bitreverse ||
        IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group)
      return Operands[0];
  }

  if (isa<ConstantPointerNull>(Operands[0])) {
    // launder(null) == null == strip(null) iff in addrspace 0
    if (IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group) {
      // If the instruction is not yet placed in a basic block (e.g. when
      // cloning a function during inlining), Call's caller may not be
      // available. So check Call's BB first before querying Call->getCaller.
      const Function *Caller =
          Call->getParent() ? Call->getCaller() : nullptr;
      if (Caller &&
          !NullPointerIsDefined(
              Caller, Operands[0]->getType()->getPointerAddressSpace())) {
        return Operands[0];
      }
      return nullptr;
    }
  }

  if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
    if (IntrinsicID == Intrinsic::convert_to_fp16) {
      APFloat Val(Op->getValueAPF());

      bool lost = false;
      Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);

      return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
    }

    APFloat U = Op->getValueAPF();

    if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
        IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
      bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;

      if (U.isNaN())
        return nullptr;

      unsigned Width = Ty->getIntegerBitWidth();
      APSInt Int(Width, !Signed);
      bool IsExact = false;
      APFloat::opStatus Status =
          U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);

      if (Status == APFloat::opOK || Status == APFloat::opInexact)
        return ConstantInt::get(Ty, Int);

      return nullptr;
    }

    if (IntrinsicID == Intrinsic::fptoui_sat ||
        IntrinsicID == Intrinsic::fptosi_sat) {
      // convertToInteger() already has the desired saturation semantics.
      APSInt Int(Ty->getIntegerBitWidth(),
                 IntrinsicID == Intrinsic::fptoui_sat);
      bool IsExact;
      U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
      return ConstantInt::get(Ty, Int);
    }

    if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
      return nullptr;

    // Use internal versions of these intrinsics.

    if (IntrinsicID == Intrinsic::nearbyint ||
        IntrinsicID == Intrinsic::rint) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::round) {
      U.roundToIntegral(APFloat::rmNearestTiesToAway);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::roundeven) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::ceil) {
      U.roundToIntegral(APFloat::rmTowardPositive);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::floor) {
      U.roundToIntegral(APFloat::rmTowardNegative);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::trunc) {
      U.roundToIntegral(APFloat::rmTowardZero);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::fabs) {
      U.clearSign();
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::amdgcn_fract) {
      // The v_fract instruction behaves like the OpenCL spec, which defines
      // fract(x) as fmin(x - floor(x), 0x1.fffffep-1f): "The min() operator
      // is there to prevent fract(-small) from returning 1.0. It returns the
      // largest positive floating-point number less than 1.0."
      APFloat FloorU(U);
      FloorU.roundToIntegral(APFloat::rmTowardNegative);
      APFloat FractU(U - FloorU);
      APFloat AlmostOne(U.getSemantics(), 1);
      AlmostOne.next(/*nextDown*/ true);
      return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne));
    }

    // Rounding operations (floor, trunc, ceil, round and nearbyint) do not
    // raise FP exceptions, unless the argument is a signaling NaN.

    Optional<APFloat::roundingMode> RM;
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::experimental_constrained_nearbyint:
    case Intrinsic::experimental_constrained_rint: {
      auto CI = cast<ConstrainedFPIntrinsic>(Call);
      RM = CI->getRoundingMode();
      if (!RM || RM.getValue() == RoundingMode::Dynamic)
        return nullptr;
      break;
    }
    case Intrinsic::experimental_constrained_round:
      RM = APFloat::rmNearestTiesToAway;
      break;
    case Intrinsic::experimental_constrained_ceil:
      RM = APFloat::rmTowardPositive;
      break;
    case Intrinsic::experimental_constrained_floor:
      RM = APFloat::rmTowardNegative;
      break;
    case Intrinsic::experimental_constrained_trunc:
      RM = APFloat::rmTowardZero;
      break;
    }
    if (RM) {
      auto CI = cast<ConstrainedFPIntrinsic>(Call);
      if (U.isFinite()) {
        APFloat::opStatus St = U.roundToIntegral(*RM);
        if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
            St == APFloat::opInexact) {
          Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
          if (EB && *EB == fp::ebStrict)
            return nullptr;
        }
      } else if (U.isSignaling()) {
        Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
        if (EB && *EB != fp::ebIgnore)
          return nullptr;
        U = APFloat::getQNaN(U.getSemantics());
      }
      return ConstantFP::get(Ty->getContext(), U);
    }

    // We only fold functions with finite arguments. Folding NaN and inf is
    // likely to be aborted with an exception anyway, and some host libms
    // have known errors raising exceptions.
    if (!U.isFinite())
      return nullptr;

    // Currently APFloat versions of these functions do not exist, so we use
    // the host native double versions. Float versions are not called
    // directly but for all these it is true (float)(f((double)arg)) ==
    // f(arg). Long double is not supported yet.
    APFloat APF = Op->getValueAPF();

    switch (IntrinsicID) {
    default: break;
    case Intrinsic::log:
      return ConstantFoldFP(log, APF, Ty);
    case Intrinsic::log2:
      // TODO: What about hosts that lack a C99 library?
      return ConstantFoldFP(Log2, APF, Ty);
    case Intrinsic::log10:
      // TODO: What about hosts that lack a C99 library?
      return ConstantFoldFP(log10, APF, Ty);
    case Intrinsic::exp:
      return ConstantFoldFP(exp, APF, Ty);
    case Intrinsic::exp2:
      // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
      return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
    case Intrinsic::sin:
      return ConstantFoldFP(sin, APF, Ty);
    case Intrinsic::cos:
      return ConstantFoldFP(cos, APF, Ty);
    case Intrinsic::sqrt:
      return ConstantFoldFP(sqrt, APF, Ty);
    case Intrinsic::amdgcn_cos:
    case Intrinsic::amdgcn_sin: {
      double V = getValueAsDouble(Op);
      if (V < -256.0 || V > 256.0)
        // The gfx8 and gfx9 architectures handle arguments outside the range
        // [-256, 256] differently. This should be a rare case so bail out
        // rather than trying to handle the difference.
        return nullptr;
      bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
      double V4 = V * 4.0;
      if (V4 == floor(V4)) {
        // Force exact results for quarter-integer inputs.
        const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 };
        V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
      } else {
        if (IsCos)
          V = cos(V * 2.0 * numbers::pi);
        else
          V = sin(V * 2.0 * numbers::pi);
      }
      return GetConstantFoldFPValue(V, Ty);
    }
    }

    if (!TLI)
      return nullptr;

    LibFunc Func = NotLibFunc;
    TLI->getLibFunc(Name, Func);
    switch (Func) {
    default:
      break;
    case LibFunc_acos:
    case LibFunc_acosf:
    case LibFunc_acos_finite:
    case LibFunc_acosf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(acos, APF, Ty);
      break;
    case LibFunc_asin:
    case LibFunc_asinf:
    case LibFunc_asin_finite:
    case LibFunc_asinf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(asin, APF, Ty);
      break;
    case LibFunc_atan:
    case LibFunc_atanf:
      if (TLI->has(Func))
        return ConstantFoldFP(atan, APF, Ty);
      break;
    case LibFunc_ceil:
    case LibFunc_ceilf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardPositive);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_cos:
    case LibFunc_cosf:
      if (TLI->has(Func))
        return ConstantFoldFP(cos, APF, Ty);
      break;
    case LibFunc_cosh:
    case LibFunc_coshf:
    case LibFunc_cosh_finite:
    case LibFunc_coshf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(cosh, APF, Ty);
      break;
    case LibFunc_exp:
    case LibFunc_expf:
    case LibFunc_exp_finite:
    case LibFunc_expf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(exp, APF, Ty);
      break;
    case LibFunc_exp2:
    case LibFunc_exp2f:
    case LibFunc_exp2_finite:
    case LibFunc_exp2f_finite:
      if (TLI->has(Func))
        // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
        return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
      break;
    case LibFunc_fabs:
    case LibFunc_fabsf:
      if (TLI->has(Func)) {
        U.clearSign();
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_floor:
    case LibFunc_floorf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardNegative);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_log:
    case LibFunc_logf:
    case LibFunc_log_finite:
    case LibFunc_logf_finite:
      if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
        return ConstantFoldFP(log, APF, Ty);
      break;
    case LibFunc_log2:
    case LibFunc_log2f:
    case LibFunc_log2_finite:
    case LibFunc_log2f_finite:
      if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
        // TODO: What about hosts that lack a C99 library?
        return ConstantFoldFP(Log2, APF, Ty);
      break;
    case LibFunc_log10:
    case LibFunc_log10f:
    case LibFunc_log10_finite:
    case LibFunc_log10f_finite:
      if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
        // TODO: What about hosts that lack a C99 library?
        return ConstantFoldFP(log10, APF, Ty);
      break;
    case LibFunc_nearbyint:
    case LibFunc_nearbyintf:
    case LibFunc_rint:
    case LibFunc_rintf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_round:
    case LibFunc_roundf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToAway);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_sin:
    case LibFunc_sinf:
      if (TLI->has(Func))
        return ConstantFoldFP(sin, APF, Ty);
      break;
    case LibFunc_sinh:
    case LibFunc_sinhf:
    case LibFunc_sinh_finite:
    case LibFunc_sinhf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(sinh, APF, Ty);
      break;
    case LibFunc_sqrt:
    case LibFunc_sqrtf:
      if (!APF.isNegative() && TLI->has(Func))
        return ConstantFoldFP(sqrt, APF, Ty);
      break;
    case LibFunc_tan:
    case LibFunc_tanf:
      if (TLI->has(Func))
        return ConstantFoldFP(tan, APF, Ty);
      break;
    case LibFunc_tanh:
    case LibFunc_tanhf:
      if (TLI->has(Func))
        return ConstantFoldFP(tanh, APF, Ty);
      break;
    case LibFunc_trunc:
    case LibFunc_truncf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardZero);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    }
    return nullptr;
  }

  if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
    switch (IntrinsicID) {
    case Intrinsic::bswap:
      return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
    case Intrinsic::ctpop:
      return ConstantInt::get(Ty, Op->getValue().countPopulation());
    case Intrinsic::bitreverse:
      return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
    case Intrinsic::convert_from_fp16: {
      APFloat Val(APFloat::IEEEhalf(), Op->getValue());

      bool lost = false;
      APFloat::opStatus status = Val.convert(
          Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);

      // Conversion is always precise.
      (void)status;
      assert(status == APFloat::opOK && !lost &&
             "Precision lost during fp16 constfolding");

      return ConstantFP::get(Ty->getContext(), Val);
    }
    default:
      return nullptr;
    }
  }

  switch (IntrinsicID) {
  default: break;
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax:
    if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0]))
      return C;
    break;
  }

  // Support ConstantVector in case we have an Undef in the top.
  if (isa<ConstantVector>(Operands[0]) ||
      isa<ConstantDataVector>(Operands[0])) {
    auto *Op = cast<Constant>(Operands[0]);
    switch (IntrinsicID) {
    default: break;
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/true);
      break;
    case Intrinsic::x86_sse_cvttss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse2_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/true);
      break;
    }
  }

  return nullptr;
}

static Constant *ConstantFoldScalarCall2(StringRef Name,
                                         Intrinsic::ID IntrinsicID,
                                         Type *Ty,
                                         ArrayRef<Constant *> Operands,
                                         const TargetLibraryInfo *TLI,
                                         const CallBase *Call) {
  assert(Operands.size() == 2 && "Wrong number of operands.");

  if (Ty->isFloatingPointTy()) {
    // TODO: We should have undef handling for all of the FP intrinsics that
    // are attempted to be folded in this function.
    bool IsOp0Undef = isa<UndefValue>(Operands[0]);
    bool IsOp1Undef = isa<UndefValue>(Operands[1]);
    switch (IntrinsicID) {
    case Intrinsic::maxnum:
    case Intrinsic::minnum:
    case Intrinsic::maximum:
    case Intrinsic::minimum:
      // If one argument is undef, return the other argument.
      if (IsOp0Undef)
        return Operands[1];
      if (IsOp1Undef)
        return Operands[0];
      break;
    }
  }

  if (auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
      return nullptr;
    APFloat Op1V = Op1->getValueAPF();

    if (auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (Op2->getType() != Op1->getType())
        return nullptr;
      APFloat Op2V = Op2->getValueAPF();

      switch (IntrinsicID) {
      default:
        break;
      case Intrinsic::pow:
        return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
      case Intrinsic::copysign:
        return ConstantFP::get(Ty->getContext(),
                               APFloat::copySign(Op1V, Op2V));
      case Intrinsic::minnum:
        return ConstantFP::get(Ty->getContext(), minnum(Op1V, Op2V));
      case Intrinsic::maxnum:
        return ConstantFP::get(Ty->getContext(), maxnum(Op1V, Op2V));
      case Intrinsic::minimum:
        return ConstantFP::get(Ty->getContext(), minimum(Op1V, Op2V));
      case Intrinsic::maximum:
        return ConstantFP::get(Ty->getContext(), maximum(Op1V, Op2V));
      case Intrinsic::amdgcn_fmul_legacy:
        // The legacy behaviour is that multiplying +/- 0.0 by anything, even
        // NaN or infinity, gives +0.0.
        if (Op1V.isZero() || Op2V.isZero())
          return ConstantFP::getNullValue(Ty);
        return ConstantFP::get(Ty->getContext(), Op1V * Op2V);
      }

      if (!TLI)
        return nullptr;

      LibFunc Func = NotLibFunc;
      TLI->getLibFunc(Name, Func);
      switch (Func) {
      default:
        break;
      case LibFunc_pow:
      case LibFunc_powf:
      case LibFunc_pow_finite:
      case LibFunc_powf_finite:
        if (TLI->has(Func))
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        break;
      case LibFunc_fmod:
      case LibFunc_fmodf:
        if (TLI->has(Func)) {
          APFloat V = Op1->getValueAPF();
          if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
            return ConstantFP::get(Ty->getContext(), V);
        }
        break;
      case LibFunc_remainder:
      case LibFunc_remainderf:
        if (TLI->has(Func)) {
          APFloat V = Op1->getValueAPF();
          if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
            return ConstantFP::get(Ty->getContext(), V);
        }
        break;
      case LibFunc_atan2:
      case LibFunc_atan2f:
      case LibFunc_atan2_finite:
      case LibFunc_atan2f_finite:
        if (TLI->has(Func))
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
        break;
      }
    } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
      if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((float)std::pow((float)Op1V.convertToDouble(),
                                    (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((float)std::pow((float)Op1V.convertToDouble(),
                                    (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((double)std::pow(Op1V.convertToDouble(),
                                     (int)Op2C->getZExtValue())));

      if (IntrinsicID == Intrinsic::amdgcn_ldexp) {
        // FIXME: Should flush denorms depending on FP mode, but that's
        // ignored everywhere else.

        // scalbn is equivalent to ldexp with float radix 2
        APFloat Result = scalbn(Op1->getValueAPF(), Op2C->getSExtValue(),
                                APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), Result);
      }
    }
    return nullptr;
  }

  if (Operands[0]->getType()->isIntegerTy() &&
      Operands[1]->getType()->isIntegerTy()) {
    const APInt *C0, *C1;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1))
      return nullptr;

    unsigned BitWidth = Ty->getScalarSizeInBits();
    switch (IntrinsicID) {
    default: break;
    case Intrinsic::smax:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return ConstantInt::get(Ty, APInt::getSignedMaxValue(BitWidth));
      return ConstantInt::get(Ty, C0->sgt(*C1) ? *C0 : *C1);

    case Intrinsic::smin:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return ConstantInt::get(Ty, APInt::getSignedMinValue(BitWidth));
      return ConstantInt::get(Ty, C0->slt(*C1) ? *C0 : *C1);

    case Intrinsic::umax:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return ConstantInt::get(Ty, APInt::getMaxValue(BitWidth));
      return ConstantInt::get(Ty, C0->ugt(*C1) ? *C0 : *C1);

    case Intrinsic::umin:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return ConstantInt::get(Ty, APInt::getMinValue(BitWidth));
      return ConstantInt::get(Ty, C0->ult(*C1) ? *C0 : *C1);

    case Intrinsic::usub_with_overflow:
    case Intrinsic::ssub_with_overflow:
      // X - undef -> { 0, false }
      // undef - X -> { 0, false }
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);
      LLVM_FALLTHROUGH;
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::sadd_with_overflow:
      // X + undef -> { -1, false }
      // undef + x -> { -1, false }
      if (!C0 || !C1) {
        return ConstantStruct::get(
            cast<StructType>(Ty),
            {Constant::getAllOnesValue(Ty->getStructElementType(0)),
             Constant::getNullValue(Ty->getStructElementType(1))});
      }
      LLVM_FALLTHROUGH;
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow: {
      // undef * X -> { 0, false }
      // X * undef -> { 0, false }
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);

      APInt Res;
      bool Overflow;
      switch (IntrinsicID) {
      default: llvm_unreachable("Invalid case");
      case Intrinsic::sadd_with_overflow:
        Res = C0->sadd_ov(*C1, Overflow);
        break;
      case Intrinsic::uadd_with_overflow:
        Res = C0->uadd_ov(*C1, Overflow);
        break;
      case Intrinsic::ssub_with_overflow:
        Res = C0->ssub_ov(*C1, Overflow);
        break;
      case Intrinsic::usub_with_overflow:
        Res = C0->usub_ov(*C1, Overflow);
        break;
      case Intrinsic::smul_with_overflow:
        Res = C0->smul_ov(*C1, Overflow);
        break;
      case Intrinsic::umul_with_overflow:
        Res = C0->umul_ov(*C1, Overflow);
        break;
      }
      Constant *Ops[] = {
        ConstantInt::get(Ty->getContext(), Res),
        ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
      };
      return ConstantStruct::get(cast<StructType>(Ty), Ops);
    }
    case Intrinsic::uadd_sat:
    case Intrinsic::sadd_sat:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return Constant::getAllOnesValue(Ty);
      if (IntrinsicID == Intrinsic::uadd_sat)
        return ConstantInt::get(Ty, C0->uadd_sat(*C1));
      else
        return ConstantInt::get(Ty, C0->sadd_sat(*C1));
    case Intrinsic::usub_sat:
    case Intrinsic::ssub_sat:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);
      if (IntrinsicID == Intrinsic::usub_sat)
        return ConstantInt::get(Ty, C0->usub_sat(*C1));
      else
        return ConstantInt::get(Ty, C0->ssub_sat(*C1));
    case Intrinsic::cttz:
    case Intrinsic::ctlz:
      assert(C1 && "Must be constant int");

      // cttz(0, 1) and ctlz(0, 1) are undef.
      if (C1->isOneValue() && (!C0 || C0->isNullValue()))
        return UndefValue::get(Ty);
      if (!C0)
        return Constant::getNullValue(Ty);
      if (IntrinsicID == Intrinsic::cttz)
        return ConstantInt::get(Ty, C0->countTrailingZeros());
      else
        return ConstantInt::get(Ty, C0->countLeadingZeros());

    case Intrinsic::abs:
      // Undef or minimum val operand with poison min --> undef
      assert(C1 && "Must be constant int");
      if (C1->isOneValue() && (!C0 || C0->isMinSignedValue()))
        return UndefValue::get(Ty);

      // Undef operand with no poison min --> 0 (sign bit must be clear)
      if (C1->isNullValue() && !C0)
        return Constant::getNullValue(Ty);

      return ConstantInt::get(Ty, C0->abs());
    }

    return nullptr;
  }

  // Support ConstantVector in case we have an Undef in the top.
  if ((isa<ConstantVector>(Operands[0]) ||
       isa<ConstantDataVector>(Operands[0])) &&
      // Check for default rounding mode.
      // FIXME: Support other rounding modes?
      isa<ConstantInt>(Operands[1]) &&
      cast<ConstantInt>(Operands[1])->getValue() == 4) {
    auto *Op = cast<Constant>(Operands[0]);
    switch (IntrinsicID) {
    default: break;
    case Intrinsic::x86_avx512_vcvtss2si32:
    case Intrinsic::x86_avx512_vcvtss2si64:
    case Intrinsic::x86_avx512_vcvtsd2si32:
    case Intrinsic::x86_avx512_vcvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/true);
      break;
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/false);
      break;
    case Intrinsic::x86_avx512_cvttss2si:
    case Intrinsic::x86_avx512_cvttss2si64:
    case Intrinsic::x86_avx512_cvttsd2si:
    case Intrinsic::x86_avx512_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/true);
      break;
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/false);
      break;
    }
  }
  return nullptr;
}

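// A brief orientation, not a normative hardware description: the cube
// intrinsics map a direction vector (S0, S1, S2) onto a cubemap face. The
// coordinate with the largest magnitude selects the face ID (0 to 5, returned
// by cubeid), SC/TC are the in-face coordinates, and cubema returns twice the
// major-axis value.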
static APFloat ConstantFoldAMDGCNCubeIntrinsic(Intrinsic::ID IntrinsicID,
                                               const APFloat &S0,
                                               const APFloat &S1,
                                               const APFloat &S2) {
  unsigned ID;
  const fltSemantics &Sem = S0.getSemantics();
  APFloat MA(Sem), SC(Sem), TC(Sem);
  if (abs(S2) >= abs(S0) && abs(S2) >= abs(S1)) {
    if (S2.isNegative() && S2.isNonZero() && !S2.isNaN()) {
      // S2 < 0
      ID = 5;
      SC = -S0;
    } else {
      ID = 4;
      SC = S0;
    }
    MA = S2;
    TC = -S1;
  } else if (abs(S1) >= abs(S0)) {
    if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
      // S1 < 0
      ID = 3;
      TC = -S2;
    } else {
      ID = 2;
      TC = S2;
    }
    MA = S1;
    SC = S0;
  } else {
    if (S0.isNegative() && S0.isNonZero() && !S0.isNaN()) {
      // S0 < 0
      ID = 1;
      SC = S2;
    } else {
      ID = 0;
      SC = -S2;
    }
    MA = S0;
    TC = -S1;
  }
  switch (IntrinsicID) {
  default:
    llvm_unreachable("unhandled amdgcn cube intrinsic");
  case Intrinsic::amdgcn_cubeid:
    return APFloat(Sem, ID);
  case Intrinsic::amdgcn_cubema:
    return MA + MA;
  case Intrinsic::amdgcn_cubesc:
    return SC;
  case Intrinsic::amdgcn_cubetc:
    return TC;
  }
}

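// A brief orientation derived from the folding below, not a normative
// hardware description: v_perm_b32 builds each result byte from a selector
// byte in C2. Selectors 0-7 pick a byte of the two source operands, 8-11
// replicate a source sign bit across the byte, 12 produces 0x00, and 13-255
// produce 0xff.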
static Constant *ConstantFoldAMDGCNPermIntrinsic(ArrayRef<Constant *> Operands,
                                                 Type *Ty) {
  const APInt *C0, *C1, *C2;
  if (!getConstIntOrUndef(Operands[0], C0) ||
      !getConstIntOrUndef(Operands[1], C1) ||
      !getConstIntOrUndef(Operands[2], C2))
    return nullptr;

  if (!C2)
    return UndefValue::get(Ty);

  APInt Val(32, 0);
  unsigned NumUndefBytes = 0;
  for (unsigned I = 0; I < 32; I += 8) {
    unsigned Sel = C2->extractBitsAsZExtValue(8, I);
    unsigned B = 0;

    if (Sel >= 13)
      B = 0xff;
    else if (Sel == 12)
      B = 0x00;
    else {
      const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
      if (!Src)
        ++NumUndefBytes;
      else if (Sel < 8)
        B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8);
      else
        B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff;
    }

    Val.insertBits(B, I, 8);
  }

  if (NumUndefBytes == 4)
    return UndefValue::get(Ty);

  return ConstantInt::get(Ty, Val);
}

static Constant *ConstantFoldScalarCall3(StringRef Name,
                                         Intrinsic::ID IntrinsicID,
                                         Type *Ty,
                                         ArrayRef<Constant *> Operands,
                                         const TargetLibraryInfo *TLI,
                                         const CallBase *Call) {
  assert(Operands.size() == 3 && "Wrong number of operands.");

  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        const APFloat &C3 = Op3->getValueAPF();
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::amdgcn_fma_legacy: {
          // The legacy behaviour is that multiplying +/- 0.0 by anything,
          // even NaN or infinity, gives +0.0.
          if (C1.isZero() || C2.isZero()) {
            // It's tempting to just return C3 here, but that would give the
            // wrong result if C3 was -0.0.
            return ConstantFP::get(Ty->getContext(), APFloat(0.0f) + C3);
          }
          LLVM_FALLTHROUGH;
        }
        case Intrinsic::fma:
        case Intrinsic::fmuladd: {
          APFloat V = C1;
          V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven);
          return ConstantFP::get(Ty->getContext(), V);
        }
        case Intrinsic::amdgcn_cubeid:
        case Intrinsic::amdgcn_cubema:
        case Intrinsic::amdgcn_cubesc:
        case Intrinsic::amdgcn_cubetc: {
          APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3);
          return ConstantFP::get(Ty->getContext(), V);
        }
        }
      }
    }
  }

  if (IntrinsicID == Intrinsic::smul_fix ||
      IntrinsicID == Intrinsic::smul_fix_sat) {
    // poison * C -> poison
    // C * poison -> poison
    if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
      return PoisonValue::get(Ty);

    const APInt *C0, *C1;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1))
      return nullptr;

    // undef * C -> 0
    // C * undef -> 0
    if (!C0 || !C1)
      return Constant::getNullValue(Ty);

    // This code performs rounding towards negative infinity in case the
    // result cannot be represented exactly for the given scale. Targets that
    // do care about rounding should use a target hook for specifying how
    // rounding should be done, and provide their own folding to be
    // consistent with rounding. This is the same approach as used by
    // DAGTypeLegalizer::ExpandIntRes_MULFIX.
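    // Worked example (hypothetical operands): for i32 smul_fix(6, 3, 1) the
    // double-width product is 18 and 18 >> 1 == 9; for smul_fix(-3, 1, 1) the
    // arithmetic shift yields -2, i.e. rounding toward negative infinity as
    // described above.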
    unsigned Scale = cast<ConstantInt>(Operands[2])->getZExtValue();
    unsigned Width = C0->getBitWidth();
    assert(Scale < Width && "Illegal scale.");
    unsigned ExtendedWidth = Width * 2;
    APInt Product = (C0->sextOrSelf(ExtendedWidth) *
                     C1->sextOrSelf(ExtendedWidth)).ashr(Scale);
    if (IntrinsicID == Intrinsic::smul_fix_sat) {
      APInt Max = APInt::getSignedMaxValue(Width).sextOrSelf(ExtendedWidth);
      APInt Min = APInt::getSignedMinValue(Width).sextOrSelf(ExtendedWidth);
      Product = APIntOps::smin(Product, Max);
      Product = APIntOps::smax(Product, Min);
    }
    return ConstantInt::get(Ty->getContext(), Product.sextOrTrunc(Width));
  }

  if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
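    // Worked example (hypothetical operands): fshl(i8 0x81, i8 0x7F, 3)
    // shifts the concatenation 0x81:0x7F left by 3 and keeps the high byte:
    // (0x81 << 3) | (0x7F >> 5) == 0x08 | 0x03 == 0x0B.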
    const APInt *C0, *C1, *C2;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1) ||
        !getConstIntOrUndef(Operands[2], C2))
      return nullptr;

    bool IsRight = IntrinsicID == Intrinsic::fshr;
    if (!C2)
      return Operands[IsRight ? 1 : 0];
    if (!C0 && !C1)
      return UndefValue::get(Ty);

    // The shift amount is interpreted as modulo the bitwidth. If the shift
    // amount is effectively 0, avoid UB due to oversized inverse shift below.
    unsigned BitWidth = C2->getBitWidth();
    unsigned ShAmt = C2->urem(BitWidth);
    if (!ShAmt)
      return Operands[IsRight ? 1 : 0];

    // (C0 << ShlAmt) | (C1 >> LshrAmt)
    unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
    unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
    if (!C0)
      return ConstantInt::get(Ty, C1->lshr(LshrAmt));
    if (!C1)
      return ConstantInt::get(Ty, C0->shl(ShlAmt));
    return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
  }

  if (IntrinsicID == Intrinsic::amdgcn_perm)
    return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);

  return nullptr;
}

static Constant *ConstantFoldScalarCall(StringRef Name,
                                        Intrinsic::ID IntrinsicID,
                                        Type *Ty,
                                        ArrayRef<Constant *> Operands,
                                        const TargetLibraryInfo *TLI,
                                        const CallBase *Call) {
  if (Operands.size() == 1)
    return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);

  if (Operands.size() == 2)
    return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call);

  if (Operands.size() == 3)
    return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);

  return nullptr;
}

static Constant *ConstantFoldFixedVectorCall(
    StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy,
    ArrayRef<Constant *> Operands, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const CallBase *Call) {
  SmallVector<Constant *, 4> Result(FVTy->getNumElements());
  SmallVector<Constant *, 4> Lane(Operands.size());
  Type *Ty = FVTy->getElementType();

  switch (IntrinsicID) {
  case Intrinsic::masked_load: {
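    // For illustration only (hypothetical operands): loading <2 x i32> from a
    // constant global holding <i32 7, i32 9>, with mask <i1 1, i1 0> and
    // passthru <i32 0, i32 5>, folds to <i32 7, i32 5>: loaded lanes where
    // the mask is 1, passthru lanes where it is 0.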
    auto *SrcPtr = Operands[0];
    auto *Mask = Operands[2];
    auto *Passthru = Operands[3];

    Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, FVTy, DL);

    SmallVector<Constant *, 32> NewElements;
    for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
      auto *MaskElt = Mask->getAggregateElement(I);
      if (!MaskElt)
        break;
      auto *PassthruElt = Passthru->getAggregateElement(I);
      auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
      if (isa<UndefValue>(MaskElt)) {
        if (PassthruElt)
          NewElements.push_back(PassthruElt);
        else if (VecElt)
          NewElements.push_back(VecElt);
        else
          return nullptr;
        // The undef lane has been handled; without this continue, the 0/1
        // checks below would reject the element we just pushed.
        continue;
      }
      if (MaskElt->isNullValue()) {
        if (!PassthruElt)
          return nullptr;
        NewElements.push_back(PassthruElt);
      } else if (MaskElt->isOneValue()) {
        if (!VecElt)
          return nullptr;
        NewElements.push_back(VecElt);
      } else {
        return nullptr;
      }
    }
    if (NewElements.size() != FVTy->getNumElements())
      return nullptr;
    return ConstantVector::get(NewElements);
  }
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64: {
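    // For illustration only (a hypothetical operand): folding vctp32(2) for a
    // 4-lane result enables the first two lanes, i.e.
    // <i1 1, i1 1, i1 0, i1 0>.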
    if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
      unsigned Lanes = FVTy->getNumElements();
      uint64_t Limit = Op->getZExtValue();
      // vctp64 is currently modelled as returning a v4i1, not a v2i1. Make
      // sure we get the limit right in that case and set all relevant lanes.
      if (IntrinsicID == Intrinsic::arm_mve_vctp64)
        Limit *= 2;

      SmallVector<Constant *, 16> NCs;
      for (unsigned i = 0; i < Lanes; i++) {
        if (i < Limit)
          NCs.push_back(ConstantInt::getTrue(Ty));
        else
          NCs.push_back(ConstantInt::getFalse(Ty));
      }
      return ConstantVector::get(NCs);
    }
    break;
  }
  case Intrinsic::get_active_lane_mask: {
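    // For illustration only (hypothetical operands): with Base = 2,
    // Limit = 5 and 4 lanes, each lane tests 2 + i < 5, yielding
    // <i1 1, i1 1, i1 1, i1 0>.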
    auto *Op0 = dyn_cast<ConstantInt>(Operands[0]);
    auto *Op1 = dyn_cast<ConstantInt>(Operands[1]);
    if (Op0 && Op1) {
      unsigned Lanes = FVTy->getNumElements();
      uint64_t Base = Op0->getZExtValue();
      uint64_t Limit = Op1->getZExtValue();

      SmallVector<Constant *, 16> NCs;
      for (unsigned i = 0; i < Lanes; i++) {
        if (Base + i < Limit)
          NCs.push_back(ConstantInt::getTrue(Ty));
        else
          NCs.push_back(ConstantInt::getFalse(Ty));
      }
      return ConstantVector::get(NCs);
    }
    break;
  }
  default:
    break;
  }

  for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
    // Gather a column of constants.
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      // Some intrinsics use a scalar type for certain arguments.
      if (hasVectorInstrinsicScalarOpd(IntrinsicID, J)) {
        Lane[J] = Operands[J];
        continue;
      }

      Constant *Agg = Operands[J]->getAggregateElement(I);
      if (!Agg)
        return nullptr;

      Lane[J] = Agg;
    }

    // Use the regular scalar folding to simplify this column.
    Constant *Folded =
        ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
    if (!Folded)
      return nullptr;
    Result[I] = Folded;
  }

  return ConstantVector::get(Result);
}

static Constant *ConstantFoldScalableVectorCall(
    StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy,
    ArrayRef<Constant *> Operands, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const CallBase *Call) {
  switch (IntrinsicID) {
  case Intrinsic::aarch64_sve_convert_from_svbool: {
    auto *Src = dyn_cast<Constant>(Operands[0]);
    if (!Src || !Src->isNullValue())
      break;

    return ConstantInt::getFalse(SVTy);
  }
  default:
    break;
  }
  return nullptr;
}

} // end anonymous namespace

Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
                                 ArrayRef<Constant *> Operands,
                                 const TargetLibraryInfo *TLI) {
  if (Call->isNoBuiltin())
    return nullptr;
  if (!F->hasName())
    return nullptr;
  StringRef Name = F->getName();

  Type *Ty = F->getReturnType();

  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
    return ConstantFoldFixedVectorCall(
        Name, F->getIntrinsicID(), FVTy, Operands,
        F->getParent()->getDataLayout(), TLI, Call);

  if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty))
    return ConstantFoldScalableVectorCall(
        Name, F->getIntrinsicID(), SVTy, Operands,
        F->getParent()->getDataLayout(), TLI, Call);

  return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI,
                                Call);
}

bool llvm::isMathLibCallNoop(const CallBase *Call,
                             const TargetLibraryInfo *TLI) {
  // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
  // (and to some extent ConstantFoldScalarCall).
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return false;
  Function *F = Call->getCalledFunction();
  if (!F)
    return false;

  LibFunc Func;
  if (!TLI || !TLI->getLibFunc(*F, Func))
    return false;

  if (Call->getNumArgOperands() == 1) {
    if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
      const APFloat &Op = OpC->getValueAPF();
      switch (Func) {
      case LibFunc_logl:
      case LibFunc_log:
      case LibFunc_logf:
      case LibFunc_log2l:
      case LibFunc_log2:
      case LibFunc_log2f:
      case LibFunc_log10l:
      case LibFunc_log10:
      case LibFunc_log10f:
        return Op.isNaN() || (!Op.isZero() && !Op.isNegative());

      case LibFunc_expl:
      case LibFunc_exp:
      case LibFunc_expf:
        // FIXME: These boundaries are slightly conservative.
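        // (For intuition, assumptions about the host libm rather than facts
        // asserted here: exp overflows double a little above ln(DBL_MAX),
        // about 709.78, and underflows past the smallest denormal around
        // -745.13, so [-745, 709] stays safely inside both limits.)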
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-745.0) || Op > APFloat(709.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f));
        break;

      case LibFunc_exp2l:
      case LibFunc_exp2:
      case LibFunc_exp2f:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-1074.0) || Op > APFloat(1023.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-149.0f) || Op > APFloat(127.0f));
        break;

      case LibFunc_sinl:
      case LibFunc_sin:
      case LibFunc_sinf:
      case LibFunc_cosl:
      case LibFunc_cos:
      case LibFunc_cosf:
        return !Op.isInfinity();

      case LibFunc_tanl:
      case LibFunc_tan:
      case LibFunc_tanf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = OpC->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
          return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr;
        break;
      }

      case LibFunc_asinl:
      case LibFunc_asin:
      case LibFunc_asinf:
      case LibFunc_acosl:
      case LibFunc_acos:
      case LibFunc_acosf:
        return !(Op < APFloat(Op.getSemantics(), "-1") ||
                 Op > APFloat(Op.getSemantics(), "1"));

      case LibFunc_sinh:
      case LibFunc_cosh:
      case LibFunc_sinhf:
      case LibFunc_coshf:
      case LibFunc_sinhl:
      case LibFunc_coshl:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-710.0) || Op > APFloat(710.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-89.0f) || Op > APFloat(89.0f));
        break;

      case LibFunc_sqrtl:
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
        return Op.isNaN() || Op.isZero() || !Op.isNegative();

      // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
      // maybe others?
      default:
        break;
      }
    }
  }

  if (Call->getNumArgOperands() == 2) {
    ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
    ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
    if (Op0C && Op1C) {
      const APFloat &Op0 = Op0C->getValueAPF();
      const APFloat &Op1 = Op1C->getValueAPF();

      switch (Func) {
      case LibFunc_powl:
      case LibFunc_pow:
      case LibFunc_powf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = Op0C->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          if (Ty == Op1C->getType())
            return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr;
        }
        break;
      }

      case LibFunc_fmodl:
      case LibFunc_fmod:
      case LibFunc_fmodf:
      case LibFunc_remainderl:
      case LibFunc_remainder:
      case LibFunc_remainderf:
        return Op0.isNaN() || Op1.isNaN() ||
               (!Op0.isInfinity() && !Op1.isZero());

      default:
        break;
      }
    }
  }

  return false;
}

void TargetFolder::anchor() {}