//===- Evaluator.cpp - LLVM IR evaluator ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Function evaluator for LLVM IR.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Evaluator.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "evaluator"

using namespace llvm;

static inline bool
isSimpleEnoughValueToCommit(Constant *C,
                            SmallPtrSetImpl<Constant *> &SimpleConstants,
                            const DataLayout &DL);

/// Return true if the specified constant can be handled by the code generator.
/// We don't want to generate something like:
///   void *X = &X/42;
/// because the code generator doesn't have a relocation that can handle that.
///
/// This function should be called if C was not found (but just got inserted)
/// in SimpleConstants to avoid having to rescan the same constants all the
/// time.
static bool
isSimpleEnoughValueToCommitHelper(Constant *C,
                                  SmallPtrSetImpl<Constant *> &SimpleConstants,
                                  const DataLayout &DL) {
  // Simple global addresses are supported, do not allow dllimport or
  // thread-local globals.
  if (auto *GV = dyn_cast<GlobalValue>(C))
    return !GV->hasDLLImportStorageClass() && !GV->isThreadLocal();

  // Simple integer, undef, constant aggregate zero, etc are all supported.
  if (C->getNumOperands() == 0 || isa<BlockAddress>(C))
    return true;

  // Aggregate values are safe if all their elements are.
  if (isa<ConstantAggregate>(C)) {
    for (Value *Op : C->operands())
      if (!isSimpleEnoughValueToCommit(cast<Constant>(Op), SimpleConstants, DL))
        return false;
    return true;
  }

  // We don't know exactly what relocations are allowed in constant expressions,
  // so we allow &global+constantoffset, which is safe and uniformly supported
  // across targets.
  ConstantExpr *CE = cast<ConstantExpr>(C);
  switch (CE->getOpcode()) {
  case Instruction::BitCast:
    // Bitcast is fine if the casted value is fine.
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);

  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
    // int <=> ptr is fine if the int type is the same size as the
    // pointer type.
    if (DL.getTypeSizeInBits(CE->getType()) !=
        DL.getTypeSizeInBits(CE->getOperand(0)->getType()))
      return false;
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);

  // GEP is fine if it is simple + constant offset.
  case Instruction::GetElementPtr:
    for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
      if (!isa<ConstantInt>(CE->getOperand(i)))
        return false;
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);

  case Instruction::Add:
    // We allow simple+cst.
    if (!isa<ConstantInt>(CE->getOperand(1)))
      return false;
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
  }
  return false;
}

static inline bool
isSimpleEnoughValueToCommit(Constant *C,
                            SmallPtrSetImpl<Constant *> &SimpleConstants,
                            const DataLayout &DL) {
  // If we already checked this constant, we win.
  if (!SimpleConstants.insert(C).second)
    return true;
  // Check the constant.
  return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, DL);
}

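/// Free the heap-allocated MutableAggregate, if there is one, and reset the
/// value to an empty state.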
void Evaluator::MutableValue::clear() {
  if (auto *Agg = dyn_cast_if_present<MutableAggregate *>(Val))
    delete Agg;
  Val = nullptr;
}

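/// Read a constant of type Ty at the given byte offset, descending through
/// any mutable aggregates to the element that covers the offset. Returns
/// null if the offset cannot be mapped to an element.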
Constant *Evaluator::MutableValue::read(Type *Ty, APInt Offset,
                                        const DataLayout &DL) const {
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  const MutableValue *V = this;
  while (const auto *Agg = dyn_cast_if_present<MutableAggregate *>(V->Val)) {
    Type *AggTy = Agg->Ty;
    std::optional<APInt> Index = DL.getGEPIndexForOffset(AggTy, Offset);
    if (!Index || Index->uge(Agg->Elements.size()) ||
        !TypeSize::isKnownLE(TySize, DL.getTypeStoreSize(AggTy)))
      return nullptr;

    V = &Agg->Elements[Index->getZExtValue()];
  }

  return ConstantFoldLoadFromConst(cast<Constant *>(V->Val), Ty, Offset, DL);
}

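/// Convert the wrapped constant aggregate (fixed vector, array, or struct)
/// into a MutableAggregate whose elements can be overwritten individually.
/// Returns false for non-aggregate types.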
bool Evaluator::MutableValue::makeMutable() {
  Constant *C = cast<Constant *>(Val);
  Type *Ty = C->getType();
  unsigned NumElements;
  if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
    NumElements = VT->getNumElements();
  } else if (auto *AT = dyn_cast<ArrayType>(Ty))
    NumElements = AT->getNumElements();
  else if (auto *ST = dyn_cast<StructType>(Ty))
    NumElements = ST->getNumElements();
  else
    return false;

  MutableAggregate *MA = new MutableAggregate(Ty);
  MA->Elements.reserve(NumElements);
  for (unsigned I = 0; I < NumElements; ++I)
    MA->Elements.push_back(C->getAggregateElement(I));
  Val = MA;
  return true;
}

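/// Store constant V at the given byte offset, making intermediate aggregates
/// mutable as needed and inserting int<->ptr or bitcast conversions when the
/// stored type differs from the slot type. Returns false if the store cannot
/// be represented.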
bool Evaluator::MutableValue::write(Constant *V, APInt Offset,
                                    const DataLayout &DL) {
  Type *Ty = V->getType();
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  MutableValue *MV = this;
  while (Offset != 0 ||
         !CastInst::isBitOrNoopPointerCastable(Ty, MV->getType(), DL)) {
    if (isa<Constant *>(MV->Val) && !MV->makeMutable())
      return false;

    MutableAggregate *Agg = cast<MutableAggregate *>(MV->Val);
    Type *AggTy = Agg->Ty;
    std::optional<APInt> Index = DL.getGEPIndexForOffset(AggTy, Offset);
    if (!Index || Index->uge(Agg->Elements.size()) ||
        !TypeSize::isKnownLE(TySize, DL.getTypeStoreSize(AggTy)))
      return false;

    MV = &Agg->Elements[Index->getZExtValue()];
  }

  Type *MVType = MV->getType();
  MV->clear();
  if (Ty->isIntegerTy() && MVType->isPointerTy())
    MV->Val = ConstantExpr::getIntToPtr(V, MVType);
  else if (Ty->isPointerTy() && MVType->isIntegerTy())
    MV->Val = ConstantExpr::getPtrToInt(V, MVType);
  else if (Ty != MVType)
    MV->Val = ConstantExpr::getBitCast(V, MVType);
  else
    MV->Val = V;
  return true;
}

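/// Reassemble the mutated elements into a plain constant struct, array, or
/// vector.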
Constant *Evaluator::MutableAggregate::toConstant() const {
  SmallVector<Constant *, 32> Consts;
  for (const MutableValue &MV : Elements)
    Consts.push_back(MV.toConstant());

  if (auto *ST = dyn_cast<StructType>(Ty))
    return ConstantStruct::get(ST, Consts);
  if (auto *AT = dyn_cast<ArrayType>(Ty))
    return ConstantArray::get(AT, Consts);
  assert(isa<FixedVectorType>(Ty) && "Must be vector");
  return ConstantVector::get(Consts);
}

/// Return the value that would be computed by a load from P after the stores
/// reflected by 'memory' have been performed.  If we can't decide, return null.
Constant *Evaluator::ComputeLoadResult(Constant *P, Type *Ty) {
  APInt Offset(DL.getIndexTypeSizeInBits(P->getType()), 0);
  P = cast<Constant>(P->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true));
  Offset = Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(P->getType()));
  if (auto *GV = dyn_cast<GlobalVariable>(P))
    return ComputeLoadResult(GV, Ty, Offset);
  return nullptr;
}

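/// Load a value of type Ty from GV at the given byte offset, preferring any
/// pending mutation recorded in MutatedMemory over the global's original
/// initializer. Returns null if the value cannot be determined.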
Constant *Evaluator::ComputeLoadResult(GlobalVariable *GV, Type *Ty,
                                       const APInt &Offset) {
  auto It = MutatedMemory.find(GV);
  if (It != MutatedMemory.end())
    return It->second.read(Ty, Offset, DL);

  if (!GV->hasDefinitiveInitializer())
    return nullptr;
  return ConstantFoldLoadFromConst(GV->getInitializer(), Ty, Offset, DL);
}

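/// Return the function referenced by C, looking through a global alias that
/// points directly at a function; otherwise return null.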
static Function *getFunction(Constant *C) {
  if (auto *Fn = dyn_cast<Function>(C))
    return Fn;

  if (auto *Alias = dyn_cast<GlobalAlias>(C))
    if (auto *Fn = dyn_cast<Function>(Alias->getAliasee()))
      return Fn;
  return nullptr;
}

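/// Resolve the called operand of CB to a Function and collect the constant
/// values of its arguments into Formals. Returns null if the callee cannot be
/// resolved or the call signature does not match.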
Function *
Evaluator::getCalleeWithFormalArgs(CallBase &CB,
                                   SmallVectorImpl<Constant *> &Formals) {
  auto *V = CB.getCalledOperand()->stripPointerCasts();
  if (auto *Fn = getFunction(getVal(V)))
    return getFormalParams(CB, Fn, Formals) ? Fn : nullptr;
  return nullptr;
}

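/// Check that the call site and callee signatures match, then gather the
/// constant values of the call arguments into Formals.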
bool Evaluator::getFormalParams(CallBase &CB, Function *F,
                                SmallVectorImpl<Constant *> &Formals) {
  auto *FTy = F->getFunctionType();
  if (FTy != CB.getFunctionType()) {
    LLVM_DEBUG(dbgs() << "Signature mismatch.\n");
    return false;
  }

  for (Value *Arg : CB.args())
    Formals.push_back(getVal(Arg));
  return true;
}

/// Evaluate all instructions in block BB, returning true if successful, false
/// if we can't evaluate it.  NextBB returns the next BB that control flows into,
/// or null upon return. StrippedPointerCastsForAliasAnalysis is set to true if
/// we looked through pointer casts to evaluate something.
bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB,
                              bool &StrippedPointerCastsForAliasAnalysis) {
  // This is the main evaluation loop.
  while (true) {
    Constant *InstResult = nullptr;

    LLVM_DEBUG(dbgs() << "Evaluating Instruction: " << *CurInst << "\n");

    if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) {
      if (SI->isVolatile()) {
        LLVM_DEBUG(dbgs() << "Store is volatile! Can not evaluate.\n");
        return false;  // no volatile accesses.
      }
      Constant *Ptr = getVal(SI->getOperand(1));
      Constant *FoldedPtr = ConstantFoldConstant(Ptr, DL, TLI);
      if (Ptr != FoldedPtr) {
        LLVM_DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr);
        Ptr = FoldedPtr;
        LLVM_DEBUG(dbgs() << "; To: " << *Ptr << "\n");
      }

      APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
      Ptr = cast<Constant>(Ptr->stripAndAccumulateConstantOffsets(
          DL, Offset, /* AllowNonInbounds */ true));
      Offset = Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(Ptr->getType()));
      auto *GV = dyn_cast<GlobalVariable>(Ptr);
      if (!GV || !GV->hasUniqueInitializer()) {
        LLVM_DEBUG(dbgs() << "Store is not to global with unique initializer: "
                          << *Ptr << "\n");
        return false;
      }

      // If this might be too difficult for the backend to handle (e.g. the addr
      // of one global variable divided by another) then we can't commit it.
      Constant *Val = getVal(SI->getOperand(0));
      if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, DL)) {
        LLVM_DEBUG(dbgs() << "Store value is too complex to evaluate store. "
                          << *Val << "\n");
        return false;
      }

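      // Record the store in MutatedMemory, seeding the entry with the
      // global's original initializer the first time this global is written.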
      auto Res = MutatedMemory.try_emplace(GV, GV->getInitializer());
      if (!Res.first->second.write(Val, Offset, DL))
        return false;
    } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) {
      if (LI->isVolatile()) {
        LLVM_DEBUG(
            dbgs() << "Found a Load! Volatile load, can not evaluate.\n");
        return false;  // no volatile accesses.
      }

      Constant *Ptr = getVal(LI->getOperand(0));
      Constant *FoldedPtr = ConstantFoldConstant(Ptr, DL, TLI);
      if (Ptr != FoldedPtr) {
        Ptr = FoldedPtr;
        LLVM_DEBUG(dbgs() << "Found a constant pointer expression, constant "
                             "folding: "
                          << *Ptr << "\n");
      }
      InstResult = ComputeLoadResult(Ptr, LI->getType());
      if (!InstResult) {
        LLVM_DEBUG(
            dbgs() << "Failed to compute load result. Can not evaluate load."
                      "\n");
        return false; // Could not evaluate load.
      }

      LLVM_DEBUG(dbgs() << "Evaluated load: " << *InstResult << "\n");
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) {
      if (AI->isArrayAllocation()) {
        LLVM_DEBUG(dbgs() << "Found an array alloca. Can not evaluate.\n");
        return false;  // Cannot handle array allocs.
      }
      Type *Ty = AI->getAllocatedType();
      AllocaTmps.push_back(std::make_unique<GlobalVariable>(
          Ty, false, GlobalValue::InternalLinkage, UndefValue::get(Ty),
          AI->getName(), /*TLMode=*/GlobalValue::NotThreadLocal,
          AI->getType()->getPointerAddressSpace()));
      InstResult = AllocaTmps.back().get();
      LLVM_DEBUG(dbgs() << "Found an alloca. Result: " << *InstResult << "\n");
    } else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) {
      CallBase &CB = *cast<CallBase>(&*CurInst);

      // Debug info can safely be ignored here.
      if (isa<DbgInfoIntrinsic>(CB)) {
        LLVM_DEBUG(dbgs() << "Ignoring debug info.\n");
        ++CurInst;
        continue;
      }

      // Cannot handle inline asm.
      if (CB.isInlineAsm()) {
        LLVM_DEBUG(dbgs() << "Found inline asm, can not evaluate.\n");
        return false;
      }

      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CB)) {
        if (MemSetInst *MSI = dyn_cast<MemSetInst>(II)) {
          if (MSI->isVolatile()) {
            LLVM_DEBUG(dbgs() << "Can not optimize a volatile memset "
                              << "intrinsic.\n");
            return false;
          }

          auto *LenC = dyn_cast<ConstantInt>(getVal(MSI->getLength()));
          if (!LenC) {
            LLVM_DEBUG(dbgs() << "Memset with unknown length.\n");
            return false;
          }

          Constant *Ptr = getVal(MSI->getDest());
          APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
          Ptr = cast<Constant>(Ptr->stripAndAccumulateConstantOffsets(
              DL, Offset, /* AllowNonInbounds */ true));
          auto *GV = dyn_cast<GlobalVariable>(Ptr);
          if (!GV) {
            LLVM_DEBUG(dbgs() << "Memset with unknown base.\n");
            return false;
          }

          Constant *Val = getVal(MSI->getValue());
          // Avoid the byte-per-byte scan if we're memsetting a zeroinitializer
          // to zero.
          if (!Val->isNullValue() || MutatedMemory.contains(GV) ||
              !GV->hasDefinitiveInitializer() ||
              !GV->getInitializer()->isNullValue()) {
            APInt Len = LenC->getValue();
            if (Len.ugt(64 * 1024)) {
              LLVM_DEBUG(dbgs() << "Not evaluating large memset of size "
                                << Len << "\n");
              return false;
            }

            while (Len != 0) {
              Constant *DestVal = ComputeLoadResult(GV, Val->getType(), Offset);
              if (DestVal != Val) {
                LLVM_DEBUG(dbgs() << "Memset is not a no-op at offset "
                                  << Offset << " of " << *GV << ".\n");
                return false;
              }
              ++Offset;
              --Len;
            }
          }

          LLVM_DEBUG(dbgs() << "Ignoring no-op memset.\n");
          ++CurInst;
          continue;
        }

        if (II->isLifetimeStartOrEnd()) {
          LLVM_DEBUG(dbgs() << "Ignoring lifetime intrinsic.\n");
          ++CurInst;
          continue;
        }

        if (II->getIntrinsicID() == Intrinsic::invariant_start) {
          // We don't insert an entry into Values, as it doesn't have a
          // meaningful return value.
          if (!II->use_empty()) {
            LLVM_DEBUG(dbgs()
                       << "Found unused invariant_start. Can't evaluate.\n");
            return false;
          }
          ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0));
          Value *PtrArg = getVal(II->getArgOperand(1));
          Value *Ptr = PtrArg->stripPointerCasts();
          if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
            Type *ElemTy = GV->getValueType();
            if (!Size->isMinusOne() &&
                Size->getValue().getLimitedValue() >=
                    DL.getTypeStoreSize(ElemTy)) {
              Invariants.insert(GV);
              LLVM_DEBUG(dbgs() << "Found a global var that is an invariant: "
                                << *GV << "\n");
            } else {
              LLVM_DEBUG(dbgs()
                         << "Found a global var, but can not treat it as an "
                            "invariant.\n");
            }
          }
          // Continue even if we do nothing.
          ++CurInst;
          continue;
        } else if (II->getIntrinsicID() == Intrinsic::assume) {
          LLVM_DEBUG(dbgs() << "Skipping assume intrinsic.\n");
          ++CurInst;
          continue;
        } else if (II->getIntrinsicID() == Intrinsic::sideeffect) {
          LLVM_DEBUG(dbgs() << "Skipping sideeffect intrinsic.\n");
          ++CurInst;
          continue;
        } else if (II->getIntrinsicID() == Intrinsic::pseudoprobe) {
          LLVM_DEBUG(dbgs() << "Skipping pseudoprobe intrinsic.\n");
          ++CurInst;
          continue;
        } else {
          Value *Stripped = CurInst->stripPointerCastsForAliasAnalysis();
          // Only attempt to getVal() if we've actually managed to strip
          // anything away, or else we'll call getVal() on the current
          // instruction.
          if (Stripped != &*CurInst) {
            InstResult = getVal(Stripped);
          }
          if (InstResult) {
            LLVM_DEBUG(dbgs()
                       << "Stripped pointer casts for alias analysis for "
                          "intrinsic call.\n");
            StrippedPointerCastsForAliasAnalysis = true;
            InstResult = ConstantExpr::getBitCast(InstResult, II->getType());
          } else {
            LLVM_DEBUG(dbgs() << "Unknown intrinsic. Cannot evaluate.\n");
            return false;
          }
        }
      }

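      // If the call was not already handled as an intrinsic above, resolve
      // the callee and either constant fold a declaration or recursively
      // evaluate its body.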
      if (!InstResult) {
        // Resolve function pointers.
        SmallVector<Constant *, 8> Formals;
        Function *Callee = getCalleeWithFormalArgs(CB, Formals);
        if (!Callee || Callee->isInterposable()) {
          LLVM_DEBUG(dbgs() << "Can not resolve function pointer.\n");
          return false; // Cannot resolve.
        }

        if (Callee->isDeclaration()) {
          // If this is a function we can constant fold, do it.
          if (Constant *C = ConstantFoldCall(&CB, Callee, Formals, TLI)) {
            InstResult = C;
            LLVM_DEBUG(dbgs() << "Constant folded function call. Result: "
                              << *InstResult << "\n");
          } else {
            LLVM_DEBUG(dbgs() << "Can not constant fold function call.\n");
            return false;
          }
        } else {
          if (Callee->getFunctionType()->isVarArg()) {
            LLVM_DEBUG(dbgs()
                       << "Can not constant fold vararg function call.\n");
            return false;
          }

          Constant *RetVal = nullptr;
          // Execute the call, if successful, use the return value.
          ValueStack.emplace_back();
          if (!EvaluateFunction(Callee, RetVal, Formals)) {
            LLVM_DEBUG(dbgs() << "Failed to evaluate function.\n");
            return false;
          }
          ValueStack.pop_back();
          InstResult = RetVal;
          if (InstResult) {
            LLVM_DEBUG(dbgs() << "Successfully evaluated function. Result: "
                              << *InstResult << "\n\n");
          } else {
            LLVM_DEBUG(dbgs()
                       << "Successfully evaluated function. Result: 0\n\n");
          }
        }
      }
    } else if (CurInst->isTerminator()) {
      LLVM_DEBUG(dbgs() << "Found a terminator instruction.\n");

      if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) {
        if (BI->isUnconditional()) {
          NextBB = BI->getSuccessor(0);
        } else {
          ConstantInt *Cond =
            dyn_cast<ConstantInt>(getVal(BI->getCondition()));
          if (!Cond) return false;  // Cannot determine.

          NextBB = BI->getSuccessor(!Cond->getZExtValue());
        }
      } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) {
        ConstantInt *Val =
          dyn_cast<ConstantInt>(getVal(SI->getCondition()));
        if (!Val) return false;  // Cannot determine.
        NextBB = SI->findCaseValue(Val)->getCaseSuccessor();
      } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) {
        Value *Val = getVal(IBI->getAddress())->stripPointerCasts();
        if (BlockAddress *BA = dyn_cast<BlockAddress>(Val))
          NextBB = BA->getBasicBlock();
        else
          return false;  // Cannot determine.
      } else if (isa<ReturnInst>(CurInst)) {
        NextBB = nullptr;
      } else {
        // invoke, unwind, resume, unreachable.
        LLVM_DEBUG(dbgs() << "Can not handle terminator.");
        return false;  // Cannot handle this terminator.
      }

      // We succeeded at evaluating this block!
      LLVM_DEBUG(dbgs() << "Successfully evaluated block.\n");
      return true;
    } else {
      SmallVector<Constant *> Ops;
      for (Value *Op : CurInst->operands())
        Ops.push_back(getVal(Op));
      InstResult = ConstantFoldInstOperands(&*CurInst, Ops, DL, TLI);
      if (!InstResult) {
        LLVM_DEBUG(dbgs() << "Cannot fold instruction: " << *CurInst << "\n");
        return false;
      }
      LLVM_DEBUG(dbgs() << "Folded instruction " << *CurInst << " to "
                        << *InstResult << "\n");
    }

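    // If the result is used, fold it and remember it so later instructions
    // can look it up via getVal().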
    if (!CurInst->use_empty()) {
      InstResult = ConstantFoldConstant(InstResult, DL, TLI);
      setVal(&*CurInst, InstResult);
    }

    // If we just processed an invoke, we finished evaluating the block.
    if (InvokeInst *II = dyn_cast<InvokeInst>(CurInst)) {
      NextBB = II->getNormalDest();
      LLVM_DEBUG(dbgs() << "Found an invoke instruction. Finished Block.\n\n");
      return true;
    }

    // Advance program counter.
    ++CurInst;
  }
}

/// Evaluate a call to function F, returning true if successful, false if we
/// can't evaluate it.  ActualArgs contains the actual arguments to pass to
/// the function.
bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
                                 const SmallVectorImpl<Constant*> &ActualArgs) {
  assert(ActualArgs.size() == F->arg_size() && "wrong number of arguments");

  // Check to see if this function is already executing (recursion).  If so,
  // bail out.  TODO: we might want to accept limited recursion.
  if (is_contained(CallStack, F))
    return false;

  CallStack.push_back(F);

  // Initialize arguments to the incoming values specified.
  for (const auto &[ArgNo, Arg] : llvm::enumerate(F->args()))
    setVal(&Arg, ActualArgs[ArgNo]);

  // ExecutedBlocks - We only handle non-looping, non-recursive code.  As such,
  // we can only evaluate any one basic block at most once.  This set keeps
  // track of what we have executed so we can detect recursive cases etc.
  SmallPtrSet<BasicBlock*, 32> ExecutedBlocks;

  // CurBB - The current basic block we're evaluating.
  BasicBlock *CurBB = &F->front();

  BasicBlock::iterator CurInst = CurBB->begin();

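  // Walk the function one basic block at a time, following the control flow
  // that EvaluateBlock determines.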
  while (true) {
    BasicBlock *NextBB = nullptr; // Initialized to avoid compiler warnings.
    LLVM_DEBUG(dbgs() << "Trying to evaluate BB: " << *CurBB << "\n");

    bool StrippedPointerCastsForAliasAnalysis = false;

    if (!EvaluateBlock(CurInst, NextBB, StrippedPointerCastsForAliasAnalysis))
      return false;

    if (!NextBB) {
      // Successfully running until there's no next block means that we found
      // the return.  Fill in the return value and pop the call stack.
      ReturnInst *RI = cast<ReturnInst>(CurBB->getTerminator());
      if (RI->getNumOperands()) {
        // The Evaluator can look through pointer casts as long as alias
        // analysis holds because it's just a simple interpreter and doesn't
        // skip memory accesses due to invariant group metadata, but we can't
        // let users of Evaluator use a value that was obtained by looking
        // through pointer casts.
        if (StrippedPointerCastsForAliasAnalysis &&
            !RI->getReturnValue()->getType()->isVoidTy()) {
          return false;
        }
        RetVal = getVal(RI->getOperand(0));
      }
      CallStack.pop_back();
      return true;
    }

    // Okay, we succeeded in evaluating this control flow.  See if we have
    // executed the new block before.  If so, we have a looping function,
    // which we cannot evaluate in reasonable time.
    if (!ExecutedBlocks.insert(NextBB).second)
      return false;  // looped!

    // Okay, we have never been in this block before.  Check to see if there
    // are any PHI nodes.  If so, evaluate them with information about where
    // we came from.
    PHINode *PN = nullptr;
    for (CurInst = NextBB->begin();
         (PN = dyn_cast<PHINode>(CurInst)); ++CurInst)
      setVal(PN, getVal(PN->getIncomingValueForBlock(CurBB)));

    // Advance to the next block.
    CurBB = NextBB;
  }
}