//===- Evaluator.cpp - LLVM IR evaluator ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Function evaluator for LLVM IR.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Evaluator.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "evaluator"

using namespace llvm;

static inline bool
isSimpleEnoughValueToCommit(Constant *C,
                            SmallPtrSetImpl<Constant *> &SimpleConstants,
                            const DataLayout &DL);

/// Return true if the specified constant can be handled by the code generator.
/// We don't want to generate something like:
///   void *X = &X/42;
/// because the code generator doesn't have a relocation that can handle that.
///
/// This function should be called if C was not found (but just got inserted)
/// in SimpleConstants to avoid having to rescan the same constants all the
/// time.
static bool
isSimpleEnoughValueToCommitHelper(Constant *C,
                                  SmallPtrSetImpl<Constant *> &SimpleConstants,
                                  const DataLayout &DL) {
  // Simple global addresses are supported; do not allow dllimport or
  // thread-local globals.
  if (auto *GV = dyn_cast<GlobalValue>(C))
    return !GV->hasDLLImportStorageClass() && !GV->isThreadLocal();

  // Simple integers, undef, constant aggregate zero, etc. are all supported.
  if (C->getNumOperands() == 0 || isa<BlockAddress>(C))
    return true;

  // Aggregate values are safe if all their elements are.
  if (isa<ConstantAggregate>(C)) {
    for (Value *Op : C->operands())
      if (!isSimpleEnoughValueToCommit(cast<Constant>(Op), SimpleConstants, DL))
        return false;
    return true;
  }

  // We don't know exactly what relocations are allowed in constant expressions,
  // so we allow &global+constantoffset, which is safe and uniformly supported
  // across targets.
  ConstantExpr *CE = cast<ConstantExpr>(C);
  switch (CE->getOpcode()) {
  case Instruction::BitCast:
    // Bitcast is fine if the casted value is fine.
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);

  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
    // int <=> ptr is fine if the int type is the same size as the
    // pointer type.
    if (DL.getTypeSizeInBits(CE->getType()) !=
        DL.getTypeSizeInBits(CE->getOperand(0)->getType()))
      return false;
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);

  // GEP is fine if it is simple + constant offset.
  case Instruction::GetElementPtr:
    for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
      if (!isa<ConstantInt>(CE->getOperand(i)))
        return false;
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);

  case Instruction::Add:
    // We allow simple+cst.
    if (!isa<ConstantInt>(CE->getOperand(1)))
      return false;
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
  }
  return false;
}

static inline bool
isSimpleEnoughValueToCommit(Constant *C,
                            SmallPtrSetImpl<Constant *> &SimpleConstants,
                            const DataLayout &DL) {
  // If we already checked this constant, we win.
  if (!SimpleConstants.insert(C).second)
    return true;
  // Check the constant.
  return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, DL);
}
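
// For example, with a hypothetical global @g, constants such as
//   i32 7
//   ptr @g
//   getelementptr (i8, ptr @g, i64 16)
// are simple enough to commit, while the motivating case above (a global's
// address divided by 42) is not, because object-file relocations can express
// "address of a global plus a constant offset" but not arbitrary arithmetic
// on addresses.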

void Evaluator::MutableValue::clear() {
  if (auto *Agg = dyn_cast_if_present<MutableAggregate *>(Val))
    delete Agg;
  Val = nullptr;
}

Constant *Evaluator::MutableValue::read(Type *Ty, APInt Offset,
                                        const DataLayout &DL) const {
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  const MutableValue *V = this;
  while (const auto *Agg = dyn_cast_if_present<MutableAggregate *>(V->Val)) {
    Type *AggTy = Agg->Ty;
    std::optional<APInt> Index = DL.getGEPIndexForOffset(AggTy, Offset);
    if (!Index || Index->uge(Agg->Elements.size()) ||
        !TypeSize::isKnownLE(TySize, DL.getTypeStoreSize(AggTy)))
      return nullptr;

    V = &Agg->Elements[Index->getZExtValue()];
  }

  return ConstantFoldLoadFromConst(cast<Constant *>(V->Val), Ty, Offset, DL);
}

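/// Split a constant aggregate (struct, array, or fixed vector) into a
/// MutableAggregate with one MutableValue per element, so that a subsequent
/// store can overwrite a single element while the untouched elements keep
/// their original constant values. Returns false for non-aggregate types.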
bool Evaluator::MutableValue::makeMutable() {
  Constant *C = cast<Constant *>(Val);
  Type *Ty = C->getType();
  unsigned NumElements;
  if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
    NumElements = VT->getNumElements();
  } else if (auto *AT = dyn_cast<ArrayType>(Ty))
    NumElements = AT->getNumElements();
  else if (auto *ST = dyn_cast<StructType>(Ty))
    NumElements = ST->getNumElements();
  else
    return false;

  MutableAggregate *MA = new MutableAggregate(Ty);
  MA->Elements.reserve(NumElements);
  for (unsigned I = 0; I < NumElements; ++I)
    MA->Elements.push_back(C->getAggregateElement(I));
  Val = MA;
  return true;
}

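/// Store constant V at Offset, descending into nested aggregates (and turning
/// them into MutableAggregates on demand) until the target element is
/// type-compatible with V. For example, under a typical data layout, writing
/// an i32 at offset 4 of a value of type { i32, i32 } replaces element 1 and
/// leaves element 0 intact.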
bool Evaluator::MutableValue::write(Constant *V, APInt Offset,
                                    const DataLayout &DL) {
  Type *Ty = V->getType();
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  MutableValue *MV = this;
  while (Offset != 0 ||
         !CastInst::isBitOrNoopPointerCastable(Ty, MV->getType(), DL)) {
    if (isa<Constant *>(MV->Val) && !MV->makeMutable())
      return false;

    MutableAggregate *Agg = cast<MutableAggregate *>(MV->Val);
    Type *AggTy = Agg->Ty;
    std::optional<APInt> Index = DL.getGEPIndexForOffset(AggTy, Offset);
    if (!Index || Index->uge(Agg->Elements.size()) ||
        !TypeSize::isKnownLE(TySize, DL.getTypeStoreSize(AggTy)))
      return false;

    MV = &Agg->Elements[Index->getZExtValue()];
  }

  Type *MVType = MV->getType();
  MV->clear();
  if (Ty->isIntegerTy() && MVType->isPointerTy())
    MV->Val = ConstantExpr::getIntToPtr(V, MVType);
  else if (Ty->isPointerTy() && MVType->isIntegerTy())
    MV->Val = ConstantExpr::getPtrToInt(V, MVType);
  else if (Ty != MVType)
    MV->Val = ConstantExpr::getBitCast(V, MVType);
  else
    MV->Val = V;
  return true;
}

Constant *Evaluator::MutableAggregate::toConstant() const {
  SmallVector<Constant *, 32> Consts;
  for (const MutableValue &MV : Elements)
    Consts.push_back(MV.toConstant());

  if (auto *ST = dyn_cast<StructType>(Ty))
    return ConstantStruct::get(ST, Consts);
  if (auto *AT = dyn_cast<ArrayType>(Ty))
    return ConstantArray::get(AT, Consts);
  assert(isa<FixedVectorType>(Ty) && "Must be vector");
  return ConstantVector::get(Consts);
}

/// Return the value that would be computed by a load from P after the stores
/// reflected by MutatedMemory have been performed.  If we can't decide,
/// return null.
Constant *Evaluator::ComputeLoadResult(Constant *P, Type *Ty) {
  APInt Offset(DL.getIndexTypeSizeInBits(P->getType()), 0);
  P = cast<Constant>(P->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true));
  Offset = Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(P->getType()));
  if (auto *GV = dyn_cast<GlobalVariable>(P))
    return ComputeLoadResult(GV, Ty, Offset);
  return nullptr;
}

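/// Overload for a load from a known global at a constant offset. Mutated
/// memory recorded by earlier simulated stores takes precedence over the
/// global's static initializer, and only globals with a definitive
/// initializer can be read at all.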
Constant *Evaluator::ComputeLoadResult(GlobalVariable *GV, Type *Ty,
                                       const APInt &Offset) {
  auto It = MutatedMemory.find(GV);
  if (It != MutatedMemory.end())
    return It->second.read(Ty, Offset, DL);

  if (!GV->hasDefinitiveInitializer())
    return nullptr;
  return ConstantFoldLoadFromConst(GV->getInitializer(), Ty, Offset, DL);
}

static Function *getFunction(Constant *C) {
  if (auto *Fn = dyn_cast<Function>(C))
    return Fn;

  if (auto *Alias = dyn_cast<GlobalAlias>(C))
    if (auto *Fn = dyn_cast<Function>(Alias->getAliasee()))
      return Fn;
  return nullptr;
}

Function *
Evaluator::getCalleeWithFormalArgs(CallBase &CB,
                                   SmallVectorImpl<Constant *> &Formals) {
  auto *V = CB.getCalledOperand()->stripPointerCasts();
  if (auto *Fn = getFunction(getVal(V)))
    return getFormalParams(CB, Fn, Formals) ? Fn : nullptr;
  return nullptr;
}

bool Evaluator::getFormalParams(CallBase &CB, Function *F,
                                SmallVectorImpl<Constant *> &Formals) {
  if (!F)
    return false;

  auto *FTy = F->getFunctionType();
  if (FTy->getNumParams() > CB.arg_size()) {
    LLVM_DEBUG(dbgs() << "Too few arguments for function.\n");
    return false;
  }

  auto ArgI = CB.arg_begin();
  for (Type *PTy : FTy->params()) {
    auto *ArgC = ConstantFoldLoadThroughBitcast(getVal(*ArgI), PTy, DL);
    if (!ArgC) {
      LLVM_DEBUG(dbgs() << "Can not convert function argument.\n");
      return false;
    }
    Formals.push_back(ArgC);
    ++ArgI;
  }
  return true;
}

/// If the call expression contains a bitcast, we may need to cast the
/// evaluated return value to the type of the call expression.
Constant *Evaluator::castCallResultIfNeeded(Type *ReturnType, Constant *RV) {
  if (!RV || RV->getType() == ReturnType)
    return RV;

  RV = ConstantFoldLoadThroughBitcast(RV, ReturnType, DL);
  if (!RV)
    LLVM_DEBUG(dbgs() << "Failed to fold bitcast call expr\n");
  return RV;
}

/// Evaluate all instructions in block BB, returning true if successful, false
/// if we can't evaluate it.  NextBB returns the next BB that control flows
/// into, or null if the block ends in a return instruction.
/// StrippedPointerCastsForAliasAnalysis is set to true if we looked through
/// pointer casts to evaluate something.
bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB,
                              bool &StrippedPointerCastsForAliasAnalysis) {
  // This is the main evaluation loop.
  while (true) {
    Constant *InstResult = nullptr;

    LLVM_DEBUG(dbgs() << "Evaluating Instruction: " << *CurInst << "\n");

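    // Each supported instruction kind is handled below: stores are committed
    // only to globals with a unique initializer and only when the stored
    // value is simple enough for the code generator; loads read back through
    // MutatedMemory; allocas become temporary internal globals; calls are
    // constant-folded or recursively evaluated; anything else falls through
    // to generic constant folding of its operands.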
    if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) {
      if (SI->isVolatile()) {
        LLVM_DEBUG(dbgs() << "Store is volatile! Can not evaluate.\n");
        return false;  // no volatile accesses.
      }
      Constant *Ptr = getVal(SI->getOperand(1));
      Constant *FoldedPtr = ConstantFoldConstant(Ptr, DL, TLI);
      if (Ptr != FoldedPtr) {
        LLVM_DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr);
        Ptr = FoldedPtr;
        LLVM_DEBUG(dbgs() << "; To: " << *Ptr << "\n");
      }

      APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
      Ptr = cast<Constant>(Ptr->stripAndAccumulateConstantOffsets(
          DL, Offset, /* AllowNonInbounds */ true));
      Offset = Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(Ptr->getType()));
      auto *GV = dyn_cast<GlobalVariable>(Ptr);
      if (!GV || !GV->hasUniqueInitializer()) {
        LLVM_DEBUG(dbgs() << "Store is not to global with unique initializer: "
                          << *Ptr << "\n");
        return false;
      }

      // If this might be too difficult for the backend to handle (e.g. the addr
      // of one global variable divided by another) then we can't commit it.
      Constant *Val = getVal(SI->getOperand(0));
      if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, DL)) {
        LLVM_DEBUG(dbgs() << "Store value is too complex to evaluate store. "
                          << *Val << "\n");
        return false;
      }

      auto Res = MutatedMemory.try_emplace(GV, GV->getInitializer());
      if (!Res.first->second.write(Val, Offset, DL))
        return false;
    } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) {
      if (LI->isVolatile()) {
        LLVM_DEBUG(
            dbgs() << "Found a Load! Volatile load, can not evaluate.\n");
        return false;  // no volatile accesses.
      }

      Constant *Ptr = getVal(LI->getOperand(0));
      Constant *FoldedPtr = ConstantFoldConstant(Ptr, DL, TLI);
      if (Ptr != FoldedPtr) {
        Ptr = FoldedPtr;
        LLVM_DEBUG(dbgs() << "Found a constant pointer expression, constant "
                             "folding: "
                          << *Ptr << "\n");
      }
      InstResult = ComputeLoadResult(Ptr, LI->getType());
      if (!InstResult) {
        LLVM_DEBUG(
            dbgs() << "Failed to compute load result. Can not evaluate load."
                      "\n");
        return false; // Could not evaluate load.
      }

      LLVM_DEBUG(dbgs() << "Evaluated load: " << *InstResult << "\n");
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) {
      if (AI->isArrayAllocation()) {
        LLVM_DEBUG(dbgs() << "Found an array alloca. Can not evaluate.\n");
        return false;  // Cannot handle array allocs.
      }
      Type *Ty = AI->getAllocatedType();
      AllocaTmps.push_back(std::make_unique<GlobalVariable>(
          Ty, false, GlobalValue::InternalLinkage, UndefValue::get(Ty),
          AI->getName(), /*TLMode=*/GlobalValue::NotThreadLocal,
          AI->getType()->getPointerAddressSpace()));
      InstResult = AllocaTmps.back().get();
      LLVM_DEBUG(dbgs() << "Found an alloca. Result: " << *InstResult << "\n");
    } else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) {
      CallBase &CB = *cast<CallBase>(&*CurInst);

      // Debug info can safely be ignored here.
      if (isa<DbgInfoIntrinsic>(CB)) {
        LLVM_DEBUG(dbgs() << "Ignoring debug info.\n");
        ++CurInst;
        continue;
      }

      // Cannot handle inline asm.
      if (CB.isInlineAsm()) {
        LLVM_DEBUG(dbgs() << "Found inline asm, can not evaluate.\n");
        return false;
      }

      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CB)) {
        if (MemSetInst *MSI = dyn_cast<MemSetInst>(II)) {
          if (MSI->isVolatile()) {
            LLVM_DEBUG(dbgs() << "Can not optimize a volatile memset "
                              << "intrinsic.\n");
            return false;
          }

          auto *LenC = dyn_cast<ConstantInt>(getVal(MSI->getLength()));
          if (!LenC) {
            LLVM_DEBUG(dbgs() << "Memset with unknown length.\n");
            return false;
          }

          Constant *Ptr = getVal(MSI->getDest());
          APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
          Ptr = cast<Constant>(Ptr->stripAndAccumulateConstantOffsets(
              DL, Offset, /* AllowNonInbounds */ true));
          auto *GV = dyn_cast<GlobalVariable>(Ptr);
          if (!GV) {
            LLVM_DEBUG(dbgs() << "Memset with unknown base.\n");
            return false;
          }

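          // Note that the evaluator never materializes the bytes a memset
          // would write; it only accepts memsets that are provably no-ops
          // (every affected byte already holds the stored value) and bounds
          // the byte-by-byte check below to 64 KiB.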
          Constant *Val = getVal(MSI->getValue());
          // Avoid the byte-per-byte scan if we're memsetting a zeroinitializer
          // to zero.
          if (!Val->isNullValue() || MutatedMemory.contains(GV) ||
              !GV->hasDefinitiveInitializer() ||
              !GV->getInitializer()->isNullValue()) {
            APInt Len = LenC->getValue();
            if (Len.ugt(64 * 1024)) {
              LLVM_DEBUG(dbgs() << "Not evaluating large memset of size "
                                << Len << "\n");
              return false;
            }

            while (Len != 0) {
              Constant *DestVal = ComputeLoadResult(GV, Val->getType(), Offset);
              if (DestVal != Val) {
                LLVM_DEBUG(dbgs() << "Memset is not a no-op at offset "
                                  << Offset << " of " << *GV << ".\n");
                return false;
              }
              ++Offset;
              --Len;
            }
          }

          LLVM_DEBUG(dbgs() << "Ignoring no-op memset.\n");
          ++CurInst;
          continue;
        }

        if (II->isLifetimeStartOrEnd()) {
          LLVM_DEBUG(dbgs() << "Ignoring lifetime intrinsic.\n");
          ++CurInst;
          continue;
        }

        if (II->getIntrinsicID() == Intrinsic::invariant_start) {
          // We don't insert an entry into Values, as it doesn't have a
          // meaningful return value.
          if (!II->use_empty()) {
            LLVM_DEBUG(dbgs()
                       << "Found invariant_start with uses. Can't evaluate.\n");
            return false;
          }
          ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0));
          Value *PtrArg = getVal(II->getArgOperand(1));
          Value *Ptr = PtrArg->stripPointerCasts();
          if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
            Type *ElemTy = GV->getValueType();
            if (!Size->isMinusOne() &&
                Size->getValue().getLimitedValue() >=
                    DL.getTypeStoreSize(ElemTy)) {
              Invariants.insert(GV);
              LLVM_DEBUG(dbgs() << "Found a global var that is an invariant: "
                                << *GV << "\n");
            } else {
              LLVM_DEBUG(dbgs()
                         << "Found a global var, but can not treat it as an "
                            "invariant.\n");
            }
          }
          // Continue even if we do nothing.
          ++CurInst;
          continue;
        } else if (II->getIntrinsicID() == Intrinsic::assume) {
          LLVM_DEBUG(dbgs() << "Skipping assume intrinsic.\n");
          ++CurInst;
          continue;
        } else if (II->getIntrinsicID() == Intrinsic::sideeffect) {
          LLVM_DEBUG(dbgs() << "Skipping sideeffect intrinsic.\n");
          ++CurInst;
          continue;
        } else if (II->getIntrinsicID() == Intrinsic::pseudoprobe) {
          LLVM_DEBUG(dbgs() << "Skipping pseudoprobe intrinsic.\n");
          ++CurInst;
          continue;
        } else {
          Value *Stripped = CurInst->stripPointerCastsForAliasAnalysis();
          // Only attempt to getVal() if we've actually managed to strip
          // anything away, or else we'll call getVal() on the current
          // instruction.
          if (Stripped != &*CurInst) {
            InstResult = getVal(Stripped);
          }
          if (InstResult) {
            LLVM_DEBUG(dbgs()
                       << "Stripped pointer casts for alias analysis for "
                          "intrinsic call.\n");
            StrippedPointerCastsForAliasAnalysis = true;
            InstResult = ConstantExpr::getBitCast(InstResult, II->getType());
          } else {
            LLVM_DEBUG(dbgs() << "Unknown intrinsic. Cannot evaluate.\n");
            return false;
          }
        }
      }

      if (!InstResult) {
        // Resolve function pointers.
        SmallVector<Constant *, 8> Formals;
        Function *Callee = getCalleeWithFormalArgs(CB, Formals);
        if (!Callee || Callee->isInterposable()) {
          LLVM_DEBUG(dbgs() << "Can not resolve function pointer.\n");
          return false; // Cannot resolve.
        }

        if (Callee->isDeclaration()) {
          // If this is a function we can constant fold, do it.
          if (Constant *C = ConstantFoldCall(&CB, Callee, Formals, TLI)) {
            InstResult = castCallResultIfNeeded(CB.getType(), C);
            if (!InstResult)
              return false;
            LLVM_DEBUG(dbgs() << "Constant folded function call. Result: "
                              << *InstResult << "\n");
          } else {
            LLVM_DEBUG(dbgs() << "Can not constant fold function call.\n");
            return false;
          }
        } else {
          if (Callee->getFunctionType()->isVarArg()) {
            LLVM_DEBUG(dbgs()
                       << "Can not constant fold vararg function call.\n");
            return false;
          }

          Constant *RetVal = nullptr;
          // Execute the call; if successful, use the return value.
          ValueStack.emplace_back();
          if (!EvaluateFunction(Callee, RetVal, Formals)) {
            LLVM_DEBUG(dbgs() << "Failed to evaluate function.\n");
            return false;
          }
          ValueStack.pop_back();
          InstResult = castCallResultIfNeeded(CB.getType(), RetVal);
          if (RetVal && !InstResult)
            return false;

          if (InstResult) {
            LLVM_DEBUG(dbgs() << "Successfully evaluated function. Result: "
                              << *InstResult << "\n\n");
          } else {
            LLVM_DEBUG(dbgs()
                       << "Successfully evaluated function. Result: 0\n\n");
          }
        }
      }
    } else if (CurInst->isTerminator()) {
      LLVM_DEBUG(dbgs() << "Found a terminator instruction.\n");

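      // Note: for a conditional branch, successor 0 is the "true" target, so
      // getSuccessor(!Cond->getZExtValue()) selects successor 0 when the
      // folded condition is true and successor 1 when it is false.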
      if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) {
        if (BI->isUnconditional()) {
          NextBB = BI->getSuccessor(0);
        } else {
          ConstantInt *Cond =
            dyn_cast<ConstantInt>(getVal(BI->getCondition()));
          if (!Cond) return false;  // Cannot determine.

          NextBB = BI->getSuccessor(!Cond->getZExtValue());
        }
      } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) {
        ConstantInt *Val =
          dyn_cast<ConstantInt>(getVal(SI->getCondition()));
        if (!Val) return false;  // Cannot determine.
        NextBB = SI->findCaseValue(Val)->getCaseSuccessor();
      } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) {
        Value *Val = getVal(IBI->getAddress())->stripPointerCasts();
        if (BlockAddress *BA = dyn_cast<BlockAddress>(Val))
          NextBB = BA->getBasicBlock();
        else
          return false;  // Cannot determine.
      } else if (isa<ReturnInst>(CurInst)) {
        NextBB = nullptr;
      } else {
        // invoke, unwind, resume, unreachable.
        LLVM_DEBUG(dbgs() << "Can not handle terminator.");
        return false;  // Cannot handle this terminator.
      }

      // We succeeded at evaluating this block!
      LLVM_DEBUG(dbgs() << "Successfully evaluated block.\n");
      return true;
    } else {
      SmallVector<Constant *> Ops;
      for (Value *Op : CurInst->operands())
        Ops.push_back(getVal(Op));
      InstResult = ConstantFoldInstOperands(&*CurInst, Ops, DL, TLI);
      if (!InstResult) {
        LLVM_DEBUG(dbgs() << "Cannot fold instruction: " << *CurInst << "\n");
        return false;
      }
      LLVM_DEBUG(dbgs() << "Folded instruction " << *CurInst << " to "
                        << *InstResult << "\n");
    }

    if (!CurInst->use_empty()) {
      InstResult = ConstantFoldConstant(InstResult, DL, TLI);
      setVal(&*CurInst, InstResult);
    }

    // If we just processed an invoke, we finished evaluating the block.
    if (InvokeInst *II = dyn_cast<InvokeInst>(CurInst)) {
      NextBB = II->getNormalDest();
      LLVM_DEBUG(dbgs() << "Found an invoke instruction. Finished Block.\n\n");
      return true;
    }

    // Advance program counter.
    ++CurInst;
  }
}

/// Evaluate a call to function F, returning true if successful, false if we
/// can't evaluate it.  ActualArgs contains the actual argument values passed
/// to the function's formal parameters.
bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
                                 const SmallVectorImpl<Constant*> &ActualArgs) {
  assert(ActualArgs.size() == F->arg_size() && "wrong number of arguments");

  // Check to see if this function is already executing (recursion).  If so,
  // bail out.  TODO: we might want to accept limited recursion.
  if (is_contained(CallStack, F))
    return false;

  CallStack.push_back(F);

  // Initialize arguments to the incoming values specified.
  for (const auto &[ArgNo, Arg] : llvm::enumerate(F->args()))
    setVal(&Arg, ActualArgs[ArgNo]);

  // ExecutedBlocks - We only handle non-looping, non-recursive code.  As such,
  // we can only evaluate any one basic block at most once.  This set keeps
  // track of what we have executed so we can detect recursive cases etc.
  SmallPtrSet<BasicBlock*, 32> ExecutedBlocks;

  // CurBB - The current basic block we're evaluating.
  BasicBlock *CurBB = &F->front();

  BasicBlock::iterator CurInst = CurBB->begin();

  while (true) {
    BasicBlock *NextBB = nullptr; // Initialized to avoid compiler warnings.
    LLVM_DEBUG(dbgs() << "Trying to evaluate BB: " << *CurBB << "\n");

    bool StrippedPointerCastsForAliasAnalysis = false;

    if (!EvaluateBlock(CurInst, NextBB, StrippedPointerCastsForAliasAnalysis))
      return false;

    if (!NextBB) {
      // Successfully running until there's no next block means that we found
      // the return.  Fill in the return value and pop the call stack.
      ReturnInst *RI = cast<ReturnInst>(CurBB->getTerminator());
      if (RI->getNumOperands()) {
        // The Evaluator can look through pointer casts as long as alias
        // analysis holds, because it's just a simple interpreter and doesn't
        // skip memory accesses due to invariant group metadata, but we can't
        // let users of the Evaluator consume a value that was only obtained by
        // looking through pointer casts.
        if (StrippedPointerCastsForAliasAnalysis &&
            !RI->getReturnValue()->getType()->isVoidTy()) {
          return false;
        }
        RetVal = getVal(RI->getOperand(0));
      }
      CallStack.pop_back();
      return true;
    }

    // Okay, we succeeded in evaluating this control flow.  See if we have
    // executed the new block before.  If so, we have a looping function,
    // which we cannot evaluate in reasonable time.
    if (!ExecutedBlocks.insert(NextBB).second)
      return false;  // looped!

    // Okay, we have never been in this block before.  Check to see if there
    // are any PHI nodes.  If so, evaluate them with information about where
    // we came from.
    PHINode *PN = nullptr;
    for (CurInst = NextBB->begin();
         (PN = dyn_cast<PHINode>(CurInst)); ++CurInst)
      setVal(PN, getVal(PN->getIncomingValueForBlock(CurBB)));

    // Advance to the next block.
    CurBB = NextBB;
  }
}
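// Usage sketch: a driver such as GlobalOpt's static-constructor evaluation is
// expected to run EvaluateFunction() and then commit the simulated stores back
// into the IR. The constructor signature and the getMutatedInitializers()
// accessor shown here are assumptions about the surrounding interface; see
// Evaluator.h for the authoritative declarations.
//
//   SmallVector<Constant *, 0> Args; // static constructors take no arguments
//   Evaluator Eval(DL, TLI);
//   Constant *RetValDummy = nullptr;
//   if (Eval.EvaluateFunction(Ctor, RetValDummy, Args)) {
//     // Commit the simulated stores back into the real globals.
//     for (const auto &Pair : Eval.getMutatedInitializers())
//       Pair.first->setInitializer(Pair.second.toConstant());
//   }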