xref: /llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp (revision dddeec4becabf71d4067080bcc2c09a9e67c3025)
1 //=-- ExprEngineCallAndReturn.cpp - Support for call/return -----*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 //  This file defines ExprEngine's support for calls and returns.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "PrettyStackTraceLocationContext.h"
14 #include "clang/AST/CXXInheritance.h"
15 #include "clang/AST/Decl.h"
16 #include "clang/AST/DeclCXX.h"
17 #include "clang/Analysis/Analyses/LiveVariables.h"
18 #include "clang/Analysis/ConstructionContext.h"
19 #include "clang/StaticAnalyzer/Core/CheckerManager.h"
20 #include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
21 #include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
22 #include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
23 #include "llvm/ADT/SmallSet.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/Support/Casting.h"
26 #include "llvm/Support/Compiler.h"
27 #include "llvm/Support/SaveAndRestore.h"
28 #include <optional>
29 
30 using namespace clang;
31 using namespace ento;
32 
33 #define DEBUG_TYPE "ExprEngine"
34 
35 STATISTIC(NumOfDynamicDispatchPathSplits,
36   "The # of times we split the path due to imprecise dynamic dispatch info");
37 
38 STATISTIC(NumInlinedCalls,
39   "The # of times we inlined a call");
40 
41 STATISTIC(NumReachedInlineCountMax,
42   "The # of times we reached inline count maximum");
43 
44 void ExprEngine::processCallEnter(NodeBuilderContext& BC, CallEnter CE,
45                                   ExplodedNode *Pred) {
46   // Get the entry block in the CFG of the callee.
47   const StackFrameContext *calleeCtx = CE.getCalleeContext();
48   PrettyStackTraceLocationContext CrashInfo(calleeCtx);
49   const CFGBlock *Entry = CE.getEntry();
50 
51   // Validate the CFG.
52   assert(Entry->empty());
53   assert(Entry->succ_size() == 1);
54 
55   // Get the solitary successor.
56   const CFGBlock *Succ = *(Entry->succ_begin());
57 
58   // Construct an edge representing the starting location in the callee.
59   BlockEdge Loc(Entry, Succ, calleeCtx);
60 
61   ProgramStateRef state = Pred->getState();
62 
63   // Construct a new node, notify checkers that analysis of the function has
64   // begun, and add the resultant nodes to the worklist.
65   bool isNew;
66   ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
67   Node->addPredecessor(Pred, G);
68   if (isNew) {
69     ExplodedNodeSet DstBegin;
70     processBeginOfFunction(BC, Node, DstBegin, Loc);
71     Engine.enqueue(DstBegin);
72   }
73 }
74 
75 // Find the last statement on the path to the exploded node and the
76 // corresponding Block.
77 static std::pair<const Stmt*,
78                  const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
79   const Stmt *S = nullptr;
80   const CFGBlock *Blk = nullptr;
81   const StackFrameContext *SF = Node->getStackFrame();
82 
83   // Back up through the ExplodedGraph until we reach a statement node in this
84   // stack frame.
85   while (Node) {
86     const ProgramPoint &PP = Node->getLocation();
87 
88     if (PP.getStackFrame() == SF) {
89       if (std::optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
90         S = SP->getStmt();
91         break;
92       } else if (std::optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
93         S = CEE->getCalleeContext()->getCallSite();
94         if (S)
95           break;
96 
97         // If there is no statement, this is an implicitly-generated call.
98         // We'll walk backwards over it and then continue the loop to find
99         // an actual statement.
100         std::optional<CallEnter> CE;
101         do {
102           Node = Node->getFirstPred();
103           CE = Node->getLocationAs<CallEnter>();
104         } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());
105 
106         // Continue searching the graph.
107       } else if (std::optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
108         Blk = BE->getSrc();
109       }
110     } else if (std::optional<CallEnter> CE = PP.getAs<CallEnter>()) {
111       // If we reached the CallEnter for this function, it has no statements.
112       if (CE->getCalleeContext() == SF)
113         break;
114     }
115 
116     if (Node->pred_empty())
117       return std::make_pair(nullptr, nullptr);
118 
119     Node = *Node->pred_begin();
120   }
121 
122   return std::make_pair(S, Blk);
123 }
124 
125 /// Adjusts a return value when the called function's return type does not
126 /// match the caller's expression type. This can happen when a dynamic call
127 /// is devirtualized, and the overriding method has a covariant (more specific)
128 /// return type than the parent's method. For C++ objects, this means we need
129 /// to add base casts.
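///
/// For example (an illustrative sketch, not code from this file):
///   struct Base { virtual Base *clone(); };
///   struct Derived : Base { Derived *clone() override; };
/// If a call written against `Base::clone()` is devirtualized to
/// `Derived::clone()`, the returned `Derived *` is cast back to `Base *` here.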
130 static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
131                               StoreManager &StoreMgr) {
132   // For now, the only adjustments we handle apply only to locations.
133   if (!isa<Loc>(V))
134     return V;
135 
136   // If the types already match, don't do any unnecessary work.
137   ExpectedTy = ExpectedTy.getCanonicalType();
138   ActualTy = ActualTy.getCanonicalType();
139   if (ExpectedTy == ActualTy)
140     return V;
141 
142   // No adjustment is needed between Objective-C pointer types.
143   if (ExpectedTy->isObjCObjectPointerType() &&
144       ActualTy->isObjCObjectPointerType())
145     return V;
146 
147   // C++ object pointers may need "derived-to-base" casts.
148   const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
149   const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
150   if (ExpectedClass && ActualClass) {
151     CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
152                        /*DetectVirtual=*/false);
153     if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
154         !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
155       return StoreMgr.evalDerivedToBase(V, Paths.front());
156     }
157   }
158 
159   // Unfortunately, Objective-C does not enforce that overridden methods have
160   // covariant return types, so we can't assert that that never happens.
161   // Be safe and return UnknownVal().
162   return UnknownVal();
163 }
164 
165 void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC,
166                                            ExplodedNode *Pred,
167                                            ExplodedNodeSet &Dst) {
168   // Find the last statement in the function and the corresponding basic block.
169   const Stmt *LastSt = nullptr;
170   const CFGBlock *Blk = nullptr;
171   std::tie(LastSt, Blk) = getLastStmt(Pred);
172   if (!Blk || !LastSt) {
173     Dst.Add(Pred);
174     return;
175   }
176 
177   // Here, we destroy the current location context. We use the current
178   // function's entire body as a diagnostic statement, with which the program
179   // point will be associated. However, we only want to use LastStmt as a
180   // reference for what to clean up if it's a ReturnStmt; otherwise, everything
181   // is dead.
182   SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC);
183   const LocationContext *LCtx = Pred->getLocationContext();
184   removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx,
185              LCtx->getAnalysisDeclContext()->getBody(),
186              ProgramPoint::PostStmtPurgeDeadSymbolsKind);
187 }
188 
189 static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
190     const StackFrameContext *calleeCtx) {
191   const Decl *RuntimeCallee = calleeCtx->getDecl();
192   const Decl *StaticDecl = Call->getDecl();
193   assert(RuntimeCallee);
194   if (!StaticDecl)
195     return true;
196   return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
197 }
198 
199 // Returns the number of elements in the array currently being destructed.
200 // If the element count is not found, 0 is returned.
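// For example (illustrative): when destroying `S arr[4];`, the destructor's
// `this` value is an ElementRegion of the array region, and the dynamic
// element count recorded for that array region is 4.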
201 static unsigned getElementCountOfArrayBeingDestructed(
202     const CallEvent &Call, const ProgramStateRef State, SValBuilder &SVB) {
203   assert(isa<CXXDestructorCall>(Call) &&
204          "The call event is not a destructor call!");
205 
206   const auto &DtorCall = cast<CXXDestructorCall>(Call);
207 
208   auto ThisVal = DtorCall.getCXXThisVal();
209 
210   if (auto ThisElementRegion = dyn_cast<ElementRegion>(ThisVal.getAsRegion())) {
211     auto ArrayRegion = ThisElementRegion->getAsArrayOffset().getRegion();
212     auto ElementType = ThisElementRegion->getElementType();
213 
214     auto ElementCount =
215         getDynamicElementCount(State, ArrayRegion, SVB, ElementType);
216 
217     if (!ElementCount.isConstant())
218       return 0;
219 
220     return ElementCount.getAsInteger()->getLimitedValue();
221   }
222 
223   return 0;
224 }
225 
226 ProgramStateRef ExprEngine::removeStateTraitsUsedForArrayEvaluation(
227     ProgramStateRef State, const CXXConstructExpr *E,
228     const LocationContext *LCtx) {
229 
230   assert(LCtx && "Location context must be provided!");
231 
232   if (E) {
233     if (getPendingInitLoop(State, E, LCtx))
234       State = removePendingInitLoop(State, E, LCtx);
235 
236     if (getIndexOfElementToConstruct(State, E, LCtx))
237       State = removeIndexOfElementToConstruct(State, E, LCtx);
238   }
239 
240   if (getPendingArrayDestruction(State, LCtx))
241     State = removePendingArrayDestruction(State, LCtx);
242 
243   return State;
244 }
245 
246 /// The call exit is simulated with a sequence of nodes, which occur between
247 /// CallExitBegin and CallExitEnd. The following operations occur between the
248 /// two program points:
249 /// 1. CallExitBegin (triggers the start of call exit sequence)
250 /// 2. Bind the return value
251 /// 3. Run RemoveDeadBindings to clean up the dead symbols from the callee.
252 /// 4. CallExitEnd (switch to the caller context)
253 /// 5. PostStmt<CallExpr>
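///
/// For example (an illustrative sketch), after inlining `callee()` in
///   int callee() { return 42; }
///   void caller() { int x = callee(); }
/// this sequence binds 42 to the CallExpr, purges the callee's dead bindings,
/// switches back to the caller's stack frame at CallExitEnd, and resumes at
/// PostStmt<CallExpr> so that the initialization of `x` can be modeled.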
254 void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
255   // Step 1 CEBNode was generated before the call.
256   PrettyStackTraceLocationContext CrashInfo(CEBNode->getLocationContext());
257   const StackFrameContext *calleeCtx = CEBNode->getStackFrame();
258 
259   // The parent context might not be a stack frame, so make sure we
260   // look up the first enclosing stack frame.
261   const StackFrameContext *callerCtx =
262     calleeCtx->getParent()->getStackFrame();
263 
264   const Stmt *CE = calleeCtx->getCallSite();
265   ProgramStateRef state = CEBNode->getState();
266   // Find the last statement in the function and the corresponding basic block.
267   const Stmt *LastSt = nullptr;
268   const CFGBlock *Blk = nullptr;
269   std::tie(LastSt, Blk) = getLastStmt(CEBNode);
270 
271   // Generate a CallEvent /before/ cleaning the state, so that we can get the
272   // correct value for 'this' (if necessary).
273   CallEventManager &CEMgr = getStateManager().getCallEventManager();
274   CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);
275 
276   // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.
277 
278   // If this variable is set to 'true', the analyzer will evaluate the call
279   // statement we are about to exit again, instead of continuing execution
280   // from the statement after the call. This is useful for non-POD array
281   // construction, where the CXXConstructExpr is referenced only once in the
282   // CFG, but we want to evaluate it as many times as the array has elements.
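  // For example: for `S arr[3];` with a non-trivial constructor, the single
  // CXXConstructExpr in the CFG is re-entered three times, once per element.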
283   bool ShouldRepeatCall = false;
284 
285   if (const auto *DtorDecl =
286           dyn_cast_or_null<CXXDestructorDecl>(Call->getDecl())) {
287     if (auto Idx = getPendingArrayDestruction(state, callerCtx)) {
288       ShouldRepeatCall = *Idx > 0;
289 
290       auto ThisVal = svalBuilder.getCXXThis(DtorDecl->getParent(), calleeCtx);
291       state = state->killBinding(ThisVal);
292     }
293   }
294 
295   // If the callee returns an expression, bind its value to CallExpr.
296   if (CE) {
297     if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
298       const LocationContext *LCtx = CEBNode->getLocationContext();
299       SVal V = state->getSVal(RS, LCtx);
300 
301       // Ensure that the return type matches the type of the returned Expr.
302       if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
303         QualType ReturnedTy =
304           CallEvent::getDeclaredResultType(calleeCtx->getDecl());
305         if (!ReturnedTy.isNull()) {
306           if (const Expr *Ex = dyn_cast<Expr>(CE)) {
307             V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
308                                   getStoreManager());
309           }
310         }
311       }
312 
313       state = state->BindExpr(CE, callerCtx, V);
314     }
315 
316     // Bind the constructed object value to CXXConstructExpr.
317     if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
318       loc::MemRegionVal This =
319         svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
320       SVal ThisV = state->getSVal(This);
321       ThisV = state->getSVal(ThisV.castAs<Loc>());
322       state = state->BindExpr(CCE, callerCtx, ThisV);
323 
324       ShouldRepeatCall = shouldRepeatCtorCall(state, CCE, callerCtx);
325     }
326 
327     if (const auto *CNE = dyn_cast<CXXNewExpr>(CE)) {
328       // We are currently evaluating a CXXNewAllocator CFGElement. It takes a
329       // while to reach the actual CXXNewExpr element from here, so keep the
330       // region for later use.
331       // Additionally cast the return value of the inlined operator new
332       // (which is of type 'void *') to the correct object type.
333       SVal AllocV = state->getSVal(CNE, callerCtx);
334       AllocV = svalBuilder.evalCast(
335           AllocV, CNE->getType(),
336           getContext().getPointerType(getContext().VoidTy));
337 
338       state = addObjectUnderConstruction(state, CNE, calleeCtx->getParent(),
339                                          AllocV);
340     }
341   }
342 
343   if (!ShouldRepeatCall) {
344     state = removeStateTraitsUsedForArrayEvaluation(
345         state, dyn_cast_or_null<CXXConstructExpr>(CE), callerCtx);
346   }
347 
348   // Step 3: BindedRetNode -> CleanedNodes
349   // If we can find a statement and a block in the inlined function, run remove
350   // dead bindings before returning from the call. This is important to ensure
351   // that we report issues such as leaks in the stack contexts in which
352   // they occurred.
353   ExplodedNodeSet CleanedNodes;
354   if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
355     static SimpleProgramPointTag retValBind("ExprEngine", "Bind Return Value");
356     auto Loc = isa<ReturnStmt>(LastSt)
357                    ? ProgramPoint{PostStmt(LastSt, calleeCtx, &retValBind)}
358                    : ProgramPoint{EpsilonPoint(calleeCtx, /*Data1=*/nullptr,
359                                                /*Data2=*/nullptr, &retValBind)};
360     const CFGBlock *PrePurgeBlock =
361         isa<ReturnStmt>(LastSt) ? Blk : &CEBNode->getCFG().getExit();
362     bool isNew;
363     ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
364     BindedRetNode->addPredecessor(CEBNode, G);
365     if (!isNew)
366       return;
367 
368     NodeBuilderContext Ctx(getCoreEngine(), PrePurgeBlock, BindedRetNode);
369     currBldrCtx = &Ctx;
370     // Here, we call the Symbol Reaper with a null statement and the callee's
371     // location context, telling it to clean up everything in the callee's
372     // context (and its children). We use the callee's function body as a
373     // diagnostic statement, with which the program point will be associated.
374     removeDead(BindedRetNode, CleanedNodes, nullptr, calleeCtx,
375                calleeCtx->getAnalysisDeclContext()->getBody(),
376                ProgramPoint::PostStmtPurgeDeadSymbolsKind);
377     currBldrCtx = nullptr;
378   } else {
379     CleanedNodes.Add(CEBNode);
380   }
381 
382   for (ExplodedNode *N : CleanedNodes) {
383     // Step 4: Generate the CallExit and leave the callee's context.
384     // CleanedNodes -> CEENode
385     CallExitEnd Loc(calleeCtx, callerCtx);
386     bool isNew;
387     ProgramStateRef CEEState = (N == CEBNode) ? state : N->getState();
388 
389     ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
390     CEENode->addPredecessor(N, G);
391     if (!isNew)
392       return;
393 
394     // Step 5: Perform the post-condition check of the CallExpr and enqueue the
395     // result onto the work list.
396     // CEENode -> Dst -> WorkList
397     NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
398     SaveAndRestore<const NodeBuilderContext *> NBCSave(currBldrCtx, &Ctx);
399     SaveAndRestore CBISave(currStmtIdx, calleeCtx->getIndex());
400 
401     CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);
402 
403     ExplodedNodeSet DstPostCall;
404     if (llvm::isa_and_nonnull<CXXNewExpr>(CE)) {
405       ExplodedNodeSet DstPostPostCallCallback;
406       getCheckerManager().runCheckersForPostCall(DstPostPostCallCallback,
407                                                  CEENode, *UpdatedCall, *this,
408                                                  /*wasInlined=*/true);
409       for (ExplodedNode *I : DstPostPostCallCallback) {
410         getCheckerManager().runCheckersForNewAllocator(
411             cast<CXXAllocatorCall>(*UpdatedCall), DstPostCall, I, *this,
412             /*wasInlined=*/true);
413       }
414     } else {
415       getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
416                                                  *UpdatedCall, *this,
417                                                  /*wasInlined=*/true);
418     }
419     ExplodedNodeSet Dst;
420     if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
421       getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
422                                                         *this,
423                                                         /*wasInlined=*/true);
424     } else if (CE &&
425                !(isa<CXXNewExpr>(CE) && // Called when visiting CXXNewExpr.
426                  AMgr.getAnalyzerOptions().MayInlineCXXAllocator)) {
427       getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
428                                                  *this, /*wasInlined=*/true);
429     } else {
430       Dst.insert(DstPostCall);
431     }
432 
433     // Enqueue the next element in the block.
434     for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
435          PSI != PSE; ++PSI) {
436       unsigned Idx = calleeCtx->getIndex() + (ShouldRepeatCall ? 0 : 1);
437 
438       Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(), Idx);
439     }
440   }
441 }
442 
443 bool ExprEngine::isSmall(AnalysisDeclContext *ADC) const {
444   // When there are no branches in the function, it means that there's no
445   // exponential complexity introduced by inlining such a function.
446   // Such functions also don't trigger various fundamental problems
447   // with our inlining mechanism, such as the problem of
448   // inlined defensive checks. Hence isLinear().
449   const CFG *Cfg = ADC->getCFG();
450   return Cfg->isLinear() || Cfg->size() <= AMgr.options.AlwaysInlineSize;
451 }
452 
453 bool ExprEngine::isLarge(AnalysisDeclContext *ADC) const {
454   const CFG *Cfg = ADC->getCFG();
455   return Cfg->size() >= AMgr.options.MinCFGSizeTreatFunctionsAsLarge;
456 }
457 
458 bool ExprEngine::isHuge(AnalysisDeclContext *ADC) const {
459   const CFG *Cfg = ADC->getCFG();
460   return Cfg->getNumBlockIDs() > AMgr.options.MaxInlinableSize;
461 }
462 
463 void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
464                                bool &IsRecursive, unsigned &StackDepth) {
465   IsRecursive = false;
466   StackDepth = 0;
467 
468   while (LCtx) {
469     if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
470       const Decl *DI = SFC->getDecl();
471 
472       // Mark recursive (and mutually recursive) functions and always count
473       // them when measuring the stack depth.
474       if (DI == D) {
475         IsRecursive = true;
476         ++StackDepth;
477         LCtx = LCtx->getParent();
478         continue;
479       }
480 
481       // Do not count the small functions when determining the stack depth.
482       AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
483       if (!isSmall(CalleeADC))
484         ++StackDepth;
485     }
486     LCtx = LCtx->getParent();
487   }
488 }
489 
490 // The GDM component containing the dynamic dispatch bifurcation info. When
491 // the exact type of the receiver is not known, we want to explore both paths -
492 // one on which we inline the call and one on which we don't. This is
493 // done to ensure we do not drop coverage.
494 // This is the map from the receiver region to a flag, specifying whether we
495 // consider this region's information precise along the given path.
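// For example: for a virtual call through a `Base *` whose dynamic type is
// not known precisely, the analyzer may take both paths: one where the
// candidate override is inlined and one where the call is evaluated
// conservatively.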
496 namespace {
497   enum DynamicDispatchMode {
498     DynamicDispatchModeInlined = 1,
499     DynamicDispatchModeConservative
500   };
501 } // end anonymous namespace
502 
503 REGISTER_MAP_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
504                                const MemRegion *, unsigned)
505 REGISTER_TRAIT_WITH_PROGRAMSTATE(CTUDispatchBifurcation, bool)
506 
507 void ExprEngine::ctuBifurcate(const CallEvent &Call, const Decl *D,
508                               NodeBuilder &Bldr, ExplodedNode *Pred,
509                               ProgramStateRef State) {
510   ProgramStateRef ConservativeEvalState = nullptr;
511   if (Call.isForeign() && !isSecondPhaseCTU()) {
512     const auto IK = AMgr.options.getCTUPhase1Inlining();
513     const bool DoInline = IK == CTUPhase1InliningKind::All ||
514                           (IK == CTUPhase1InliningKind::Small &&
515                            isSmall(AMgr.getAnalysisDeclContext(D)));
516     if (DoInline) {
517       inlineCall(Engine.getWorkList(), Call, D, Bldr, Pred, State);
518       return;
519     }
520     const bool BState = State->get<CTUDispatchBifurcation>();
521     if (!BState) { // This is the first time we see this foreign function.
522       // Enqueue it to be analyzed in the second (ctu) phase.
523       inlineCall(Engine.getCTUWorkList(), Call, D, Bldr, Pred, State);
524       // Conservatively evaluate in the first phase.
525       ConservativeEvalState = State->set<CTUDispatchBifurcation>(true);
526       conservativeEvalCall(Call, Bldr, Pred, ConservativeEvalState);
527     } else {
528       conservativeEvalCall(Call, Bldr, Pred, State);
529     }
530     return;
531   }
532   inlineCall(Engine.getWorkList(), Call, D, Bldr, Pred, State);
533 }
534 
535 void ExprEngine::inlineCall(WorkList *WList, const CallEvent &Call,
536                             const Decl *D, NodeBuilder &Bldr,
537                             ExplodedNode *Pred, ProgramStateRef State) {
538   assert(D);
539 
540   const LocationContext *CurLC = Pred->getLocationContext();
541   const StackFrameContext *CallerSFC = CurLC->getStackFrame();
542   const LocationContext *ParentOfCallee = CallerSFC;
543   if (Call.getKind() == CE_Block &&
544       !cast<BlockCall>(Call).isConversionFromLambda()) {
545     const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
546     assert(BR && "If we have the block definition we should have its region");
547     AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
548     ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
549                                                          cast<BlockDecl>(D),
550                                                          BR);
551   }
552 
553   // This may be NULL, but that's fine.
554   const Expr *CallE = Call.getOriginExpr();
555 
556   // Construct a new stack frame for the callee.
557   AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
558   const StackFrameContext *CalleeSFC =
559       CalleeADC->getStackFrame(ParentOfCallee, CallE, currBldrCtx->getBlock(),
560                                currBldrCtx->blockCount(), currStmtIdx);
561 
562   CallEnter Loc(CallE, CalleeSFC, CurLC);
563 
564   // Construct a new state which contains the mapping from actual to
565   // formal arguments.
566   State = State->enterStackFrame(Call, CalleeSFC);
567 
568   bool isNew;
569   if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
570     N->addPredecessor(Pred, G);
571     if (isNew)
572       WList->enqueue(N);
573   }
574 
575   // If we decided to inline the call, the successor has been manually
576   // added onto the work list so remove it from the node builder.
577   Bldr.takeNodes(Pred);
578 
579   NumInlinedCalls++;
580   Engine.FunctionSummaries->bumpNumTimesInlined(D);
581 
582   // Do not mark as visited in the 2nd run (CTUWList), so the function will
583   // be visited as top-level; this way we won't lose reports in non-CTU
584   // mode. Consider the case when a function in a foreign TU calls back
585   // into the main TU.
586   // Note, during the 1st run, it doesn't matter if we mark the foreign
587   // functions as visited (or not) because they can never appear as a top level
588   // function in the main TU.
589   if (!isSecondPhaseCTU())
590     // Mark the decl as visited.
591     if (VisitedCallees)
592       VisitedCallees->insert(D);
593 }
594 
595 static ProgramStateRef getInlineFailedState(ProgramStateRef State,
596                                             const Stmt *CallE) {
597   const void *ReplayState = State->get<ReplayWithoutInlining>();
598   if (!ReplayState)
599     return nullptr;
600 
601   assert(ReplayState == CallE && "Backtracked to the wrong call.");
602   (void)CallE;
603 
604   return State->remove<ReplayWithoutInlining>();
605 }
606 
607 void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
608                                ExplodedNodeSet &dst) {
609   // Perform the previsit of the CallExpr.
610   ExplodedNodeSet dstPreVisit;
611   getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);
612 
613   // Get the call in its initial state. We use this as a template to perform
614   // all the checks.
615   CallEventManager &CEMgr = getStateManager().getCallEventManager();
616   CallEventRef<> CallTemplate = CEMgr.getSimpleCall(
617       CE, Pred->getState(), Pred->getLocationContext(), getCFGElementRef());
618 
619   // Evaluate the function call.  We try each of the checkers
620   // to see if they can evaluate the function call.
621   ExplodedNodeSet dstCallEvaluated;
622   for (ExplodedNode *N : dstPreVisit) {
623     evalCall(dstCallEvaluated, N, *CallTemplate);
624   }
625 
626   // Finally, perform the post-condition check of the CallExpr and store
627   // the created nodes in 'Dst'.
628   // Note that if the call was inlined, dstCallEvaluated will be empty.
629   // The post-CallExpr check will occur in processCallExit.
630   getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
631                                              *this);
632 }
633 
634 ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State,
635                                                        const CallEvent &Call) {
636   const Expr *E = Call.getOriginExpr();
637   // FIXME: Constructors to placement arguments of operator new
638   // are not supported yet.
639   if (!E || isa<CXXNewExpr>(E))
640     return State;
641 
642   const LocationContext *LC = Call.getLocationContext();
643   for (unsigned CallI = 0, CallN = Call.getNumArgs(); CallI != CallN; ++CallI) {
644     unsigned I = Call.getASTArgumentIndex(CallI);
645     if (std::optional<SVal> V = getObjectUnderConstruction(State, {E, I}, LC)) {
646       SVal VV = *V;
647       (void)VV;
648       assert(cast<VarRegion>(VV.castAs<loc::MemRegionVal>().getRegion())
649                  ->getStackFrame()->getParent()
650                  ->getStackFrame() == LC->getStackFrame());
651       State = finishObjectConstruction(State, {E, I}, LC);
652     }
653   }
654 
655   return State;
656 }
657 
658 void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst,
659                                             ExplodedNode *Pred,
660                                             const CallEvent &Call) {
661   ProgramStateRef State = Pred->getState();
662   ProgramStateRef CleanedState = finishArgumentConstruction(State, Call);
663   if (CleanedState == State) {
664     Dst.insert(Pred);
665     return;
666   }
667 
668   const Expr *E = Call.getOriginExpr();
669   const LocationContext *LC = Call.getLocationContext();
670   NodeBuilder B(Pred, Dst, *currBldrCtx);
671   static SimpleProgramPointTag Tag("ExprEngine",
672                                    "Finish argument construction");
673   PreStmt PP(E, LC, &Tag);
674   B.generateNode(PP, CleanedState, Pred);
675 }
676 
677 void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
678                           const CallEvent &Call) {
679   // WARNING: At this time, the state attached to 'Call' may be older than the
680   // state in 'Pred'. This is a minor optimization since CheckerManager will
681   // use an updated CallEvent instance when calling checkers, but if 'Call' is
682   // ever used directly in this function all callers should be updated to pass
683   // the most recent state. (It is probably not worth doing the work here since
684   // for some callers this will not be necessary.)
685 
686   // Run any pre-call checks using the generic call interface.
687   ExplodedNodeSet dstPreVisit;
688   getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred,
689                                             Call, *this);
690 
691   // Actually evaluate the function call.  We try each of the checkers
692   // to see if they can evaluate the function call, and get a callback at
693   // defaultEvalCall if all of them fail.
694   ExplodedNodeSet dstCallEvaluated;
695   getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
696                                              Call, *this, EvalCallOptions());
697 
698   // If there were other constructors called for object-type arguments
699   // of this call, clean them up.
700   ExplodedNodeSet dstArgumentCleanup;
701   for (ExplodedNode *I : dstCallEvaluated)
702     finishArgumentConstruction(dstArgumentCleanup, I, Call);
703 
704   ExplodedNodeSet dstPostCall;
705   getCheckerManager().runCheckersForPostCall(dstPostCall, dstArgumentCleanup,
706                                              Call, *this);
707 
708   // Handle symbols conjured and escaped while invalidating the regions above.
709   // Note that for inlined calls, the nodes were put back into the worklist,
710   // so we can assume that every node belongs to a conservative call at this
711   // point.
712 
713   // Run pointerEscape callback with the newly conjured symbols.
714   SmallVector<std::pair<SVal, SVal>, 8> Escaped;
715   for (ExplodedNode *I : dstPostCall) {
716     NodeBuilder B(I, Dst, *currBldrCtx);
717     ProgramStateRef State = I->getState();
718     Escaped.clear();
719     {
720       unsigned Arg = -1;
721       for (const ParmVarDecl *PVD : Call.parameters()) {
722         ++Arg;
723         QualType ParamTy = PVD->getType();
724         if (ParamTy.isNull() ||
725             (!ParamTy->isPointerType() && !ParamTy->isReferenceType()))
726           continue;
727         QualType Pointee = ParamTy->getPointeeType();
728         if (Pointee.isConstQualified() || Pointee->isVoidType())
729           continue;
730         if (const MemRegion *MR = Call.getArgSVal(Arg).getAsRegion())
731           Escaped.emplace_back(loc::MemRegionVal(MR), State->getSVal(MR, Pointee));
732       }
733     }
734 
735     State = processPointerEscapedOnBind(State, Escaped, I->getLocationContext(),
736                                         PSK_EscapeOutParameters, &Call);
737 
738     if (State == I->getState())
739       Dst.insert(I);
740     else
741       B.generateNode(I->getLocation(), State, I);
742   }
743 }
744 
745 ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
746                                             const LocationContext *LCtx,
747                                             ProgramStateRef State) {
748   const Expr *E = Call.getOriginExpr();
749   if (!E)
750     return State;
751 
752   // Some method families have known return values.
753   if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
754     switch (Msg->getMethodFamily()) {
755     default:
756       break;
757     case OMF_autorelease:
758     case OMF_retain:
759     case OMF_self: {
760       // These methods return their receivers.
761       return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
762     }
763     }
764   } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
765     SVal ThisV = C->getCXXThisVal();
766     ThisV = State->getSVal(ThisV.castAs<Loc>());
767     return State->BindExpr(E, LCtx, ThisV);
768   }
769 
770   SVal R;
771   QualType ResultTy = Call.getResultType();
772   unsigned Count = currBldrCtx->blockCount();
773   if (auto RTC = getCurrentCFGElement().getAs<CFGCXXRecordTypedCall>()) {
774     // Conjure a temporary if the function returns an object by value.
775     SVal Target;
776     assert(RTC->getStmt() == Call.getOriginExpr());
777     EvalCallOptions CallOpts; // FIXME: We won't really need those.
778     std::tie(State, Target) = handleConstructionContext(
779         Call.getOriginExpr(), State, currBldrCtx, LCtx,
780         RTC->getConstructionContext(), CallOpts);
781     const MemRegion *TargetR = Target.getAsRegion();
782     assert(TargetR);
783     // Invalidate the region so that it doesn't look uninitialized. If this is
784     // a field or element constructor, we do not want to invalidate
785     // the whole structure. Pointer escape is meaningless because
786     // the structure is a product of conservative evaluation
787     // and therefore contains nothing interesting at this point.
788     RegionAndSymbolInvalidationTraits ITraits;
789     ITraits.setTrait(TargetR,
790         RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
791     State = State->invalidateRegions(TargetR, E, Count, LCtx,
792                                      /* CausesPointerEscape=*/false, nullptr,
793                                      &Call, &ITraits);
794 
795     R = State->getSVal(Target.castAs<Loc>(), E->getType());
796   } else {
797     // Conjure a symbol if the return value is unknown.
798 
799     // See if we need to conjure a heap pointer instead of
800     // a regular unknown pointer.
801     const auto *CNE = dyn_cast<CXXNewExpr>(E);
802     if (CNE && CNE->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
803       R = svalBuilder.getConjuredHeapSymbolVal(E, LCtx, Count);
804       const MemRegion *MR = R.getAsRegion()->StripCasts();
805 
806       // Store the extent of the allocated object(s).
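      // For example (illustrative): for `new int[n]` the extent below becomes
      // n * sizeof(int); for a scalar `new int` it is 1 * sizeof(int).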
807       SVal ElementCount;
808       if (const Expr *SizeExpr = CNE->getArraySize().value_or(nullptr)) {
809         ElementCount = State->getSVal(SizeExpr, LCtx);
810       } else {
811         ElementCount = svalBuilder.makeIntVal(1, /*IsUnsigned=*/true);
812       }
813 
814       SVal ElementSize = getElementExtent(CNE->getAllocatedType(), svalBuilder);
815 
816       SVal Size =
817           svalBuilder.evalBinOp(State, BO_Mul, ElementCount, ElementSize,
818                                 svalBuilder.getArrayIndexType());
819 
820       // FIXME: This line is to prevent a crash. For more details please check
821       // issue #56264.
822       if (Size.isUndef())
823         Size = UnknownVal();
824 
825       State = setDynamicExtent(State, MR, Size.castAs<DefinedOrUnknownSVal>());
826     } else {
827       R = svalBuilder.conjureSymbolVal(nullptr, E, LCtx, ResultTy, Count);
828     }
829   }
830   return State->BindExpr(E, LCtx, R);
831 }
832 
833 // Conservatively evaluate call by invalidating regions and binding
834 // a conjured return value.
835 void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
836                                       ExplodedNode *Pred, ProgramStateRef State) {
837   State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
838   State = bindReturnValue(Call, Pred->getLocationContext(), State);
839 
840   // And make the result node.
841   static SimpleProgramPointTag PT("ExprEngine", "Conservative eval call");
842   Bldr.generateNode(Call.getProgramPoint(false, &PT), State, Pred);
843 }
844 
845 ExprEngine::CallInlinePolicy
846 ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
847                               AnalyzerOptions &Opts,
848                               const EvalCallOptions &CallOpts) {
849   const LocationContext *CurLC = Pred->getLocationContext();
850   const StackFrameContext *CallerSFC = CurLC->getStackFrame();
851   switch (Call.getKind()) {
852   case CE_Function:
853   case CE_CXXStaticOperator:
854   case CE_Block:
855     break;
856   case CE_CXXMember:
857   case CE_CXXMemberOperator:
858     if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
859       return CIP_DisallowedAlways;
860     break;
861   case CE_CXXConstructor: {
862     if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
863       return CIP_DisallowedAlways;
864 
865     const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);
866 
867     const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
868 
869     auto CCE = getCurrentCFGElement().getAs<CFGConstructor>();
870     const ConstructionContext *CC = CCE ? CCE->getConstructionContext()
871                                         : nullptr;
872 
873     if (llvm::isa_and_nonnull<NewAllocatedObjectConstructionContext>(CC) &&
874         !Opts.MayInlineCXXAllocator)
875       return CIP_DisallowedOnce;
876 
877     if (CallOpts.IsArrayCtorOrDtor) {
878       if (!shouldInlineArrayConstruction(Pred->getState(), CtorExpr, CurLC))
879         return CIP_DisallowedOnce;
880     }
881 
882     // Inlining constructors requires including initializers in the CFG.
883     const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
884     assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
885     (void)ADC;
886 
887     // If the destructor is trivial, it's always safe to inline the constructor.
888     if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
889       break;
890 
891     // For other types, only inline constructors if destructor inlining is
892     // also enabled.
893     if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
894       return CIP_DisallowedAlways;
895 
896     if (CtorExpr->getConstructionKind() == CXXConstructionKind::Complete) {
897       // If we don't handle temporary destructors, we shouldn't inline
898       // their constructors.
899       if (CallOpts.IsTemporaryCtorOrDtor &&
900           !Opts.ShouldIncludeTemporaryDtorsInCFG)
901         return CIP_DisallowedOnce;
902 
903       // If we did not find the correct this-region, it would be pointless
904       // to inline the constructor. Instead we will simply invalidate
905       // the fake temporary target.
906       if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
907         return CIP_DisallowedOnce;
908 
909       // If the temporary is lifetime-extended by binding it to a reference-type
910       // field within an aggregate, automatic destructors don't work properly.
911       if (CallOpts.IsTemporaryLifetimeExtendedViaAggregate)
912         return CIP_DisallowedOnce;
913     }
914 
915     break;
916   }
917   case CE_CXXInheritedConstructor: {
918     // This doesn't really increase the cost of inlining ever, because
919     // the stack frame of the inherited constructor is trivial.
920     return CIP_Allowed;
921   }
922   case CE_CXXDestructor: {
923     if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
924       return CIP_DisallowedAlways;
925 
926     // Inlining destructors requires building the CFG correctly.
927     const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
928     assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
929     (void)ADC;
930 
931     if (CallOpts.IsArrayCtorOrDtor) {
932       if (!shouldInlineArrayDestruction(getElementCountOfArrayBeingDestructed(
933               Call, Pred->getState(), svalBuilder))) {
934         return CIP_DisallowedOnce;
935       }
936     }
937 
938     // Allow disabling temporary destructor inlining with a separate option.
939     if (CallOpts.IsTemporaryCtorOrDtor &&
940         !Opts.MayInlineCXXTemporaryDtors)
941       return CIP_DisallowedOnce;
942 
943     // If we did not find the correct this-region, it would be pointless
944     // to inline the destructor. Instead we will simply invalidate
945     // the fake temporary target.
946     if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
947       return CIP_DisallowedOnce;
948     break;
949   }
950   case CE_CXXDeallocator:
951     [[fallthrough]];
952   case CE_CXXAllocator:
953     if (Opts.MayInlineCXXAllocator)
954       break;
955     // Do not inline allocators until we model deallocators.
956     // This is unfortunate, but basically necessary for smart pointers and such.
957     return CIP_DisallowedAlways;
958   case CE_ObjCMessage:
959     if (!Opts.MayInlineObjCMethod)
960       return CIP_DisallowedAlways;
961     if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
962           Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
963       return CIP_DisallowedAlways;
964     break;
965   }
966 
967   return CIP_Allowed;
968 }
969 
970 /// Returns true if the given C++ class contains a member with the given name.
971 static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
972                       StringRef Name) {
973   const IdentifierInfo &II = Ctx.Idents.get(Name);
974   return RD->hasMemberName(Ctx.DeclarationNames.getIdentifier(&II));
975 }
976 
977 /// Returns true if the given C++ class is a container or iterator.
978 ///
979 /// Our heuristic for this is whether it contains a method named 'begin()' or a
980 /// nested type named 'iterator' or 'iterator_category'.
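///
/// For example (illustrative): `std::vector` declares `begin()` and a nested
/// `iterator` type, so this heuristic classifies it as a container.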
981 static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) {
982   return hasMember(Ctx, RD, "begin") ||
983          hasMember(Ctx, RD, "iterator") ||
984          hasMember(Ctx, RD, "iterator_category");
985 }
986 
987 /// Returns true if the given function refers to a method of a C++ container
988 /// or iterator.
989 ///
990 /// We generally do a poor job modeling most containers right now, and might
991 /// prefer not to inline their methods.
992 static bool isContainerMethod(const ASTContext &Ctx,
993                               const FunctionDecl *FD) {
994   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
995     return isContainerClass(Ctx, MD->getParent());
996   return false;
997 }
998 
999 /// Returns true if the given function is the destructor of a class named
1000 /// "shared_ptr".
1001 static bool isCXXSharedPtrDtor(const FunctionDecl *FD) {
1002   const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(FD);
1003   if (!Dtor)
1004     return false;
1005 
1006   const CXXRecordDecl *RD = Dtor->getParent();
1007   if (const IdentifierInfo *II = RD->getDeclName().getAsIdentifierInfo())
1008     if (II->isStr("shared_ptr"))
1009         return true;
1010 
1011   return false;
1012 }
1013 
1014 /// Returns true if the function in \p CalleeADC may be inlined in general.
1015 ///
1016 /// This checks static properties of the function, such as its signature and
1017 /// CFG, to determine whether the analyzer should ever consider inlining it,
1018 /// in any context.
1019 bool ExprEngine::mayInlineDecl(AnalysisDeclContext *CalleeADC) const {
1020   AnalyzerOptions &Opts = AMgr.getAnalyzerOptions();
1021   // FIXME: Do not inline variadic calls.
1022   if (CallEvent::isVariadic(CalleeADC->getDecl()))
1023     return false;
1024 
1025   // Check certain C++-related inlining policies.
1026   ASTContext &Ctx = CalleeADC->getASTContext();
1027   if (Ctx.getLangOpts().CPlusPlus) {
1028     if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
1029       // Conditionally control the inlining of template functions.
1030       if (!Opts.MayInlineTemplateFunctions)
1031         if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
1032           return false;
1033 
1034       // Conditionally control the inlining of C++ standard library functions.
1035       if (!Opts.MayInlineCXXStandardLibrary)
1036         if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
1037           if (AnalysisDeclContext::isInStdNamespace(FD))
1038             return false;
1039 
1040       // Conditionally control the inlining of methods on objects that look
1041       // like C++ containers.
1042       if (!Opts.MayInlineCXXContainerMethods)
1043         if (!AMgr.isInCodeFile(FD->getLocation()))
1044           if (isContainerMethod(Ctx, FD))
1045             return false;
1046 
1047       // Conditionally control the inlining of the destructor of C++ shared_ptr.
1048       // We don't currently do a good job modeling shared_ptr because we can't
1049       // see the reference count, so treating as opaque is probably the best
1050       // idea.
1051       if (!Opts.MayInlineCXXSharedPtrDtor)
1052         if (isCXXSharedPtrDtor(FD))
1053           return false;
1054     }
1055   }
1056 
1057   // It is possible that the CFG cannot be constructed.
1058   // Be safe, and check if the CalleeCFG is valid.
1059   const CFG *CalleeCFG = CalleeADC->getCFG();
1060   if (!CalleeCFG)
1061     return false;
1062 
1063   // Do not inline large functions.
1064   if (isHuge(CalleeADC))
1065     return false;
1066 
1067   // It is possible that the live variables analysis cannot be
1068   // run.  If so, bail out.
1069   if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
1070     return false;
1071 
1072   return true;
1073 }
1074 
1075 bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
1076                                   const ExplodedNode *Pred,
1077                                   const EvalCallOptions &CallOpts) {
1078   if (!D)
1079     return false;
1080 
1081   AnalysisManager &AMgr = getAnalysisManager();
1082   AnalyzerOptions &Opts = AMgr.options;
1083   AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
1084   AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);
1085 
1086   // The auto-synthesized bodies are essential to inline as they are
1087   // usually small and commonly used. Note: we should do this check early on to
1088   // ensure we always inline these calls.
1089   if (CalleeADC->isBodyAutosynthesized())
1090     return true;
1091 
1092   if (!AMgr.shouldInlineCall())
1093     return false;
1094 
1095   // Check if this function has been marked as non-inlinable.
1096   std::optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
1097   if (MayInline) {
1098     if (!*MayInline)
1099       return false;
1100 
1101   } else {
1102     // We haven't actually checked the static properties of this function yet.
1103     // Do that now, and record our decision in the function summaries.
1104     if (mayInlineDecl(CalleeADC)) {
1105       Engine.FunctionSummaries->markMayInline(D);
1106     } else {
1107       Engine.FunctionSummaries->markShouldNotInline(D);
1108       return false;
1109     }
1110   }
1111 
1112   // Check if we should inline a call based on its kind.
1113   // FIXME: this checks both static and dynamic properties of the call, which
1114   // means we're redoing a bit of work that could be cached in the function
1115   // summary.
1116   CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts, CallOpts);
1117   if (CIP != CIP_Allowed) {
1118     if (CIP == CIP_DisallowedAlways) {
1119       assert(!MayInline || *MayInline);
1120       Engine.FunctionSummaries->markShouldNotInline(D);
1121     }
1122     return false;
1123   }
1124 
1125   // Do not inline if recursive or we've reached max stack frame count.
1126   bool IsRecursive = false;
1127   unsigned StackDepth = 0;
1128   examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
1129   if ((StackDepth >= Opts.InlineMaxStackDepth) &&
1130       (!isSmall(CalleeADC) || IsRecursive))
1131     return false;
1132 
1133   // Do not inline large functions too many times.
1134   if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
1135        Opts.MaxTimesInlineLarge) &&
1136       isLarge(CalleeADC)) {
1137     NumReachedInlineCountMax++;
1138     return false;
1139   }
1140 
1141   if (HowToInline == Inline_Minimal && (!isSmall(CalleeADC) || IsRecursive))
1142     return false;
1143 
1144   return true;
1145 }
1146 
1147 bool ExprEngine::shouldInlineArrayConstruction(const ProgramStateRef State,
1148                                                const CXXConstructExpr *CE,
1149                                                const LocationContext *LCtx) {
1150   if (!CE)
1151     return false;
1152 
1153   // FIXME: Handle other array types.
1154   if (const auto *CAT = dyn_cast<ConstantArrayType>(CE->getType())) {
1155     unsigned ArrSize = getContext().getConstantArrayElementCount(CAT);
1156 
1157     // This might seem counter-intuitive at first glance, but the functions are
1158     // closely related. Reasoning about destructors depends only on the type
1159     // of the expression that initialized the memory region, which is the
1160     // CXXConstructExpr. So to avoid code repetition, the work is delegated
1161     // to the function that reasons about destructor inlining. Also note that
1162     // if the constructors of the array elements are inlined, the destructors
1163     // can also be inlined, and if the destructors can be inlined, it's safe to
1164     // inline the constructors.
1165     return shouldInlineArrayDestruction(ArrSize);
1166   }
1167 
1168   // Check if we're inside an ArrayInitLoopExpr, and it's sufficiently small.
1169   if (auto Size = getPendingInitLoop(State, CE, LCtx))
1170     return shouldInlineArrayDestruction(*Size);
1171 
1172   return false;
1173 }
1174 
1175 bool ExprEngine::shouldInlineArrayDestruction(uint64_t Size) {
1176 
1177   uint64_t maxAllowedSize = AMgr.options.maxBlockVisitOnPath;
1178 
1179   // Declaring a 0 element array is also possible.
1180   return Size <= maxAllowedSize && Size > 0;
1181 }
1182 
1183 bool ExprEngine::shouldRepeatCtorCall(ProgramStateRef State,
1184                                       const CXXConstructExpr *E,
1185                                       const LocationContext *LCtx) {
1186 
1187   if (!E)
1188     return false;
1189 
1190   auto Ty = E->getType();
1191 
1192   // FIXME: Handle non-constant array types.
1193   if (const auto *CAT = dyn_cast<ConstantArrayType>(Ty)) {
1194     unsigned Size = getContext().getConstantArrayElementCount(CAT);
1195     return Size > getIndexOfElementToConstruct(State, E, LCtx);
1196   }
1197 
1198   if (auto Size = getPendingInitLoop(State, E, LCtx))
1199     return Size > getIndexOfElementToConstruct(State, E, LCtx);
1200 
1201   return false;
1202 }
1203 
1204 static bool isTrivialObjectAssignment(const CallEvent &Call) {
1205   const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
1206   if (!ICall)
1207     return false;
1208 
1209   const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl());
1210   if (!MD)
1211     return false;
1212   if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
1213     return false;
1214 
1215   return MD->isTrivial();
1216 }
1217 
1218 void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
1219                                  const CallEvent &CallTemplate,
1220                                  const EvalCallOptions &CallOpts) {
1221   // Make sure we have the most recent state attached to the call.
1222   ProgramStateRef State = Pred->getState();
1223   CallEventRef<> Call = CallTemplate.cloneWithState(State);
1224 
1225   // Special-case trivial assignment operators.
1226   if (isTrivialObjectAssignment(*Call)) {
1227     performTrivialCopy(Bldr, Pred, *Call);
1228     return;
1229   }
1230 
1231   // Try to inline the call.
1232   // The origin expression here is just used as a kind of checksum;
1233   // this should still be safe even for CallEvents that don't come from exprs.
1234   const Expr *E = Call->getOriginExpr();
1235 
1236   ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
1237   if (InlinedFailedState) {
1238     // If we already tried once and failed, make sure we don't retry later.
1239     State = InlinedFailedState;
1240   } else {
1241     RuntimeDefinition RD = Call->getRuntimeDefinition();
1242     Call->setForeign(RD.isForeign());
1243     const Decl *D = RD.getDecl();
1244     if (shouldInlineCall(*Call, D, Pred, CallOpts)) {
1245       if (RD.mayHaveOtherDefinitions()) {
1246         AnalyzerOptions &Options = getAnalysisManager().options;
1247 
1248         // Explore with and without inlining the call.
1249         if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
1250           BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
1251           return;
1252         }
1253 
1254         // Don't inline if we're not in any dynamic dispatch mode.
1255         if (Options.getIPAMode() != IPAK_DynamicDispatch) {
1256           conservativeEvalCall(*Call, Bldr, Pred, State);
1257           return;
1258         }
1259       }
1260       ctuBifurcate(*Call, D, Bldr, Pred, State);
1261       return;
1262     }
1263   }
1264 
1265   // If we can't inline it, clean up the state traits that are only used when
1266   // the function is inlined.
1267   State = removeStateTraitsUsedForArrayEvaluation(
1268       State, dyn_cast_or_null<CXXConstructExpr>(E), Call->getLocationContext());
1269 
1270   // Also handle the return value and invalidate the regions.
1271   conservativeEvalCall(*Call, Bldr, Pred, State);
1272 }
1273 
1274 void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
1275                                const CallEvent &Call, const Decl *D,
1276                                NodeBuilder &Bldr, ExplodedNode *Pred) {
1277   assert(BifurReg);
1278   BifurReg = BifurReg->StripCasts();
1279 
1280   // Check if we've performed the split already - note, we only want
1281   // to split the path once per memory region.
1282   ProgramStateRef State = Pred->getState();
1283   const unsigned *BState =
1284                         State->get<DynamicDispatchBifurcationMap>(BifurReg);
1285   if (BState) {
1286     // If we are on "inline path", keep inlining if possible.
1287     if (*BState == DynamicDispatchModeInlined)
1288       ctuBifurcate(Call, D, Bldr, Pred, State);
1289     // If inline failed, or we are on the path where we assume we
1290     // don't have enough info about the receiver to inline, conjure the
1291     // return value and invalidate the regions.
1292     conservativeEvalCall(Call, Bldr, Pred, State);
1293     return;
1294   }
1295 
1296   // If we got here, this is the first time we process a message to this
1297   // region, so split the path.
1298   ProgramStateRef IState =
1299       State->set<DynamicDispatchBifurcationMap>(BifurReg,
1300                                                DynamicDispatchModeInlined);
1301   ctuBifurcate(Call, D, Bldr, Pred, IState);
1302 
1303   ProgramStateRef NoIState =
1304       State->set<DynamicDispatchBifurcationMap>(BifurReg,
1305                                                DynamicDispatchModeConservative);
1306   conservativeEvalCall(Call, Bldr, Pred, NoIState);
1307 
1308   NumOfDynamicDispatchPathSplits++;
1309 }
1310 
1311 void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
1312                                  ExplodedNodeSet &Dst) {
1313   ExplodedNodeSet dstPreVisit;
1314   getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);
1315 
1316   StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);
1317 
1318   if (RS->getRetValue()) {
1319     for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
1320                                   ei = dstPreVisit.end(); it != ei; ++it) {
1321       B.generateNode(RS, *it, (*it)->getState());
1322     }
1323   }
1324 }
1325