/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
Line | Count | Source
1 | | //=-- ExprEngineCallAndReturn.cpp - Support for call/return -----*- C++ -*-===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This file defines ExprEngine's support for calls and returns. |
10 | | // |
11 | | //===----------------------------------------------------------------------===// |
12 | | |
13 | | #include "PrettyStackTraceLocationContext.h" |
14 | | #include "clang/AST/CXXInheritance.h" |
15 | | #include "clang/AST/Decl.h" |
16 | | #include "clang/AST/DeclCXX.h" |
17 | | #include "clang/Analysis/Analyses/LiveVariables.h" |
18 | | #include "clang/Analysis/ConstructionContext.h" |
19 | | #include "clang/StaticAnalyzer/Core/CheckerManager.h" |
20 | | #include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h" |
21 | | #include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h" |
22 | | #include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h" |
23 | | #include "llvm/ADT/SmallSet.h" |
24 | | #include "llvm/ADT/Statistic.h" |
25 | | #include "llvm/Support/Casting.h" |
26 | | #include "llvm/Support/Compiler.h" |
27 | | #include "llvm/Support/SaveAndRestore.h" |
28 | | #include <optional> |
29 | | |
30 | | using namespace clang; |
31 | | using namespace ento; |
32 | | |
33 | | #define DEBUG_TYPE "ExprEngine" |
34 | | |
35 | | STATISTIC(NumOfDynamicDispatchPathSplits, |
36 | | "The # of times we split the path due to imprecise dynamic dispatch info"); |
37 | | |
38 | | STATISTIC(NumInlinedCalls, |
39 | | "The # of times we inlined a call"); |
40 | | |
41 | | STATISTIC(NumReachedInlineCountMax, |
42 | | "The # of times we reached inline count maximum"); |
43 | | |
44 | | void ExprEngine::processCallEnter(NodeBuilderContext& BC, CallEnter CE, |
45 | 35.1k | ExplodedNode *Pred) { |
46 | | // Get the entry block in the CFG of the callee. |
47 | 35.1k | const StackFrameContext *calleeCtx = CE.getCalleeContext(); |
48 | 35.1k | PrettyStackTraceLocationContext CrashInfo(calleeCtx); |
49 | 35.1k | const CFGBlock *Entry = CE.getEntry(); |
50 | | |
51 | | // Validate the CFG. |
52 | 35.1k | assert(Entry->empty()); |
53 | 35.1k | assert(Entry->succ_size() == 1); |
54 | | |
55 | | // Get the solitary successor. |
56 | 35.1k | const CFGBlock *Succ = *(Entry->succ_begin()); |
57 | | |
58 | | // Construct an edge representing the starting location in the callee. |
59 | 35.1k | BlockEdge Loc(Entry, Succ, calleeCtx); |
60 | | |
61 | 35.1k | ProgramStateRef state = Pred->getState(); |
62 | | |
63 | | // Construct a new node, notify checkers that analysis of the function has |
64 | | // begun, and add the resultant nodes to the worklist. |
65 | 35.1k | bool isNew; |
66 | 35.1k | ExplodedNode *Node = G.getNode(Loc, state, false, &isNew); |
67 | 35.1k | Node->addPredecessor(Pred, G); |
68 | 35.1k | if (isNew) { |
69 | 35.1k | ExplodedNodeSet DstBegin; |
70 | 35.1k | processBeginOfFunction(BC, Node, DstBegin, Loc); |
71 | 35.1k | Engine.enqueue(DstBegin); |
72 | 35.1k | } |
73 | 35.1k | } |
74 | | |
75 | | // Find the last statement on the path to the exploded node and the |
76 | | // corresponding Block. |
77 | | static std::pair<const Stmt*, |
78 | 67.8k | const CFGBlock*> getLastStmt(const ExplodedNode *Node) { |
79 | 67.8k | const Stmt *S = nullptr; |
80 | 67.8k | const CFGBlock *Blk = nullptr; |
81 | 67.8k | const StackFrameContext *SF = Node->getStackFrame(); |
82 | | |
83 | | // Back up through the ExplodedGraph until we reach a statement node in this |
84 | | // stack frame. |
85 | 191k | while (Node) { |
86 | 191k | const ProgramPoint &PP = Node->getLocation(); |
87 | | |
88 | 191k | if (PP.getStackFrame() == SF) { |
89 | 187k | if (std::optional<StmtPoint> SP = PP.getAs<StmtPoint>()) { |
90 | 61.3k | S = SP->getStmt(); |
91 | 61.3k | break; |
92 | 126k | } else if (std::optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) { |
93 | 2.74k | S = CEE->getCalleeContext()->getCallSite(); |
94 | 2.74k | if (S) |
95 | 2.21k | break; |
96 | | |
97 | | // If there is no statement, this is an implicitly-generated call. |
98 | | // We'll walk backwards over it and then continue the loop to find |
99 | | // an actual statement. |
100 | 529 | std::optional<CallEnter> CE; |
101 | 7.99k | do { |
102 | 7.99k | Node = Node->getFirstPred(); |
103 | 7.99k | CE = Node->getLocationAs<CallEnter>(); |
104 | 7.99k | } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());
105 | | |
106 | | // Continue searching the graph. |
107 | 123k | } else if (std::optional<BlockEdge> BE = PP.getAs<BlockEdge>()) { |
108 | 70.4k | Blk = BE->getSrc(); |
109 | 70.4k | } |
110 | 187k | } else if (std::optional<CallEnter> CE = PP.getAs<CallEnter>()) {
111 | | // If we reached the CallEnter for this function, it has no statements. |
112 | 3.84k | if (CE->getCalleeContext() == SF) |
113 | 3.84k | break; |
114 | 3.84k | } |
115 | | |
116 | 124k | if (Node->pred_empty()) |
117 | 404 | return std::make_pair(nullptr, nullptr); |
118 | | |
119 | 123k | Node = *Node->pred_begin(); |
120 | 123k | } |
121 | | |
122 | 67.4k | return std::make_pair(S, Blk); |
123 | 67.8k | } |
124 | | |
125 | | /// Adjusts a return value when the called function's return type does not |
126 | | /// match the caller's expression type. This can happen when a dynamic call |
127 | | /// is devirtualized, and the overriding method has a covariant (more specific) |
128 | | /// return type than the parent's method. For C++ objects, this means we need |
129 | | /// to add base casts. |
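 | | // Editorial sketch (assumed example, not part of the original file): with a
 | | // covariant override such as
 | | //   struct Base { virtual Base *clone(); };
 | | //   struct Derived : Base { Derived *clone() override; };
 | | //   Base *B = getDerived();
 | | //   Base *Copy = B->clone(); // devirtualized to Derived::clone()
 | | // the inlined callee returns a Derived* while the caller's expression has
 | | // type Base*, so the value needs the derived-to-base adjustment done below.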
130 | | static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy, |
131 | 90 | StoreManager &StoreMgr) { |
132 | | // For now, the only adjustments we handle apply only to locations. |
133 | 90 | if (!isa<Loc>(V)) |
134 | 76 | return V; |
135 | | |
136 | | // If the types already match, don't do any unnecessary work. |
137 | 14 | ExpectedTy = ExpectedTy.getCanonicalType(); |
138 | 14 | ActualTy = ActualTy.getCanonicalType(); |
139 | 14 | if (ExpectedTy == ActualTy) |
140 | 11 | return V; |
141 | | |
142 | | // No adjustment is needed between Objective-C pointer types. |
143 | 3 | if (ExpectedTy->isObjCObjectPointerType() && |
144 | 3 | ActualTy->isObjCObjectPointerType())
145 | 1 | return V; |
146 | | |
147 | | // C++ object pointers may need "derived-to-base" casts. |
148 | 2 | const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl(); |
149 | 2 | const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl(); |
150 | 2 | if (ExpectedClass && ActualClass) {
151 | 1 | CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true, |
152 | 1 | /*DetectVirtual=*/false); |
153 | 1 | if (ActualClass->isDerivedFrom(ExpectedClass, Paths) && |
154 | 1 | !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) { |
155 | 1 | return StoreMgr.evalDerivedToBase(V, Paths.front()); |
156 | 1 | } |
157 | 1 | } |
158 | | |
159 | | // Unfortunately, Objective-C does not enforce that overridden methods have |
160 | | // covariant return types, so we can't assert that that never happens. |
161 | | // Be safe and return UnknownVal(). |
162 | 1 | return UnknownVal(); |
163 | 2 | } |
164 | | |
165 | | void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC, |
166 | | ExplodedNode *Pred, |
167 | 25.3k | ExplodedNodeSet &Dst) { |
168 | | // Find the last statement in the function and the corresponding basic block. |
169 | 25.3k | const Stmt *LastSt = nullptr; |
170 | 25.3k | const CFGBlock *Blk = nullptr; |
171 | 25.3k | std::tie(LastSt, Blk) = getLastStmt(Pred); |
172 | 25.3k | if (!Blk || !LastSt) {
173 | 404 | Dst.Add(Pred); |
174 | 404 | return; |
175 | 404 | } |
176 | | |
177 | | // Here, we destroy the current location context. We use the current |
178 | | // function's entire body as a diagnostic statement, with which the program |
179 | | // point will be associated. However, we only want to use LastStmt as a |
180 | | // reference for what to clean up if it's a ReturnStmt; otherwise, everything |
181 | | // is dead. |
182 | 24.9k | SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC); |
183 | 24.9k | const LocationContext *LCtx = Pred->getLocationContext(); |
184 | 24.9k | removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx, |
185 | 24.9k | LCtx->getAnalysisDeclContext()->getBody(), |
186 | 24.9k | ProgramPoint::PostStmtPurgeDeadSymbolsKind); |
187 | 24.9k | } |
188 | | |
189 | | static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call, |
190 | 16.9k | const StackFrameContext *calleeCtx) { |
191 | 16.9k | const Decl *RuntimeCallee = calleeCtx->getDecl(); |
192 | 16.9k | const Decl *StaticDecl = Call->getDecl(); |
193 | 16.9k | assert(RuntimeCallee); |
194 | 16.9k | if (!StaticDecl) |
195 | 0 | return true; |
196 | 16.9k | return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl(); |
197 | 16.9k | } |
198 | | |
199 | | // Returns the number of elements in the array currently being destructed. |
200 | | // If the element count is not found, 0 will be returned.
201 | | static unsigned getElementCountOfArrayBeingDestructed( |
202 | 182 | const CallEvent &Call, const ProgramStateRef State, SValBuilder &SVB) { |
203 | 182 | assert(isa<CXXDestructorCall>(Call) && |
204 | 182 | "The call event is not a destructor call!"); |
205 | | |
206 | 182 | const auto &DtorCall = cast<CXXDestructorCall>(Call); |
207 | | |
208 | 182 | auto ThisVal = DtorCall.getCXXThisVal(); |
209 | | |
210 | 182 | if (auto ThisElementRegion = dyn_cast<ElementRegion>(ThisVal.getAsRegion())) { |
211 | 182 | auto ArrayRegion = ThisElementRegion->getAsArrayOffset().getRegion(); |
212 | 182 | auto ElementType = ThisElementRegion->getElementType(); |
213 | | |
214 | 182 | auto ElementCount = |
215 | 182 | getDynamicElementCount(State, ArrayRegion, SVB, ElementType); |
216 | | |
217 | 182 | if (!ElementCount.isConstant()) |
218 | 2 | return 0; |
219 | | |
220 | 180 | return ElementCount.getAsInteger()->getLimitedValue(); |
221 | 182 | } |
222 | | |
223 | 0 | return 0; |
224 | 182 | } |
225 | | |
226 | | ProgramStateRef ExprEngine::removeStateTraitsUsedForArrayEvaluation( |
227 | | ProgramStateRef State, const CXXConstructExpr *E, |
228 | 77.4k | const LocationContext *LCtx) { |
229 | | |
230 | 77.4k | assert(LCtx && "Location context must be provided!"); |
231 | | |
232 | 77.4k | if (E) { |
233 | 10.5k | if (getPendingInitLoop(State, E, LCtx)) |
234 | 37 | State = removePendingInitLoop(State, E, LCtx); |
235 | | |
236 | 10.5k | if (getIndexOfElementToConstruct(State, E, LCtx)) |
237 | 279 | State = removeIndexOfElementToConstruct(State, E, LCtx); |
238 | 10.5k | } |
239 | | |
240 | 77.4k | if (getPendingArrayDestruction(State, LCtx)) |
241 | 54 | State = removePendingArrayDestruction(State, LCtx); |
242 | | |
243 | 77.4k | return State; |
244 | 77.4k | } |
245 | | |
246 | | /// The call exit is simulated with a sequence of nodes, which occur between |
247 | | /// CallExitBegin and CallExitEnd. The following operations occur between the |
248 | | /// two program points: |
249 | | /// 1. CallExitBegin (triggers the start of call exit sequence) |
250 | | /// 2. Bind the return value |
251 | | /// 3. Run Remove dead bindings to clean up the dead symbols from the callee. |
252 | | /// 4. CallExitEnd (switch to the caller context) |
253 | | /// 5. PostStmt<CallExpr> |
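 | | // Editorial sketch (assumed example, not from the original file): for a caller
 | | // `int f() { return g(); }` with g() inlined, the exit sequence is roughly
 | | //   CallExitBegin (still in g's stack frame)
 | | //     -> PostStmt of g's return (tagged "Bind Return Value")
 | | //     -> PostStmtPurgeDeadSymbols (g's dead bindings removed)
 | | //   CallExitEnd (back in f's stack frame)
 | | //     -> post-call/post-statement checker callbacks, then the next CFG
 | | //        element of the caller is enqueued on the worklist.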
254 | 42.5k | void ExprEngine::processCallExit(ExplodedNode *CEBNode) { |
255 | | // Step 1 CEBNode was generated before the call. |
256 | 42.5k | PrettyStackTraceLocationContext CrashInfo(CEBNode->getLocationContext()); |
257 | 42.5k | const StackFrameContext *calleeCtx = CEBNode->getStackFrame(); |
258 | | |
259 | | // The parent context might not be a stack frame, so make sure we |
260 | | // look up the first enclosing stack frame. |
261 | 42.5k | const StackFrameContext *callerCtx = |
262 | 42.5k | calleeCtx->getParent()->getStackFrame(); |
263 | | |
264 | 42.5k | const Stmt *CE = calleeCtx->getCallSite(); |
265 | 42.5k | ProgramStateRef state = CEBNode->getState(); |
266 | | // Find the last statement in the function and the corresponding basic block. |
267 | 42.5k | const Stmt *LastSt = nullptr; |
268 | 42.5k | const CFGBlock *Blk = nullptr; |
269 | 42.5k | std::tie(LastSt, Blk) = getLastStmt(CEBNode); |
270 | | |
271 | | // Generate a CallEvent /before/ cleaning the state, so that we can get the |
272 | | // correct value for 'this' (if necessary). |
273 | 42.5k | CallEventManager &CEMgr = getStateManager().getCallEventManager(); |
274 | 42.5k | CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state); |
275 | | |
276 | | // Step 2: generate node with bound return value: CEBNode -> BindedRetNode. |
277 | | |
278 | | // If this variable is set to 'true' the analyzer will evaluate the call |
279 | | // statement we are about to exit again, instead of continuing the execution |
280 | | // from the statement after the call. This is useful for non-POD type array |
281 | | // construction where the CXXConstructExpr is referenced only once in the CFG, |
282 | | // but we want to evaluate it as many times as the array has elements.
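 | | // Editorial example (assumed): for `S Arr[3];` the CFG has one
 | | // CXXConstructExpr, but the constructor must be evaluated once per element;
 | | // ShouldRepeatCall keeps re-enqueuing the same CFG element until the
 | | // index-of-element-to-construct trait reaches the array size
 | | // (see shouldRepeatCtorCall below).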
283 | 42.5k | bool ShouldRepeatCall = false; |
284 | | |
285 | 42.5k | if (const auto *DtorDecl = |
286 | 42.5k | dyn_cast_or_null<CXXDestructorDecl>(Call->getDecl())) { |
287 | 957 | if (auto Idx = getPendingArrayDestruction(state, callerCtx)) { |
288 | 171 | ShouldRepeatCall = *Idx > 0; |
289 | | |
290 | 171 | auto ThisVal = svalBuilder.getCXXThis(DtorDecl->getParent(), calleeCtx); |
291 | 171 | state = state->killBinding(ThisVal); |
292 | 171 | } |
293 | 957 | } |
294 | | |
295 | | // If the callee returns an expression, bind its value to CallExpr. |
296 | 42.5k | if (CE) { |
297 | 41.5k | if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) { |
298 | 16.9k | const LocationContext *LCtx = CEBNode->getLocationContext(); |
299 | 16.9k | SVal V = state->getSVal(RS, LCtx); |
300 | | |
301 | | // Ensure that the return type matches the type of the returned Expr. |
302 | 16.9k | if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) { |
303 | 90 | QualType ReturnedTy = |
304 | 90 | CallEvent::getDeclaredResultType(calleeCtx->getDecl()); |
305 | 90 | if (!ReturnedTy.isNull()) { |
306 | 90 | if (const Expr *Ex = dyn_cast<Expr>(CE)) { |
307 | 90 | V = adjustReturnValue(V, Ex->getType(), ReturnedTy, |
308 | 90 | getStoreManager()); |
309 | 90 | } |
310 | 90 | } |
311 | 90 | } |
312 | | |
313 | 16.9k | state = state->BindExpr(CE, callerCtx, V); |
314 | 16.9k | } |
315 | | |
316 | | // Bind the constructed object value to CXXConstructExpr. |
317 | 41.5k | if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) { |
318 | 9.34k | loc::MemRegionVal This = |
319 | 9.34k | svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx); |
320 | 9.34k | SVal ThisV = state->getSVal(This); |
321 | 9.34k | ThisV = state->getSVal(ThisV.castAs<Loc>()); |
322 | 9.34k | state = state->BindExpr(CCE, callerCtx, ThisV); |
323 | | |
324 | 9.34k | ShouldRepeatCall = shouldRepeatCtorCall(state, CCE, callerCtx); |
325 | 9.34k | } |
326 | | |
327 | 41.5k | if (const auto *CNE = dyn_cast<CXXNewExpr>(CE)) { |
328 | | // We are currently evaluating a CXXNewAllocator CFGElement. It takes a |
329 | | // while to reach the actual CXXNewExpr element from here, so keep the |
330 | | // region for later use. |
331 | | // Additionally cast the return value of the inlined operator new |
332 | | // (which is of type 'void *') to the correct object type. |
333 | 236 | SVal AllocV = state->getSVal(CNE, callerCtx); |
334 | 236 | AllocV = svalBuilder.evalCast( |
335 | 236 | AllocV, CNE->getType(), |
336 | 236 | getContext().getPointerType(getContext().VoidTy)); |
337 | | |
338 | 236 | state = addObjectUnderConstruction(state, CNE, calleeCtx->getParent(), |
339 | 236 | AllocV); |
340 | 236 | } |
341 | 41.5k | } |
342 | | |
343 | 42.5k | if (!ShouldRepeatCall) { |
344 | 42.0k | state = removeStateTraitsUsedForArrayEvaluation( |
345 | 42.0k | state, dyn_cast_or_null<CXXConstructExpr>(CE), callerCtx); |
346 | 42.0k | } |
347 | | |
348 | | // Step 3: BindedRetNode -> CleanedNodes |
349 | | // If we can find a statement and a block in the inlined function, run remove |
350 | | // dead bindings before returning from the call. This is important to ensure |
351 | | // that we report the issues such as leaks in the stack contexts in which |
352 | | // they occurred. |
353 | 42.5k | ExplodedNodeSet CleanedNodes; |
354 | 42.5k | if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
355 | 38.6k | static SimpleProgramPointTag retValBind("ExprEngine", "Bind Return Value"); |
356 | 38.6k | PostStmt Loc(LastSt, calleeCtx, &retValBind); |
357 | 38.6k | bool isNew; |
358 | 38.6k | ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew); |
359 | 38.6k | BindedRetNode->addPredecessor(CEBNode, G); |
360 | 38.6k | if (!isNew) |
361 | 0 | return; |
362 | | |
363 | 38.6k | NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode); |
364 | 38.6k | currBldrCtx = &Ctx; |
365 | | // Here, we call the Symbol Reaper with 0 statement and callee location |
366 | | // context, telling it to clean up everything in the callee's context |
367 | | // (and its children). We use the callee's function body as a diagnostic |
368 | | // statement, with which the program point will be associated. |
369 | 38.6k | removeDead(BindedRetNode, CleanedNodes, nullptr, calleeCtx, |
370 | 38.6k | calleeCtx->getAnalysisDeclContext()->getBody(), |
371 | 38.6k | ProgramPoint::PostStmtPurgeDeadSymbolsKind); |
372 | 38.6k | currBldrCtx = nullptr; |
373 | 38.6k | } else { |
374 | 3.84k | CleanedNodes.Add(CEBNode); |
375 | 3.84k | } |
376 | | |
377 | 42.5k | for (ExplodedNode *N : CleanedNodes) { |
378 | | // Step 4: Generate the CallExit and leave the callee's context. |
379 | | // CleanedNodes -> CEENode |
380 | 40.8k | CallExitEnd Loc(calleeCtx, callerCtx); |
381 | 40.8k | bool isNew; |
382 | 40.8k | ProgramStateRef CEEState = (N == CEBNode) ? state : N->getState();
383 | | |
384 | 40.8k | ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew); |
385 | 40.8k | CEENode->addPredecessor(N, G); |
386 | 40.8k | if (!isNew) |
387 | 0 | return; |
388 | | |
389 | | // Step 5: Perform the post-condition check of the CallExpr and enqueue the |
390 | | // result onto the work list. |
391 | | // CEENode -> Dst -> WorkList |
392 | 40.8k | NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode); |
393 | 40.8k | SaveAndRestore<const NodeBuilderContext *> NBCSave(currBldrCtx, &Ctx); |
394 | 40.8k | SaveAndRestore CBISave(currStmtIdx, calleeCtx->getIndex()); |
395 | | |
396 | 40.8k | CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState); |
397 | | |
398 | 40.8k | ExplodedNodeSet DstPostCall; |
399 | 40.8k | if (llvm::isa_and_nonnull<CXXNewExpr>(CE)) { |
400 | 236 | ExplodedNodeSet DstPostPostCallCallback; |
401 | 236 | getCheckerManager().runCheckersForPostCall(DstPostPostCallCallback, |
402 | 236 | CEENode, *UpdatedCall, *this, |
403 | 236 | /*wasInlined=*/true); |
404 | 236 | for (ExplodedNode *I : DstPostPostCallCallback) { |
405 | 236 | getCheckerManager().runCheckersForNewAllocator( |
406 | 236 | cast<CXXAllocatorCall>(*UpdatedCall), DstPostCall, I, *this, |
407 | 236 | /*wasInlined=*/true); |
408 | 236 | } |
409 | 40.6k | } else { |
410 | 40.6k | getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode, |
411 | 40.6k | *UpdatedCall, *this, |
412 | 40.6k | /*wasInlined=*/true); |
413 | 40.6k | } |
414 | 40.8k | ExplodedNodeSet Dst; |
415 | 40.8k | if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) { |
416 | 625 | getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg, |
417 | 625 | *this, |
418 | 625 | /*wasInlined=*/true); |
419 | 40.2k | } else if (CE && |
420 | 40.2k | !(isa<CXXNewExpr>(CE) && // Called when visiting CXXNewExpr.
421 | 39.2k | AMgr.getAnalyzerOptions().MayInlineCXXAllocator)) {
422 | 39.0k | getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE, |
423 | 39.0k | *this, /*wasInlined=*/true); |
424 | 39.0k | } else { |
425 | 1.18k | Dst.insert(DstPostCall); |
426 | 1.18k | } |
427 | | |
428 | | // Enqueue the next element in the block. |
429 | 40.8k | for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end(); |
430 | 81.6k | PSI != PSE; ++PSI) {
431 | 40.8k | unsigned Idx = calleeCtx->getIndex() + (ShouldRepeatCall ? 0 : 1);
432 | | |
433 | 40.8k | Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(), Idx); |
434 | 40.8k | } |
435 | 40.8k | } |
436 | 42.5k | } |
437 | | |
438 | 46.9k | bool ExprEngine::isSmall(AnalysisDeclContext *ADC) const { |
439 | | // When there are no branches in the function, it means that there's no |
440 | | // exponential complexity introduced by inlining such function. |
441 | | // Such functions also don't trigger various fundamental problems |
442 | | // with our inlining mechanism, such as the problem of |
443 | | // inlined defensive checks. Hence isLinear(). |
444 | 46.9k | const CFG *Cfg = ADC->getCFG(); |
445 | 46.9k | return Cfg->isLinear() || Cfg->size() <= AMgr.options.AlwaysInlineSize;
446 | 46.9k | } |
447 | | |
448 | 11.1k | bool ExprEngine::isLarge(AnalysisDeclContext *ADC) const { |
449 | 11.1k | const CFG *Cfg = ADC->getCFG(); |
450 | 11.1k | return Cfg->size() >= AMgr.options.MinCFGSizeTreatFunctionsAsLarge; |
451 | 11.1k | } |
452 | | |
453 | 5.14k | bool ExprEngine::isHuge(AnalysisDeclContext *ADC) const { |
454 | 5.14k | const CFG *Cfg = ADC->getCFG(); |
455 | 5.14k | return Cfg->getNumBlockIDs() > AMgr.options.MaxInlinableSize; |
456 | 5.14k | } |
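 | | // Editorial summary of the size classes used by the inlining policy below
 | | // (derived from the three predicates above; the thresholds are analyzer
 | | // options):
 | | //   isSmall: linear CFG, or CFG size <= AlwaysInlineSize
 | | //   isLarge: CFG size >= MinCFGSizeTreatFunctionsAsLarge
 | | //   isHuge : number of CFG blocks > MaxInlinableSize (mayInlineDecl rejects
 | | //            such functions outright)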
457 | | |
458 | | void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx, |
459 | 28.6k | bool &IsRecursive, unsigned &StackDepth) { |
460 | 28.6k | IsRecursive = false; |
461 | 28.6k | StackDepth = 0; |
462 | | |
463 | 77.8k | while (LCtx) { |
464 | 49.2k | if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) { |
465 | 49.1k | const Decl *DI = SFC->getDecl(); |
466 | | |
467 | | // Mark recursive (and mutually recursive) functions and always count |
468 | | // them when measuring the stack depth. |
469 | 49.1k | if (DI == D) { |
470 | 2.55k | IsRecursive = true; |
471 | 2.55k | ++StackDepth; |
472 | 2.55k | LCtx = LCtx->getParent(); |
473 | 2.55k | continue; |
474 | 2.55k | } |
475 | | |
476 | | // Do not count the small functions when determining the stack depth. |
477 | 46.6k | AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI); |
478 | 46.6k | if (!isSmall(CalleeADC)) |
479 | 14.4k | ++StackDepth; |
480 | 46.6k | } |
481 | 46.6k | LCtx = LCtx->getParent(); |
482 | 46.6k | } |
483 | 28.6k | } |
484 | | |
485 | | // The GDM component containing the dynamic dispatch bifurcation info. When |
486 | | // the exact type of the receiver is not known, we want to explore both paths - |
487 | | // one on which we do inline it and the other one on which we don't. This is |
488 | | // done to ensure we do not drop coverage. |
489 | | // This is a map from the receiver region to a bool, specifying whether we
490 | | // consider this region's information precise along the given path.
491 | | namespace { |
492 | | enum DynamicDispatchMode { |
493 | | DynamicDispatchModeInlined = 1, |
494 | | DynamicDispatchModeConservative |
495 | | }; |
496 | | } // end anonymous namespace |
497 | | |
498 | | REGISTER_MAP_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap, |
499 | | const MemRegion *, unsigned) |
500 | | REGISTER_TRAIT_WITH_PROGRAMSTATE(CTUDispatchBifurcation, bool) |
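 | | // Editorial sketch (assumed example): for a message `[obj foo]` where the
 | | // dynamic type of `obj` is only a guess, BifurcateCall (below) produces two
 | | // successors from the same node: one state tagged DynamicDispatchModeInlined,
 | | // in which the guessed definition is inlined, and one tagged
 | | // DynamicDispatchModeConservative, in which the call is evaluated
 | | // conservatively. The tag is keyed on the receiver's region, so each region
 | | // is split at most once per path.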
501 | | |
502 | | void ExprEngine::ctuBifurcate(const CallEvent &Call, const Decl *D, |
503 | | NodeBuilder &Bldr, ExplodedNode *Pred, |
504 | 35.1k | ProgramStateRef State) { |
505 | 35.1k | ProgramStateRef ConservativeEvalState = nullptr; |
506 | 35.1k | if (Call.isForeign() && !isSecondPhaseCTU()) {
507 | 124 | const auto IK = AMgr.options.getCTUPhase1Inlining(); |
508 | 124 | const bool DoInline = IK == CTUPhase1InliningKind::All || |
509 | 124 | (IK == CTUPhase1InliningKind::Small &&
510 | 92 | isSmall(AMgr.getAnalysisDeclContext(D)));
511 | 124 | if (DoInline) { |
512 | 89 | inlineCall(Engine.getWorkList(), Call, D, Bldr, Pred, State); |
513 | 89 | return; |
514 | 89 | } |
515 | 35 | const bool BState = State->get<CTUDispatchBifurcation>(); |
516 | 35 | if (!BState) { // This is the first time we see this foreign function. |
517 | | // Enqueue it to be analyzed in the second (ctu) phase. |
518 | 16 | inlineCall(Engine.getCTUWorkList(), Call, D, Bldr, Pred, State); |
519 | | // Conservatively evaluate in the first phase. |
520 | 16 | ConservativeEvalState = State->set<CTUDispatchBifurcation>(true); |
521 | 16 | conservativeEvalCall(Call, Bldr, Pred, ConservativeEvalState); |
522 | 19 | } else { |
523 | 19 | conservativeEvalCall(Call, Bldr, Pred, State); |
524 | 19 | } |
525 | 35 | return; |
526 | 124 | } |
527 | 35.0k | inlineCall(Engine.getWorkList(), Call, D, Bldr, Pred, State); |
528 | 35.0k | } |
529 | | |
530 | | void ExprEngine::inlineCall(WorkList *WList, const CallEvent &Call, |
531 | | const Decl *D, NodeBuilder &Bldr, |
532 | 35.1k | ExplodedNode *Pred, ProgramStateRef State) { |
533 | 35.1k | assert(D); |
534 | | |
535 | 35.1k | const LocationContext *CurLC = Pred->getLocationContext(); |
536 | 35.1k | const StackFrameContext *CallerSFC = CurLC->getStackFrame(); |
537 | 35.1k | const LocationContext *ParentOfCallee = CallerSFC; |
538 | 35.1k | if (Call.getKind() == CE_Block && |
539 | 35.1k | !cast<BlockCall>(Call).isConversionFromLambda()) {
540 | 181 | const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion(); |
541 | 181 | assert(BR && "If we have the block definition we should have its region"); |
542 | 181 | AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D); |
543 | 181 | ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC, |
544 | 181 | cast<BlockDecl>(D), |
545 | 181 | BR); |
546 | 181 | } |
547 | | |
548 | | // This may be NULL, but that's fine. |
549 | 35.1k | const Expr *CallE = Call.getOriginExpr(); |
550 | | |
551 | | // Construct a new stack frame for the callee. |
552 | 35.1k | AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D); |
553 | 35.1k | const StackFrameContext *CalleeSFC = |
554 | 35.1k | CalleeADC->getStackFrame(ParentOfCallee, CallE, currBldrCtx->getBlock(), |
555 | 35.1k | currBldrCtx->blockCount(), currStmtIdx); |
556 | | |
557 | 35.1k | CallEnter Loc(CallE, CalleeSFC, CurLC); |
558 | | |
559 | | // Construct a new state which contains the mapping from actual to |
560 | | // formal arguments. |
561 | 35.1k | State = State->enterStackFrame(Call, CalleeSFC); |
562 | | |
563 | 35.1k | bool isNew; |
564 | 35.1k | if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) { |
565 | 35.1k | N->addPredecessor(Pred, G); |
566 | 35.1k | if (isNew) |
567 | 35.1k | WList->enqueue(N); |
568 | 35.1k | } |
569 | | |
570 | | // If we decided to inline the call, the successor has been manually |
571 | | // added onto the work list so remove it from the node builder. |
572 | 35.1k | Bldr.takeNodes(Pred); |
573 | | |
574 | 35.1k | NumInlinedCalls++; |
575 | 35.1k | Engine.FunctionSummaries->bumpNumTimesInlined(D); |
576 | | |
577 | | // Do not mark as visited in the 2nd run (CTUWList), so the function will |
578 | | // be visited as top-level, this way we won't loose reports in non-ctu |
579 | | // be visited as top-level; this way we won't lose reports in non-CTU
580 | | // mode. Consider the case when a function in a foreign TU calls back
581 | | // Note, during the 1st run, it doesn't matter if we mark the foreign |
582 | | // functions as visited (or not) because they can never appear as a top level |
583 | | // function in the main TU. |
584 | 35.1k | if (!isSecondPhaseCTU()) |
585 | | // Mark the decl as visited. |
586 | 35.1k | if (VisitedCallees) |
587 | 35.1k | VisitedCallees->insert(D); |
588 | 35.1k | } |
589 | | |
590 | | static ProgramStateRef getInlineFailedState(ProgramStateRef State, |
591 | 70.5k | const Stmt *CallE) { |
592 | 70.5k | const void *ReplayState = State->get<ReplayWithoutInlining>(); |
593 | 70.5k | if (!ReplayState) |
594 | 70.5k | return nullptr; |
595 | | |
596 | 35 | assert(ReplayState == CallE && "Backtracked to the wrong call."); |
597 | 35 | (void)CallE; |
598 | | |
599 | 35 | return State->remove<ReplayWithoutInlining>(); |
600 | 35 | } |
601 | | |
602 | | void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred, |
603 | 80.8k | ExplodedNodeSet &dst) { |
604 | | // Perform the previsit of the CallExpr. |
605 | 80.8k | ExplodedNodeSet dstPreVisit; |
606 | 80.8k | getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this); |
607 | | |
608 | | // Get the call in its initial state. We use this as a template to perform |
609 | | // all the checks. |
610 | 80.8k | CallEventManager &CEMgr = getStateManager().getCallEventManager(); |
611 | 80.8k | CallEventRef<> CallTemplate = CEMgr.getSimpleCall( |
612 | 80.8k | CE, Pred->getState(), Pred->getLocationContext(), getCFGElementRef()); |
613 | | |
614 | | // Evaluate the function call. We try each of the checkers |
615 | | // to see if they can evaluate the function call.
616 | 80.8k | ExplodedNodeSet dstCallEvaluated; |
617 | 80.8k | for (ExplodedNode *N : dstPreVisit) { |
618 | 80.7k | evalCall(dstCallEvaluated, N, *CallTemplate); |
619 | 80.7k | } |
620 | | |
621 | | // Finally, perform the post-condition check of the CallExpr and store |
622 | | // the created nodes in 'Dst'. |
623 | | // Note that if the call was inlined, dstCallEvaluated will be empty. |
624 | | // The post-CallExpr check will occur in processCallExit. |
625 | 80.8k | getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE, |
626 | 80.8k | *this); |
627 | 80.8k | } |
628 | | |
629 | | ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State, |
630 | 116k | const CallEvent &Call) { |
631 | 116k | const Expr *E = Call.getOriginExpr(); |
632 | | // FIXME: Constructors to placement arguments of operator new |
633 | | // are not supported yet. |
634 | 116k | if (!E || isa<CXXNewExpr>(E))
635 | 1.18k | return State; |
636 | | |
637 | 115k | const LocationContext *LC = Call.getLocationContext(); |
638 | 218k | for (unsigned CallI = 0, CallN = Call.getNumArgs(); CallI != CallN; ++CallI) {
639 | 103k | unsigned I = Call.getASTArgumentIndex(CallI); |
640 | 103k | if (std::optional<SVal> V = getObjectUnderConstruction(State, {E, I}, LC)) { |
641 | 11.4k | SVal VV = *V; |
642 | 11.4k | (void)VV; |
643 | 11.4k | assert(cast<VarRegion>(VV.castAs<loc::MemRegionVal>().getRegion()) |
644 | 11.4k | ->getStackFrame()->getParent() |
645 | 11.4k | ->getStackFrame() == LC->getStackFrame()); |
646 | 11.4k | State = finishObjectConstruction(State, {E, I}, LC); |
647 | 11.4k | } |
648 | 103k | } |
649 | | |
650 | 115k | return State; |
651 | 115k | } |
652 | | |
653 | | void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst, |
654 | | ExplodedNode *Pred, |
655 | 73.9k | const CallEvent &Call) { |
656 | 73.9k | ProgramStateRef State = Pred->getState(); |
657 | 73.9k | ProgramStateRef CleanedState = finishArgumentConstruction(State, Call); |
658 | 73.9k | if (CleanedState == State) { |
659 | 64.0k | Dst.insert(Pred); |
660 | 64.0k | return; |
661 | 64.0k | } |
662 | | |
663 | 9.89k | const Expr *E = Call.getOriginExpr(); |
664 | 9.89k | const LocationContext *LC = Call.getLocationContext(); |
665 | 9.89k | NodeBuilder B(Pred, Dst, *currBldrCtx); |
666 | 9.89k | static SimpleProgramPointTag Tag("ExprEngine", |
667 | 9.89k | "Finish argument construction"); |
668 | 9.89k | PreStmt PP(E, LC, &Tag); |
669 | 9.89k | B.generateNode(PP, CleanedState, Pred); |
670 | 9.89k | } |
671 | | |
672 | | void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred, |
673 | 80.7k | const CallEvent &Call) { |
674 | | // WARNING: At this time, the state attached to 'Call' may be older than the |
675 | | // state in 'Pred'. This is a minor optimization since CheckerManager will |
676 | | // use an updated CallEvent instance when calling checkers, but if 'Call' is |
677 | | // ever used directly in this function all callers should be updated to pass |
678 | | // the most recent state. (It is probably not worth doing the work here since |
679 | | // for some callers this will not be necessary.) |
680 | | |
681 | | // Run any pre-call checks using the generic call interface. |
682 | 80.7k | ExplodedNodeSet dstPreVisit; |
683 | 80.7k | getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred, |
684 | 80.7k | Call, *this); |
685 | | |
686 | | // Actually evaluate the function call. We try each of the checkers |
687 | | // to see if they can evaluate the function call, and get a callback at
688 | | // defaultEvalCall if all of them fail. |
689 | 80.7k | ExplodedNodeSet dstCallEvaluated; |
690 | 80.7k | getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit, |
691 | 80.7k | Call, *this, EvalCallOptions()); |
692 | | |
693 | | // If there were other constructors called for object-type arguments |
694 | | // of this call, clean them up. |
695 | 80.7k | ExplodedNodeSet dstArgumentCleanup; |
696 | 80.7k | for (ExplodedNode *I : dstCallEvaluated) |
697 | 55.9k | finishArgumentConstruction(dstArgumentCleanup, I, Call); |
698 | | |
699 | 80.7k | ExplodedNodeSet dstPostCall; |
700 | 80.7k | getCheckerManager().runCheckersForPostCall(dstPostCall, dstArgumentCleanup, |
701 | 80.7k | Call, *this); |
702 | | |
703 | | // Escaping symbols conjured while invalidating the regions above.
704 | | // Note that, for inlined calls the nodes were put back into the worklist, |
705 | | // so we can assume that every node belongs to a conservative call at this |
706 | | // point. |
707 | | |
708 | | // Run pointerEscape callback with the newly conjured symbols. |
709 | 80.7k | SmallVector<std::pair<SVal, SVal>, 8> Escaped; |
710 | 80.7k | for (ExplodedNode *I : dstPostCall) { |
711 | 55.2k | NodeBuilder B(I, Dst, *currBldrCtx); |
712 | 55.2k | ProgramStateRef State = I->getState(); |
713 | 55.2k | Escaped.clear(); |
714 | 55.2k | { |
715 | 55.2k | unsigned Arg = -1; |
716 | 55.8k | for (const ParmVarDecl *PVD : Call.parameters()) { |
717 | 55.8k | ++Arg; |
718 | 55.8k | QualType ParamTy = PVD->getType(); |
719 | 55.8k | if (ParamTy.isNull() || |
720 | 55.8k | (!ParamTy->isPointerType() && !ParamTy->isReferenceType()))
721 | 34.2k | continue; |
722 | 21.5k | QualType Pointee = ParamTy->getPointeeType(); |
723 | 21.5k | if (Pointee.isConstQualified() || Pointee->isVoidType())
724 | 15.5k | continue; |
725 | 6.02k | if (const MemRegion *MR = Call.getArgSVal(Arg).getAsRegion()) |
726 | 5.70k | Escaped.emplace_back(loc::MemRegionVal(MR), State->getSVal(MR, Pointee)); |
727 | 6.02k | } |
728 | 55.2k | } |
729 | | |
730 | 55.2k | State = processPointerEscapedOnBind(State, Escaped, I->getLocationContext(), |
731 | 55.2k | PSK_EscapeOutParameters, &Call); |
732 | | |
733 | 55.2k | if (State == I->getState()) |
734 | 55.2k | Dst.insert(I); |
735 | 6 | else |
736 | 6 | B.generateNode(I->getLocation(), State, I); |
737 | 55.2k | } |
738 | 80.7k | } |
739 | | |
740 | | ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call, |
741 | | const LocationContext *LCtx, |
742 | 47.9k | ProgramStateRef State) { |
743 | 47.9k | const Expr *E = Call.getOriginExpr(); |
744 | 47.9k | if (!E) |
745 | 820 | return State; |
746 | | |
747 | | // Some method families have known return values. |
748 | 47.1k | if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) { |
749 | 3.64k | switch (Msg->getMethodFamily()) { |
750 | 3.32k | default: |
751 | 3.32k | break; |
752 | 3.32k | case OMF_autorelease: |
753 | 308 | case OMF_retain: |
754 | 311 | case OMF_self: { |
755 | | // These methods return their receivers. |
756 | 311 | return State->BindExpr(E, LCtx, Msg->getReceiverSVal()); |
757 | 308 | } |
758 | 3.64k | } |
759 | 43.4k | } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){ |
760 | 14.0k | SVal ThisV = C->getCXXThisVal(); |
761 | 14.0k | ThisV = State->getSVal(ThisV.castAs<Loc>()); |
762 | 14.0k | return State->BindExpr(E, LCtx, ThisV); |
763 | 14.0k | } |
764 | | |
765 | 32.7k | SVal R; |
766 | 32.7k | QualType ResultTy = Call.getResultType(); |
767 | 32.7k | unsigned Count = currBldrCtx->blockCount(); |
768 | 32.7k | if (auto RTC = getCurrentCFGElement().getAs<CFGCXXRecordTypedCall>()) { |
769 | | // Conjure a temporary if the function returns an object by value. |
770 | 2.06k | SVal Target; |
771 | 2.06k | assert(RTC->getStmt() == Call.getOriginExpr()); |
772 | 2.06k | EvalCallOptions CallOpts; // FIXME: We won't really need those. |
773 | 2.06k | std::tie(State, Target) = handleConstructionContext( |
774 | 2.06k | Call.getOriginExpr(), State, currBldrCtx, LCtx, |
775 | 2.06k | RTC->getConstructionContext(), CallOpts); |
776 | 2.06k | const MemRegion *TargetR = Target.getAsRegion(); |
777 | 2.06k | assert(TargetR); |
778 | | // Invalidate the region so that it doesn't look uninitialized. If this is
779 | | // a field or element constructor, we do not want to invalidate |
780 | | // the whole structure. Pointer escape is meaningless because |
781 | | // the structure is a product of conservative evaluation |
782 | | // and therefore contains nothing interesting at this point. |
783 | 2.06k | RegionAndSymbolInvalidationTraits ITraits; |
784 | 2.06k | ITraits.setTrait(TargetR, |
785 | 2.06k | RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion); |
786 | 2.06k | State = State->invalidateRegions(TargetR, E, Count, LCtx, |
787 | 2.06k | /* CausesPointerEscape=*/false, nullptr, |
788 | 2.06k | &Call, &ITraits); |
789 | | |
790 | 2.06k | R = State->getSVal(Target.castAs<Loc>(), E->getType()); |
791 | 30.6k | } else { |
792 | | // Conjure a symbol if the return value is unknown. |
793 | | |
794 | | // See if we need to conjure a heap pointer instead of |
795 | | // a regular unknown pointer. |
796 | 30.6k | const auto *CNE = dyn_cast<CXXNewExpr>(E); |
797 | 30.6k | if (CNE && CNE->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
798 | 796 | R = svalBuilder.getConjuredHeapSymbolVal(E, LCtx, Count); |
799 | 796 | const MemRegion *MR = R.getAsRegion()->StripCasts(); |
800 | | |
801 | | // Store the extent of the allocated object(s). |
802 | 796 | SVal ElementCount; |
803 | 796 | if (const Expr *SizeExpr = CNE->getArraySize().value_or(nullptr)) { |
804 | 161 | ElementCount = State->getSVal(SizeExpr, LCtx); |
805 | 635 | } else { |
806 | 635 | ElementCount = svalBuilder.makeIntVal(1, /*IsUnsigned=*/true); |
807 | 635 | } |
808 | | |
809 | 796 | SVal ElementSize = getElementExtent(CNE->getAllocatedType(), svalBuilder); |
810 | | |
811 | 796 | SVal Size = |
812 | 796 | svalBuilder.evalBinOp(State, BO_Mul, ElementCount, ElementSize, |
813 | 796 | svalBuilder.getArrayIndexType()); |
814 | | |
815 | | // FIXME: This line is to prevent a crash. For more details please check |
816 | | // issue #56264. |
817 | 796 | if (Size.isUndef()) |
818 | 2 | Size = UnknownVal(); |
819 | | |
820 | 796 | State = setDynamicExtent(State, MR, Size.castAs<DefinedOrUnknownSVal>(), |
821 | 796 | svalBuilder); |
822 | 29.8k | } else { |
823 | 29.8k | R = svalBuilder.conjureSymbolVal(nullptr, E, LCtx, ResultTy, Count); |
824 | 29.8k | } |
825 | 30.6k | } |
826 | 32.7k | return State->BindExpr(E, LCtx, R); |
827 | 32.7k | } |
828 | | |
829 | | // Conservatively evaluate call by invalidating regions and binding |
830 | | // a conjured return value. |
831 | | void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr, |
832 | 35.4k | ExplodedNode *Pred, ProgramStateRef State) { |
833 | 35.4k | State = Call.invalidateRegions(currBldrCtx->blockCount(), State); |
834 | 35.4k | State = bindReturnValue(Call, Pred->getLocationContext(), State); |
835 | | |
836 | | // And make the result node. |
837 | 35.4k | static SimpleProgramPointTag PT("ExprEngine", "Conservative eval call"); |
838 | 35.4k | Bldr.generateNode(Call.getProgramPoint(false, &PT), State, Pred); |
839 | 35.4k | } |
840 | | |
841 | | ExprEngine::CallInlinePolicy |
842 | | ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred, |
843 | | AnalyzerOptions &Opts, |
844 | 28.9k | const EvalCallOptions &CallOpts) { |
845 | 28.9k | const LocationContext *CurLC = Pred->getLocationContext(); |
846 | 28.9k | const StackFrameContext *CallerSFC = CurLC->getStackFrame(); |
847 | 28.9k | switch (Call.getKind()) { |
848 | 11.0k | case CE_Function: |
849 | 11.2k | case CE_Block: |
850 | 11.2k | break; |
851 | 3.96k | case CE_CXXMember: |
852 | 6.47k | case CE_CXXMemberOperator: |
853 | 6.47k | if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions)) |
854 | 0 | return CIP_DisallowedAlways; |
855 | 6.47k | break; |
856 | 9.55k | case CE_CXXConstructor: { |
857 | 9.55k | if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors)) |
858 | 0 | return CIP_DisallowedAlways; |
859 | | |
860 | 9.55k | const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call); |
861 | | |
862 | 9.55k | const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr(); |
863 | | |
864 | 9.55k | auto CCE = getCurrentCFGElement().getAs<CFGConstructor>(); |
865 | 9.55k | const ConstructionContext *CC = CCE ? CCE->getConstructionContext()
866 | 9.55k | : nullptr;
867 | | |
868 | 9.55k | if (llvm::isa_and_nonnull<NewAllocatedObjectConstructionContext>(CC) && |
869 | 9.55k | !Opts.MayInlineCXXAllocator)
870 | 3 | return CIP_DisallowedOnce; |
871 | | |
872 | 9.55k | if (CallOpts.IsArrayCtorOrDtor) { |
873 | 625 | if (!shouldInlineArrayConstruction(Pred->getState(), CtorExpr, CurLC)) |
874 | 73 | return CIP_DisallowedOnce; |
875 | 625 | } |
876 | | |
877 | | // Inlining constructors requires including initializers in the CFG. |
878 | 9.47k | const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext(); |
879 | 9.47k | assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers"); |
880 | 9.47k | (void)ADC; |
881 | | |
882 | | // If the destructor is trivial, it's always safe to inline the constructor. |
883 | 9.47k | if (Ctor.getDecl()->getParent()->hasTrivialDestructor()) |
884 | 8.32k | break; |
885 | | |
886 | | // For other types, only inline constructors if destructor inlining is |
887 | | // also enabled. |
888 | 1.15k | if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors)) |
889 | 14 | return CIP_DisallowedAlways; |
890 | | |
891 | 1.13k | if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete) { |
892 | | // If we don't handle temporary destructors, we shouldn't inline |
893 | | // their constructors. |
894 | 1.00k | if (CallOpts.IsTemporaryCtorOrDtor && |
895 | 1.00k | !Opts.ShouldIncludeTemporaryDtorsInCFG)
896 | 74 | return CIP_DisallowedOnce; |
897 | | |
898 | | // If we did not find the correct this-region, it would be pointless |
899 | | // to inline the constructor. Instead we will simply invalidate |
900 | | // the fake temporary target. |
901 | 932 | if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion) |
902 | 11 | return CIP_DisallowedOnce; |
903 | | |
904 | | // If the temporary is lifetime-extended by binding it to a reference-type |
905 | | // field within an aggregate, automatic destructors don't work properly. |
906 | 921 | if (CallOpts.IsTemporaryLifetimeExtendedViaAggregate) |
907 | 16 | return CIP_DisallowedOnce; |
908 | 921 | } |
909 | | |
910 | 1.03k | break; |
911 | 1.13k | } |
912 | 1.03k | case CE_CXXInheritedConstructor: { |
913 | | // This doesn't really increase the cost of inlining ever, because |
914 | | // the stack frame of the inherited constructor is trivial. |
915 | 4 | return CIP_Allowed; |
916 | 1.13k | } |
917 | 988 | case CE_CXXDestructor: { |
918 | 988 | if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors)) |
919 | 6 | return CIP_DisallowedAlways; |
920 | | |
921 | | // Inlining destructors requires building the CFG correctly. |
922 | 982 | const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext(); |
923 | 982 | assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors"); |
924 | 982 | (void)ADC; |
925 | | |
926 | 982 | if (CallOpts.IsArrayCtorOrDtor) { |
927 | 182 | if (!shouldInlineArrayDestruction(getElementCountOfArrayBeingDestructed( |
928 | 182 | Call, Pred->getState(), svalBuilder))) { |
929 | 9 | return CIP_DisallowedOnce; |
930 | 9 | } |
931 | 182 | } |
932 | | |
933 | | // Allow disabling temporary destructor inlining with a separate option. |
934 | 973 | if (CallOpts.IsTemporaryCtorOrDtor && |
935 | 973 | !Opts.MayInlineCXXTemporaryDtors)
936 | 1 | return CIP_DisallowedOnce; |
937 | | |
938 | | // If we did not find the correct this-region, it would be pointless |
939 | | // to inline the destructor. Instead we will simply invalidate |
940 | | // the fake temporary target. |
941 | 972 | if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion) |
942 | 15 | return CIP_DisallowedOnce; |
943 | 957 | break; |
944 | 972 | } |
945 | 957 | case CE_CXXDeallocator: |
946 | 4 | [[fallthrough]]; |
947 | 235 | case CE_CXXAllocator: |
948 | 235 | if (Opts.MayInlineCXXAllocator) |
949 | 235 | break; |
950 | | // Do not inline allocators until we model deallocators. |
951 | | // This is unfortunate, but basically necessary for smart pointers and such. |
952 | 0 | return CIP_DisallowedAlways; |
953 | 371 | case CE_ObjCMessage: |
954 | 371 | if (!Opts.MayInlineObjCMethod) |
955 | 1 | return CIP_DisallowedAlways; |
956 | 370 | if (!(Opts.getIPAMode() == IPAK_DynamicDispatch || |
957 | 370 | Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
958 | 1 | return CIP_DisallowedAlways; |
959 | 369 | break; |
960 | 28.9k | } |
961 | | |
962 | 28.6k | return CIP_Allowed; |
963 | 28.9k | } |
964 | | |
965 | | /// Returns true if the given C++ class contains a member with the given name. |
966 | | static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD, |
967 | 530 | StringRef Name) { |
968 | 530 | const IdentifierInfo &II = Ctx.Idents.get(Name); |
969 | 530 | return RD->hasMemberName(Ctx.DeclarationNames.getIdentifier(&II)); |
970 | 530 | } |
971 | | |
972 | | /// Returns true if the given C++ class is a container or iterator. |
973 | | /// |
974 | | /// Our heuristic for this is whether it contains a method named 'begin()' or a |
975 | | /// nested type named 'iterator' or 'iterator_category'. |
976 | 297 | static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) { |
977 | 297 | return hasMember(Ctx, RD, "begin") || |
978 | 297 | hasMember(Ctx, RD, "iterator") ||
979 | 297 | hasMember(Ctx, RD, "iterator_category");
980 | 297 | } |
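 | | // Editorial note (assumed example): a class such as std::vector, which has a
 | | // begin() member and a nested iterator type, matches this heuristic; methods
 | | // of such classes that live outside the analyzed code file are not inlined
 | | // unless MayInlineCXXContainerMethods is enabled (see mayInlineDecl below).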
981 | | |
982 | | /// Returns true if the given function refers to a method of a C++ container |
983 | | /// or iterator. |
984 | | /// |
985 | | /// We generally do a poor job modeling most containers right now, and might |
986 | | /// prefer not to inline their methods. |
987 | | static bool isContainerMethod(const ASTContext &Ctx, |
988 | 446 | const FunctionDecl *FD) { |
989 | 446 | if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) |
990 | 297 | return isContainerClass(Ctx, MD->getParent()); |
991 | 149 | return false; |
992 | 446 | } |
993 | | |
994 | | /// Returns true if the given function is the destructor of a class named |
995 | | /// "shared_ptr". |
996 | 4.41k | static bool isCXXSharedPtrDtor(const FunctionDecl *FD) { |
997 | 4.41k | const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(FD); |
998 | 4.41k | if (!Dtor) |
999 | 4.07k | return false; |
1000 | | |
1001 | 333 | const CXXRecordDecl *RD = Dtor->getParent(); |
1002 | 333 | if (const IdentifierInfo *II = RD->getDeclName().getAsIdentifierInfo()) |
1003 | 328 | if (II->isStr("shared_ptr")) |
1004 | 6 | return true; |
1005 | | |
1006 | 327 | return false; |
1007 | 333 | } |
1008 | | |
1009 | | /// Returns true if the function in \p CalleeADC may be inlined in general. |
1010 | | /// |
1011 | | /// This checks static properties of the function, such as its signature and |
1012 | | /// CFG, to determine whether the analyzer should ever consider inlining it, |
1013 | | /// in any context. |
1014 | 5.95k | bool ExprEngine::mayInlineDecl(AnalysisDeclContext *CalleeADC) const { |
1015 | 5.95k | AnalyzerOptions &Opts = AMgr.getAnalyzerOptions(); |
1016 | | // FIXME: Do not inline variadic calls. |
1017 | 5.95k | if (CallEvent::isVariadic(CalleeADC->getDecl())) |
1018 | 19 | return false; |
1019 | | |
1020 | | // Check certain C++-related inlining policies. |
1021 | 5.93k | ASTContext &Ctx = CalleeADC->getASTContext(); |
1022 | 5.93k | if (Ctx.getLangOpts().CPlusPlus) { |
1023 | 4.88k | if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) { |
1024 | | // Conditionally control the inlining of template functions. |
1025 | 4.66k | if (!Opts.MayInlineTemplateFunctions) |
1026 | 5 | if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate) |
1027 | 4 | return false; |
1028 | | |
1029 | | // Conditionally control the inlining of C++ standard library functions. |
1030 | 4.66k | if (!Opts.MayInlineCXXStandardLibrary) |
1031 | 19 | if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation())) |
1032 | 19 | if (AnalysisDeclContext::isInStdNamespace(FD)) |
1033 | 18 | return false; |
1034 | | |
1035 | | // Conditionally control the inlining of methods on objects that look |
1036 | | // like C++ containers. |
1037 | 4.64k | if (!Opts.MayInlineCXXContainerMethods) |
1038 | 4.31k | if (!AMgr.isInCodeFile(FD->getLocation())) |
1039 | 446 | if (isContainerMethod(Ctx, FD)) |
1040 | 235 | return false; |
1041 | | |
1042 | | // Conditionally control the inlining of the destructor of C++ shared_ptr. |
1043 | | // We don't currently do a good job modeling shared_ptr because we can't |
1044 | | // see the reference count, so treating as opaque is probably the best |
1045 | | // idea. |
1046 | 4.41k | if (!Opts.MayInlineCXXSharedPtrDtor) |
1047 | 4.41k | if (isCXXSharedPtrDtor(FD)) |
1048 | 6 | return false; |
1049 | 4.41k | } |
1050 | 4.88k | } |
1051 | | |
1052 | | // It is possible that the CFG cannot be constructed. |
1053 | | // Be safe, and check if the CalleeCFG is valid. |
1054 | 5.66k | const CFG *CalleeCFG = CalleeADC->getCFG(); |
1055 | 5.66k | if (!CalleeCFG) |
1056 | 528 | return false; |
1057 | | |
1058 | | // Do not inline large functions. |
1059 | 5.14k | if (isHuge(CalleeADC)) |
1060 | 2 | return false; |
1061 | | |
1062 | | // It is possible that the live variables analysis cannot be |
1063 | | // run. If so, bail out. |
1064 | 5.13k | if (!CalleeADC->getAnalysis<RelaxedLiveVariables>()) |
1065 | 0 | return false; |
1066 | | |
1067 | 5.13k | return true; |
1068 | 5.13k | } |
1069 | | |
1070 | | bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D, |
1071 | | const ExplodedNode *Pred, |
1072 | 70.5k | const EvalCallOptions &CallOpts) { |
1073 | 70.5k | if (!D) |
1074 | 30.1k | return false; |
1075 | | |
1076 | 40.3k | AnalysisManager &AMgr = getAnalysisManager(); |
1077 | 40.3k | AnalyzerOptions &Opts = AMgr.options; |
1078 | 40.3k | AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager(); |
1079 | 40.3k | AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D); |
1080 | | |
1081 | | // The auto-synthesized bodies are essential to inline as they are |
1082 | | // usually small and commonly used. Note: we should do this check early on to |
1083 | | // ensure we always inline these calls. |
1084 | 40.3k | if (CalleeADC->isBodyAutosynthesized()) |
1085 | 6.79k | return true; |
1086 | | |
1087 | 33.5k | if (!AMgr.shouldInlineCall()) |
1088 | 36 | return false; |
1089 | | |
1090 | | // Check if this function has been marked as non-inlinable. |
1091 | 33.5k | std::optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D); |
1092 | 33.5k | if (MayInline) { |
1093 | 27.5k | if (!*MayInline) |
1094 | 3.80k | return false; |
1095 | | |
1096 | 27.5k | } else { |
1097 | | // We haven't actually checked the static properties of this function yet. |
1098 | | // Do that now, and record our decision in the function summaries. |
1099 | 5.95k | if (mayInlineDecl(CalleeADC)) { |
1100 | 5.13k | Engine.FunctionSummaries->markMayInline(D); |
1101 | 5.13k | } else { |
1102 | 812 | Engine.FunctionSummaries->markShouldNotInline(D); |
1103 | 812 | return false; |
1104 | 812 | } |
1105 | 5.95k | } |
1106 | | |
1107 | | // Check if we should inline a call based on its kind. |
1108 | | // FIXME: this checks both static and dynamic properties of the call, which |
1109 | | // means we're redoing a bit of work that could be cached in the function |
1110 | | // summary. |
1111 | 28.9k | CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts, CallOpts); |
1112 | 28.9k | if (CIP != CIP_Allowed) { |
1113 | 224 | if (CIP == CIP_DisallowedAlways) { |
1114 | 22 | assert(!MayInline || *MayInline); |
1115 | 22 | Engine.FunctionSummaries->markShouldNotInline(D); |
1116 | 22 | } |
1117 | 224 | return false; |
1118 | 224 | } |
1119 | | |
1120 | | // Do not inline if recursive or we've reached max stack frame count. |
1121 | 28.6k | bool IsRecursive = false; |
1122 | 28.6k | unsigned StackDepth = 0; |
1123 | 28.6k | examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth); |
1124 | 28.6k | if ((StackDepth >= Opts.InlineMaxStackDepth) && |
1125 | 28.6k | (!isSmall(CalleeADC) || IsRecursive))
1126 | 286 | return false; |
1127 | | |
1128 | | // Do not inline large functions too many times. |
1129 | 28.3k | if ((Engine.FunctionSummaries->getNumTimesInlined(D) > |
1130 | 28.3k | Opts.MaxTimesInlineLarge) && |
1131 | 28.3k | isLarge(CalleeADC)) {
1132 | 0 | NumReachedInlineCountMax++; |
1133 | 0 | return false; |
1134 | 0 | } |
1135 | | |
1136 | 28.3k | if (HowToInline == Inline_Minimal && (!isSmall(CalleeADC) || IsRecursive))
1137 | 1 | return false; |
1138 | | |
1139 | 28.3k | return true; |
1140 | 28.3k | } |
1141 | | |
1142 | | bool ExprEngine::shouldInlineArrayConstruction(const ProgramStateRef State, |
1143 | | const CXXConstructExpr *CE, |
1144 | 625 | const LocationContext *LCtx) { |
1145 | 625 | if (!CE) |
1146 | 0 | return false; |
1147 | | |
1148 | | // FIXME: Handle other arrays types. |
1149 | 625 | if (const auto *CAT = dyn_cast<ConstantArrayType>(CE->getType())) { |
1150 | 514 | unsigned ArrSize = getContext().getConstantArrayElementCount(CAT); |
1151 | | |
1152 | | // This might seem counter-intuitive at first glance, but the functions are
1153 | | // closely related. Reasoning about destructors depends only on the type |
1154 | | // of the expression that initialized the memory region, which is the |
1155 | | // CXXConstructExpr. So to avoid code repetition, the work is delegated |
1156 | | // to the function that reasons about destructor inlining. Also note that |
1157 | | // if the constructors of the array elements are inlined, the destructors |
1158 | | // can also be inlined, and if the destructors can be inlined, it's safe to
1159 | | // inline the constructors. |
1160 | 514 | return shouldInlineArrayDestruction(ArrSize); |
1161 | 514 | } |
1162 | | |
1163 | | // Check if we're inside an ArrayInitLoopExpr, and it's sufficiently small. |
1164 | 111 | if (auto Size = getPendingInitLoop(State, CE, LCtx)) |
1165 | 106 | return shouldInlineArrayDestruction(*Size); |
1166 | | |
1167 | 5 | return false; |
1168 | 111 | } |
1169 | | |
1170 | 802 | bool ExprEngine::shouldInlineArrayDestruction(uint64_t Size) { |
1171 | | |
1172 | 802 | uint64_t maxAllowedSize = AMgr.options.maxBlockVisitOnPath; |
1173 | | |
1174 | | // Declaring a 0 element array is also possible. |
1175 | 802 | return Size <= maxAllowedSize && Size > 0;
1176 | 802 | } |
1177 | | |
1178 | | bool ExprEngine::shouldRepeatCtorCall(ProgramStateRef State, |
1179 | | const CXXConstructExpr *E, |
1180 | 9.34k | const LocationContext *LCtx) { |
1181 | | |
1182 | 9.34k | if (!E) |
1183 | 0 | return false; |
1184 | | |
1185 | 9.34k | auto Ty = E->getType(); |
1186 | | |
1187 | | // FIXME: Handle non constant array types |
1188 | 9.34k | if (const auto *CAT = dyn_cast<ConstantArrayType>(Ty)) { |
1189 | 456 | unsigned Size = getContext().getConstantArrayElementCount(CAT); |
1190 | 456 | return Size > getIndexOfElementToConstruct(State, E, LCtx); |
1191 | 456 | } |
1192 | | |
1193 | 8.88k | if (auto Size = getPendingInitLoop(State, E, LCtx)) |
1194 | 96 | return Size > getIndexOfElementToConstruct(State, E, LCtx); |
1195 | | |
1196 | 8.79k | return false; |
1197 | 8.88k | } |
1198 | | |
1199 | 70.6k | static bool isTrivialObjectAssignment(const CallEvent &Call) { |
1200 | 70.6k | const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call); |
1201 | 70.6k | if (!ICall) |
1202 | 57.4k | return false; |
1203 | | |
1204 | 13.2k | const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl()); |
1205 | 13.2k | if (!MD) |
1206 | 2 | return false; |
1207 | 13.2k | if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
1208 | 11.5k | return false; |
1209 | | |
1210 | 1.68k | return MD->isTrivial(); |
1211 | 13.2k | } |
1212 | | |
1213 | | void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred, |
1214 | | const CallEvent &CallTemplate, |
1215 | 70.6k | const EvalCallOptions &CallOpts) { |
1216 | | // Make sure we have the most recent state attached to the call. |
1217 | 70.6k | ProgramStateRef State = Pred->getState(); |
1218 | 70.6k | CallEventRef<> Call = CallTemplate.cloneWithState(State); |
1219 | | |
1220 | | // Special-case trivial assignment operators. |
1221 | 70.6k | if (isTrivialObjectAssignment(*Call)) { |
1222 | 122 | performTrivialCopy(Bldr, Pred, *Call); |
1223 | 122 | return; |
1224 | 122 | } |
1225 | | |
1226 | | // Try to inline the call. |
1227 | | // The origin expression here is just used as a kind of checksum; |
1228 | | // this should still be safe even for CallEvents that don't come from exprs. |
1229 | 70.5k | const Expr *E = Call->getOriginExpr(); |
1230 | | |
1231 | 70.5k | ProgramStateRef InlinedFailedState = getInlineFailedState(State, E); |
1232 | 70.5k | if (InlinedFailedState) { |
1233 | | // If we already tried once and failed, make sure we don't retry later. |
1234 | 35 | State = InlinedFailedState; |
1235 | 70.5k | } else { |
1236 | 70.5k | RuntimeDefinition RD = Call->getRuntimeDefinition(); |
1237 | 70.5k | Call->setForeign(RD.isForeign()); |
1238 | 70.5k | const Decl *D = RD.getDecl(); |
1239 | 70.5k | if (shouldInlineCall(*Call, D, Pred, CallOpts)) { |
1240 | 35.1k | if (RD.mayHaveOtherDefinitions()) { |
1241 | 20 | AnalyzerOptions &Options = getAnalysisManager().options; |
1242 | | |
1243 | | // Explore with and without inlining the call. |
1244 | 20 | if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) { |
1245 | 17 | BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred); |
1246 | 17 | return; |
1247 | 17 | } |
1248 | | |
1249 | | // Don't inline if we're not in any dynamic dispatch mode. |
1250 | 3 | if (Options.getIPAMode() != IPAK_DynamicDispatch) { |
1251 | 3 | conservativeEvalCall(*Call, Bldr, Pred, State); |
1252 | 3 | return; |
1253 | 3 | } |
1254 | 3 | } |
1255 | 35.1k | ctuBifurcate(*Call, D, Bldr, Pred, State); |
1256 | 35.1k | return; |
1257 | 35.1k | } |
1258 | 70.5k | } |
1259 | | |
1260 | | // If we can't inline it, clean up the state traits used only if the function |
1261 | | // is inlined. |
1262 | 35.3k | State = removeStateTraitsUsedForArrayEvaluation( |
1263 | 35.3k | State, dyn_cast_or_null<CXXConstructExpr>(E), Call->getLocationContext()); |
1264 | | |
1265 | | // Also handle the return value and invalidate the regions. |
1266 | 35.3k | conservativeEvalCall(*Call, Bldr, Pred, State); |
1267 | 35.3k | } |
1268 | | |
1269 | | void ExprEngine::BifurcateCall(const MemRegion *BifurReg, |
1270 | | const CallEvent &Call, const Decl *D, |
1271 | 17 | NodeBuilder &Bldr, ExplodedNode *Pred) { |
1272 | 17 | assert(BifurReg); |
1273 | 17 | BifurReg = BifurReg->StripCasts(); |
1274 | | |
1275 | | // Check if we've performed the split already - note, we only want |
1276 | | // to split the path once per memory region. |
1277 | 17 | ProgramStateRef State = Pred->getState(); |
1278 | 17 | const unsigned *BState = |
1279 | 17 | State->get<DynamicDispatchBifurcationMap>(BifurReg); |
1280 | 17 | if (BState) { |
1281 | | // If we are on "inline path", keep inlining if possible. |
1282 | 6 | if (*BState == DynamicDispatchModeInlined) |
1283 | 3 | ctuBifurcate(Call, D, Bldr, Pred, State); |
1284 | | // If inline failed, or we are on the path where we assume we |
1285 | | // don't have enough info about the receiver to inline, conjure the |
1286 | | // return value and invalidate the regions. |
1287 | 6 | conservativeEvalCall(Call, Bldr, Pred, State); |
1288 | 6 | return; |
1289 | 6 | } |
1290 | | |
1291 | | // If we got here, this is the first time we process a message to this |
1292 | | // region, so split the path. |
1293 | 11 | ProgramStateRef IState = |
1294 | 11 | State->set<DynamicDispatchBifurcationMap>(BifurReg, |
1295 | 11 | DynamicDispatchModeInlined); |
1296 | 11 | ctuBifurcate(Call, D, Bldr, Pred, IState); |
1297 | | |
1298 | 11 | ProgramStateRef NoIState = |
1299 | 11 | State->set<DynamicDispatchBifurcationMap>(BifurReg, |
1300 | 11 | DynamicDispatchModeConservative); |
1301 | 11 | conservativeEvalCall(Call, Bldr, Pred, NoIState); |
1302 | | |
1303 | 11 | NumOfDynamicDispatchPathSplits++; |
1304 | 11 | } |
1305 | | |
1306 | | void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred, |
1307 | 24.3k | ExplodedNodeSet &Dst) { |
1308 | 24.3k | ExplodedNodeSet dstPreVisit; |
1309 | 24.3k | getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this); |
1310 | | |
1311 | 24.3k | StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx); |
1312 | | |
1313 | 24.3k | if (RS->getRetValue()) { |
1314 | 21.1k | for (ExplodedNodeSet::iterator it = dstPreVisit.begin(), |
1315 | 42.2k | ei = dstPreVisit.end(); it != ei; ++it) {
1316 | 21.0k | B.generateNode(RS, *it, (*it)->getState()); |
1317 | 21.0k | } |
1318 | 21.1k | } |
1319 | 24.3k | } |