/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
Line | Count | Source
1 | | //=-- ExprEngineCallAndReturn.cpp - Support for call/return -----*- C++ -*-===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This file defines ExprEngine's support for calls and returns. |
10 | | // |
11 | | //===----------------------------------------------------------------------===// |
12 | | |
13 | | #include "PrettyStackTraceLocationContext.h" |
14 | | #include "clang/AST/CXXInheritance.h" |
15 | | #include "clang/AST/Decl.h" |
16 | | #include "clang/AST/DeclCXX.h" |
17 | | #include "clang/Analysis/Analyses/LiveVariables.h" |
18 | | #include "clang/Analysis/ConstructionContext.h" |
19 | | #include "clang/StaticAnalyzer/Core/CheckerManager.h" |
20 | | #include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h" |
21 | | #include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h" |
22 | | #include "llvm/ADT/SmallSet.h" |
23 | | #include "llvm/ADT/Statistic.h" |
24 | | #include "llvm/Support/Casting.h" |
25 | | #include "llvm/Support/Compiler.h" |
26 | | #include "llvm/Support/SaveAndRestore.h" |
27 | | |
28 | | using namespace clang; |
29 | | using namespace ento; |
30 | | |
31 | | #define DEBUG_TYPE "ExprEngine" |
32 | | |
33 | | STATISTIC(NumOfDynamicDispatchPathSplits, |
34 | | "The # of times we split the path due to imprecise dynamic dispatch info"); |
35 | | |
36 | | STATISTIC(NumInlinedCalls, |
37 | | "The # of times we inlined a call"); |
38 | | |
39 | | STATISTIC(NumReachedInlineCountMax, |
40 | | "The # of times we reached inline count maximum"); |
41 | | |
42 | | void ExprEngine::processCallEnter(NodeBuilderContext& BC, CallEnter CE, |
43 | 33.8k | ExplodedNode *Pred) { |
44 | | // Get the entry block in the CFG of the callee. |
45 | 33.8k | const StackFrameContext *calleeCtx = CE.getCalleeContext(); |
46 | 33.8k | PrettyStackTraceLocationContext CrashInfo(calleeCtx); |
47 | 33.8k | const CFGBlock *Entry = CE.getEntry(); |
48 | | |
49 | | // Validate the CFG. |
50 | 33.8k | assert(Entry->empty()); |
51 | 0 | assert(Entry->succ_size() == 1); |
52 | | |
53 | | // Get the solitary successor. |
54 | 0 | const CFGBlock *Succ = *(Entry->succ_begin()); |
55 | | |
56 | | // Construct an edge representing the starting location in the callee. |
57 | 33.8k | BlockEdge Loc(Entry, Succ, calleeCtx); |
58 | | |
59 | 33.8k | ProgramStateRef state = Pred->getState(); |
60 | | |
61 | | // Construct a new node, notify checkers that analysis of the function has |
62 | | // begun, and add the resultant nodes to the worklist. |
63 | 33.8k | bool isNew; |
64 | 33.8k | ExplodedNode *Node = G.getNode(Loc, state, false, &isNew); |
65 | 33.8k | Node->addPredecessor(Pred, G); |
66 | 33.8k | if (isNew) { |
67 | 33.8k | ExplodedNodeSet DstBegin; |
68 | 33.8k | processBeginOfFunction(BC, Node, DstBegin, Loc); |
69 | 33.8k | Engine.enqueue(DstBegin); |
70 | 33.8k | } |
71 | 33.8k | } |
72 | | |
73 | | // Find the last statement on the path to the exploded node and the |
74 | | // corresponding Block. |
75 | | static std::pair<const Stmt*, |
76 | 63.0k | const CFGBlock*> getLastStmt(const ExplodedNode *Node) { |
77 | 63.0k | const Stmt *S = nullptr; |
78 | 63.0k | const CFGBlock *Blk = nullptr; |
79 | 63.0k | const StackFrameContext *SF = Node->getStackFrame(); |
80 | | |
81 | | // Back up through the ExplodedGraph until we reach a statement node in this |
82 | | // stack frame. |
83 | 179k | while (Node) { |
84 | 179k | const ProgramPoint &PP = Node->getLocation(); |
85 | | |
86 | 179k | if (PP.getStackFrame() == SF) { |
87 | 176k | if (Optional<StmtPoint> SP = PP.getAs<StmtPoint>()) { |
88 | 56.5k | S = SP->getStmt(); |
89 | 56.5k | break; |
90 | 119k | } else if (Optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) { |
91 | 2.84k | S = CEE->getCalleeContext()->getCallSite(); |
92 | 2.84k | if (S) |
93 | 2.47k | break; |
94 | | |
95 | | // If there is no statement, this is an implicitly-generated call. |
96 | | // We'll walk backwards over it and then continue the loop to find |
97 | | // an actual statement. |
98 | 370 | Optional<CallEnter> CE; |
99 | 5.84k | do { |
100 | 5.84k | Node = Node->getFirstPred(); |
101 | 5.84k | CE = Node->getLocationAs<CallEnter>(); |
102 | 5.84k | } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());
103 | | |
104 | | // Continue searching the graph. |
105 | 116k | } else if (Optional<BlockEdge> BE = PP.getAs<BlockEdge>()) { |
106 | 65.7k | Blk = BE->getSrc(); |
107 | 65.7k | } |
108 | 3.39k | } else if (Optional<CallEnter> CE = PP.getAs<CallEnter>()) { |
109 | | // If we reached the CallEnter for this function, it has no statements. |
110 | 3.39k | if (CE->getCalleeContext() == SF) |
111 | 3.39k | break; |
112 | 3.39k | } |
113 | | |
114 | 117k | if (Node->pred_empty()) |
115 | 621 | return std::make_pair(nullptr, nullptr); |
116 | | |
117 | 116k | Node = *Node->pred_begin(); |
118 | 116k | } |
119 | | |
120 | 62.4k | return std::make_pair(S, Blk); |
121 | 63.0k | } |
122 | | |
123 | | /// Adjusts a return value when the called function's return type does not |
124 | | /// match the caller's expression type. This can happen when a dynamic call |
125 | | /// is devirtualized, and the overriding method has a covariant (more specific) |
126 | | /// return type than the parent's method. For C++ objects, this means we need |
127 | | /// to add base casts. |
128 | | static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy, |
129 | 81 | StoreManager &StoreMgr) { |
130 | | // For now, the only adjustments we handle apply only to locations. |
131 | 81 | if (!V.getAs<Loc>()) |
132 | 67 | return V; |
133 | | |
134 | | // If the types already match, don't do any unnecessary work. |
135 | 14 | ExpectedTy = ExpectedTy.getCanonicalType(); |
136 | 14 | ActualTy = ActualTy.getCanonicalType(); |
137 | 14 | if (ExpectedTy == ActualTy) |
138 | 11 | return V; |
139 | | |
140 | | // No adjustment is needed between Objective-C pointer types. |
141 | 3 | if (ExpectedTy->isObjCObjectPointerType() && |
142 | 1 | ActualTy->isObjCObjectPointerType()) |
143 | 1 | return V; |
144 | | |
145 | | // C++ object pointers may need "derived-to-base" casts. |
146 | 2 | const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl(); |
147 | 2 | const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl(); |
148 | 2 | if (ExpectedClass && ActualClass) {
149 | 1 | CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true, |
150 | 1 | /*DetectVirtual=*/false); |
151 | 1 | if (ActualClass->isDerivedFrom(ExpectedClass, Paths) && |
152 | 1 | !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) { |
153 | 1 | return StoreMgr.evalDerivedToBase(V, Paths.front()); |
154 | 1 | } |
155 | 1 | } |
156 | | |
157 | | // Unfortunately, Objective-C does not enforce that overridden methods have |
158 | | // covariant return types, so we can't assert that this never happens.
159 | | // Be safe and return UnknownVal(). |
160 | 1 | return UnknownVal(); |
161 | 2 | } |
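 | | // Illustration (an assumed example, not taken from this file): if a call is
 | | // devirtualized so that Derived::clone() (returning Derived*) runs in place
 | | // of Base::clone() (returning Base*), the returned value is adjusted back to
 | | // the caller's expected Base* via evalDerivedToBase() above.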
162 | | |
163 | | void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC, |
164 | | ExplodedNode *Pred, |
165 | 21.8k | ExplodedNodeSet &Dst) { |
166 | | // Find the last statement in the function and the corresponding basic block. |
167 | 21.8k | const Stmt *LastSt = nullptr; |
168 | 21.8k | const CFGBlock *Blk = nullptr; |
169 | 21.8k | std::tie(LastSt, Blk) = getLastStmt(Pred); |
170 | 21.8k | if (!Blk || !LastSt) {
171 | 621 | Dst.Add(Pred); |
172 | 621 | return; |
173 | 621 | } |
174 | | |
175 | | // Here, we destroy the current location context. We use the current |
176 | | // function's entire body as a diagnostic statement, with which the program |
177 | | // point will be associated. However, we only want to use LastStmt as a |
178 | | // reference for what to clean up if it's a ReturnStmt; otherwise, everything |
179 | | // is dead. |
180 | 21.2k | SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC); |
181 | 21.2k | const LocationContext *LCtx = Pred->getLocationContext(); |
182 | 21.2k | removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx, |
183 | 21.2k | LCtx->getAnalysisDeclContext()->getBody(), |
184 | 21.2k | ProgramPoint::PostStmtPurgeDeadSymbolsKind); |
185 | 21.2k | } |
186 | | |
187 | | static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call, |
188 | 16.0k | const StackFrameContext *calleeCtx) { |
189 | 16.0k | const Decl *RuntimeCallee = calleeCtx->getDecl(); |
190 | 16.0k | const Decl *StaticDecl = Call->getDecl(); |
191 | 16.0k | assert(RuntimeCallee); |
192 | 16.0k | if (!StaticDecl) |
193 | 0 | return true; |
194 | 16.0k | return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl(); |
195 | 16.0k | } |
196 | | |
197 | | /// The call exit is simulated with a sequence of nodes, which occur between |
198 | | /// CallExitBegin and CallExitEnd. The following operations occur between the |
199 | | /// two program points: |
200 | | /// 1. CallExitBegin (triggers the start of call exit sequence) |
201 | | /// 2. Bind the return value |
202 | | /// 3. Run remove-dead-bindings to clean up the dead symbols from the callee.
203 | | /// 4. CallExitEnd (switch to the caller context) |
204 | | /// 5. PostStmt<CallExpr> |
205 | 41.2k | void ExprEngine::processCallExit(ExplodedNode *CEBNode) { |
206 | | // Step 1 CEBNode was generated before the call. |
207 | 41.2k | PrettyStackTraceLocationContext CrashInfo(CEBNode->getLocationContext()); |
208 | 41.2k | const StackFrameContext *calleeCtx = CEBNode->getStackFrame(); |
209 | | |
210 | | // The parent context might not be a stack frame, so make sure we |
211 | | // look up the first enclosing stack frame. |
212 | 41.2k | const StackFrameContext *callerCtx = |
213 | 41.2k | calleeCtx->getParent()->getStackFrame(); |
214 | | |
215 | 41.2k | const Stmt *CE = calleeCtx->getCallSite(); |
216 | 41.2k | ProgramStateRef state = CEBNode->getState(); |
217 | | // Find the last statement in the function and the corresponding basic block. |
218 | 41.2k | const Stmt *LastSt = nullptr; |
219 | 41.2k | const CFGBlock *Blk = nullptr; |
220 | 41.2k | std::tie(LastSt, Blk) = getLastStmt(CEBNode); |
221 | | |
222 | | // Generate a CallEvent /before/ cleaning the state, so that we can get the |
223 | | // correct value for 'this' (if necessary). |
224 | 41.2k | CallEventManager &CEMgr = getStateManager().getCallEventManager(); |
225 | 41.2k | CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state); |
226 | | |
227 | | // Step 2: generate node with bound return value: CEBNode -> BindedRetNode. |
228 | | |
229 | | // If the callee returns an expression, bind its value to CallExpr. |
230 | 41.2k | if (CE) { |
231 | 40.5k | if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) { |
232 | 16.0k | const LocationContext *LCtx = CEBNode->getLocationContext(); |
233 | 16.0k | SVal V = state->getSVal(RS, LCtx); |
234 | | |
235 | | // Ensure that the return type matches the type of the returned Expr. |
236 | 16.0k | if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) { |
237 | 81 | QualType ReturnedTy = |
238 | 81 | CallEvent::getDeclaredResultType(calleeCtx->getDecl()); |
239 | 81 | if (!ReturnedTy.isNull()) { |
240 | 81 | if (const Expr *Ex = dyn_cast<Expr>(CE)) { |
241 | 81 | V = adjustReturnValue(V, Ex->getType(), ReturnedTy, |
242 | 81 | getStoreManager()); |
243 | 81 | } |
244 | 81 | } |
245 | 81 | } |
246 | | |
247 | 16.0k | state = state->BindExpr(CE, callerCtx, V); |
248 | 16.0k | } |
249 | | |
250 | | // Bind the constructed object value to CXXConstructExpr. |
251 | 40.5k | if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) { |
252 | 8.80k | loc::MemRegionVal This = |
253 | 8.80k | svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx); |
254 | 8.80k | SVal ThisV = state->getSVal(This); |
255 | 8.80k | ThisV = state->getSVal(ThisV.castAs<Loc>()); |
256 | 8.80k | state = state->BindExpr(CCE, callerCtx, ThisV); |
257 | 8.80k | } |
258 | | |
259 | 40.5k | if (const auto *CNE = dyn_cast<CXXNewExpr>(CE)) { |
260 | | // We are currently evaluating a CXXNewAllocator CFGElement. It takes a |
261 | | // while to reach the actual CXXNewExpr element from here, so keep the |
262 | | // region for later use. |
263 | | // Additionally cast the return value of the inlined operator new |
264 | | // (which is of type 'void *') to the correct object type. |
265 | 366 | SVal AllocV = state->getSVal(CNE, callerCtx); |
266 | 366 | AllocV = svalBuilder.evalCast( |
267 | 366 | AllocV, CNE->getType(), |
268 | 366 | getContext().getPointerType(getContext().VoidTy)); |
269 | | |
270 | 366 | state = addObjectUnderConstruction(state, CNE, calleeCtx->getParent(), |
271 | 366 | AllocV); |
272 | 366 | } |
273 | 40.5k | } |
274 | | |
275 | | // Step 3: BindedRetNode -> CleanedNodes |
276 | | // If we can find a statement and a block in the inlined function, run remove |
277 | | // dead bindings before returning from the call. This is important to ensure |
278 | | // that we report issues such as leaks in the stack contexts in which
279 | | // they occurred. |
280 | 41.2k | ExplodedNodeSet CleanedNodes; |
281 | 41.2k | if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
282 | 37.8k | static SimpleProgramPointTag retValBind("ExprEngine", "Bind Return Value"); |
283 | 37.8k | PostStmt Loc(LastSt, calleeCtx, &retValBind); |
284 | 37.8k | bool isNew; |
285 | 37.8k | ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew); |
286 | 37.8k | BindedRetNode->addPredecessor(CEBNode, G); |
287 | 37.8k | if (!isNew) |
288 | 0 | return; |
289 | | |
290 | 37.8k | NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode); |
291 | 37.8k | currBldrCtx = &Ctx; |
292 | | // Here, we call the Symbol Reaper with a null statement and the callee's
293 | | // location context, telling it to clean up everything in the callee's context
294 | | // (and its children). We use the callee's function body as a diagnostic |
295 | | // statement, with which the program point will be associated. |
296 | 37.8k | removeDead(BindedRetNode, CleanedNodes, nullptr, calleeCtx, |
297 | 37.8k | calleeCtx->getAnalysisDeclContext()->getBody(), |
298 | 37.8k | ProgramPoint::PostStmtPurgeDeadSymbolsKind); |
299 | 37.8k | currBldrCtx = nullptr; |
300 | 3.39k | } else { |
301 | 3.39k | CleanedNodes.Add(CEBNode); |
302 | 3.39k | } |
303 | | |
304 | 41.2k | for (ExplodedNodeSet::iterator I = CleanedNodes.begin(), |
305 | 80.6k | E = CleanedNodes.end(); I != E; ++I) {
306 | | |
307 | | // Step 4: Generate the CallExit and leave the callee's context. |
308 | | // CleanedNodes -> CEENode |
309 | 39.4k | CallExitEnd Loc(calleeCtx, callerCtx); |
310 | 39.4k | bool isNew; |
311 | 39.4k | ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();
312 | | |
313 | 39.4k | ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew); |
314 | 39.4k | CEENode->addPredecessor(*I, G); |
315 | 39.4k | if (!isNew) |
316 | 0 | return; |
317 | | |
318 | | // Step 5: Perform the post-condition check of the CallExpr and enqueue the |
319 | | // result onto the work list. |
320 | | // CEENode -> Dst -> WorkList |
321 | 39.4k | NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode); |
322 | 39.4k | SaveAndRestore<const NodeBuilderContext*> NBCSave(currBldrCtx, |
323 | 39.4k | &Ctx); |
324 | 39.4k | SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex()); |
325 | | |
326 | 39.4k | CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState); |
327 | | |
328 | 39.4k | ExplodedNodeSet DstPostCall; |
329 | 39.4k | if (llvm::isa_and_nonnull<CXXNewExpr>(CE)) { |
330 | 366 | ExplodedNodeSet DstPostPostCallCallback; |
331 | 366 | getCheckerManager().runCheckersForPostCall(DstPostPostCallCallback, |
332 | 366 | CEENode, *UpdatedCall, *this, |
333 | 366 | /*wasInlined=*/true); |
334 | 366 | for (ExplodedNode *I : DstPostPostCallCallback) { |
335 | 366 | getCheckerManager().runCheckersForNewAllocator( |
336 | 366 | cast<CXXAllocatorCall>(*UpdatedCall), DstPostCall, I, *this, |
337 | 366 | /*wasInlined=*/true); |
338 | 366 | } |
339 | 39.0k | } else { |
340 | 39.0k | getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode, |
341 | 39.0k | *UpdatedCall, *this, |
342 | 39.0k | /*wasInlined=*/true); |
343 | 39.0k | } |
344 | 39.4k | ExplodedNodeSet Dst; |
345 | 39.4k | if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) { |
346 | 591 | getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg, |
347 | 591 | *this, |
348 | 591 | /*wasInlined=*/true); |
349 | 38.8k | } else if (CE && |
350 | 38.1k | !(isa<CXXNewExpr>(CE) && // Called when visiting CXXNewExpr. |
351 | 37.7k | AMgr.getAnalyzerOptions().MayInlineCXXAllocator)) {
352 | 37.7k | getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE, |
353 | 37.7k | *this, /*wasInlined=*/true); |
354 | 1.08k | } else { |
355 | 1.08k | Dst.insert(DstPostCall); |
356 | 1.08k | } |
357 | | |
358 | | // Enqueue the next element in the block. |
359 | 39.4k | for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end(); |
360 | 78.8k | PSI != PSE; ++PSI) {
361 | 39.4k | Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(), |
362 | 39.4k | calleeCtx->getIndex()+1); |
363 | 39.4k | } |
364 | 39.4k | } |
365 | 41.2k | } |
366 | | |
367 | 66.7k | bool ExprEngine::isSmall(AnalysisDeclContext *ADC) const { |
368 | | // When there are no branches in the function, it means that there's no |
369 | | // exponential complexity introduced by inlining such function. |
370 | | // Such functions also don't trigger various fundamental problems |
371 | | // with our inlining mechanism, such as the problem of |
372 | | // inlined defensive checks. Hence isLinear(). |
373 | 66.7k | const CFG *Cfg = ADC->getCFG(); |
374 | 66.7k | return Cfg->isLinear() || Cfg->size() <= AMgr.options.AlwaysInlineSize;
375 | 66.7k | } |
376 | | |
377 | 16.7k | bool ExprEngine::isLarge(AnalysisDeclContext *ADC) const { |
378 | 16.7k | const CFG *Cfg = ADC->getCFG(); |
379 | 16.7k | return Cfg->size() >= AMgr.options.MinCFGSizeTreatFunctionsAsLarge; |
380 | 16.7k | } |
381 | | |
382 | 4.68k | bool ExprEngine::isHuge(AnalysisDeclContext *ADC) const { |
383 | 4.68k | const CFG *Cfg = ADC->getCFG(); |
384 | 4.68k | return Cfg->getNumBlockIDs() > AMgr.options.MaxInlinableSize; |
385 | 4.68k | } |
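 | | // Taken together, isSmall/isLarge/isHuge gate the inlining heuristics in
 | | // shouldInlineCall() and examineStackFrames() below; the thresholds come from
 | | // AnalyzerOptions (AlwaysInlineSize, MinCFGSizeTreatFunctionsAsLarge,
 | | // MaxInlinableSize), which map to -analyzer-config keys (spellings assumed
 | | // here, e.g. max-inlinable-size=N) rather than being hard-coded.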
386 | | |
387 | | void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx, |
388 | 33.6k | bool &IsRecursive, unsigned &StackDepth) { |
389 | 33.6k | IsRecursive = false; |
390 | 33.6k | StackDepth = 0; |
391 | | |
392 | 102k | while (LCtx) { |
393 | 69.0k | if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) { |
394 | 68.9k | const Decl *DI = SFC->getDecl(); |
395 | | |
396 | | // Mark recursive (and mutually recursive) functions and always count |
397 | | // them when measuring the stack depth. |
398 | 68.9k | if (DI == D) { |
399 | 2.53k | IsRecursive = true; |
400 | 2.53k | ++StackDepth; |
401 | 2.53k | LCtx = LCtx->getParent(); |
402 | 2.53k | continue; |
403 | 2.53k | } |
404 | | |
405 | | // Do not count the small functions when determining the stack depth. |
406 | 66.4k | AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI); |
407 | 66.4k | if (!isSmall(CalleeADC)) |
408 | 18.5k | ++StackDepth; |
409 | 66.4k | } |
410 | 66.4k | LCtx = LCtx->getParent(); |
411 | 66.4k | } |
412 | 33.6k | } |
413 | | |
414 | | // The GDM component containing the dynamic dispatch bifurcation info. When |
415 | | // the exact type of the receiver is not known, we want to explore both paths - |
416 | | // one on which we do inline it and the other one on which we don't. This is |
417 | | // done to ensure we do not drop coverage. |
418 | | // This is the map from the receiver region to a bool, specifying whether we
419 | | // consider this region's information precise or not along the given path. |
420 | | namespace { |
421 | | enum DynamicDispatchMode { |
422 | | DynamicDispatchModeInlined = 1, |
423 | | DynamicDispatchModeConservative |
424 | | }; |
425 | | } // end anonymous namespace |
426 | | |
427 | | REGISTER_MAP_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap, |
428 | | const MemRegion *, unsigned) |
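 | | // The macro above registers the map as a program-state (GDM) trait. In rough
 | | // outline (a sketch, not the literal macro expansion), it enables the
 | | // accessors used by BifurcateCall() below:
 | | //   const unsigned *Mode = State->get<DynamicDispatchBifurcationMap>(Region);
 | | //   ProgramStateRef NewState =
 | | //       State->set<DynamicDispatchBifurcationMap>(Region, DynamicDispatchModeInlined);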
429 | | |
430 | | bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D, |
431 | | NodeBuilder &Bldr, ExplodedNode *Pred, |
432 | 33.8k | ProgramStateRef State) { |
433 | 33.8k | assert(D); |
434 | | |
435 | 0 | const LocationContext *CurLC = Pred->getLocationContext(); |
436 | 33.8k | const StackFrameContext *CallerSFC = CurLC->getStackFrame(); |
437 | 33.8k | const LocationContext *ParentOfCallee = CallerSFC; |
438 | 33.8k | if (Call.getKind() == CE_Block && |
439 | 177 | !cast<BlockCall>(Call).isConversionFromLambda()) { |
440 | 170 | const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion(); |
441 | 170 | assert(BR && "If we have the block definition we should have its region"); |
442 | 0 | AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D); |
443 | 170 | ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC, |
444 | 170 | cast<BlockDecl>(D), |
445 | 170 | BR); |
446 | 170 | } |
447 | | |
448 | | // This may be NULL, but that's fine. |
449 | 0 | const Expr *CallE = Call.getOriginExpr(); |
450 | | |
451 | | // Construct a new stack frame for the callee. |
452 | 33.8k | AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D); |
453 | 33.8k | const StackFrameContext *CalleeSFC = |
454 | 33.8k | CalleeADC->getStackFrame(ParentOfCallee, CallE, currBldrCtx->getBlock(), |
455 | 33.8k | currBldrCtx->blockCount(), currStmtIdx); |
456 | | |
457 | 33.8k | CallEnter Loc(CallE, CalleeSFC, CurLC); |
458 | | |
459 | | // Construct a new state which contains the mapping from actual to |
460 | | // formal arguments. |
461 | 33.8k | State = State->enterStackFrame(Call, CalleeSFC); |
462 | | |
463 | 33.8k | bool isNew; |
464 | 33.8k | if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) { |
465 | 33.8k | N->addPredecessor(Pred, G); |
466 | 33.8k | if (isNew) |
467 | 33.8k | Engine.getWorkList()->enqueue(N); |
468 | 33.8k | } |
469 | | |
470 | | // If we decided to inline the call, the successor has been manually |
471 | | // added onto the work list so remove it from the node builder. |
472 | 33.8k | Bldr.takeNodes(Pred); |
473 | | |
474 | 33.8k | NumInlinedCalls++; |
475 | 33.8k | Engine.FunctionSummaries->bumpNumTimesInlined(D); |
476 | | |
477 | | // Mark the decl as visited. |
478 | 33.8k | if (VisitedCallees) |
479 | 33.8k | VisitedCallees->insert(D); |
480 | | |
481 | 33.8k | return true; |
482 | 33.8k | } |
483 | | |
484 | | static ProgramStateRef getInlineFailedState(ProgramStateRef State, |
485 | 65.3k | const Stmt *CallE) { |
486 | 65.3k | const void *ReplayState = State->get<ReplayWithoutInlining>(); |
487 | 65.3k | if (!ReplayState) |
488 | 65.2k | return nullptr; |
489 | | |
490 | 35 | assert(ReplayState == CallE && "Backtracked to the wrong call."); |
491 | 0 | (void)CallE; |
492 | | |
493 | 35 | return State->remove<ReplayWithoutInlining>(); |
494 | 65.3k | } |
495 | | |
496 | | void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred, |
497 | 69.2k | ExplodedNodeSet &dst) { |
498 | | // Perform the previsit of the CallExpr. |
499 | 69.2k | ExplodedNodeSet dstPreVisit; |
500 | 69.2k | getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this); |
501 | | |
502 | | // Get the call in its initial state. We use this as a template to perform |
503 | | // all the checks. |
504 | 69.2k | CallEventManager &CEMgr = getStateManager().getCallEventManager(); |
505 | 69.2k | CallEventRef<> CallTemplate |
506 | 69.2k | = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext()); |
507 | | |
508 | | // Evaluate the function call. We try each of the checkers |
509 | | // to see if they can evaluate the function call.
510 | 69.2k | ExplodedNodeSet dstCallEvaluated; |
511 | 69.2k | for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end(); |
512 | 138k | I != E; ++I) {
513 | 69.1k | evalCall(dstCallEvaluated, *I, *CallTemplate); |
514 | 69.1k | } |
515 | | |
516 | | // Finally, perform the post-condition check of the CallExpr and store |
517 | | // the created nodes in 'Dst'. |
518 | | // Note that if the call was inlined, dstCallEvaluated will be empty. |
519 | | // The post-CallExpr check will occur in processCallExit. |
520 | 69.2k | getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE, |
521 | 69.2k | *this); |
522 | 69.2k | } |
523 | | |
524 | | ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State, |
525 | 103k | const CallEvent &Call) { |
526 | 103k | const Expr *E = Call.getOriginExpr(); |
527 | | // FIXME: Constructors to placement arguments of operator new |
528 | | // are not supported yet. |
529 | 103k | if (!E || isa<CXXNewExpr>(E))
530 | 1.08k | return State; |
531 | | |
532 | 102k | const LocationContext *LC = Call.getLocationContext(); |
533 | 191k | for (unsigned CallI = 0, CallN = Call.getNumArgs(); CallI != CallN; ++CallI) {
534 | 88.9k | unsigned I = Call.getASTArgumentIndex(CallI); |
535 | 88.9k | if (Optional<SVal> V = |
536 | 11.6k | getObjectUnderConstruction(State, {E, I}, LC)) { |
537 | 11.6k | SVal VV = *V; |
538 | 11.6k | (void)VV; |
539 | 11.6k | assert(cast<VarRegion>(VV.castAs<loc::MemRegionVal>().getRegion()) |
540 | 11.6k | ->getStackFrame()->getParent() |
541 | 11.6k | ->getStackFrame() == LC->getStackFrame()); |
542 | 0 | State = finishObjectConstruction(State, {E, I}, LC); |
543 | 11.6k | } |
544 | 88.9k | } |
545 | | |
546 | 102k | return State; |
547 | 103k | } |
548 | | |
549 | | void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst, |
550 | | ExplodedNode *Pred, |
551 | 62.3k | const CallEvent &Call) { |
552 | 62.3k | ProgramStateRef State = Pred->getState(); |
553 | 62.3k | ProgramStateRef CleanedState = finishArgumentConstruction(State, Call); |
554 | 62.3k | if (CleanedState == State) { |
555 | 52.4k | Dst.insert(Pred); |
556 | 52.4k | return; |
557 | 52.4k | } |
558 | | |
559 | 9.88k | const Expr *E = Call.getOriginExpr(); |
560 | 9.88k | const LocationContext *LC = Call.getLocationContext(); |
561 | 9.88k | NodeBuilder B(Pred, Dst, *currBldrCtx); |
562 | 9.88k | static SimpleProgramPointTag Tag("ExprEngine", |
563 | 9.88k | "Finish argument construction"); |
564 | 9.88k | PreStmt PP(E, LC, &Tag); |
565 | 9.88k | B.generateNode(PP, CleanedState, Pred); |
566 | 9.88k | } |
567 | | |
568 | | void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred, |
569 | 69.1k | const CallEvent &Call) { |
570 | | // WARNING: At this time, the state attached to 'Call' may be older than the |
571 | | // state in 'Pred'. This is a minor optimization since CheckerManager will |
572 | | // use an updated CallEvent instance when calling checkers, but if 'Call' is |
573 | | // ever used directly in this function all callers should be updated to pass |
574 | | // the most recent state. (It is probably not worth doing the work here since |
575 | | // for some callers this will not be necessary.) |
576 | | |
577 | | // Run any pre-call checks using the generic call interface. |
578 | 69.1k | ExplodedNodeSet dstPreVisit; |
579 | 69.1k | getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred, |
580 | 69.1k | Call, *this); |
581 | | |
582 | | // Actually evaluate the function call. We try each of the checkers |
583 | | // to see if they can evaluate the function call, and get a callback at
584 | | // defaultEvalCall if all of them fail. |
585 | 69.1k | ExplodedNodeSet dstCallEvaluated; |
586 | 69.1k | getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit, |
587 | 69.1k | Call, *this, EvalCallOptions()); |
588 | | |
589 | | // If there were other constructors called for object-type arguments |
590 | | // of this call, clean them up. |
591 | 69.1k | ExplodedNodeSet dstArgumentCleanup; |
592 | 69.1k | for (ExplodedNode *I : dstCallEvaluated) |
593 | 44.5k | finishArgumentConstruction(dstArgumentCleanup, I, Call); |
594 | | |
595 | 69.1k | ExplodedNodeSet dstPostCall; |
596 | 69.1k | getCheckerManager().runCheckersForPostCall(dstPostCall, dstArgumentCleanup, |
597 | 69.1k | Call, *this); |
598 | | |
599 | | // Escaping symbols conjured while invalidating the regions above.
600 | | // Note that, for inlined calls the nodes were put back into the worklist, |
601 | | // so we can assume that every node belongs to a conservative call at this |
602 | | // point. |
603 | | |
604 | | // Run pointerEscape callback with the newly conjured symbols. |
605 | 69.1k | SmallVector<std::pair<SVal, SVal>, 8> Escaped; |
606 | 43.9k | for (ExplodedNode *I : dstPostCall) { |
607 | 43.9k | NodeBuilder B(I, Dst, *currBldrCtx); |
608 | 43.9k | ProgramStateRef State = I->getState(); |
609 | 43.9k | Escaped.clear(); |
610 | 43.9k | { |
611 | 43.9k | unsigned Arg = -1; |
612 | 43.2k | for (const ParmVarDecl *PVD : Call.parameters()) { |
613 | 43.2k | ++Arg; |
614 | 43.2k | QualType ParamTy = PVD->getType(); |
615 | 43.2k | if (ParamTy.isNull() || |
616 | 43.2k | (!ParamTy->isPointerType() && !ParamTy->isReferenceType()))
617 | 26.3k | continue; |
618 | 16.9k | QualType Pointee = ParamTy->getPointeeType(); |
619 | 16.9k | if (Pointee.isConstQualified() || Pointee->isVoidType())
620 | 12.3k | continue; |
621 | 4.59k | if (const MemRegion *MR = Call.getArgSVal(Arg).getAsRegion()) |
622 | 4.30k | Escaped.emplace_back(loc::MemRegionVal(MR), State->getSVal(MR, Pointee)); |
623 | 4.59k | } |
624 | 43.9k | } |
625 | | |
626 | 43.9k | State = processPointerEscapedOnBind(State, Escaped, I->getLocationContext(), |
627 | 43.9k | PSK_EscapeOutParameters, &Call); |
628 | | |
629 | 43.9k | if (State == I->getState()) |
630 | 43.9k | Dst.insert(I); |
631 | 6 | else |
632 | 6 | B.generateNode(I->getLocation(), State, I); |
633 | 43.9k | } |
634 | 69.1k | } |
635 | | |
636 | | ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call, |
637 | | const LocationContext *LCtx, |
638 | 42.3k | ProgramStateRef State) { |
639 | 42.3k | const Expr *E = Call.getOriginExpr(); |
640 | 42.3k | if (!E) |
641 | 770 | return State; |
642 | | |
643 | | // Some method families have known return values. |
644 | 41.5k | if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) { |
645 | 3.46k | switch (Msg->getMethodFamily()) { |
646 | 3.15k | default: |
647 | 3.15k | break; |
648 | 132 | case OMF_autorelease: |
649 | 308 | case OMF_retain: |
650 | 311 | case OMF_self: { |
651 | | // These methods return their receivers. |
652 | 311 | return State->BindExpr(E, LCtx, Msg->getReceiverSVal()); |
653 | 308 | } |
654 | 3.46k | } |
655 | 38.1k | } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){ |
656 | 12.4k | SVal ThisV = C->getCXXThisVal(); |
657 | 12.4k | ThisV = State->getSVal(ThisV.castAs<Loc>()); |
658 | 12.4k | return State->BindExpr(E, LCtx, ThisV); |
659 | 12.4k | } |
660 | | |
661 | 28.8k | SVal R; |
662 | 28.8k | QualType ResultTy = Call.getResultType(); |
663 | 28.8k | unsigned Count = currBldrCtx->blockCount(); |
664 | 28.8k | if (auto RTC = getCurrentCFGElement().getAs<CFGCXXRecordTypedCall>()) { |
665 | | // Conjure a temporary if the function returns an object by value. |
666 | 2.04k | SVal Target; |
667 | 2.04k | assert(RTC->getStmt() == Call.getOriginExpr()); |
668 | 0 | EvalCallOptions CallOpts; // FIXME: We won't really need those. |
669 | 2.04k | std::tie(State, Target) = |
670 | 2.04k | handleConstructionContext(Call.getOriginExpr(), State, LCtx, |
671 | 2.04k | RTC->getConstructionContext(), CallOpts); |
672 | 2.04k | const MemRegion *TargetR = Target.getAsRegion(); |
673 | 2.04k | assert(TargetR); |
674 | | // Invalidate the region so that it doesn't look uninitialized. If this is
675 | | // a field or element constructor, we do not want to invalidate |
676 | | // the whole structure. Pointer escape is meaningless because |
677 | | // the structure is a product of conservative evaluation |
678 | | // and therefore contains nothing interesting at this point. |
679 | 0 | RegionAndSymbolInvalidationTraits ITraits; |
680 | 2.04k | ITraits.setTrait(TargetR, |
681 | 2.04k | RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion); |
682 | 2.04k | State = State->invalidateRegions(TargetR, E, Count, LCtx, |
683 | 2.04k | /* CausesPointerEscape=*/false, nullptr, |
684 | 2.04k | &Call, &ITraits); |
685 | | |
686 | 2.04k | R = State->getSVal(Target.castAs<Loc>(), E->getType()); |
687 | 26.8k | } else { |
688 | | // Conjure a symbol if the return value is unknown. |
689 | | |
690 | | // See if we need to conjure a heap pointer instead of |
691 | | // a regular unknown pointer. |
692 | 26.8k | bool IsHeapPointer = false; |
693 | 26.8k | if (const auto *CNE = dyn_cast<CXXNewExpr>(E)) |
694 | 652 | if (CNE->getOperatorNew()->isReplaceableGlobalAllocationFunction()) { |
695 | | // FIXME: Delegate this to evalCall in MallocChecker? |
696 | 604 | IsHeapPointer = true; |
697 | 604 | } |
698 | | |
699 | 26.8k | R = IsHeapPointer ? svalBuilder.getConjuredHeapSymbolVal(E, LCtx, Count)604 |
700 | 26.2k | : svalBuilder.conjureSymbolVal(nullptr, E, LCtx, ResultTy, |
701 | 26.2k | Count); |
702 | 26.8k | } |
703 | 0 | return State->BindExpr(E, LCtx, R); |
704 | 41.5k | } |
705 | | |
706 | | // Conservatively evaluate call by invalidating regions and binding |
707 | | // a conjured return value. |
708 | | void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr, |
709 | 31.5k | ExplodedNode *Pred, ProgramStateRef State) { |
710 | 31.5k | State = Call.invalidateRegions(currBldrCtx->blockCount(), State); |
711 | 31.5k | State = bindReturnValue(Call, Pred->getLocationContext(), State); |
712 | | |
713 | | // And make the result node. |
714 | 31.5k | Bldr.generateNode(Call.getProgramPoint(), State, Pred); |
715 | 31.5k | } |
716 | | |
717 | | ExprEngine::CallInlinePolicy |
718 | | ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred, |
719 | | AnalyzerOptions &Opts, |
720 | 33.9k | const EvalCallOptions &CallOpts) { |
721 | 33.9k | const LocationContext *CurLC = Pred->getLocationContext(); |
722 | 33.9k | const StackFrameContext *CallerSFC = CurLC->getStackFrame(); |
723 | 33.9k | switch (Call.getKind()) { |
724 | 16.6k | case CE_Function: |
725 | 16.8k | case CE_Block: |
726 | 16.8k | break; |
727 | 4.42k | case CE_CXXMember: |
728 | 6.69k | case CE_CXXMemberOperator: |
729 | 6.69k | if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions)) |
730 | 0 | return CIP_DisallowedAlways; |
731 | 6.69k | break; |
732 | 9.02k | case CE_CXXConstructor: { |
733 | 9.02k | if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors)) |
734 | 0 | return CIP_DisallowedAlways; |
735 | | |
736 | 9.02k | const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call); |
737 | | |
738 | 9.02k | const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr(); |
739 | | |
740 | 9.02k | auto CCE = getCurrentCFGElement().getAs<CFGConstructor>(); |
741 | 9.02k | const ConstructionContext *CC = CCE ? CCE->getConstructionContext()
742 | 244 | : nullptr; |
743 | | |
744 | 9.02k | if (llvm::isa_and_nonnull<NewAllocatedObjectConstructionContext>(CC) && |
745 | 325 | !Opts.MayInlineCXXAllocator) |
746 | 3 | return CIP_DisallowedOnce; |
747 | | |
748 | | // FIXME: We don't handle constructors or destructors for arrays properly. |
749 | | // Even once we do, we still need to be careful about implicitly-generated |
750 | | // initializers for array fields in default move/copy constructors. |
751 | | // We still allow construction into ElementRegion targets when they don't |
752 | | // represent array elements. |
753 | 9.01k | if (CallOpts.IsArrayCtorOrDtor) |
754 | 151 | return CIP_DisallowedOnce; |
755 | | |
756 | | // Inlining constructors requires including initializers in the CFG. |
757 | 8.86k | const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext(); |
758 | 8.86k | assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers"); |
759 | 0 | (void)ADC; |
760 | | |
761 | | // If the destructor is trivial, it's always safe to inline the constructor. |
762 | 8.86k | if (Ctor.getDecl()->getParent()->hasTrivialDestructor()) |
763 | 7.94k | break; |
764 | | |
765 | | // For other types, only inline constructors if destructor inlining is |
766 | | // also enabled. |
767 | 926 | if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors)) |
768 | 14 | return CIP_DisallowedAlways; |
769 | | |
770 | 912 | if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete) { |
771 | | // If we don't handle temporary destructors, we shouldn't inline |
772 | | // their constructors. |
773 | 782 | if (CallOpts.IsTemporaryCtorOrDtor && |
774 | 257 | !Opts.ShouldIncludeTemporaryDtorsInCFG) |
775 | 74 | return CIP_DisallowedOnce; |
776 | | |
777 | | // If we did not find the correct this-region, it would be pointless |
778 | | // to inline the constructor. Instead we will simply invalidate |
779 | | // the fake temporary target. |
780 | 708 | if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion) |
781 | 12 | return CIP_DisallowedOnce; |
782 | | |
783 | | // If the temporary is lifetime-extended by binding it to a reference-type |
784 | | // field within an aggregate, automatic destructors don't work properly. |
785 | 696 | if (CallOpts.IsTemporaryLifetimeExtendedViaAggregate) |
786 | 16 | return CIP_DisallowedOnce; |
787 | 696 | } |
788 | | |
789 | 810 | break; |
790 | 912 | } |
791 | 4 | case CE_CXXInheritedConstructor: { |
792 | | // This doesn't really increase the cost of inlining ever, because |
793 | | // the stack frame of the inherited constructor is trivial. |
794 | 4 | return CIP_Allowed; |
795 | 912 | } |
796 | 755 | case CE_CXXDestructor: { |
797 | 755 | if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors)) |
798 | 6 | return CIP_DisallowedAlways; |
799 | | |
800 | | // Inlining destructors requires building the CFG correctly. |
801 | 749 | const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext(); |
802 | 749 | assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors"); |
803 | 0 | (void)ADC; |
804 | | |
805 | | // FIXME: We don't handle constructors or destructors for arrays properly. |
806 | 749 | if (CallOpts.IsArrayCtorOrDtor) |
807 | 16 | return CIP_DisallowedOnce; |
808 | | |
809 | | // Allow disabling temporary destructor inlining with a separate option. |
810 | 733 | if (CallOpts.IsTemporaryCtorOrDtor && |
811 | 148 | !Opts.MayInlineCXXTemporaryDtors) |
812 | 1 | return CIP_DisallowedOnce; |
813 | | |
814 | | // If we did not find the correct this-region, it would be pointless |
815 | | // to inline the destructor. Instead we will simply invalidate |
816 | | // the fake temporary target. |
817 | 732 | if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion) |
818 | 9 | return CIP_DisallowedOnce; |
819 | 723 | break; |
820 | 732 | } |
821 | 0 | case CE_CXXDeallocator: |
822 | 0 | LLVM_FALLTHROUGH; |
823 | 361 | case CE_CXXAllocator: |
824 | 361 | if (Opts.MayInlineCXXAllocator) |
825 | 361 | break; |
826 | | // Do not inline allocators until we model deallocators. |
827 | | // This is unfortunate, but basically necessary for smart pointers and such. |
828 | 0 | return CIP_DisallowedAlways; |
829 | 352 | case CE_ObjCMessage: |
830 | 352 | if (!Opts.MayInlineObjCMethod) |
831 | 1 | return CIP_DisallowedAlways; |
832 | 351 | if (!(Opts.getIPAMode() == IPAK_DynamicDispatch || |
833 | 339 | Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate)) |
834 | 1 | return CIP_DisallowedAlways; |
835 | 350 | break; |
836 | 33.9k | } |
837 | | |
838 | 33.6k | return CIP_Allowed; |
839 | 33.9k | } |
840 | | |
841 | | /// Returns true if the given C++ class contains a member with the given name. |
842 | | static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD, |
843 | 492 | StringRef Name) { |
844 | 492 | const IdentifierInfo &II = Ctx.Idents.get(Name); |
845 | 492 | return RD->hasMemberName(Ctx.DeclarationNames.getIdentifier(&II)); |
846 | 492 | } |
847 | | |
848 | | /// Returns true if the given C++ class is a container or iterator. |
849 | | /// |
850 | | /// Our heuristic for this is whether it contains a method named 'begin()' or a |
851 | | /// nested type named 'iterator' or 'iterator_category'. |
852 | 281 | static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) { |
853 | 281 | return hasMember(Ctx, RD, "begin") || |
854 | 160 | hasMember(Ctx, RD, "iterator") || |
855 | 51 | hasMember(Ctx, RD, "iterator_category"); |
856 | 281 | } |
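 | | // For example, a class shaped like std::vector (it declares begin() and a
 | | // nested iterator type) matches this heuristic, so its methods are skipped
 | | // by mayInlineDecl() below unless MayInlineCXXContainerMethods is set.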
857 | | |
858 | | /// Returns true if the given function refers to a method of a C++ container |
859 | | /// or iterator. |
860 | | /// |
861 | | /// We generally do a poor job modeling most containers right now, and might |
862 | | /// prefer not to inline their methods. |
863 | | static bool isContainerMethod(const ASTContext &Ctx, |
864 | 486 | const FunctionDecl *FD) { |
865 | 486 | if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) |
866 | 281 | return isContainerClass(Ctx, MD->getParent()); |
867 | 205 | return false; |
868 | 486 | } |
869 | | |
870 | | /// Returns true if the given function is the destructor of a class named |
871 | | /// "shared_ptr". |
872 | 4.00k | static bool isCXXSharedPtrDtor(const FunctionDecl *FD) { |
873 | 4.00k | const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(FD); |
874 | 4.00k | if (!Dtor) |
875 | 3.71k | return false; |
876 | | |
877 | 290 | const CXXRecordDecl *RD = Dtor->getParent(); |
878 | 290 | if (const IdentifierInfo *II = RD->getDeclName().getAsIdentifierInfo()) |
879 | 290 | if (II->isStr("shared_ptr")) |
880 | 5 | return true; |
881 | | |
882 | 285 | return false; |
883 | 290 | } |
884 | | |
885 | | /// Returns true if the function in \p CalleeADC may be inlined in general. |
886 | | /// |
887 | | /// This checks static properties of the function, such as its signature and |
888 | | /// CFG, to determine whether the analyzer should ever consider inlining it, |
889 | | /// in any context. |
890 | 5.46k | bool ExprEngine::mayInlineDecl(AnalysisDeclContext *CalleeADC) const { |
891 | 5.46k | AnalyzerOptions &Opts = AMgr.getAnalyzerOptions(); |
892 | | // FIXME: Do not inline variadic calls. |
893 | 5.46k | if (CallEvent::isVariadic(CalleeADC->getDecl())) |
894 | 19 | return false; |
895 | | |
896 | | // Check certain C++-related inlining policies. |
897 | 5.45k | ASTContext &Ctx = CalleeADC->getASTContext(); |
898 | 5.45k | if (Ctx.getLangOpts().CPlusPlus) { |
899 | 4.44k | if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) { |
900 | | // Conditionally control the inlining of template functions. |
901 | 4.25k | if (!Opts.MayInlineTemplateFunctions) |
902 | 5 | if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate) |
903 | 4 | return false; |
904 | | |
905 | | // Conditionally control the inlining of C++ standard library functions. |
906 | 4.25k | if (!Opts.MayInlineCXXStandardLibrary) |
907 | 19 | if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation())) |
908 | 19 | if (AnalysisDeclContext::isInStdNamespace(FD)) |
909 | 18 | return false; |
910 | | |
911 | | // Conditionally control the inlining of methods on objects that look |
912 | | // like C++ containers. |
913 | 4.23k | if (!Opts.MayInlineCXXContainerMethods) |
914 | 3.88k | if (!AMgr.isInCodeFile(FD->getLocation())) |
915 | 486 | if (isContainerMethod(Ctx, FD)) |
916 | 230 | return false; |
917 | | |
918 | | // Conditionally control the inlining of the destructor of C++ shared_ptr. |
919 | | // We don't currently do a good job modeling shared_ptr because we can't |
920 | | // see the reference count, so treating as opaque is probably the best |
921 | | // idea. |
922 | 4.00k | if (!Opts.MayInlineCXXSharedPtrDtor) |
923 | 4.00k | if (isCXXSharedPtrDtor(FD)) |
924 | 5 | return false; |
925 | 4.00k | } |
926 | 4.44k | } |
927 | | |
928 | | // It is possible that the CFG cannot be constructed. |
929 | | // Be safe, and check if the CalleeCFG is valid. |
930 | 5.19k | const CFG *CalleeCFG = CalleeADC->getCFG(); |
931 | 5.19k | if (!CalleeCFG) |
932 | 504 | return false; |
933 | | |
934 | | // Do not inline large functions. |
935 | 4.68k | if (isHuge(CalleeADC)) |
936 | 2 | return false; |
937 | | |
938 | | // It is possible that the live variables analysis cannot be |
939 | | // run. If so, bail out. |
940 | 4.68k | if (!CalleeADC->getAnalysis<RelaxedLiveVariables>()) |
941 | 0 | return false; |
942 | | |
943 | 4.68k | return true; |
944 | 4.68k | } |
945 | | |
946 | | bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D, |
947 | | const ExplodedNode *Pred, |
948 | 65.2k | const EvalCallOptions &CallOpts) { |
949 | 65.2k | if (!D) |
950 | 26.4k | return false; |
951 | | |
952 | 38.8k | AnalysisManager &AMgr = getAnalysisManager(); |
953 | 38.8k | AnalyzerOptions &Opts = AMgr.options; |
954 | 38.8k | AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager(); |
955 | 38.8k | AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D); |
956 | | |
957 | | // The auto-synthesized bodies are essential to inline as they are |
958 | | // usually small and commonly used. Note: we should do this check early on to |
959 | | // ensure we always inline these calls. |
960 | 38.8k | if (CalleeADC->isBodyAutosynthesized()) |
961 | 409 | return true; |
962 | | |
963 | 38.4k | if (!AMgr.shouldInlineCall()) |
964 | 36 | return false; |
965 | | |
966 | | // Check if this function has been marked as non-inlinable. |
967 | 38.3k | Optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D); |
968 | 38.3k | if (MayInline.hasValue()) { |
969 | 32.9k | if (!MayInline.getValue()) |
970 | 3.62k | return false; |
971 | | |
972 | 5.46k | } else { |
973 | | // We haven't actually checked the static properties of this function yet. |
974 | | // Do that now, and record our decision in the function summaries. |
975 | 5.46k | if (mayInlineDecl(CalleeADC)) { |
976 | 4.68k | Engine.FunctionSummaries->markMayInline(D); |
977 | 782 | } else { |
978 | 782 | Engine.FunctionSummaries->markShouldNotInline(D); |
979 | 782 | return false; |
980 | 782 | } |
981 | 5.46k | } |
982 | | |
983 | | // Check if we should inline a call based on its kind. |
984 | | // FIXME: this checks both static and dynamic properties of the call, which |
985 | | // means we're redoing a bit of work that could be cached in the function |
986 | | // summary. |
987 | 33.9k | CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts, CallOpts); |
988 | 33.9k | if (CIP != CIP_Allowed) { |
989 | 304 | if (CIP == CIP_DisallowedAlways) { |
990 | 22 | assert(!MayInline.hasValue() || MayInline.getValue()); |
991 | 0 | Engine.FunctionSummaries->markShouldNotInline(D); |
992 | 22 | } |
993 | 0 | return false; |
994 | 304 | } |
995 | | |
996 | | // Do not inline if recursive or we've reached max stack frame count. |
997 | 33.6k | bool IsRecursive = false; |
998 | 33.6k | unsigned StackDepth = 0; |
999 | 33.6k | examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth); |
1000 | 33.6k | if ((StackDepth >= Opts.InlineMaxStackDepth) && |
1001 | 285 | (!isSmall(CalleeADC) || IsRecursive))
1002 | 285 | return false; |
1003 | | |
1004 | | // Do not inline large functions too many times. |
1005 | 33.3k | if ((Engine.FunctionSummaries->getNumTimesInlined(D) > |
1006 | 33.3k | Opts.MaxTimesInlineLarge) && |
1007 | 16.7k | isLarge(CalleeADC)) { |
1008 | 0 | NumReachedInlineCountMax++; |
1009 | 0 | return false; |
1010 | 0 | } |
1011 | | |
1012 | 33.3k | if (HowToInline == Inline_Minimal && (!isSmall(CalleeADC) || IsRecursive))
1013 | 1 | return false; |
1014 | | |
1015 | 33.3k | return true; |
1016 | 33.3k | } |
1017 | | |
1018 | 65.4k | static bool isTrivialObjectAssignment(const CallEvent &Call) { |
1019 | 65.4k | const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call); |
1020 | 65.4k | if (!ICall) |
1021 | 52.2k | return false; |
1022 | | |
1023 | 13.1k | const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl()); |
1024 | 13.1k | if (!MD) |
1025 | 1 | return false; |
1026 | 13.1k | if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
1027 | 11.4k | return false; |
1028 | | |
1029 | 1.67k | return MD->isTrivial(); |
1030 | 13.1k | } |
1031 | | |
1032 | | void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred, |
1033 | | const CallEvent &CallTemplate, |
1034 | 65.4k | const EvalCallOptions &CallOpts) { |
1035 | | // Make sure we have the most recent state attached to the call. |
1036 | 65.4k | ProgramStateRef State = Pred->getState(); |
1037 | 65.4k | CallEventRef<> Call = CallTemplate.cloneWithState(State); |
1038 | | |
1039 | | // Special-case trivial assignment operators. |
1040 | 65.4k | if (isTrivialObjectAssignment(*Call)) { |
1041 | 110 | performTrivialCopy(Bldr, Pred, *Call); |
1042 | 110 | return; |
1043 | 110 | } |
1044 | | |
1045 | | // Try to inline the call. |
1046 | | // The origin expression here is just used as a kind of checksum; |
1047 | | // this should still be safe even for CallEvents that don't come from exprs. |
1048 | 65.3k | const Expr *E = Call->getOriginExpr(); |
1049 | | |
1050 | 65.3k | ProgramStateRef InlinedFailedState = getInlineFailedState(State, E); |
1051 | 65.3k | if (InlinedFailedState) { |
1052 | | // If we already tried once and failed, make sure we don't retry later. |
1053 | 35 | State = InlinedFailedState; |
1054 | 65.2k | } else { |
1055 | 65.2k | RuntimeDefinition RD = Call->getRuntimeDefinition(); |
1056 | 65.2k | const Decl *D = RD.getDecl(); |
1057 | 65.2k | if (shouldInlineCall(*Call, D, Pred, CallOpts)) { |
1058 | 33.8k | if (RD.mayHaveOtherDefinitions()) { |
1059 | 18 | AnalyzerOptions &Options = getAnalysisManager().options; |
1060 | | |
1061 | | // Explore with and without inlining the call. |
1062 | 18 | if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) { |
1063 | 15 | BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred); |
1064 | 15 | return; |
1065 | 15 | } |
1066 | | |
1067 | | // Don't inline if we're not in any dynamic dispatch mode. |
1068 | 3 | if (Options.getIPAMode() != IPAK_DynamicDispatch) { |
1069 | 3 | conservativeEvalCall(*Call, Bldr, Pred, State); |
1070 | 3 | return; |
1071 | 3 | } |
1072 | 3 | } |
1073 | | |
1074 | | // We are not bifurcating and we do have a Decl, so just inline. |
1075 | 33.7k | if (inlineCall(*Call, D, Bldr, Pred, State)) |
1076 | 33.7k | return; |
1077 | 33.7k | } |
1078 | 65.2k | } |
1079 | | |
1080 | | // If we can't inline it, handle the return value and invalidate the regions. |
1081 | 31.5k | conservativeEvalCall(*Call, Bldr, Pred, State); |
1082 | 31.5k | } |
1083 | | |
1084 | | void ExprEngine::BifurcateCall(const MemRegion *BifurReg, |
1085 | | const CallEvent &Call, const Decl *D, |
1086 | 15 | NodeBuilder &Bldr, ExplodedNode *Pred) { |
1087 | 15 | assert(BifurReg); |
1088 | 0 | BifurReg = BifurReg->StripCasts(); |
1089 | | |
1090 | | // Check if we've performed the split already - note, we only want |
1091 | | // to split the path once per memory region. |
1092 | 15 | ProgramStateRef State = Pred->getState(); |
1093 | 15 | const unsigned *BState = |
1094 | 15 | State->get<DynamicDispatchBifurcationMap>(BifurReg); |
1095 | 15 | if (BState) { |
1096 | | // If we are on "inline path", keep inlining if possible. |
1097 | 6 | if (*BState == DynamicDispatchModeInlined) |
1098 | 3 | if (inlineCall(Call, D, Bldr, Pred, State)) |
1099 | 3 | return; |
1100 | | // If inline failed, or we are on the path where we assume we |
1101 | | // don't have enough info about the receiver to inline, conjure the |
1102 | | // return value and invalidate the regions. |
1103 | 3 | conservativeEvalCall(Call, Bldr, Pred, State); |
1104 | 3 | return; |
1105 | 6 | } |
1106 | | |
1107 | | // If we got here, this is the first time we process a message to this |
1108 | | // region, so split the path. |
1109 | 9 | ProgramStateRef IState = |
1110 | 9 | State->set<DynamicDispatchBifurcationMap>(BifurReg, |
1111 | 9 | DynamicDispatchModeInlined); |
1112 | 9 | inlineCall(Call, D, Bldr, Pred, IState); |
1113 | | |
1114 | 9 | ProgramStateRef NoIState = |
1115 | 9 | State->set<DynamicDispatchBifurcationMap>(BifurReg, |
1116 | 9 | DynamicDispatchModeConservative); |
1117 | 9 | conservativeEvalCall(Call, Bldr, Pred, NoIState); |
1118 | | |
1119 | 9 | NumOfDynamicDispatchPathSplits++; |
1120 | 9 | } |
1121 | | |
1122 | | void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred, |
1123 | 22.5k | ExplodedNodeSet &Dst) { |
1124 | 22.5k | ExplodedNodeSet dstPreVisit; |
1125 | 22.5k | getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this); |
1126 | | |
1127 | 22.5k | StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx); |
1128 | | |
1129 | 22.5k | if (RS->getRetValue()) { |
1130 | 19.9k | for (ExplodedNodeSet::iterator it = dstPreVisit.begin(), |
1131 | 39.8k | ei = dstPreVisit.end(); it != ei; ++it) {
1132 | 19.8k | B.generateNode(RS, *it, (*it)->getState()); |
1133 | 19.8k | } |
1134 | 19.9k | } |
1135 | 22.5k | } |