/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/lib/Analysis/AliasAnalysis.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | //==- AliasAnalysis.cpp - Generic Alias Analysis Interface Implementation --==// |
2 | | // |
3 | | // The LLVM Compiler Infrastructure |
4 | | // |
5 | | // This file is distributed under the University of Illinois Open Source |
6 | | // License. See LICENSE.TXT for details. |
7 | | // |
8 | | //===----------------------------------------------------------------------===// |
9 | | // |
10 | | // This file implements the generic AliasAnalysis interface which is used as the |
11 | | // common interface used by all clients and implementations of alias analysis. |
12 | | // |
13 | | // This file also implements the default version of the AliasAnalysis interface |
14 | | // that is to be used when no other implementation is specified. This does some |
15 | | // simple tests that detect obvious cases: two different global pointers cannot |
16 | | // alias, a global cannot alias a malloc, two different mallocs cannot alias, |
17 | | // etc. |
18 | | // |
19 | | // This alias analysis implementation really isn't very good for anything, but |
20 | | // it is very fast, and makes a nice clean default implementation. Because it |
21 | | // handles lots of little corner cases, other, more complex, alias analysis |
22 | | // implementations may choose to rely on this pass to resolve these simple and |
23 | | // easy cases. |
24 | | // |
25 | | //===----------------------------------------------------------------------===// |
26 | | |
27 | | #include "llvm/Analysis/AliasAnalysis.h" |
28 | | #include "llvm/Analysis/BasicAliasAnalysis.h" |
29 | | #include "llvm/Analysis/CFLAndersAliasAnalysis.h" |
30 | | #include "llvm/Analysis/CFLSteensAliasAnalysis.h" |
31 | | #include "llvm/Analysis/CaptureTracking.h" |
32 | | #include "llvm/Analysis/GlobalsModRef.h" |
33 | | #include "llvm/Analysis/MemoryLocation.h" |
34 | | #include "llvm/Analysis/ObjCARCAliasAnalysis.h" |
35 | | #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h" |
36 | | #include "llvm/Analysis/ScopedNoAliasAA.h" |
37 | | #include "llvm/Analysis/TargetLibraryInfo.h" |
38 | | #include "llvm/Analysis/TypeBasedAliasAnalysis.h" |
39 | | #include "llvm/Analysis/ValueTracking.h" |
40 | | #include "llvm/IR/Argument.h" |
41 | | #include "llvm/IR/Attributes.h" |
42 | | #include "llvm/IR/BasicBlock.h" |
43 | | #include "llvm/IR/CallSite.h" |
44 | | #include "llvm/IR/Instruction.h" |
45 | | #include "llvm/IR/Instructions.h" |
46 | | #include "llvm/IR/Module.h" |
47 | | #include "llvm/IR/Type.h" |
48 | | #include "llvm/IR/Value.h" |
49 | | #include "llvm/Pass.h" |
50 | | #include "llvm/Support/AtomicOrdering.h" |
51 | | #include "llvm/Support/Casting.h" |
52 | | #include "llvm/Support/CommandLine.h" |
53 | | #include <algorithm> |
54 | | #include <cassert> |
55 | | #include <functional> |
56 | | #include <iterator> |
57 | | |
58 | | using namespace llvm; |
59 | | |
60 | | /// Allow disabling BasicAA from the AA results. This is particularly useful |
61 | | /// when testing to isolate a single AA implementation. |
62 | | static cl::opt<bool> DisableBasicAA("disable-basicaa", cl::Hidden, |
63 | | cl::init(false)); |
64 | | |
65 | | AAResults::AAResults(AAResults &&Arg) |
66 | 998k | : TLI(Arg.TLI), AAs(std::move(Arg.AAs)), AADeps(std::move(Arg.AADeps)) { |
67 | 998k | for (auto &AA : AAs) |
68 | 3.91M | AA->setAAResults(this); |
69 | 998k | } |
70 | | |
71 | 12.6M | AAResults::~AAResults() { |
72 | 12.6M | // FIXME; It would be nice to at least clear out the pointers back to this |
73 | 12.6M | // aggregation here, but we end up with non-nesting lifetimes in the legacy |
74 | 12.6M | // pass manager that prevent this from working. In the legacy pass manager |
75 | 12.6M | // we'll end up with dangling references here in some cases. |
76 | | #if 0 |
77 | | for (auto &AA : AAs) |
78 | | AA->setAAResults(nullptr); |
79 | | #endif |
80 | | } |
81 | | |
82 | | bool AAResults::invalidate(Function &F, const PreservedAnalyses &PA, |
83 | 629 | FunctionAnalysisManager::Invalidator &Inv) { |
84 | 629 | // Check if the AA manager itself has been invalidated. |
85 | 629 | auto PAC = PA.getChecker<AAManager>(); |
86 | 629 | if (!PAC.preserved() && 629 !PAC.preservedSet<AllAnalysesOn<Function>>()394 ) |
87 | 386 | return true; // The manager needs to be blown away, clear everything. |
88 | 243 | |
89 | 243 | // Check all of the dependencies registered. |
90 | 243 | for (AnalysisKey *ID : AADeps) |
91 | 181 | if (181 Inv.invalidate(ID, F, PA)181 ) |
92 | 5 | return true; |
93 | 238 | |
94 | 238 | // Everything we depend on is still fine, so are we. Nothing to invalidate. |
95 | 238 | return false; |
96 | 238 | } |
97 | | |
98 | | //===----------------------------------------------------------------------===// |
99 | | // Default chaining methods |
100 | | //===----------------------------------------------------------------------===// |
101 | | |
102 | | AliasResult AAResults::alias(const MemoryLocation &LocA, |
103 | 111M | const MemoryLocation &LocB) { |
104 | 278M | for (const auto &AA : AAs) { |
105 | 278M | auto Result = AA->alias(LocA, LocB); |
106 | 278M | if (Result != MayAlias) |
107 | 65.7M | return Result; |
108 | 46.1M | } |
109 | 46.1M | return MayAlias; |
110 | 46.1M | } |
111 | | |
112 | | bool AAResults::pointsToConstantMemory(const MemoryLocation &Loc, |
113 | 26.8M | bool OrLocal) { |
114 | 26.8M | for (const auto &AA : AAs) |
115 | 94.6M | if (94.6M AA->pointsToConstantMemory(Loc, OrLocal)94.6M ) |
116 | 193k | return true; |
117 | 26.6M | |
118 | 26.6M | return false; |
119 | 26.6M | } |
120 | | |
121 | 960k | ModRefInfo AAResults::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) { |
122 | 960k | ModRefInfo Result = MRI_ModRef; |
123 | 960k | |
124 | 3.82M | for (const auto &AA : AAs) { |
125 | 3.82M | Result = ModRefInfo(Result & AA->getArgModRefInfo(CS, ArgIdx)); |
126 | 3.82M | |
127 | 3.82M | // Early-exit the moment we reach the bottom of the lattice. |
128 | 3.82M | if (Result == MRI_NoModRef) |
129 | 1 | return Result; |
130 | 960k | } |
131 | 960k | |
132 | 960k | return Result; |
133 | 960k | } |
134 | | |
135 | 21.2k | ModRefInfo AAResults::getModRefInfo(Instruction *I, ImmutableCallSite Call) { |
136 | 21.2k | // We may have two calls |
137 | 21.2k | if (auto CS21.2k = ImmutableCallSite(I)) { |
138 | 5.49k | // Check if the two calls modify the same memory |
139 | 5.49k | return getModRefInfo(CS, Call); |
140 | 15.7k | } else if (15.7k I->isFenceLike()15.7k ) { |
141 | 1 | // If this is a fence, just return MRI_ModRef. |
142 | 1 | return MRI_ModRef; |
143 | 0 | } else { |
144 | 15.7k | // Otherwise, check if the call modifies or references the |
145 | 15.7k | // location this memory access defines. The best we can say |
146 | 15.7k | // is that if the call references what this instruction |
147 | 15.7k | // defines, it must be clobbered by this location. |
148 | 15.7k | const MemoryLocation DefLoc = MemoryLocation::get(I); |
149 | 15.7k | if (getModRefInfo(Call, DefLoc) != MRI_NoModRef) |
150 | 13.7k | return MRI_ModRef; |
151 | 1.97k | } |
152 | 1.97k | return MRI_NoModRef; |
153 | 1.97k | } |
154 | | |
155 | | ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS, |
156 | 9.58M | const MemoryLocation &Loc) { |
157 | 9.58M | ModRefInfo Result = MRI_ModRef; |
158 | 9.58M | |
159 | 36.0M | for (const auto &AA : AAs) { |
160 | 36.0M | Result = ModRefInfo(Result & AA->getModRefInfo(CS, Loc)); |
161 | 36.0M | |
162 | 36.0M | // Early-exit the moment we reach the bottom of the lattice. |
163 | 36.0M | if (Result == MRI_NoModRef) |
164 | 624k | return Result; |
165 | 8.95M | } |
166 | 8.95M | |
167 | 8.95M | // Try to refine the mod-ref info further using other API entry points to the |
168 | 8.95M | // aggregate set of AA results. |
169 | 8.95M | auto MRB = getModRefBehavior(CS); |
170 | 8.95M | if (MRB == FMRB_DoesNotAccessMemory || |
171 | 8.90M | MRB == FMRB_OnlyAccessesInaccessibleMem) |
172 | 48.9k | return MRI_NoModRef; |
173 | 8.90M | |
174 | 8.90M | if (8.90M onlyReadsMemory(MRB)8.90M ) |
175 | 77.5k | Result = ModRefInfo(Result & MRI_Ref); |
176 | 8.82M | else if (8.82M doesNotReadMemory(MRB)8.82M ) |
177 | 36 | Result = ModRefInfo(Result & MRI_Mod); |
178 | 8.90M | |
179 | 8.90M | if (onlyAccessesArgPointees(MRB) || 8.90M onlyAccessesInaccessibleOrArgMem(MRB)7.70M ) { |
180 | 1.20M | bool DoesAlias = false; |
181 | 1.20M | ModRefInfo AllArgsMask = MRI_NoModRef; |
182 | 1.20M | if (doesAccessArgPointees(MRB)1.20M ) { |
183 | 4.72M | for (auto AI = CS.arg_begin(), AE = CS.arg_end(); AI != AE4.72M ; ++AI3.51M ) { |
184 | 3.51M | const Value *Arg = *AI; |
185 | 3.51M | if (!Arg->getType()->isPointerTy()) |
186 | 2.17M | continue; |
187 | 1.34M | unsigned ArgIdx = std::distance(CS.arg_begin(), AI); |
188 | 1.34M | MemoryLocation ArgLoc = MemoryLocation::getForArgument(CS, ArgIdx, TLI); |
189 | 1.34M | AliasResult ArgAlias = alias(ArgLoc, Loc); |
190 | 1.34M | if (ArgAlias != NoAlias1.34M ) { |
191 | 420k | ModRefInfo ArgMask = getArgModRefInfo(CS, ArgIdx); |
192 | 420k | DoesAlias = true; |
193 | 420k | AllArgsMask = ModRefInfo(AllArgsMask | ArgMask); |
194 | 420k | } |
195 | 3.51M | } |
196 | 1.20M | } |
197 | 1.20M | if (!DoesAlias) |
198 | 841k | return MRI_NoModRef; |
199 | 363k | Result = ModRefInfo(Result & AllArgsMask); |
200 | 363k | } |
201 | 8.90M | |
202 | 8.90M | // If Loc is a constant memory location, the call definitely could not |
203 | 8.90M | // modify the memory location. |
204 | 8.06M | if (8.06M (Result & MRI_Mod) && |
205 | 7.92M | pointsToConstantMemory(Loc, /*OrLocal*/ false)) |
206 | 25.4k | Result = ModRefInfo(Result & ~MRI_Mod); |
207 | 8.06M | |
208 | 8.06M | return Result; |
209 | 9.58M | } |
210 | | |
211 | | ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1, |
212 | 2.52M | ImmutableCallSite CS2) { |
213 | 2.52M | ModRefInfo Result = MRI_ModRef; |
214 | 2.52M | |
215 | 10.1M | for (const auto &AA : AAs) { |
216 | 10.1M | Result = ModRefInfo(Result & AA->getModRefInfo(CS1, CS2)); |
217 | 10.1M | |
218 | 10.1M | // Early-exit the moment we reach the bottom of the lattice. |
219 | 10.1M | if (Result == MRI_NoModRef) |
220 | 906 | return Result; |
221 | 2.52M | } |
222 | 2.52M | |
223 | 2.52M | // Try to refine the mod-ref info further using other API entry points to the |
224 | 2.52M | // aggregate set of AA results. |
225 | 2.52M | |
226 | 2.52M | // If CS1 or CS2 are readnone, they don't interact. |
227 | 2.52M | auto CS1B = getModRefBehavior(CS1); |
228 | 2.52M | if (CS1B == FMRB_DoesNotAccessMemory) |
229 | 8 | return MRI_NoModRef; |
230 | 2.52M | |
231 | 2.52M | auto CS2B = getModRefBehavior(CS2); |
232 | 2.52M | if (CS2B == FMRB_DoesNotAccessMemory) |
233 | 96 | return MRI_NoModRef; |
234 | 2.52M | |
235 | 2.52M | // If they both only read from memory, there is no dependence. |
236 | 2.52M | if (2.52M onlyReadsMemory(CS1B) && 2.52M onlyReadsMemory(CS2B)114k ) |
237 | 96.1k | return MRI_NoModRef; |
238 | 2.42M | |
239 | 2.42M | // If CS1 only reads memory, the only dependence on CS2 can be |
240 | 2.42M | // from CS1 reading memory written by CS2. |
241 | 2.42M | if (2.42M onlyReadsMemory(CS1B)2.42M ) |
242 | 18.4k | Result = ModRefInfo(Result & MRI_Ref); |
243 | 2.41M | else if (2.41M doesNotReadMemory(CS1B)2.41M ) |
244 | 20 | Result = ModRefInfo(Result & MRI_Mod); |
245 | 2.42M | |
246 | 2.42M | // If CS2 only access memory through arguments, accumulate the mod/ref |
247 | 2.42M | // information from CS1's references to the memory referenced by |
248 | 2.42M | // CS2's arguments. |
249 | 2.42M | if (onlyAccessesArgPointees(CS2B)2.42M ) { |
250 | 304k | ModRefInfo R = MRI_NoModRef; |
251 | 304k | if (doesAccessArgPointees(CS2B)304k ) { |
252 | 846k | for (auto I = CS2.arg_begin(), E = CS2.arg_end(); I != E846k ; ++I542k ) { |
253 | 643k | const Value *Arg = *I; |
254 | 643k | if (!Arg->getType()->isPointerTy()) |
255 | 333k | continue; |
256 | 309k | unsigned CS2ArgIdx = std::distance(CS2.arg_begin(), I); |
257 | 309k | auto CS2ArgLoc = MemoryLocation::getForArgument(CS2, CS2ArgIdx, TLI); |
258 | 309k | |
259 | 309k | // ArgMask indicates what CS2 might do to CS2ArgLoc, and the dependence |
260 | 309k | // of CS1 on that location is the inverse. |
261 | 309k | ModRefInfo ArgMask = getArgModRefInfo(CS2, CS2ArgIdx); |
262 | 309k | if (ArgMask == MRI_Mod) |
263 | 14.0k | ArgMask = MRI_ModRef; |
264 | 295k | else if (295k ArgMask == MRI_Ref295k ) |
265 | 4.94k | ArgMask = MRI_Mod; |
266 | 309k | |
267 | 309k | ArgMask = ModRefInfo(ArgMask & getModRefInfo(CS1, CS2ArgLoc)); |
268 | 309k | |
269 | 309k | R = ModRefInfo((R | ArgMask) & Result); |
270 | 309k | if (R == Result) |
271 | 101k | break; |
272 | 643k | } |
273 | 304k | } |
274 | 304k | return R; |
275 | 304k | } |
276 | 2.12M | |
277 | 2.12M | // If CS1 only accesses memory through arguments, check if CS2 references |
278 | 2.12M | // any of the memory referenced by CS1's arguments. If not, return NoModRef. |
279 | 2.12M | if (2.12M onlyAccessesArgPointees(CS1B)2.12M ) { |
280 | 217k | ModRefInfo R = MRI_NoModRef; |
281 | 217k | if (doesAccessArgPointees(CS1B)217k ) { |
282 | 503k | for (auto I = CS1.arg_begin(), E = CS1.arg_end(); I != E503k ; ++I285k ) { |
283 | 438k | const Value *Arg = *I; |
284 | 438k | if (!Arg->getType()->isPointerTy()) |
285 | 208k | continue; |
286 | 229k | unsigned CS1ArgIdx = std::distance(CS1.arg_begin(), I); |
287 | 229k | auto CS1ArgLoc = MemoryLocation::getForArgument(CS1, CS1ArgIdx, TLI); |
288 | 229k | |
289 | 229k | // ArgMask indicates what CS1 might do to CS1ArgLoc; if CS1 might Mod |
290 | 229k | // CS1ArgLoc, then we care about either a Mod or a Ref by CS2. If CS1 |
291 | 229k | // might Ref, then we care only about a Mod by CS2. |
292 | 229k | ModRefInfo ArgMask = getArgModRefInfo(CS1, CS1ArgIdx); |
293 | 229k | ModRefInfo ArgR = getModRefInfo(CS2, CS1ArgLoc); |
294 | 229k | if (((ArgMask & MRI_Mod) != MRI_NoModRef && |
295 | 217k | (ArgR & MRI_ModRef) != MRI_NoModRef) || |
296 | 75.7k | ((ArgMask & MRI_Ref) != MRI_NoModRef && |
297 | 75.1k | (ArgR & MRI_Mod) != MRI_NoModRef)) |
298 | 165k | R = ModRefInfo((R | ArgMask) & Result); |
299 | 229k | |
300 | 229k | if (R == Result) |
301 | 152k | break; |
302 | 438k | } |
303 | 217k | } |
304 | 217k | return R; |
305 | 217k | } |
306 | 1.90M | |
307 | 1.90M | return Result; |
308 | 1.90M | } |
309 | | |
310 | 22.7M | FunctionModRefBehavior AAResults::getModRefBehavior(ImmutableCallSite CS) { |
311 | 22.7M | FunctionModRefBehavior Result = FMRB_UnknownModRefBehavior; |
312 | 22.7M | |
313 | 90.0M | for (const auto &AA : AAs) { |
314 | 90.0M | Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(CS)); |
315 | 90.0M | |
316 | 90.0M | // Early-exit the moment we reach the bottom of the lattice. |
317 | 90.0M | if (Result == FMRB_DoesNotAccessMemory) |
318 | 152k | return Result; |
319 | 22.5M | } |
320 | 22.5M | |
321 | 22.5M | return Result; |
322 | 22.5M | } |
323 | | |
324 | 22.1M | FunctionModRefBehavior AAResults::getModRefBehavior(const Function *F) { |
325 | 22.1M | FunctionModRefBehavior Result = FMRB_UnknownModRefBehavior; |
326 | 22.1M | |
327 | 88.1M | for (const auto &AA : AAs) { |
328 | 88.1M | Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(F)); |
329 | 88.1M | |
330 | 88.1M | // Early-exit the moment we reach the bottom of the lattice. |
331 | 88.1M | if (Result == FMRB_DoesNotAccessMemory) |
332 | 26.0k | return Result; |
333 | 22.1M | } |
334 | 22.1M | |
335 | 22.1M | return Result; |
336 | 22.1M | } |
337 | | |
338 | | //===----------------------------------------------------------------------===// |
339 | | // Helper method implementation |
340 | | //===----------------------------------------------------------------------===// |
341 | | |
342 | | ModRefInfo AAResults::getModRefInfo(const LoadInst *L, |
343 | 2.24M | const MemoryLocation &Loc) { |
344 | 2.24M | // Be conservative in the face of atomic. |
345 | 2.24M | if (isStrongerThan(L->getOrdering(), AtomicOrdering::Unordered)) |
346 | 743 | return MRI_ModRef; |
347 | 2.24M | |
348 | 2.24M | // If the load address doesn't alias the given address, it doesn't read |
349 | 2.24M | // or write the specified memory. |
350 | 2.24M | if (2.24M Loc.Ptr && 2.24M !alias(MemoryLocation::get(L), Loc)39.5k ) |
351 | 17.2k | return MRI_NoModRef; |
352 | 2.22M | |
353 | 2.22M | // Otherwise, a load just reads. |
354 | 2.22M | return MRI_Ref; |
355 | 2.22M | } |
356 | | |
357 | | ModRefInfo AAResults::getModRefInfo(const StoreInst *S, |
358 | 20.6M | const MemoryLocation &Loc) { |
359 | 20.6M | // Be conservative in the face of atomic. |
360 | 20.6M | if (isStrongerThan(S->getOrdering(), AtomicOrdering::Unordered)) |
361 | 11.6k | return MRI_ModRef; |
362 | 20.6M | |
363 | 20.6M | if (20.6M Loc.Ptr20.6M ) { |
364 | 19.0M | // If the store address cannot alias the pointer in question, then the |
365 | 19.0M | // specified memory cannot be modified by the store. |
366 | 19.0M | if (!alias(MemoryLocation::get(S), Loc)) |
367 | 16.3M | return MRI_NoModRef; |
368 | 2.75M | |
369 | 2.75M | // If the pointer is a pointer to constant memory, then it could not have |
370 | 2.75M | // been modified by this store. |
371 | 2.75M | if (2.75M pointsToConstantMemory(Loc)2.75M ) |
372 | 23.0k | return MRI_NoModRef; |
373 | 4.26M | } |
374 | 4.26M | |
375 | 4.26M | // Otherwise, a store just writes. |
376 | 4.26M | return MRI_Mod; |
377 | 4.26M | } |
378 | | |
379 | 46.7k | ModRefInfo AAResults::getModRefInfo(const FenceInst *S, const MemoryLocation &Loc) { |
380 | 46.7k | // If we know that the location is a constant memory location, the fence |
381 | 46.7k | // cannot modify this location. |
382 | 46.7k | if (Loc.Ptr && 46.7k pointsToConstantMemory(Loc)42.0k ) |
383 | 1 | return MRI_Ref; |
384 | 46.7k | return MRI_ModRef; |
385 | 46.7k | } |
386 | | |
387 | | ModRefInfo AAResults::getModRefInfo(const VAArgInst *V, |
388 | 793 | const MemoryLocation &Loc) { |
389 | 793 | if (Loc.Ptr793 ) { |
390 | 625 | // If the va_arg address cannot alias the pointer in question, then the |
391 | 625 | // specified memory cannot be accessed by the va_arg. |
392 | 625 | if (!alias(MemoryLocation::get(V), Loc)) |
393 | 375 | return MRI_NoModRef; |
394 | 250 | |
395 | 250 | // If the pointer is a pointer to constant memory, then it could not have |
396 | 250 | // been modified by this va_arg. |
397 | 250 | if (250 pointsToConstantMemory(Loc)250 ) |
398 | 0 | return MRI_NoModRef; |
399 | 418 | } |
400 | 418 | |
401 | 418 | // Otherwise, a va_arg reads and writes. |
402 | 418 | return MRI_ModRef; |
403 | 418 | } |
404 | | |
405 | | ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad, |
406 | 2 | const MemoryLocation &Loc) { |
407 | 2 | if (Loc.Ptr2 ) { |
408 | 0 | // If the pointer is a pointer to constant memory, |
409 | 0 | // then it could not have been modified by this catchpad. |
410 | 0 | if (pointsToConstantMemory(Loc)) |
411 | 0 | return MRI_NoModRef; |
412 | 2 | } |
413 | 2 | |
414 | 2 | // Otherwise, a catchpad reads and writes. |
415 | 2 | return MRI_ModRef; |
416 | 2 | } |
417 | | |
418 | | ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet, |
419 | 6 | const MemoryLocation &Loc) { |
420 | 6 | if (Loc.Ptr6 ) { |
421 | 4 | // If the pointer is a pointer to constant memory, |
422 | 4 | // then it could not have been modified by this catchpad. |
423 | 4 | if (pointsToConstantMemory(Loc)) |
424 | 0 | return MRI_NoModRef; |
425 | 6 | } |
426 | 6 | |
427 | 6 | // Otherwise, a catchret reads and writes. |
428 | 6 | return MRI_ModRef; |
429 | 6 | } |
430 | | |
431 | | ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX, |
432 | 52.1k | const MemoryLocation &Loc) { |
433 | 52.1k | // Acquire/Release cmpxchg has properties that matter for arbitrary addresses. |
434 | 52.1k | if (isStrongerThanMonotonic(CX->getSuccessOrdering())) |
435 | 52.0k | return MRI_ModRef; |
436 | 128 | |
437 | 128 | // If the cmpxchg address does not alias the location, it does not access it. |
438 | 128 | if (128 Loc.Ptr && 128 !alias(MemoryLocation::get(CX), Loc)0 ) |
439 | 0 | return MRI_NoModRef; |
440 | 128 | |
441 | 128 | return MRI_ModRef; |
442 | 128 | } |
443 | | |
444 | | ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW, |
445 | 117k | const MemoryLocation &Loc) { |
446 | 117k | // Acquire/Release atomicrmw has properties that matter for arbitrary addresses. |
447 | 117k | if (isStrongerThanMonotonic(RMW->getOrdering())) |
448 | 116k | return MRI_ModRef; |
449 | 522 | |
450 | 522 | // If the atomicrmw address does not alias the location, it does not access it. |
451 | 522 | if (522 Loc.Ptr && 522 !alias(MemoryLocation::get(RMW), Loc)2 ) |
452 | 2 | return MRI_NoModRef; |
453 | 520 | |
454 | 520 | return MRI_ModRef; |
455 | 520 | } |
456 | | |
457 | | /// \brief Return information about whether a particular call site modifies |
458 | | /// or reads the specified memory location \p MemLoc before instruction \p I |
459 | | /// in a BasicBlock. A ordered basic block \p OBB can be used to speed up |
460 | | /// instruction-ordering queries inside the BasicBlock containing \p I. |
461 | | /// FIXME: this is really just shoring-up a deficiency in alias analysis. |
462 | | /// BasicAA isn't willing to spend linear time determining whether an alloca |
463 | | /// was captured before or after this particular call, while we are. However, |
464 | | /// with a smarter AA in place, this test is just wasting compile time. |
465 | | ModRefInfo AAResults::callCapturesBefore(const Instruction *I, |
466 | | const MemoryLocation &MemLoc, |
467 | | DominatorTree *DT, |
468 | 1.92M | OrderedBasicBlock *OBB) { |
469 | 1.92M | if (!DT) |
470 | 0 | return MRI_ModRef; |
471 | 1.92M | |
472 | 1.92M | const Value *Object = |
473 | 1.92M | GetUnderlyingObject(MemLoc.Ptr, I->getModule()->getDataLayout()); |
474 | 1.92M | if (!isIdentifiedObject(Object) || 1.92M isa<GlobalValue>(Object)560k || |
475 | 137k | isa<Constant>(Object)) |
476 | 1.78M | return MRI_ModRef; |
477 | 137k | |
478 | 137k | ImmutableCallSite CS(I); |
479 | 137k | if (!CS.getInstruction() || 137k CS.getInstruction() == Object137k ) |
480 | 73 | return MRI_ModRef; |
481 | 137k | |
482 | 137k | if (137k PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true, |
483 | 137k | /* StoreCaptures */ true, I, DT, |
484 | 137k | /* include Object */ true, |
485 | 137k | /* OrderedBasicBlock */ OBB)) |
486 | 121k | return MRI_ModRef; |
487 | 16.0k | |
488 | 16.0k | unsigned ArgNo = 0; |
489 | 16.0k | ModRefInfo R = MRI_NoModRef; |
490 | 16.0k | for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end(); |
491 | 32.1k | CI != CE32.1k ; ++CI, ++ArgNo16.0k ) { |
492 | 30.3k | // Only look at the no-capture or byval pointer arguments. If this |
493 | 30.3k | // pointer were passed to arguments that were neither of these, then it |
494 | 30.3k | // couldn't be no-capture. |
495 | 30.3k | if (!(*CI)->getType()->isPointerTy() || |
496 | 21.6k | (!CS.doesNotCapture(ArgNo) && |
497 | 21.6k | ArgNo < CS.getNumArgOperands()4.01k && !CS.isByValArgument(ArgNo)4.01k )) |
498 | 12.7k | continue; |
499 | 17.6k | |
500 | 17.6k | // If this is a no-capture pointer argument, see if we can tell that it |
501 | 17.6k | // is impossible to alias the pointer we're checking. If not, we have to |
502 | 17.6k | // assume that the call could touch the pointer, even though it doesn't |
503 | 17.6k | // escape. |
504 | 17.6k | if (17.6k isNoAlias(MemoryLocation(*CI), MemoryLocation(Object))17.6k ) |
505 | 3.17k | continue; |
506 | 14.4k | if (14.4k CS.doesNotAccessMemory(ArgNo)14.4k ) |
507 | 0 | continue; |
508 | 14.4k | if (14.4k CS.onlyReadsMemory(ArgNo)14.4k ) { |
509 | 77 | R = MRI_Ref; |
510 | 77 | continue; |
511 | 77 | } |
512 | 14.3k | return MRI_ModRef; |
513 | 14.3k | } |
514 | 1.71k | return R; |
515 | 1.92M | } |
516 | | |
517 | | /// canBasicBlockModify - Return true if it is possible for execution of the |
518 | | /// specified basic block to modify the location Loc. |
519 | | /// |
520 | | bool AAResults::canBasicBlockModify(const BasicBlock &BB, |
521 | 949 | const MemoryLocation &Loc) { |
522 | 949 | return canInstructionRangeModRef(BB.front(), BB.back(), Loc, MRI_Mod); |
523 | 949 | } |
524 | | |
525 | | /// canInstructionRangeModRef - Return true if it is possible for the |
526 | | /// execution of the specified instructions to mod\ref (according to the |
527 | | /// mode) the location Loc. The instructions to consider are all |
528 | | /// of the instructions in the range of [I1,I2] INCLUSIVE. |
529 | | /// I1 and I2 must be in the same basic block. |
530 | | bool AAResults::canInstructionRangeModRef(const Instruction &I1, |
531 | | const Instruction &I2, |
532 | | const MemoryLocation &Loc, |
533 | 8.49k | const ModRefInfo Mode) { |
534 | 8.49k | assert(I1.getParent() == I2.getParent() && |
535 | 8.49k | "Instructions not in same basic block!"); |
536 | 8.49k | BasicBlock::const_iterator I = I1.getIterator(); |
537 | 8.49k | BasicBlock::const_iterator E = I2.getIterator(); |
538 | 8.49k | ++E; // Convert from inclusive to exclusive range. |
539 | 8.49k | |
540 | 35.4k | for (; I != E35.4k ; ++I27.0k ) // Check every instruction in range |
541 | 28.9k | if (28.9k getModRefInfo(&*I, Loc) & Mode28.9k ) |
542 | 1.90k | return true; |
543 | 6.59k | return false; |
544 | 8.49k | } |
545 | | |
546 | | // Provide a definition for the root virtual destructor. |
547 | 44.3M | AAResults::Concept::~Concept() = default; |
548 | | |
549 | | // Provide a definition for the static object used to identify passes. |
550 | | AnalysisKey AAManager::Key; |
551 | | |
552 | | namespace { |
553 | | |
554 | | /// A wrapper pass for external alias analyses. This just squirrels away the |
555 | | /// callback used to run any analyses and register their results. |
556 | | struct ExternalAAWrapperPass : ImmutablePass { |
557 | | using CallbackT = std::function<void(Pass &, Function &, AAResults &)>; |
558 | | |
559 | | CallbackT CB; |
560 | | |
561 | | static char ID; |
562 | | |
563 | 0 | ExternalAAWrapperPass() : ImmutablePass(ID) { |
564 | 0 | initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry()); |
565 | 0 | } |
566 | | |
567 | | explicit ExternalAAWrapperPass(CallbackT CB) |
568 | 1.78k | : ImmutablePass(ID), CB(std::move(CB)) { |
569 | 1.78k | initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry()); |
570 | 1.78k | } |
571 | | |
572 | 1.77k | void getAnalysisUsage(AnalysisUsage &AU) const override { |
573 | 1.77k | AU.setPreservesAll(); |
574 | 1.77k | } |
575 | | }; |
576 | | |
577 | | } // end anonymous namespace |
578 | | |
579 | | char ExternalAAWrapperPass::ID = 0; |
580 | | |
581 | | INITIALIZE_PASS(ExternalAAWrapperPass, "external-aa", "External Alias Analysis", |
582 | | false, true) |
583 | | |
584 | | ImmutablePass * |
585 | 1.78k | llvm::createExternalAAWrapperPass(ExternalAAWrapperPass::CallbackT Callback) { |
586 | 1.78k | return new ExternalAAWrapperPass(std::move(Callback)); |
587 | 1.78k | } |
588 | | |
589 | 389k | AAResultsWrapperPass::AAResultsWrapperPass() : FunctionPass(ID) { |
590 | 389k | initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry()); |
591 | 389k | } |
592 | | |
593 | | char AAResultsWrapperPass::ID = 0; |
594 | | |
595 | 90.2k | INITIALIZE_PASS_BEGIN90.2k (AAResultsWrapperPass, "aa",
|
596 | 90.2k | "Function Alias Analysis Results", false, true) |
597 | 90.2k | INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) |
598 | 90.2k | INITIALIZE_PASS_DEPENDENCY(CFLAndersAAWrapperPass) |
599 | 90.2k | INITIALIZE_PASS_DEPENDENCY(CFLSteensAAWrapperPass) |
600 | 90.2k | INITIALIZE_PASS_DEPENDENCY(ExternalAAWrapperPass) |
601 | 90.2k | INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) |
602 | 90.2k | INITIALIZE_PASS_DEPENDENCY(ObjCARCAAWrapperPass) |
603 | 90.2k | INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass) |
604 | 90.2k | INITIALIZE_PASS_DEPENDENCY(ScopedNoAliasAAWrapperPass) |
605 | 90.2k | INITIALIZE_PASS_DEPENDENCY(TypeBasedAAWrapperPass) |
606 | 90.2k | INITIALIZE_PASS_END(AAResultsWrapperPass, "aa", |
607 | | "Function Alias Analysis Results", false, true) |
608 | | |
609 | 0 | FunctionPass *llvm::createAAResultsWrapperPass() { |
610 | 0 | return new AAResultsWrapperPass(); |
611 | 0 | } |
612 | | |
613 | | /// Run the wrapper pass to rebuild an aggregation over known AA passes. |
614 | | /// |
615 | | /// This is the legacy pass manager's interface to the new-style AA results |
616 | | /// aggregation object. Because this is somewhat shoe-horned into the legacy |
617 | | /// pass manager, we hard code all the specific alias analyses available into |
618 | | /// it. While the particular set enabled is configured via commandline flags, |
619 | | /// adding a new alias analysis to LLVM will require adding support for it to |
620 | | /// this list. |
621 | 10.6M | bool AAResultsWrapperPass::runOnFunction(Function &F) { |
622 | 10.6M | // NB! This *must* be reset before adding new AA results to the new |
623 | 10.6M | // AAResults object because in the legacy pass manager, each instance |
624 | 10.6M | // of these will refer to the *same* immutable analyses, registering and |
625 | 10.6M | // unregistering themselves with them. We need to carefully tear down the |
626 | 10.6M | // previous object first, in this case replacing it with an empty one, before |
627 | 10.6M | // registering new results. |
628 | 10.6M | AAR.reset( |
629 | 10.6M | new AAResults(getAnalysis<TargetLibraryInfoWrapperPass>().getTLI())); |
630 | 10.6M | |
631 | 10.6M | // BasicAA is always available for function analyses. Also, we add it first |
632 | 10.6M | // so that it can trump TBAA results when it proves MustAlias. |
633 | 10.6M | // FIXME: TBAA should have an explicit mode to support this and then we |
634 | 10.6M | // should reconsider the ordering here. |
635 | 10.6M | if (!DisableBasicAA) |
636 | 10.6M | AAR->addAAResult(getAnalysis<BasicAAWrapperPass>().getResult()); |
637 | 10.6M | |
638 | 10.6M | // Populate the results with the currently available AAs. |
639 | 10.6M | if (auto *WrapperPass = getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>()) |
640 | 10.5M | AAR->addAAResult(WrapperPass->getResult()); |
641 | 10.6M | if (auto *WrapperPass = getAnalysisIfAvailable<TypeBasedAAWrapperPass>()) |
642 | 10.5M | AAR->addAAResult(WrapperPass->getResult()); |
643 | 10.6M | if (auto *WrapperPass = |
644 | 10.6M | getAnalysisIfAvailable<objcarc::ObjCARCAAWrapperPass>()) |
645 | 626 | AAR->addAAResult(WrapperPass->getResult()); |
646 | 10.6M | if (auto *WrapperPass = getAnalysisIfAvailable<GlobalsAAWrapperPass>()) |
647 | 8.49M | AAR->addAAResult(WrapperPass->getResult()); |
648 | 10.6M | if (auto *WrapperPass = getAnalysisIfAvailable<SCEVAAWrapperPass>()) |
649 | 7 | AAR->addAAResult(WrapperPass->getResult()); |
650 | 10.6M | if (auto *WrapperPass = getAnalysisIfAvailable<CFLAndersAAWrapperPass>()) |
651 | 42 | AAR->addAAResult(WrapperPass->getResult()); |
652 | 10.6M | if (auto *WrapperPass = getAnalysisIfAvailable<CFLSteensAAWrapperPass>()) |
653 | 63 | AAR->addAAResult(WrapperPass->getResult()); |
654 | 10.6M | |
655 | 10.6M | // If available, run an external AA providing callback over the results as |
656 | 10.6M | // well. |
657 | 10.6M | if (auto *WrapperPass = getAnalysisIfAvailable<ExternalAAWrapperPass>()) |
658 | 91.1k | if (91.1k WrapperPass->CB91.1k ) |
659 | 91.1k | WrapperPass->CB(*this, F, *AAR); |
660 | 10.6M | |
661 | 10.6M | // Analyses don't mutate the IR, so return false. |
662 | 10.6M | return false; |
663 | 10.6M | } |
664 | | |
/// Declare the analyses this pass depends on for the legacy pass manager.
///
/// BasicAA and TargetLibraryInfo are hard requirements; every other alias
/// analysis is only marked "used if available" so that runOnFunction can
/// probe for it with getAnalysisIfAvailable without forcing it to run.
void AAResultsWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  // This pass only aggregates other analyses; it mutates nothing.
  AU.setPreservesAll();
  AU.addRequired<BasicAAWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();

  // We also need to mark all the alias analysis passes we will potentially
  // probe in runOnFunction as used here to ensure the legacy pass manager
  // preserves them. This hard coding of lists of alias analyses is specific to
  // the legacy pass manager.
  AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>();
  AU.addUsedIfAvailable<TypeBasedAAWrapperPass>();
  AU.addUsedIfAvailable<objcarc::ObjCARCAAWrapperPass>();
  AU.addUsedIfAvailable<GlobalsAAWrapperPass>();
  AU.addUsedIfAvailable<SCEVAAWrapperPass>();
  AU.addUsedIfAvailable<CFLAndersAAWrapperPass>();
  AU.addUsedIfAvailable<CFLSteensAAWrapperPass>();
}
682 | | |
/// Build an AAResults aggregation for a legacy-PM pass that has already run
/// its own BasicAAResult.
///
/// \param P   The pass whose analysis dependencies are probed; it must have
///            declared the wrapper passes below via getAAResultsAnalysisUsage.
/// \param F   The function being analyzed (unused directly here; the wrapper
///            passes were run on it). NOTE(review): F is accepted to mirror
///            the wrapper-pass callback signature — confirm before removing.
/// \param BAR The caller-owned BasicAAResult to include (unless BasicAA is
///            globally disabled).
/// \returns   An AAResults value; it holds references to BAR and to the
///            wrapper passes' results, so it must not outlive them.
///
/// The registration order below fixes the priority in which the aggregated
/// analyses are consulted — do not reorder casually.
AAResults llvm::createLegacyPMAAResults(Pass &P, Function &F,
                                        BasicAAResult &BAR) {
  AAResults AAR(P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI());

  // Add in our explicitly constructed BasicAA results.
  if (!DisableBasicAA)
    AAR.addAAResult(BAR);

  // Populate the results with the other currently available AAs.
  if (auto *WrapperPass =
          P.getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<TypeBasedAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass =
          P.getAnalysisIfAvailable<objcarc::ObjCARCAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<GlobalsAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<CFLAndersAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<CFLSteensAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());

  return AAR;
}
709 | | |
710 | 277M | bool llvm::isNoAliasCall(const Value *V) { |
711 | 277M | if (auto CS = ImmutableCallSite(V)) |
712 | 18.7M | return CS.hasRetAttr(Attribute::NoAlias); |
713 | 259M | return false; |
714 | 259M | } |
715 | | |
716 | 18.4M | bool llvm::isNoAliasArgument(const Value *V) { |
717 | 18.4M | if (const Argument *A = dyn_cast<Argument>(V)) |
718 | 6.64M | return A->hasNoAliasAttr(); |
719 | 11.8M | return false; |
720 | 11.8M | } |
721 | | |
722 | 223M | bool llvm::isIdentifiedObject(const Value *V) { |
723 | 223M | if (isa<AllocaInst>(V)) |
724 | 26.2M | return true; |
725 | 197M | if (197M isa<GlobalValue>(V) && 197M !isa<GlobalAlias>(V)36.2M ) |
726 | 36.2M | return true; |
727 | 161M | if (161M isNoAliasCall(V)161M ) |
728 | 3.16M | return true; |
729 | 158M | if (const Argument *158M A158M = dyn_cast<Argument>(V)) |
730 | 40.1M | return A->hasNoAliasAttr() || 40.1M A->hasByValAttr()35.5M ; |
731 | 118M | return false; |
732 | 118M | } |
733 | | |
734 | 20.9M | bool llvm::isIdentifiedFunctionLocal(const Value *V) { |
735 | 20.9M | return isa<AllocaInst>(V) || isNoAliasCall(V)18.9M || isNoAliasArgument(V)18.4M ; |
736 | 20.9M | } |
737 | | |
/// Register the analysis dependencies a pass needs before it may call
/// createLegacyPMAAResults.
///
/// TargetLibraryInfo is required; every alias analysis below is only marked
/// "used if available" so the pass manager preserves it without forcing it
/// to run.
void llvm::getAAResultsAnalysisUsage(AnalysisUsage &AU) {
  // This function needs to be in sync with llvm::createLegacyPMAAResults -- if
  // more alias analyses are added to llvm::createLegacyPMAAResults, they need
  // to be added here also.
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>();
  AU.addUsedIfAvailable<TypeBasedAAWrapperPass>();
  AU.addUsedIfAvailable<objcarc::ObjCARCAAWrapperPass>();
  AU.addUsedIfAvailable<GlobalsAAWrapperPass>();
  AU.addUsedIfAvailable<CFLAndersAAWrapperPass>();
  AU.addUsedIfAvailable<CFLSteensAAWrapperPass>();
}