/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/include/llvm/Analysis/MemorySSA.h
Line | Count | Source |
1 | | //===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===// |
2 | | // |
3 | | // The LLVM Compiler Infrastructure |
4 | | // |
5 | | // This file is distributed under the University of Illinois Open Source |
6 | | // License. See LICENSE.TXT for details. |
7 | | // |
8 | | //===----------------------------------------------------------------------===// |
9 | | // |
10 | | /// \file |
11 | | /// \brief This file exposes an interface to building/using memory SSA to |
12 | | /// walk memory instructions using a use/def graph. |
13 | | /// |
14 | | /// Memory SSA class builds an SSA form that links together memory access |
15 | | /// instructions such as loads, stores, atomics, and calls. Additionally, it |
16 | | /// does a trivial form of "heap versioning": every time the memory state changes |
17 | | /// in the program, we generate a new heap version. It generates |
18 | | /// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions. |
19 | | /// |
20 | | /// As a trivial example, |
21 | | /// define i32 @main() #0 { |
22 | | /// entry: |
23 | | /// %call = call noalias i8* @_Znwm(i64 4) #2 |
24 | | /// %0 = bitcast i8* %call to i32* |
25 | | /// %call1 = call noalias i8* @_Znwm(i64 4) #2 |
26 | | /// %1 = bitcast i8* %call1 to i32* |
27 | | /// store i32 5, i32* %0, align 4 |
28 | | /// store i32 7, i32* %1, align 4 |
29 | | /// %2 = load i32* %0, align 4 |
30 | | /// %3 = load i32* %1, align 4 |
31 | | /// %add = add nsw i32 %2, %3 |
32 | | /// ret i32 %add |
33 | | /// } |
34 | | /// |
35 | | /// Will become |
36 | | /// define i32 @main() #0 { |
37 | | /// entry: |
38 | | /// ; 1 = MemoryDef(0) |
39 | | /// %call = call noalias i8* @_Znwm(i64 4) #3 |
40 | | /// %2 = bitcast i8* %call to i32* |
41 | | /// ; 2 = MemoryDef(1) |
42 | | /// %call1 = call noalias i8* @_Znwm(i64 4) #3 |
43 | | /// %4 = bitcast i8* %call1 to i32* |
44 | | /// ; 3 = MemoryDef(2) |
45 | | /// store i32 5, i32* %2, align 4 |
46 | | /// ; 4 = MemoryDef(3) |
47 | | /// store i32 7, i32* %4, align 4 |
48 | | /// ; MemoryUse(3) |
49 | | /// %7 = load i32* %2, align 4 |
50 | | /// ; MemoryUse(4) |
51 | | /// %8 = load i32* %4, align 4 |
52 | | /// %add = add nsw i32 %7, %8 |
53 | | /// ret i32 %add |
54 | | /// } |
55 | | /// |
56 | | /// Given this form, all the stores that could ever affect the load at %8 can be |
57 | | /// gotten by using the MemoryUse associated with it, and walking from use to |
58 | | /// def until you hit the top of the function. |
59 | | /// |
60 | | /// Each def also has a list of users associated with it, so you can walk from |
61 | | /// both def to users, and users to defs. Note that we disambiguate MemoryUses, |
62 | | /// but not the RHS of MemoryDefs. You can see this above at %7, which would |
63 | | /// otherwise be a MemoryUse(4). Being disambiguated means that for a given |
64 | | /// store, all the MemoryUses on its use lists are may-aliases of that store |
65 | | /// (but the MemoryDefs on its use list may not be). |
66 | | /// |
67 | | /// MemoryDefs are not disambiguated because it would require multiple reaching |
68 | | /// definitions, which would require multiple phis, and multiple memoryaccesses |
69 | | /// per instruction. |
70 | | // |
71 | | //===----------------------------------------------------------------------===// |
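A minimal sketch of the use-to-def walk described above, assuming MemorySSA has already been built for the function; the helper name and the strictly linear walk (which stops at the first MemoryPhi instead of recursing into its operands) are illustrative assumptions, not part of this header:

  #include "llvm/Analysis/MemorySSA.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // Collect the MemoryDefs reached by walking straight up from LI's MemoryUse
  // until liveOnEntry or a MemoryPhi is hit.
  static void collectReachingDefs(MemorySSA &MSSA, LoadInst &LI,
                                  SmallVectorImpl<MemoryDef *> &Defs) {
    MemoryAccess *MA = MSSA.getMemoryAccess(&LI)->getDefiningAccess();
    while (!MSSA.isLiveOnEntryDef(MA)) {
      auto *MD = dyn_cast<MemoryDef>(MA);
      if (!MD)
        break; // A MemoryPhi: the chain branches here, so stop the linear walk.
      Defs.push_back(MD);
      MA = MD->getDefiningAccess();
    }
  }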
72 | | |
73 | | #ifndef LLVM_ANALYSIS_MEMORYSSA_H |
74 | | #define LLVM_ANALYSIS_MEMORYSSA_H |
75 | | |
76 | | #include "llvm/ADT/DenseMap.h" |
77 | | #include "llvm/ADT/GraphTraits.h" |
78 | | #include "llvm/ADT/SmallPtrSet.h" |
79 | | #include "llvm/ADT/SmallVector.h" |
80 | | #include "llvm/ADT/ilist.h" |
81 | | #include "llvm/ADT/ilist_node.h" |
82 | | #include "llvm/ADT/iterator.h" |
83 | | #include "llvm/ADT/iterator_range.h" |
84 | | #include "llvm/ADT/simple_ilist.h" |
85 | | #include "llvm/Analysis/AliasAnalysis.h" |
86 | | #include "llvm/Analysis/MemoryLocation.h" |
87 | | #include "llvm/Analysis/PHITransAddr.h" |
88 | | #include "llvm/IR/BasicBlock.h" |
89 | | #include "llvm/IR/DerivedUser.h" |
90 | | #include "llvm/IR/Dominators.h" |
91 | | #include "llvm/IR/Module.h" |
92 | | #include "llvm/IR/Type.h" |
93 | | #include "llvm/IR/Use.h" |
94 | | #include "llvm/IR/User.h" |
95 | | #include "llvm/IR/Value.h" |
96 | | #include "llvm/Pass.h" |
97 | | #include "llvm/Support/Casting.h" |
98 | | #include <algorithm> |
99 | | #include <cassert> |
100 | | #include <cstddef> |
101 | | #include <iterator> |
102 | | #include <memory> |
103 | | #include <utility> |
104 | | |
105 | | namespace llvm { |
106 | | |
107 | | class Function; |
108 | | class Instruction; |
109 | | class MemoryAccess; |
110 | | class MemorySSAWalker; |
111 | | class LLVMContext; |
112 | | class raw_ostream; |
113 | | |
114 | | namespace MSSAHelpers { |
115 | | |
116 | | struct AllAccessTag {}; |
117 | | struct DefsOnlyTag {}; |
118 | | |
119 | | } // end namespace MSSAHelpers |
120 | | |
121 | | enum { |
122 | | // Used to signify what the default invalid ID is for MemoryAccess's |
123 | | // getID() |
124 | | INVALID_MEMORYACCESS_ID = 0 |
125 | | }; |
126 | | |
127 | | template <class T> class memoryaccess_def_iterator_base; |
128 | | using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>; |
129 | | using const_memoryaccess_def_iterator = |
130 | | memoryaccess_def_iterator_base<const MemoryAccess>; |
131 | | |
132 | | // \brief The base for all memory accesses. All memory accesses in a block are |
133 | | // linked together using an intrusive list. |
134 | | class MemoryAccess |
135 | | : public DerivedUser, |
136 | | public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>, |
137 | | public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> { |
138 | | public: |
139 | | using AllAccessType = |
140 | | ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>; |
141 | | using DefsOnlyType = |
142 | | ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>; |
143 | | |
144 | | MemoryAccess(const MemoryAccess &) = delete; |
145 | | MemoryAccess &operator=(const MemoryAccess &) = delete; |
146 | | |
147 | | void *operator new(size_t) = delete; |
148 | | |
149 | | // Methods for support type inquiry through isa, cast, and |
150 | | // dyn_cast |
151 | 0 | static bool classof(const Value *V) { |
152 | 0 | unsigned ID = V->getValueID(); |
153 | 0 | return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal; |
154 | 0 | } |
155 | | |
156 | 24.5M | BasicBlock *getBlock() const { return Block; } |
157 | | |
158 | | void print(raw_ostream &OS) const; |
159 | | void dump() const; |
160 | | |
161 | | /// \brief The user iterators for a memory access |
162 | | using iterator = user_iterator; |
163 | | using const_iterator = const_user_iterator; |
164 | | |
165 | | /// \brief This iterator walks over all of the defs in a given |
166 | | /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For |
167 | | /// MemoryUse/MemoryDef, this walks the defining access. |
168 | | memoryaccess_def_iterator defs_begin(); |
169 | | const_memoryaccess_def_iterator defs_begin() const; |
170 | | memoryaccess_def_iterator defs_end(); |
171 | | const_memoryaccess_def_iterator defs_end() const; |
172 | | |
173 | | /// \brief Get the iterators for the all access list and the defs only list |
174 | | /// We default to the all access list. |
175 | 6 | AllAccessType::self_iterator getIterator() { |
176 | 6 | return this->AllAccessType::getIterator(); |
177 | 6 | } |
178 | 0 | AllAccessType::const_self_iterator getIterator() const { |
179 | 0 | return this->AllAccessType::getIterator(); |
180 | 0 | } |
181 | 19 | AllAccessType::reverse_self_iterator getReverseIterator() { |
182 | 19 | return this->AllAccessType::getReverseIterator(); |
183 | 19 | } |
184 | 0 | AllAccessType::const_reverse_self_iterator getReverseIterator() const { |
185 | 0 | return this->AllAccessType::getReverseIterator(); |
186 | 0 | } |
187 | 16 | DefsOnlyType::self_iterator getDefsIterator() { |
188 | 16 | return this->DefsOnlyType::getIterator(); |
189 | 16 | } |
190 | 0 | DefsOnlyType::const_self_iterator getDefsIterator() const { |
191 | 0 | return this->DefsOnlyType::getIterator(); |
192 | 0 | } |
193 | 31 | DefsOnlyType::reverse_self_iterator getReverseDefsIterator() { |
194 | 31 | return this->DefsOnlyType::getReverseIterator(); |
195 | 31 | } |
196 | 0 | DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const { |
197 | 0 | return this->DefsOnlyType::getReverseIterator(); |
198 | 0 | } |
199 | | |
200 | | protected: |
201 | | friend class MemoryDef; |
202 | | friend class MemoryPhi; |
203 | | friend class MemorySSA; |
204 | | friend class MemoryUse; |
205 | | friend class MemoryUseOrDef; |
206 | | |
207 | | /// \brief Used by MemorySSA to change the block of a MemoryAccess when it is |
208 | | /// moved. |
209 | 56 | void setBlock(BasicBlock *BB) { Block = BB; } |
210 | | |
211 | | /// \brief Used for debugging and tracking things about MemoryAccesses. |
212 | | /// Guaranteed unique among MemoryAccesses, no guarantees otherwise. |
213 | | inline unsigned getID() const; |
214 | | |
215 | | MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue, |
216 | | BasicBlock *BB, unsigned NumOperands) |
217 | | : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue), |
218 | 7.85M | Block(BB) {} |
219 | | |
220 | | private: |
221 | | BasicBlock *Block; |
222 | | }; |
223 | | |
224 | 439 | inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) { |
225 | 439 | MA.print(OS); |
226 | 439 | return OS; |
227 | 439 | } |
228 | | |
229 | | /// \brief Class that has the common methods + fields of memory uses/defs. It's |
230 | | /// a little awkward to have, but there are many cases where we want either a |
231 | | /// use or def, and there are many cases where uses are needed (defs aren't |
232 | | /// acceptable), and vice-versa. |
233 | | /// |
234 | | /// This class should never be instantiated directly; make a MemoryUse or |
235 | | /// MemoryDef instead. |
236 | | class MemoryUseOrDef : public MemoryAccess { |
237 | | public: |
238 | | void *operator new(size_t) = delete; |
239 | | |
240 | | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess); |
241 | | |
242 | | /// \brief Get the instruction that this MemoryUse represents. |
243 | 13.8M | Instruction *getMemoryInst() const { return MemoryInst; } |
244 | | |
245 | | /// \brief Get the access that produces the memory state used by this Use. |
246 | 12.2M | MemoryAccess *getDefiningAccess() const { return getOperand(0); } |
247 | | |
248 | 12.8M | static bool classof(const Value *MA) { |
249 | 9.15M | return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal; |
250 | 12.8M | } |
251 | | |
252 | | // Sadly, these have to be public because they are needed in some of the |
253 | | // iterators. |
254 | | inline bool isOptimized() const; |
255 | | inline MemoryAccess *getOptimized() const; |
256 | | inline void setOptimized(MemoryAccess *); |
257 | | |
258 | | /// \brief Reset the ID of what this MemoryUse was optimized to, causing it to |
259 | | /// be rewalked by the walker if necessary. |
260 | | /// This really should only be called by tests. |
261 | | inline void resetOptimized(); |
262 | | |
263 | | protected: |
264 | | friend class MemorySSA; |
265 | | friend class MemorySSAUpdater; |
266 | | |
267 | | MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty, |
268 | | DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB) |
269 | 6.89M | : MemoryAccess(C, Vty, DeleteValue, BB, 1), MemoryInst(MI) { |
270 | 6.89M | setDefiningAccess(DMA); |
271 | 6.89M | } |
272 | | |
273 | 15.6M | void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false) { |
274 | 15.6M | if (!Optimized) { |
275 | 13.3M | setOperand(0, DMA); |
276 | 13.3M | return; |
277 | 13.3M | } |
278 | 2.24M | setOptimized(DMA); |
279 | 2.24M | } |
280 | | |
281 | | private: |
282 | | Instruction *MemoryInst; |
283 | | }; |
284 | | |
285 | | template <> |
286 | | struct OperandTraits<MemoryUseOrDef> |
287 | | : public FixedNumOperandTraits<MemoryUseOrDef, 1> {}; |
288 | | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess) |
289 | | |
290 | | /// \brief Represents read-only accesses to memory |
291 | | /// |
292 | | /// In particular, the set of Instructions that will be represented by |
293 | | /// MemoryUse's is exactly the set of Instructions for which |
294 | | /// AliasAnalysis::getModRefInfo returns "Ref". |
295 | | class MemoryUse final : public MemoryUseOrDef { |
296 | | public: |
297 | | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess); |
298 | | |
299 | | MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB) |
300 | 2.25M | : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB) {} |
301 | | |
302 | | // allocate space for exactly one operand |
303 | 2.25M | void *operator new(size_t s) { return User::operator new(s, 1); } |
304 | | |
305 | 8.53M | static bool classof(const Value *MA) { |
306 | 8.53M | return MA->getValueID() == MemoryUseVal; |
307 | 8.53M | } |
308 | | |
309 | | void print(raw_ostream &OS) const; |
310 | | |
311 | 3.14M | void setOptimized(MemoryAccess *DMA) { |
312 | 3.14M | OptimizedID = DMA->getID(); |
313 | 3.14M | setOperand(0, DMA); |
314 | 3.14M | } |
315 | | |
316 | 1.26M | bool isOptimized() const { |
317 | 1.26M | return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID(); |
318 | 1.26M | } |
319 | | |
320 | 365k | MemoryAccess *getOptimized() const { |
321 | 365k | return getDefiningAccess(); |
322 | 365k | } |
323 | | |
324 | 73 | void resetOptimized() { |
325 | 73 | OptimizedID = INVALID_MEMORYACCESS_ID; |
326 | 73 | } |
327 | | |
328 | | protected: |
329 | | friend class MemorySSA; |
330 | | |
331 | | private: |
332 | | static void deleteMe(DerivedUser *Self); |
333 | | |
334 | | unsigned int OptimizedID = 0; |
335 | | }; |
336 | | |
337 | | template <> |
338 | | struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {}; |
339 | | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess) |
340 | | |
341 | | /// \brief Represents a read-write access to memory, whether it is a must-alias, |
342 | | /// or a may-alias. |
343 | | /// |
344 | | /// In particular, the set of Instructions that will be represented by |
345 | | /// MemoryDef's is exactly the set of Instructions for which |
346 | | /// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef". |
347 | | /// Note that, in order to provide def-def chains, all defs also have a use |
348 | | /// associated with them. This use points to the nearest reaching |
349 | | /// MemoryDef/MemoryPhi. |
350 | | class MemoryDef final : public MemoryUseOrDef { |
351 | | public: |
352 | | friend class MemorySSA; |
353 | | |
354 | | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess); |
355 | | |
356 | | MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB, |
357 | | unsigned Ver) |
358 | 4.64M | : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB), ID(Ver) {} |
359 | | |
360 | | // allocate space for exactly one operand |
361 | 4.64M | void *operator new(size_t s) { return User::operator new(s, 1); } |
362 | | |
363 | 26.9M | static bool classof(const Value *MA) { |
364 | 26.9M | return MA->getValueID() == MemoryDefVal; |
365 | 26.9M | } |
366 | | |
367 | 11.6k | void setOptimized(MemoryAccess *MA) { |
368 | 11.6k | Optimized = MA; |
369 | 11.6k | OptimizedID = getDefiningAccess()->getID(); |
370 | 11.6k | } |
371 | | |
372 | 11.6k | MemoryAccess *getOptimized() const { return Optimized; } |
373 | | |
374 | 11.6k | bool isOptimized() const { |
375 | 0 | return getOptimized() && getDefiningAccess() && |
376 | 0 | OptimizedID == getDefiningAccess()->getID(); |
377 | 11.6k | } |
378 | | |
379 | 23.8k | void resetOptimized() { |
380 | 23.8k | OptimizedID = INVALID_MEMORYACCESS_ID; |
381 | 23.8k | } |
382 | | |
383 | | void print(raw_ostream &OS) const; |
384 | | |
385 | 1.93M | unsigned getID() const { return ID; } |
386 | | |
387 | | private: |
388 | | static void deleteMe(DerivedUser *Self); |
389 | | |
390 | | const unsigned ID; |
391 | | MemoryAccess *Optimized = nullptr; |
392 | | unsigned int OptimizedID = INVALID_MEMORYACCESS_ID; |
393 | | }; |
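A small sketch of how the Ref/Mod classification described for MemoryUse and MemoryDef shows up in practice; the counting helper is hypothetical and only illustrates the expected mapping:

  #include "llvm/Analysis/MemorySSA.h"
  #include "llvm/IR/BasicBlock.h"
  using namespace llvm;

  // Tally how MemorySSA classified the instructions of a block.
  static void countAccessKinds(MemorySSA &MSSA, BasicBlock &BB,
                               unsigned &NumUses, unsigned &NumDefs) {
    for (Instruction &I : BB) {
      MemoryUseOrDef *MA = MSSA.getMemoryAccess(&I);
      if (!MA)
        continue;     // The instruction neither reads nor writes memory.
      if (isa<MemoryUse>(MA))
        ++NumUses;    // getModRefInfo returned Ref, e.g. a simple load.
      else
        ++NumDefs;    // Mod or ModRef, e.g. a store or a clobbering call.
    }
  }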
394 | | |
395 | | template <> |
396 | | struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 1> {}; |
397 | | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess) |
398 | | |
399 | | /// \brief Represents phi nodes for memory accesses. |
400 | | /// |
401 | | /// These have the same semantics as regular phi nodes, with the exception that |
402 | | /// only one phi will ever exist in a given basic block. |
403 | | /// Guaranteeing one phi per block means guaranteeing there is only ever one |
404 | | /// valid reaching MemoryDef/MemoryPHI along each path to the phi node. |
405 | | /// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or |
406 | | /// a MemoryPhi's operands. |
407 | | /// That is, given |
408 | | /// if (a) { |
409 | | /// store %a |
410 | | /// store %b |
411 | | /// } |
412 | | /// it *must* be transformed into |
413 | | /// if (a) { |
414 | | /// 1 = MemoryDef(liveOnEntry) |
415 | | /// store %a |
416 | | /// 2 = MemoryDef(1) |
417 | | /// store %b |
418 | | /// } |
419 | | /// and *not* |
420 | | /// if (a) { |
421 | | /// 1 = MemoryDef(liveOnEntry) |
422 | | /// store %a |
423 | | /// 2 = MemoryDef(liveOnEntry) |
424 | | /// store %b |
425 | | /// } |
426 | | /// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the |
427 | | /// end of the branch, and if there are not two phi nodes, one will be |
428 | | /// disconnected completely from the SSA graph below that point. |
429 | | /// Because MemoryUse's do not generate new definitions, they do not have this |
430 | | /// issue. |
431 | | class MemoryPhi final : public MemoryAccess { |
432 | | // allocate space for exactly zero operands |
433 | 958k | void *operator new(size_t s) { return User::operator new(s); } |
434 | | |
435 | | public: |
436 | | /// Provide fast operand accessors |
437 | | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess); |
438 | | |
439 | | MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0) |
440 | | : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver), |
441 | 958k | ReservedSpace(NumPreds) { |
442 | 958k | allocHungoffUses(ReservedSpace); |
443 | 958k | } |
444 | | |
445 | | // Block iterator interface. This provides access to the list of incoming |
446 | | // basic blocks, which parallels the list of incoming values. |
447 | | using block_iterator = BasicBlock **; |
448 | | using const_block_iterator = BasicBlock *const *; |
449 | | |
450 | 2.47M | block_iterator block_begin() { |
451 | 2.47M | auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace); |
452 | 2.47M | return reinterpret_cast<block_iterator>(Ref + 1); |
453 | 2.47M | } |
454 | | |
455 | 4.90M | const_block_iterator block_begin() const { |
456 | 4.90M | const auto *Ref = |
457 | 4.90M | reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace); |
458 | 4.90M | return reinterpret_cast<const_block_iterator>(Ref + 1); |
459 | 4.90M | } |
460 | | |
461 | 4 | block_iterator block_end() { return block_begin() + getNumOperands(); } |
462 | | |
463 | 0 | const_block_iterator block_end() const { |
464 | 0 | return block_begin() + getNumOperands(); |
465 | 0 | } |
466 | | |
467 | 0 | iterator_range<block_iterator> blocks() { |
468 | 0 | return make_range(block_begin(), block_end()); |
469 | 0 | } |
470 | | |
471 | 0 | iterator_range<const_block_iterator> blocks() const { |
472 | 0 | return make_range(block_begin(), block_end()); |
473 | 0 | } |
474 | | |
475 | 483 | op_range incoming_values() { return operands(); } |
476 | | |
477 | 0 | const_op_range incoming_values() const { return operands(); } |
478 | | |
479 | | /// \brief Return the number of incoming edges |
480 | 4.92M | unsigned getNumIncomingValues() const { return getNumOperands(); } |
481 | | |
482 | | /// \brief Return incoming value number x |
483 | 4.92M | MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); } |
484 | 2.47M | void setIncomingValue(unsigned I, MemoryAccess *V) { |
485 | 2.47M | assert(V && "PHI node got a null value!"); |
486 | 2.47M | setOperand(I, V); |
487 | 2.47M | } |
488 | | |
489 | 0 | static unsigned getOperandNumForIncomingValue(unsigned I) { return I; } |
490 | 0 | static unsigned getIncomingValueNumForOperand(unsigned I) { return I; } |
491 | | |
492 | | /// \brief Return incoming basic block number @p i. |
493 | 4.90M | BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; } |
494 | | |
495 | | /// \brief Return incoming basic block corresponding |
496 | | /// to an operand of the PHI. |
497 | 544 | BasicBlock *getIncomingBlock(const Use &U) const { |
498 | 544 | assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?"); |
499 | 544 | return getIncomingBlock(unsigned(&U - op_begin())); |
500 | 544 | } |
501 | | |
502 | | /// \brief Return incoming basic block corresponding |
503 | | /// to value use iterator. |
504 | 0 | BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const { |
505 | 0 | return getIncomingBlock(I.getUse()); |
506 | 0 | } |
507 | | |
508 | 2.47M | void setIncomingBlock(unsigned I, BasicBlock *BB) { |
509 | 2.47M | assert(BB && "PHI node got a null basic block!"); |
510 | 2.47M | block_begin()[I] = BB; |
511 | 2.47M | } |
512 | | |
513 | | /// \brief Add an incoming value to the end of the PHI list |
514 | 2.47M | void addIncoming(MemoryAccess *V, BasicBlock *BB) { |
515 | 2.47M | if (getNumOperands() == ReservedSpace) |
516 | 1.32M | growOperands(); // Get more space! |
517 | 2.47M | // Initialize some new operands. |
518 | 2.47M | setNumHungOffUseOperands(getNumOperands() + 1); |
519 | 2.47M | setIncomingValue(getNumOperands() - 1, V); |
520 | 2.47M | setIncomingBlock(getNumOperands() - 1, BB); |
521 | 2.47M | } |
522 | | |
523 | | /// \brief Return the first index of the specified basic |
524 | | /// block in the value list for this PHI. Returns -1 if no instance. |
525 | 4 | int getBasicBlockIndex(const BasicBlock *BB) const { |
526 | 7 | for (unsigned I = 0, E = getNumOperands(); I != E; ++I) |
527 | 7 | if (block_begin()[I] == BB) |
528 | 4 | return I; |
529 | 0 | return -1; |
530 | 4 | } |
531 | | |
532 | 0 | Value *getIncomingValueForBlock(const BasicBlock *BB) const { |
533 | 0 | int Idx = getBasicBlockIndex(BB); |
534 | 0 | assert(Idx >= 0 && "Invalid basic block argument!"); |
535 | 0 | return getIncomingValue(Idx); |
536 | 0 | } |
537 | | |
538 | 24.3M | static bool classof(const Value *V) { |
539 | 24.3M | return V->getValueID() == MemoryPhiVal; |
540 | 24.3M | } |
541 | | |
542 | | void print(raw_ostream &OS) const; |
543 | | |
544 | 2.48M | unsigned getID() const { return ID; } |
545 | | |
546 | | protected: |
547 | | friend class MemorySSA; |
548 | | |
549 | | /// \brief This is more complicated than the generic |
550 | | /// User::allocHungoffUses, because we have to allocate Uses for the incoming |
551 | | /// values and pointers to the incoming blocks, all in one allocation. |
552 | 958k | void allocHungoffUses(unsigned N) { |
553 | 958k | User::allocHungoffUses(N, /* IsPhi */ true); |
554 | 958k | } |
555 | | |
556 | | private: |
557 | | // For debugging only |
558 | | const unsigned ID; |
559 | | unsigned ReservedSpace; |
560 | | |
561 | | /// \brief This grows the operand list in response to a push_back style of |
562 | | /// operation. This grows the number of ops by 1.5 times. |
563 | 1.32M | void growOperands() { |
564 | 1.32M | unsigned E = getNumOperands(); |
565 | 1.32M | // 2 op PHI nodes are VERY common, so reserve at least enough for that. |
566 | 1.32M | ReservedSpace = std::max(E + E / 2, 2u); |
567 | 1.32M | growHungoffUses(ReservedSpace, /* IsPhi */ true); |
568 | 1.32M | } |
569 | | |
570 | | static void deleteMe(DerivedUser *Self); |
571 | | }; |
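A brief sketch of consuming the incoming-value interface above (the dump helper is hypothetical): every block has at most one MemoryPhi, so a null check is required before iterating its (block, value) pairs:

  #include "llvm/Analysis/MemorySSA.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  // Print each predecessor together with the memory state reaching Phi
  // along that edge.
  static void dumpMemoryPhi(MemorySSA &MSSA, const BasicBlock *BB,
                            raw_ostream &OS) {
    MemoryPhi *Phi = MSSA.getMemoryAccess(BB);
    if (!Phi)
      return; // This block has no MemoryPhi.
    for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
      OS << Phi->getIncomingBlock(I)->getName() << " -> "
         << *Phi->getIncomingValue(I) << "\n";
  }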
572 | | |
573 | 4.41M | inline unsigned MemoryAccess::getID() const { |
574 | 4.41M | assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) && |
575 | 4.41M | "only memory defs and phis have ids"); |
576 | 4.41M | if (const auto *MD = dyn_cast<MemoryDef>(this)) |
577 | 1.93M | return MD->getID(); |
578 | 2.48M | return cast<MemoryPhi>(this)->getID(); |
579 | 4.41M | } |
580 | | |
581 | 1.27M | inline bool MemoryUseOrDef::isOptimized() const { |
582 | 1.27M | if (const auto *MD = dyn_cast<MemoryDef>(this)) |
583 | 11.6k | return MD->isOptimized(); |
584 | 1.26M | return cast<MemoryUse>(this)->isOptimized(); |
585 | 1.27M | } |
586 | | |
587 | 365k | inline MemoryAccess *MemoryUseOrDef::getOptimized() const { |
588 | 365k | if (const auto *MD = dyn_cast<MemoryDef>(this)) |
589 | 0 | return MD->getOptimized(); |
590 | 365k | return cast<MemoryUse>(this)->getOptimized(); |
591 | 365k | } |
592 | | |
593 | 3.15M | inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) { |
594 | 3.15M | if (auto *MD = dyn_cast<MemoryDef>(this)) |
595 | 11.6k | MD->setOptimized(MA); |
596 | 3.15M | else |
597 | 3.14M | cast<MemoryUse>(this)->setOptimized(MA); |
598 | 3.15M | } |
599 | | |
600 | 23.9k | inline void MemoryUseOrDef::resetOptimized() { |
601 | 23.9k | if (auto *MD = dyn_cast<MemoryDef>(this)) |
602 | 23.8k | MD->resetOptimized(); |
603 | 23.9k | else |
604 | 72 | cast<MemoryUse>(this)->resetOptimized(); |
605 | 23.9k | } |
606 | | |
607 | | template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {}; |
608 | | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess) |
609 | | |
610 | | /// \brief Encapsulates MemorySSA, including all data associated with memory |
611 | | /// accesses. |
612 | | class MemorySSA { |
613 | | public: |
614 | | MemorySSA(Function &, AliasAnalysis *, DominatorTree *); |
615 | | ~MemorySSA(); |
616 | | |
617 | | MemorySSAWalker *getWalker(); |
618 | | |
619 | | /// \brief Given a memory Mod/Ref'ing instruction, get the MemorySSA |
620 | | /// access associated with it. If passed a basic block, this gets the memory phi |
621 | | /// node that exists for that block, if there is one. Otherwise, this will get |
622 | | /// a MemoryUseOrDef. |
623 | | MemoryUseOrDef *getMemoryAccess(const Instruction *) const; |
624 | | MemoryPhi *getMemoryAccess(const BasicBlock *BB) const; |
625 | | |
626 | | void dump() const; |
627 | | void print(raw_ostream &) const; |
628 | | |
629 | | /// \brief Return true if \p MA represents the live on entry value |
630 | | /// |
631 | | /// Loads and stores from pointer arguments and other global values may be |
632 | | /// defined by memory operations that do not occur in the current function, so |
633 | | /// they may be live on entry to the function. MemorySSA represents such |
634 | | /// memory state by the live on entry definition, which is guaranteed to occur |
635 | | /// before any other memory access in the function. |
636 | 5.24M | inline bool isLiveOnEntryDef(const MemoryAccess *MA) const { |
637 | 5.24M | return MA == LiveOnEntryDef.get(); |
638 | 5.24M | } |
639 | | |
640 | 1.63M | inline MemoryAccess *getLiveOnEntryDef() const { |
641 | 1.63M | return LiveOnEntryDef.get(); |
642 | 1.63M | } |
643 | | |
644 | | // Sadly, an iplist, by default, owns and deletes the pointers added to the |
645 | | // list. It's not currently possible to have two iplists for the same type, |
646 | | // where one owns the pointers, and one does not. This is because the traits |
647 | | // are per-type, not per-tag. If this ever changes, we should make the |
648 | | // DefList an iplist. |
649 | | using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>; |
650 | | using DefsList = |
651 | | simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>; |
652 | | |
653 | | /// \brief Return the list of MemoryAccess's for a given basic block. |
654 | | /// |
655 | | /// This list is not modifiable by the user. |
656 | 62.6k | const AccessList *getBlockAccesses(const BasicBlock *BB) const { |
657 | 62.6k | return getWritableBlockAccesses(BB); |
658 | 62.6k | } |
659 | | |
660 | | /// \brief Return the list of MemoryDef's and MemoryPhi's for a given basic |
661 | | /// block. |
662 | | /// |
663 | | /// This list is not modifiable by the user. |
664 | 2.17M | const DefsList *getBlockDefs(const BasicBlock *BB) const { |
665 | 2.17M | return getWritableBlockDefs(BB); |
666 | 2.17M | } |
667 | | |
668 | | /// \brief Given two memory accesses in the same basic block, determine |
669 | | /// whether MemoryAccess \p A dominates MemoryAccess \p B. |
670 | | bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const; |
671 | | |
672 | | /// \brief Given two memory accesses in potentially different blocks, |
673 | | /// determine whether MemoryAccess \p A dominates MemoryAccess \p B. |
674 | | bool dominates(const MemoryAccess *A, const MemoryAccess *B) const; |
675 | | |
676 | | /// \brief Given a MemoryAccess and a Use, determine whether MemoryAccess \p A |
677 | | /// dominates Use \p B. |
678 | | bool dominates(const MemoryAccess *A, const Use &B) const; |
679 | | |
680 | | /// \brief Verify that MemorySSA is self-consistent (i.e., definitions dominate |
681 | | /// all uses, uses appear in the right places). This is used by unit tests. |
682 | | void verifyMemorySSA() const; |
683 | | |
684 | | /// Used in various insertion functions to specify whether we are talking |
685 | | /// about the beginning or end of a block. |
686 | | enum InsertionPlace { Beginning, End }; |
687 | | |
688 | | protected: |
689 | | // Used by the Memory SSA annotator, dumpers, and wrapper pass |
690 | | friend class MemorySSAAnnotatedWriter; |
691 | | friend class MemorySSAPrinterLegacyPass; |
692 | | friend class MemorySSAUpdater; |
693 | | |
694 | | void verifyDefUses(Function &F) const; |
695 | | void verifyDomination(Function &F) const; |
696 | | void verifyOrdering(Function &F) const; |
697 | | |
698 | | // This is used by the use optimizer and updater. |
699 | 3.79M | AccessList *getWritableBlockAccesses(const BasicBlock *BB) const { |
700 | 3.79M | auto It = PerBlockAccesses.find(BB); |
701 | 3.79M | return It == PerBlockAccesses.end() ? nullptr : It->second.get(); |
702 | 3.79M | } |
703 | | |
704 | | // This is used by the use optimizer and updater. |
705 | 2.17M | DefsList *getWritableBlockDefs(const BasicBlock *BB) const { |
706 | 2.17M | auto It = PerBlockDefs.find(BB); |
707 | 2.17M | return It == PerBlockDefs.end() ? nullptr : It->second.get(); |
708 | 2.17M | } |
709 | | |
710 | | // These are used by the updater to perform various internal MemorySSA |
711 | | // machinations. They do not always leave the IR in a correct state, and |
712 | | // rely on the updater to fix up what they break, so they are not public. |
713 | | |
714 | | void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where); |
715 | | void moveTo(MemoryUseOrDef *What, BasicBlock *BB, InsertionPlace Point); |
716 | | |
717 | | // Rename the dominator tree branch rooted at BB. |
718 | | void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal, |
719 | 1 | SmallPtrSetImpl<BasicBlock *> &Visited) { |
720 | 1 | renamePass(DT->getNode(BB), IncomingVal, Visited, true, true); |
721 | 1 | } |
722 | | |
723 | | void removeFromLookups(MemoryAccess *); |
724 | | void removeFromLists(MemoryAccess *, bool ShouldDelete = true); |
725 | | void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *, |
726 | | InsertionPlace); |
727 | | void insertIntoListsBefore(MemoryAccess *, const BasicBlock *, |
728 | | AccessList::iterator); |
729 | | MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *); |
730 | | |
731 | | private: |
732 | | class CachingWalker; |
733 | | class OptimizeUses; |
734 | | |
735 | | CachingWalker *getWalkerImpl(); |
736 | | void buildMemorySSA(); |
737 | | void optimizeUses(); |
738 | | |
739 | | void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const; |
740 | | |
741 | | using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>; |
742 | | using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>; |
743 | | |
744 | | void |
745 | | determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks); |
746 | | void markUnreachableAsLiveOnEntry(BasicBlock *BB); |
747 | | bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const; |
748 | | MemoryPhi *createMemoryPhi(BasicBlock *BB); |
749 | | MemoryUseOrDef *createNewAccess(Instruction *); |
750 | | MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace); |
751 | | void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &, |
752 | | const DenseMap<const BasicBlock *, unsigned int> &); |
753 | | MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool); |
754 | | void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool); |
755 | | void renamePass(DomTreeNode *, MemoryAccess *IncomingVal, |
756 | | SmallPtrSetImpl<BasicBlock *> &Visited, |
757 | | bool SkipVisited = false, bool RenameAllUses = false); |
758 | | AccessList *getOrCreateAccessList(const BasicBlock *); |
759 | | DefsList *getOrCreateDefsList(const BasicBlock *); |
760 | | void renumberBlock(const BasicBlock *) const; |
761 | | AliasAnalysis *AA; |
762 | | DominatorTree *DT; |
763 | | Function &F; |
764 | | |
765 | | // Memory SSA mappings |
766 | | DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess; |
767 | | |
768 | | // These two mappings contain the main block to access/def mappings for |
769 | | // MemorySSA. The list contained in PerBlockAccesses really owns all the |
770 | | // MemoryAccesses. |
771 | | // Both maps maintain the invariant that if a block is found in them, the |
772 | | // corresponding list is not empty, and if a block is not found in them, the |
773 | | // corresponding list is empty. |
774 | | AccessMap PerBlockAccesses; |
775 | | DefsMap PerBlockDefs; |
776 | | std::unique_ptr<MemoryAccess> LiveOnEntryDef; |
777 | | |
778 | | // Domination mappings |
779 | | // Note that the numbering is local to a block, even though the map is |
780 | | // global. |
781 | | mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid; |
782 | | mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering; |
783 | | |
784 | | // Memory SSA building info |
785 | | std::unique_ptr<CachingWalker> Walker; |
786 | | unsigned NextID; |
787 | | }; |
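As a sketch of traversing the per-block lists above (the helper is illustrative): getBlockAccesses returns null for blocks with no recorded accesses, so the pointer must be checked before iterating:

  #include "llvm/Analysis/MemorySSA.h"
  #include <iterator>
  using namespace llvm;

  // Count the MemoryUse/MemoryDef/MemoryPhi entries MemorySSA keeps for BB.
  static unsigned countBlockAccesses(const MemorySSA &MSSA,
                                     const BasicBlock *BB) {
    unsigned N = 0;
    if (const auto *Accesses = MSSA.getBlockAccesses(BB))
      N = std::distance(Accesses->begin(), Accesses->end());
    return N;
  }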
788 | | |
789 | | // Internal MemorySSA utils, for use by MemorySSA classes and walkers |
790 | | class MemorySSAUtil { |
791 | | protected: |
792 | | friend class GVNHoist; |
793 | | friend class MemorySSAWalker; |
794 | | |
795 | | // This function should not be used by new passes. |
796 | | static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU, |
797 | | AliasAnalysis &AA); |
798 | | }; |
799 | | |
800 | | // This pass does eager building and then printing of MemorySSA. It is used by |
801 | | // the tests to be able to build, dump, and verify Memory SSA. |
802 | | class MemorySSAPrinterLegacyPass : public FunctionPass { |
803 | | public: |
804 | | MemorySSAPrinterLegacyPass(); |
805 | | |
806 | | bool runOnFunction(Function &) override; |
807 | | void getAnalysisUsage(AnalysisUsage &AU) const override; |
808 | | |
809 | | static char ID; |
810 | | }; |
811 | | |
812 | | /// An analysis that produces \c MemorySSA for a function. |
813 | | /// |
814 | | class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> { |
815 | | friend AnalysisInfoMixin<MemorySSAAnalysis>; |
816 | | |
817 | | static AnalysisKey Key; |
818 | | |
819 | | public: |
820 | | // Wrap MemorySSA result to ensure address stability of internal MemorySSA |
821 | | // pointers after construction. Use a wrapper class instead of plain |
822 | | // unique_ptr<MemorySSA> to avoid build breakage on MSVC. |
823 | | struct Result { |
824 | 158 | Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {} |
825 | | |
826 | 191 | MemorySSA &getMSSA() { return *MSSA.get(); } |
827 | | |
828 | | std::unique_ptr<MemorySSA> MSSA; |
829 | | }; |
830 | | |
831 | | Result run(Function &F, FunctionAnalysisManager &AM); |
832 | | }; |
833 | | |
834 | | /// \brief Printer pass for \c MemorySSA. |
835 | | class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> { |
836 | | raw_ostream &OS; |
837 | | |
838 | | public: |
839 | 0 | explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {} |
840 | | |
841 | | PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); |
842 | | }; |
843 | | |
844 | | /// \brief Verifier pass for \c MemorySSA. |
845 | | struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> { |
846 | | PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); |
847 | | }; |
848 | | |
849 | | /// \brief Legacy analysis pass which computes \c MemorySSA. |
850 | | class MemorySSAWrapperPass : public FunctionPass { |
851 | | public: |
852 | | MemorySSAWrapperPass(); |
853 | | |
854 | | static char ID; |
855 | | |
856 | | bool runOnFunction(Function &) override; |
857 | | void releaseMemory() override; |
858 | 516k | MemorySSA &getMSSA() { return *MSSA; } |
859 | 0 | const MemorySSA &getMSSA() const { return *MSSA; } |
860 | | |
861 | | void getAnalysisUsage(AnalysisUsage &AU) const override; |
862 | | |
863 | | void verifyAnalysis() const override; |
864 | | void print(raw_ostream &OS, const Module *M = nullptr) const override; |
865 | | |
866 | | private: |
867 | | std::unique_ptr<MemorySSA> MSSA; |
868 | | }; |
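A minimal sketch of obtaining the analysis from the legacy pass above (the pass name is made up for illustration; a new-PM pass would instead call AM.getResult<MemorySSAAnalysis>(F).getMSSA()):

  #include "llvm/Analysis/MemorySSA.h"
  #include "llvm/Pass.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  namespace {
  // Hypothetical analysis-only pass that just prints MemorySSA for F.
  struct MSSAPrintExample : FunctionPass {
    static char ID;
    MSSAPrintExample() : FunctionPass(ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesAll();
      AU.addRequired<MemorySSAWrapperPass>();
    }

    bool runOnFunction(Function &F) override {
      MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
      MSSA.print(errs());
      return false; // The IR is left untouched.
    }
  };
  } // end anonymous namespace

  char MSSAPrintExample::ID = 0;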
869 | | |
870 | | /// \brief This is the generic walker interface for walkers of MemorySSA. |
871 | | /// Walkers are used to further disambiguate the def-use chains |
872 | | /// MemorySSA gives you, or otherwise produce better info than MemorySSA gives |
873 | | /// you. |
874 | | /// In particular, while the def-use chains provide basic information, and are |
875 | | /// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a |
876 | | /// MemoryUse as AliasAnalysis considers it, a user may want better or other |
877 | | /// information. In particular, they may want to use SCEV info to further |
878 | | /// disambiguate memory accesses, or they may want the nearest dominating |
879 | | /// may-aliasing MemoryDef for a call or a store. This API enables a |
880 | | /// standardized interface to getting and using that info. |
881 | | class MemorySSAWalker { |
882 | | public: |
883 | | MemorySSAWalker(MemorySSA *); |
884 | 516k | virtual ~MemorySSAWalker() = default; |
885 | | |
886 | | using MemoryAccessSet = SmallVector<MemoryAccess *, 8>; |
887 | | |
888 | | /// \brief Given a memory Mod/Ref/ModRef'ing instruction, calling this |
889 | | /// will give you the nearest dominating MemoryAccess that Mod's the location |
890 | | /// the instruction accesses (by skipping any def which AA can prove does not |
891 | | /// alias the location(s) accessed by the instruction given). |
892 | | /// |
893 | | /// Note that this will return a single access, and it must dominate the |
894 | | /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction, |
895 | | /// this will return the MemoryPhi, not the operand. This means that |
896 | | /// given: |
897 | | /// if (a) { |
898 | | /// 1 = MemoryDef(liveOnEntry) |
899 | | /// store %a |
900 | | /// } else { |
901 | | /// 2 = MemoryDef(liveOnEntry) |
902 | | /// store %b |
903 | | /// } |
904 | | /// 3 = MemoryPhi(2, 1) |
905 | | /// MemoryUse(3) |
906 | | /// load %a |
907 | | /// |
908 | | /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef |
909 | | /// in the if (a) branch. |
910 | 1.27M | MemoryAccess *getClobberingMemoryAccess(const Instruction *I) { |
911 | 1.27M | MemoryAccess *MA = MSSA->getMemoryAccess(I); |
912 | 1.27M | assert(MA && "Handed an instruction that MemorySSA doesn't recognize?"); |
913 | 1.27M | return getClobberingMemoryAccess(MA); |
914 | 1.27M | } |
915 | | |
916 | | /// Does the same thing as getClobberingMemoryAccess(const Instruction *I), |
917 | | /// but takes a MemoryAccess instead of an Instruction. |
918 | | virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0; |
919 | | |
920 | | /// \brief Given a potentially clobbering memory access and a new location, |
921 | | /// calling this will give you the nearest dominating clobbering MemoryAccess |
922 | | /// (by skipping non-aliasing def links). |
923 | | /// |
924 | | /// This version of the function is mainly used to disambiguate phi translated |
925 | | /// pointers, where the value of a pointer may have changed from the initial |
926 | | /// memory access. Note that this expects to be handed either a MemoryUse, |
927 | | /// or an already potentially clobbering access. Unlike the above API, if |
928 | | /// given a MemoryDef that clobbers the pointer as the starting access, it |
929 | | /// will return that MemoryDef, whereas the above would return the clobber |
930 | | /// starting from the use side of the memory def. |
931 | | virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, |
932 | | const MemoryLocation &) = 0; |
933 | | |
934 | | /// \brief Given a memory access, invalidate anything this walker knows about |
935 | | /// that access. |
936 | | /// This API is used by walkers that store information to perform basic cache |
937 | | /// invalidation. This will be called by MemorySSA at appropriate times for |
938 | | /// the walker it uses or returns. |
939 | 0 | virtual void invalidateInfo(MemoryAccess *) {} |
940 | | |
941 | 93 | virtual void verify(const MemorySSA *MSSA) { assert(MSSA == this->MSSA); } |
942 | | |
943 | | protected: |
944 | | friend class MemorySSA; // For updating MSSA pointer in MemorySSA move |
945 | | // constructor. |
946 | | MemorySSA *MSSA; |
947 | | }; |
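A hedged sketch of driving the walker interface above (the helper name and the question it answers are assumptions for illustration): resolve a load's clobber and test whether it is the liveOnEntry definition, i.e. whether nothing inside the function is known to write the loaded memory:

  #include "llvm/Analysis/MemorySSA.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // True when the walker resolves LI's clobber all the way to liveOnEntry.
  static bool clobberedOnlyOutsideFunction(MemorySSA &MSSA, LoadInst &LI) {
    MemorySSAWalker *Walker = MSSA.getWalker();
    MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(&LI);
    return MSSA.isLiveOnEntryDef(Clobber);
  }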
948 | | |
949 | | /// \brief A MemorySSAWalker that does no alias queries, or anything else. It |
950 | | /// simply returns the links as they were constructed by the builder. |
951 | | class DoNothingMemorySSAWalker final : public MemorySSAWalker { |
952 | | public: |
953 | | // Keep the overrides below from hiding the Instruction overload of |
954 | | // getClobberingMemoryAccess. |
955 | | using MemorySSAWalker::getClobberingMemoryAccess; |
956 | | |
957 | | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override; |
958 | | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, |
959 | | const MemoryLocation &) override; |
960 | | }; |
961 | | |
962 | | using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>; |
963 | | using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>; |
964 | | |
965 | | /// \brief Iterator base class used to implement const and non-const iterators |
966 | | /// over the defining accesses of a MemoryAccess. |
967 | | template <class T> |
968 | | class memoryaccess_def_iterator_base |
969 | | : public iterator_facade_base<memoryaccess_def_iterator_base<T>, |
970 | | std::forward_iterator_tag, T, ptrdiff_t, T *, |
971 | | T *> { |
972 | | using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base; |
973 | | |
974 | | public: |
975 | 2.00M | memoryaccess_def_iterator_base(T *Start) : Access(Start) {} |
976 | 6.93M | memoryaccess_def_iterator_base() = default; |
977 | | |
978 | 11.8M | bool operator==(const memoryaccess_def_iterator_base &Other) const { |
979 | 4.01M | return Access == Other.Access && (!Access || ArgNo == Other.ArgNo); |
980 | 11.8M | } |
981 | | |
982 | | // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the |
983 | | // block from the operand in constant time (In a PHINode, the uselist has |
984 | | // both, so it's just subtraction). We provide it as part of the |
985 | | // iterator to avoid callers having to linear walk to get the block. |
986 | | // If the operation becomes constant time on MemoryPHI's, this bit of |
987 | | // abstraction breaking should be removed. |
988 | 4.90M | BasicBlock *getPhiArgBlock() const { |
989 | 4.90M | MemoryPhi *MP = dyn_cast<MemoryPhi>(Access); |
990 | 4.90M | assert(MP && "Tried to get phi arg block when not iterating over a PHI"); |
991 | 4.90M | return MP->getIncomingBlock(ArgNo); |
992 | 4.90M | } |
993 | | |
994 | 4.92M | typename BaseT::iterator::pointer operator*() const { |
995 | 4.92M | assert(Access && "Tried to access past the end of our iterator"); |
996 | 4.92M | // Go to the first argument for phis, and the defining access for everything |
997 | 4.92M | // else. |
998 | 4.92M | if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) |
999 | 4.92M | return MP->getIncomingValue(ArgNo); |
1000 | 0 | return cast<MemoryUseOrDef>(Access)->getDefiningAccess(); |
1001 | 4.92M | } |
1002 | | |
1003 | | using BaseT::operator++; |
1004 | 4.92M | memoryaccess_def_iterator &operator++() { |
1005 | 4.92M | assert(Access && "Hit end of iterator"); |
1006 | 4.92M | if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) { |
1007 | 4.92M | if (++ArgNo >= MP->getNumIncomingValues()) { |
1008 | 2.00M | ArgNo = 0; |
1009 | 2.00M | Access = nullptr; |
1010 | 2.00M | } |
1011 | 0 | } else { |
1012 | 0 | Access = nullptr; |
1013 | 0 | } |
1014 | 4.92M | return *this; |
1015 | 4.92M | } |
1016 | | |
1017 | | private: |
1018 | | T *Access = nullptr; |
1019 | | unsigned ArgNo = 0; |
1020 | | }; |
1021 | | |
1022 | 0 | inline memoryaccess_def_iterator MemoryAccess::defs_begin() { |
1023 | 0 | return memoryaccess_def_iterator(this); |
1024 | 0 | } |
1025 | | |
1026 | 0 | inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const { |
1027 | 0 | return const_memoryaccess_def_iterator(this); |
1028 | 0 | } |
1029 | | |
1030 | 4.92M | inline memoryaccess_def_iterator MemoryAccess::defs_end() { |
1031 | 4.92M | return memoryaccess_def_iterator(); |
1032 | 4.92M | } |
1033 | | |
1034 | 0 | inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const { |
1035 | 0 | return const_memoryaccess_def_iterator(); |
1036 | 0 | } |
1037 | | |
1038 | | /// \brief GraphTraits for a MemoryAccess, which walks defs in the normal case, |
1039 | | /// and uses in the inverse case. |
1040 | | template <> struct GraphTraits<MemoryAccess *> { |
1041 | | using NodeRef = MemoryAccess *; |
1042 | | using ChildIteratorType = memoryaccess_def_iterator; |
1043 | | |
1044 | 0 | static NodeRef getEntryNode(NodeRef N) { return N; } |
1045 | 0 | static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); } |
1046 | 0 | static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); } |
1047 | | }; |
1048 | | |
1049 | | template <> struct GraphTraits<Inverse<MemoryAccess *>> { |
1050 | | using NodeRef = MemoryAccess *; |
1051 | | using ChildIteratorType = MemoryAccess::iterator; |
1052 | | |
1053 | 0 | static NodeRef getEntryNode(NodeRef N) { return N; } |
1054 | 0 | static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); } |
1055 | 0 | static ChildIteratorType child_end(NodeRef N) { return N->user_end(); } |
1056 | | }; |
1057 | | |
1058 | | /// \brief Provide an iterator that walks defs, giving both the memory access, |
1059 | | /// and the current pointer location, updating the pointer location as it |
1060 | | /// changes due to phi node translation. |
1061 | | /// |
1062 | | /// This iterator, while somewhat specialized, is what most clients actually |
1063 | | /// want when walking upwards through MemorySSA def chains. It takes a pair of |
1064 | | /// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the |
1065 | | /// memory location through phi nodes for the user. |
1066 | | class upward_defs_iterator |
1067 | | : public iterator_facade_base<upward_defs_iterator, |
1068 | | std::forward_iterator_tag, |
1069 | | const MemoryAccessPair> { |
1070 | | using BaseT = upward_defs_iterator::iterator_facade_base; |
1071 | | |
1072 | | public: |
1073 | | upward_defs_iterator(const MemoryAccessPair &Info) |
1074 | | : DefIterator(Info.first), Location(Info.second), |
1075 | 2.00M | OriginalAccess(Info.first) { |
1076 | 2.00M | CurrentPair.first = nullptr; |
1077 | 2.00M | |
1078 | 2.00M | WalkingPhi = Info.first && isa<MemoryPhi>(Info.first); |
1079 | 2.00M | fillInCurrentPair(); |
1080 | 2.00M | } |
1081 | | |
1082 | 2.00M | upward_defs_iterator() { CurrentPair.first = nullptr; } |
1083 | | |
1084 | 6.93M | bool operator==(const upward_defs_iterator &Other) const { |
1085 | 6.93M | return DefIterator == Other.DefIterator; |
1086 | 6.93M | } |
1087 | | |
1088 | 4.92M | BaseT::iterator::reference operator*() const { |
1089 | 4.92M | assert(DefIterator != OriginalAccess->defs_end() && |
1090 | 4.92M | "Tried to access past the end of our iterator"); |
1091 | 4.92M | return CurrentPair; |
1092 | 4.92M | } |
1093 | | |
1094 | | using BaseT::operator++; |
1095 | 4.92M | upward_defs_iterator &operator++() { |
1096 | 4.92M | assert(DefIterator != OriginalAccess->defs_end() && |
1097 | 4.92M | "Tried to access past the end of the iterator"); |
1098 | 4.92M | ++DefIterator; |
1099 | 4.92M | if (DefIterator != OriginalAccess->defs_end()) |
1100 | 2.91M | fillInCurrentPair(); |
1101 | 4.92M | return *this; |
1102 | 4.92M | } |
1103 | | |
1104 | 0 | BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); } |
1105 | | |
1106 | | private: |
1107 | 4.92M | void fillInCurrentPair() { |
1108 | 4.92M | CurrentPair.first = *DefIterator; |
1109 | 4.92M | if (WalkingPhi && Location.Ptr) { |
1110 | 4.90M | PHITransAddr Translator( |
1111 | 4.90M | const_cast<Value *>(Location.Ptr), |
1112 | 4.90M | OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr); |
1113 | 4.90M | if (!Translator.PHITranslateValue(OriginalAccess->getBlock(), |
1114 | 4.90M | DefIterator.getPhiArgBlock(), nullptr, |
1115 | 4.90M | false)) |
1116 | 0 | if (Translator.getAddr() != Location.Ptr) { |
1117 | 0 | CurrentPair.second = Location.getWithNewPtr(Translator.getAddr()); |
1118 | 0 | return; |
1119 | 0 | } |
1120 | 4.90M | } |
1121 | 4.92M | CurrentPair.second = Location; |
1122 | 4.92M | } |
1123 | | |
1124 | | MemoryAccessPair CurrentPair; |
1125 | | memoryaccess_def_iterator DefIterator; |
1126 | | MemoryLocation Location; |
1127 | | MemoryAccess *OriginalAccess = nullptr; |
1128 | | bool WalkingPhi = false; |
1129 | | }; |
1130 | | |
1131 | 2.00M | inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) { |
1132 | 2.00M | return upward_defs_iterator(Pair); |
1133 | 2.00M | } |
1134 | | |
1135 | 2.00M | inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); } |
1136 | | |
1137 | | inline iterator_range<upward_defs_iterator> |
1138 | 0 | upward_defs(const MemoryAccessPair &Pair) { |
1139 | 0 | return make_range(upward_defs_begin(Pair), upward_defs_end()); |
1140 | 0 | } |
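A sketch of how upward_defs is typically consumed (the dump helper is hypothetical): starting from a MemoryPhi and the queried location, each step yields an incoming access paired with the location, phi-translated where that succeeded:

  #include "llvm/Analysis/MemorySSA.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  // Print each incoming (access, pointer) pair of Phi for location Loc.
  static void dumpUpwardDefs(MemoryPhi *Phi, const MemoryLocation &Loc,
                             raw_ostream &OS) {
    for (const MemoryAccessPair &Pair : upward_defs({Phi, Loc})) {
      OS << *Pair.first;
      if (Pair.second.Ptr) {
        OS << " for ";
        Pair.second.Ptr->printAsOperand(OS); // Possibly phi-translated pointer.
      }
      OS << "\n";
    }
  }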
1141 | | |
1142 | | /// Walks the defining accesses of MemoryDefs. Stops after we hit something that |
1143 | | /// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when |
1144 | | /// comparing against a null def_chain_iterator, this will compare equal only |
1145 | | /// after walking said Phi/liveOnEntry. |
1146 | | /// |
1147 | | /// The UseOptimizedChain flag specifies whether to walk the clobbering |
1148 | | /// access chain, or all the accesses. |
1149 | | /// |
1150 | | /// Normally, MemoryDef are all just def/use linked together, so a def_chain on |
1151 | | /// a MemoryDef will walk all MemoryDefs above it in the program until it hits |
1152 | | /// a phi node. The optimized chain walks the clobbering access of a store. |
1153 | | /// So if you are just trying to find, given a store, what the next |
1154 | | /// thing that would clobber the same memory is, you want the optimized chain. |
1155 | | template <class T, bool UseOptimizedChain = false> |
1156 | | struct def_chain_iterator |
1157 | | : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>, |
1158 | | std::forward_iterator_tag, MemoryAccess *> { |
1159 | | def_chain_iterator() : MA(nullptr) {} |
1160 | 6.90M | def_chain_iterator(T MA) : MA(MA) {} |
1161 | | |
1162 | 5.44M | T operator*() const { return MA; } |
1163 | | |
1164 | 4.00M | def_chain_iterator &operator++() { |
1165 | 4.00M | // N.B. liveOnEntry has a null defining access. |
1166 | 4.00M | if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) { |
1167 | 1.99M | if (UseOptimizedChain && MUD->isOptimized()) |
1168 | 0 | MA = MUD->getOptimized(); |
1169 | 1.99M | else |
1170 | 1.99M | MA = MUD->getDefiningAccess(); |
1171 | 4.00M | } else { |
1172 | 2.00M | MA = nullptr; |
1173 | 2.00M | } |
1174 | 4.00M | |
1175 | 4.00M | return *this; |
1176 | 4.00M | } |
1177 | | |
1178 | 7.45M | bool operator==(const def_chain_iterator &O) const { return MA == O.MA; } |
1179 | | |
1180 | | private: |
1181 | | T MA; |
1182 | | }; |
1183 | | |
1184 | | template <class T> |
1185 | | inline iterator_range<def_chain_iterator<T>> |
1186 | 3.45M | def_chain(T MA, MemoryAccess *UpTo = nullptr) { |
1187 | | #ifdef EXPENSIVE_CHECKS |
1188 | | assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) && |
1189 | | "UpTo isn't in the def chain!"); |
1190 | | #endif |
1191 | | return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo)); |
1192 | 3.45M | } |
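A short sketch of a def_chain consumer (the counting helper is hypothetical), doing exactly the linear scan the comment above describes:

  #include "llvm/Analysis/MemorySSA.h"
  using namespace llvm;

  // Number of MemoryDefs on the straight def chain starting at MA; the range
  // ends by itself once a MemoryPhi or liveOnEntry has been visited.
  static unsigned countDefsAbove(MemoryAccess *MA) {
    unsigned N = 0;
    for (MemoryAccess *Def : def_chain(MA))
      if (isa<MemoryDef>(Def))
        ++N;
    return N;
  }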
1193 | | |
1194 | | template <class T> |
1195 | 0 | inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) { |
1196 | 0 | return make_range(def_chain_iterator<T, true>(MA), |
1197 | 0 | def_chain_iterator<T, true>(nullptr)); |
1198 | 0 | } |
1199 | | |
1200 | | } // end namespace llvm |
1201 | | |
1202 | | #endif // LLVM_ANALYSIS_MEMORYSSA_H |