Coverage Report

Created: 2018-07-19 03:59

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/include/llvm/Analysis/MemorySSA.h
//===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file exposes an interface to building/using memory SSA to
/// walk memory instructions using a use/def graph.
///
/// The MemorySSA class builds an SSA form that links together memory access
/// instructions such as loads, stores, atomics, and calls. Additionally, it
/// does a trivial form of "heap versioning": every time the memory state
/// changes in the program, we generate a new heap version. It generates
/// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
///
/// As a trivial example,
/// define i32 @main() #0 {
/// entry:
///   %call = call noalias i8* @_Znwm(i64 4) #2
///   %0 = bitcast i8* %call to i32*
///   %call1 = call noalias i8* @_Znwm(i64 4) #2
///   %1 = bitcast i8* %call1 to i32*
///   store i32 5, i32* %0, align 4
///   store i32 7, i32* %1, align 4
///   %2 = load i32* %0, align 4
///   %3 = load i32* %1, align 4
///   %add = add nsw i32 %2, %3
///   ret i32 %add
/// }
///
/// will become
/// define i32 @main() #0 {
/// entry:
///   ; 1 = MemoryDef(0)
///   %call = call noalias i8* @_Znwm(i64 4) #3
///   %2 = bitcast i8* %call to i32*
///   ; 2 = MemoryDef(1)
///   %call1 = call noalias i8* @_Znwm(i64 4) #3
///   %4 = bitcast i8* %call1 to i32*
///   ; 3 = MemoryDef(2)
///   store i32 5, i32* %2, align 4
///   ; 4 = MemoryDef(3)
///   store i32 7, i32* %4, align 4
///   ; MemoryUse(3)
///   %7 = load i32* %2, align 4
///   ; MemoryUse(4)
///   %8 = load i32* %4, align 4
///   %add = add nsw i32 %7, %8
///   ret i32 %add
/// }
///
/// Given this form, all the stores that could ever affect the load at %8 can
/// be found by using the MemoryUse associated with it, and walking from use
/// to def until you hit the top of the function.
///
/// Each def also has a list of users associated with it, so you can walk from
/// both def to users, and users to defs. Note that we disambiguate MemoryUses,
/// but not the RHS of MemoryDefs. You can see this above at %7, which would
/// otherwise be a MemoryUse(4). Being disambiguated means that for a given
/// store, all the MemoryUses on its use lists are may-aliases of that store
/// (but the MemoryDefs on its use list may not be).
///
/// MemoryDefs are not disambiguated because it would require multiple
/// reaching definitions, which would require multiple phis, and multiple
/// MemoryAccesses per instruction.
//
//===----------------------------------------------------------------------===//
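// For illustration, a minimal sketch of the use->def walk described above
// (assumes `MSSA` is a built MemorySSA instance and `LI` is a LoadInst in
// the analyzed function; both names are hypothetical):
//
//   MemoryUseOrDef *MU = MSSA.getMemoryAccess(LI);
//   MemoryAccess *MA = MU->getDefiningAccess();
//   while (!MSSA.isLiveOnEntryDef(MA) && !isa<MemoryPhi>(MA)) {
//     // Each MemoryDef visited here may clobber the load's location; a real
//     // client would also recurse into MemoryPhi operands.
//     MA = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
//   }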
#ifndef LLVM_ANALYSIS_MEMORYSSA_H
#define LLVM_ANALYSIS_MEMORYSSA_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/simple_ilist.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DerivedUser.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <memory>
#include <utility>

namespace llvm {

class Function;
class Instruction;
class MemoryAccess;
class MemorySSAWalker;
class LLVMContext;
class raw_ostream;

namespace MSSAHelpers {

struct AllAccessTag {};
struct DefsOnlyTag {};

} // end namespace MSSAHelpers

enum : unsigned {
  // Used to signify what the default invalid ID is for MemoryAccess's
  // getID()
  INVALID_MEMORYACCESS_ID = -1U
};

template <class T> class memoryaccess_def_iterator_base;
using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
using const_memoryaccess_def_iterator =
    memoryaccess_def_iterator_base<const MemoryAccess>;

// The base for all memory accesses. All memory accesses in a block are
// linked together using an intrusive list.
class MemoryAccess
    : public DerivedUser,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
public:
  using AllAccessType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsOnlyType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  MemoryAccess(const MemoryAccess &) = delete;
  MemoryAccess &operator=(const MemoryAccess &) = delete;

  void *operator new(size_t) = delete;

  // Methods for support type inquiry through isa, cast, and
  // dyn_cast
  static bool classof(const Value *V) {
    unsigned ID = V->getValueID();
    return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
  }

  BasicBlock *getBlock() const { return Block; }

  void print(raw_ostream &OS) const;
  void dump() const;

  /// The user iterators for a memory access
  using iterator = user_iterator;
  using const_iterator = const_user_iterator;

  /// This iterator walks over all of the defs in a given
  /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
  /// MemoryUse/MemoryDef, this walks the defining access.
  memoryaccess_def_iterator defs_begin();
  const_memoryaccess_def_iterator defs_begin() const;
  memoryaccess_def_iterator defs_end();
  const_memoryaccess_def_iterator defs_end() const;

  /// Get the iterators for the all access list and the defs only list
  /// We default to the all access list.
  AllAccessType::self_iterator getIterator() {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::const_self_iterator getIterator() const {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::reverse_self_iterator getReverseIterator() {
    return this->AllAccessType::getReverseIterator();
  }
  AllAccessType::const_reverse_self_iterator getReverseIterator() const {
    return this->AllAccessType::getReverseIterator();
  }
  DefsOnlyType::self_iterator getDefsIterator() {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::const_self_iterator getDefsIterator() const {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
    return this->DefsOnlyType::getReverseIterator();
  }
  DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
    return this->DefsOnlyType::getReverseIterator();
  }

protected:
  friend class MemoryDef;
  friend class MemoryPhi;
  friend class MemorySSA;
  friend class MemoryUse;
  friend class MemoryUseOrDef;

  /// Used by MemorySSA to change the block of a MemoryAccess when it is
  /// moved.
  void setBlock(BasicBlock *BB) { Block = BB; }

  /// Used for debugging and tracking things about MemoryAccesses.
  /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
  inline unsigned getID() const;

  MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
               BasicBlock *BB, unsigned NumOperands)
      : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
        Block(BB) {}

  // Use deleteValue() to delete a generic MemoryAccess.
  ~MemoryAccess() = default;

private:
  BasicBlock *Block;
};

template <>
struct ilist_alloc_traits<MemoryAccess> {
  static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); }
};

inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
  MA.print(OS);
  return OS;
}
/// Class that has the common methods + fields of memory uses/defs. It's
/// a little awkward to have, but there are many cases where we want either a
/// use or def, and there are many cases where uses are needed (defs aren't
/// acceptable), and vice-versa.
///
/// This class should never be instantiated directly; make a MemoryUse or
/// MemoryDef instead.
class MemoryUseOrDef : public MemoryAccess {
public:
  void *operator new(size_t) = delete;

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  /// Get the instruction that this MemoryUse represents.
  Instruction *getMemoryInst() const { return MemoryInstruction; }

  /// Get the access that produces the memory state used by this Use.
  MemoryAccess *getDefiningAccess() const { return getOperand(0); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal ||
           MA->getValueID() == MemoryDefVal;
  }

  // Sadly, these have to be public because they are needed in some of the
  // iterators.
  inline bool isOptimized() const;
  inline MemoryAccess *getOptimized() const;
  inline void setOptimized(MemoryAccess *);

  // Retrieve AliasResult type of the optimized access. Ideally this would be
  // returned by the caching walker and may go away in the future.
  Optional<AliasResult> getOptimizedAccessType() const {
    return OptimizedAccessAlias;
  }

  /// Reset the ID of what this MemoryUse was optimized to, causing it to
  /// be rewalked by the walker if necessary.
  /// This really should only be called by tests.
  inline void resetOptimized();

protected:
  friend class MemorySSA;
  friend class MemorySSAUpdater;

  MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
                 DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB)
      : MemoryAccess(C, Vty, DeleteValue, BB, 1), MemoryInstruction(MI),
        OptimizedAccessAlias(MayAlias) {
    setDefiningAccess(DMA);
  }

  // Use deleteValue() to delete a generic MemoryUseOrDef.
  ~MemoryUseOrDef() = default;

  void setOptimizedAccessType(Optional<AliasResult> AR) {
    OptimizedAccessAlias = AR;
  }

  void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false,
                         Optional<AliasResult> AR = MayAlias) {
    if (!Optimized) {
      setOperand(0, DMA);
      return;
    }
    setOptimized(DMA);
    setOptimizedAccessType(AR);
  }

private:
  Instruction *MemoryInstruction;
  Optional<AliasResult> OptimizedAccessAlias;
};

template <>
struct OperandTraits<MemoryUseOrDef>
    : public FixedNumOperandTraits<MemoryUseOrDef, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
/// Represents read-only accesses to memory
///
/// In particular, the set of Instructions that will be represented by
/// MemoryUse's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Ref".
class MemoryUse final : public MemoryUseOrDef {
public:
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
      : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB) {}

  // allocate space for exactly one operand
  void *operator new(size_t s) { return User::operator new(s, 1); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal;
  }

  void print(raw_ostream &OS) const;

  void setOptimized(MemoryAccess *DMA) {
    OptimizedID = DMA->getID();
    setOperand(0, DMA);
  }

  bool isOptimized() const {
    return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
  }

  MemoryAccess *getOptimized() const {
    return getDefiningAccess();
  }

  void resetOptimized() {
    OptimizedID = INVALID_MEMORYACCESS_ID;
  }

protected:
  friend class MemorySSA;

private:
  static void deleteMe(DerivedUser *Self);

  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
};

template <>
struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
/// Represents a read-write access to memory, whether it is a must-alias,
/// or a may-alias.
///
/// In particular, the set of Instructions that will be represented by
/// MemoryDef's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
/// Note that, in order to provide def-def chains, all defs also have a use
/// associated with them. This use points to the nearest reaching
/// MemoryDef/MemoryPhi.
class MemoryDef final : public MemoryUseOrDef {
public:
  friend class MemorySSA;

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
            unsigned Ver)
      : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB), ID(Ver) {}

  // allocate space for exactly one operand
  void *operator new(size_t s) { return User::operator new(s, 1); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryDefVal;
  }

  void setOptimized(MemoryAccess *MA) {
    Optimized = MA;
    OptimizedID = getDefiningAccess()->getID();
  }

  MemoryAccess *getOptimized() const {
    return cast_or_null<MemoryAccess>(Optimized);
  }

  bool isOptimized() const {
    return getOptimized() && getDefiningAccess() &&
           OptimizedID == getDefiningAccess()->getID();
  }

  void resetOptimized() {
    OptimizedID = INVALID_MEMORYACCESS_ID;
  }

  void print(raw_ostream &OS) const;

  unsigned getID() const { return ID; }

private:
  static void deleteMe(DerivedUser *Self);

  const unsigned ID;
  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
  WeakVH Optimized;
};

template <>
struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
/// Represents phi nodes for memory accesses.
///
/// These have the same semantic as regular phi nodes, with the exception that
/// only one phi will ever exist in a given basic block.
/// Guaranteeing one phi per block means guaranteeing there is only ever one
/// valid reaching MemoryDef/MemoryPhi along each path to the phi node.
/// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
/// a MemoryPhi's operands.
/// That is, given
/// if (a) {
///   store %a
///   store %b
/// }
/// it *must* be transformed into
/// if (a) {
///    1 = MemoryDef(liveOnEntry)
///    store %a
///    2 = MemoryDef(1)
///    store %b
/// }
/// and *not*
/// if (a) {
///    1 = MemoryDef(liveOnEntry)
///    store %a
///    2 = MemoryDef(liveOnEntry)
///    store %b
/// }
/// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
/// end of the branch, and if there are not two phi nodes, one will be
/// disconnected completely from the SSA graph below that point.
/// Because MemoryUse's do not generate new definitions, they do not have this
/// issue.
class MemoryPhi final : public MemoryAccess {
  // allocate space for exactly zero operands
  void *operator new(size_t s) { return User::operator new(s); }

public:
  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
      : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
        ReservedSpace(NumPreds) {
    allocHungoffUses(ReservedSpace);
  }

  // Block iterator interface. This provides access to the list of incoming
  // basic blocks, which parallels the list of incoming values.
  using block_iterator = BasicBlock **;
  using const_block_iterator = BasicBlock *const *;

  block_iterator block_begin() {
    auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace);
    return reinterpret_cast<block_iterator>(Ref + 1);
  }

  const_block_iterator block_begin() const {
    const auto *Ref =
        reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace);
    return reinterpret_cast<const_block_iterator>(Ref + 1);
  }

  block_iterator block_end() { return block_begin() + getNumOperands(); }

  const_block_iterator block_end() const {
    return block_begin() + getNumOperands();
  }

  iterator_range<block_iterator> blocks() {
    return make_range(block_begin(), block_end());
  }

  iterator_range<const_block_iterator> blocks() const {
    return make_range(block_begin(), block_end());
  }

  op_range incoming_values() { return operands(); }

  const_op_range incoming_values() const { return operands(); }

  /// Return the number of incoming edges
  unsigned getNumIncomingValues() const { return getNumOperands(); }

  /// Return incoming value number x
  MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
  void setIncomingValue(unsigned I, MemoryAccess *V) {
    assert(V && "PHI node got a null value!");
    setOperand(I, V);
  }

  static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
  static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }

  /// Return incoming basic block number @p i.
  BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }

  /// Return incoming basic block corresponding
  /// to an operand of the PHI.
  BasicBlock *getIncomingBlock(const Use &U) const {
    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
    return getIncomingBlock(unsigned(&U - op_begin()));
  }

  /// Return incoming basic block corresponding
  /// to value use iterator.
  BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
    return getIncomingBlock(I.getUse());
  }

  void setIncomingBlock(unsigned I, BasicBlock *BB) {
    assert(BB && "PHI node got a null basic block!");
    block_begin()[I] = BB;
  }

  /// Add an incoming value to the end of the PHI list
  void addIncoming(MemoryAccess *V, BasicBlock *BB) {
    if (getNumOperands() == ReservedSpace)
      growOperands(); // Get more space!
    // Initialize some new operands.
    setNumHungOffUseOperands(getNumOperands() + 1);
    setIncomingValue(getNumOperands() - 1, V);
    setIncomingBlock(getNumOperands() - 1, BB);
  }

  /// Return the first index of the specified basic
  /// block in the value list for this PHI.  Returns -1 if no instance.
  int getBasicBlockIndex(const BasicBlock *BB) const {
    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
      if (block_begin()[I] == BB)
        return I;
    return -1;
  }

  MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument!");
    return getIncomingValue(Idx);
  }

  // After deleting incoming position I, the order of incoming may be changed.
  void unorderedDeleteIncoming(unsigned I) {
    unsigned E = getNumOperands();
    assert(I < E && "Cannot remove out of bounds Phi entry.");
    // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi
    // itself should be deleted.
    assert(E >= 2 && "Cannot remove incoming values from MemoryPhis with "
                     "fewer than 2 values.");
    setIncomingValue(I, getIncomingValue(E - 1));
    setIncomingBlock(I, block_begin()[E - 1]);
    setOperand(E - 1, nullptr);
    block_begin()[E - 1] = nullptr;
    setNumHungOffUseOperands(getNumOperands() - 1);
  }

  // After deleting incoming block BB, the incoming blocks order may be changed.
  void unorderedDeleteIncomingBlock(const BasicBlock *BB) {
    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
      if (block_begin()[I] == BB) {
        unorderedDeleteIncoming(I);
        E = getNumOperands();
        --I;
      }
    assert(getNumOperands() >= 1 &&
           "Cannot remove all incoming blocks in a MemoryPhi.");
  }

  // After deleting incoming memory access MA, the incoming accesses order may
  // be changed.
  void unorderedDeleteIncomingValue(const MemoryAccess *MA) {
    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
      if (getIncomingValue(I) == MA) {
        unorderedDeleteIncoming(I);
        E = getNumOperands();
        --I;
      }
    assert(getNumOperands() >= 1 &&
           "Cannot remove all incoming values in a MemoryPhi.");
  }

  static bool classof(const Value *V) {
    return V->getValueID() == MemoryPhiVal;
  }

  void print(raw_ostream &OS) const;

  unsigned getID() const { return ID; }

protected:
  friend class MemorySSA;

  /// this is more complicated than the generic
  /// User::allocHungoffUses, because we have to allocate Uses for the incoming
  /// values and pointers to the incoming blocks, all in one allocation.
  void allocHungoffUses(unsigned N) {
    User::allocHungoffUses(N, /* IsPhi */ true);
  }

private:
  // For debugging only
  const unsigned ID;
  unsigned ReservedSpace;

  /// This grows the operand list in response to a push_back style of
  /// operation.  This grows the number of ops by 1.5 times.
  void growOperands() {
    unsigned E = getNumOperands();
    // 2 op PHI nodes are VERY common, so reserve at least enough for that.
    ReservedSpace = std::max(E + E / 2, 2u);
    growHungoffUses(ReservedSpace, /* IsPhi */ true);
  }

  static void deleteMe(DerivedUser *Self);
};

inline unsigned MemoryAccess::getID() const {
  assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
         "only memory defs and phis have ids");
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->getID();
  return cast<MemoryPhi>(this)->getID();
}

inline bool MemoryUseOrDef::isOptimized() const {
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->isOptimized();
  return cast<MemoryUse>(this)->isOptimized();
}

inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->getOptimized();
  return cast<MemoryUse>(this)->getOptimized();
}

inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
  if (auto *MD = dyn_cast<MemoryDef>(this))
    MD->setOptimized(MA);
  else
    cast<MemoryUse>(this)->setOptimized(MA);
}

inline void MemoryUseOrDef::resetOptimized() {
  if (auto *MD = dyn_cast<MemoryDef>(this))
    MD->resetOptimized();
  else
    cast<MemoryUse>(this)->resetOptimized();
}

template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
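// Example: visiting a MemoryPhi's incoming (value, block) pairs, a sketch
// using only the accessors above (`MP` is a hypothetical MemoryPhi *):
//
//   for (unsigned I = 0, E = MP->getNumIncomingValues(); I != E; ++I) {
//     MemoryAccess *IncomingMA = MP->getIncomingValue(I);
//     BasicBlock *Pred = MP->getIncomingBlock(I);
//     // IncomingMA is the reaching definition along the edge from Pred.
//   }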
/// Encapsulates MemorySSA, including all data associated with memory
/// accesses.
class MemorySSA {
public:
  MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
  ~MemorySSA();

  MemorySSAWalker *getWalker();

  /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
  /// access associated with it. If passed a basic block, this gets the memory
  /// phi node that exists for that block, if there is one. Otherwise, this
  /// will get a MemoryUseOrDef.
  MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
  MemoryPhi *getMemoryAccess(const BasicBlock *BB) const;

  void dump() const;
  void print(raw_ostream &) const;

  /// Return true if \p MA represents the live on entry value
  ///
  /// Loads and stores from pointer arguments and other global values may be
  /// defined by memory operations that do not occur in the current function, so
  /// they may be live on entry to the function. MemorySSA represents such
  /// memory state by the live on entry definition, which is guaranteed to occur
  /// before any other memory access in the function.
  inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
    return MA == LiveOnEntryDef.get();
  }

  inline MemoryAccess *getLiveOnEntryDef() const {
    return LiveOnEntryDef.get();
  }

  // Sadly, iplists, by default, own and delete pointers added to the
  // list. It's not currently possible to have two iplists for the same type,
  // where one owns the pointers, and one does not. This is because the traits
  // are per-type, not per-tag.  If this ever changes, we should make the
  // DefList an iplist.
  using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsList =
      simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  /// Return the list of MemoryAccess's for a given basic block.
  ///
  /// This list is not modifiable by the user.
  const AccessList *getBlockAccesses(const BasicBlock *BB) const {
    return getWritableBlockAccesses(BB);
  }

  /// Return the list of MemoryDef's and MemoryPhi's for a given basic
  /// block.
  ///
  /// This list is not modifiable by the user.
  const DefsList *getBlockDefs(const BasicBlock *BB) const {
    return getWritableBlockDefs(BB);
  }

  /// Given two memory accesses in the same basic block, determine
  /// whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// Given two memory accesses in potentially different blocks,
  /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
  /// dominates Use \p B.
  bool dominates(const MemoryAccess *A, const Use &B) const;

  /// Verify that MemorySSA is self-consistent (i.e., definitions dominate
  /// all uses, uses appear in the right places).  This is used by unit tests.
  void verifyMemorySSA() const;

  /// Used in various insertion functions to specify whether we are talking
  /// about the beginning or end of a block.
  enum InsertionPlace { Beginning, End };

protected:
  // Used by the Memory SSA annotator, dumpers, and wrapper pass.
  friend class MemorySSAAnnotatedWriter;
  friend class MemorySSAPrinterLegacyPass;
  friend class MemorySSAUpdater;

  void verifyDefUses(Function &F) const;
  void verifyDomination(Function &F) const;
  void verifyOrdering(Function &F) const;
  void verifyDominationNumbers(const Function &F) const;

  // This is used by the use optimizer and updater.
  AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
    auto It = PerBlockAccesses.find(BB);
    return It == PerBlockAccesses.end() ? nullptr : It->second.get();
  }

  // This is used by the use optimizer and updater.
  DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
    auto It = PerBlockDefs.find(BB);
    return It == PerBlockDefs.end() ? nullptr : It->second.get();
  }

  // These are used by the updater to perform various internal MemorySSA
  // machinations. They do not always leave the IR in a correct state, and
  // rely on the updater to fix up what they break, so they are not public.

  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
  void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point);

  // Rename the dominator tree branch rooted at BB.
  void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
                  SmallPtrSetImpl<BasicBlock *> &Visited) {
    renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
  }

  void removeFromLookups(MemoryAccess *);
  void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
  void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
                               InsertionPlace);
  void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
                             AccessList::iterator);
  MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *);

private:
  class CachingWalker;
  class OptimizeUses;

  CachingWalker *getWalkerImpl();
  void buildMemorySSA();
  void optimizeUses();

  void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;

  using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
  using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;

  void
  determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks);
  void markUnreachableAsLiveOnEntry(BasicBlock *BB);
  bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
  MemoryPhi *createMemoryPhi(BasicBlock *BB);
  MemoryUseOrDef *createNewAccess(Instruction *);
  MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
  void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
  MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
  void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
  void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
                  SmallPtrSetImpl<BasicBlock *> &Visited,
                  bool SkipVisited = false, bool RenameAllUses = false);
  AccessList *getOrCreateAccessList(const BasicBlock *);
  DefsList *getOrCreateDefsList(const BasicBlock *);
  void renumberBlock(const BasicBlock *) const;

  AliasAnalysis *AA;
  DominatorTree *DT;
  Function &F;

  // Memory SSA mappings
  DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;

  // These two mappings contain the main block to access/def mappings for
  // MemorySSA. The list contained in PerBlockAccesses really owns all the
  // MemoryAccesses.
  // Both maps maintain the invariant that if a block is found in them, the
  // corresponding list is not empty, and if a block is not found in them, the
  // corresponding list is empty.
  AccessMap PerBlockAccesses;
  DefsMap PerBlockDefs;
  std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef;

  // Domination mappings
  // Note that the numbering is local to a block, even though the map is
  // global.
  mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
  mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;

  // Memory SSA building info
  std::unique_ptr<CachingWalker> Walker;
  unsigned NextID;
};
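// A minimal sketch of inspecting the accesses MemorySSA records for a block
// (assumes `MSSA` is a built MemorySSA instance and `BB` is a BasicBlock * in
// the analyzed function; both names are hypothetical):
//
//   if (const MemorySSA::AccessList *Accesses = MSSA.getBlockAccesses(BB))
//     for (const MemoryAccess &MA : *Accesses)
//       MA.print(errs()); // e.g. "1 = MemoryDef(liveOnEntry)"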
// Internal MemorySSA utils, for use by MemorySSA classes and walkers
class MemorySSAUtil {
protected:
  friend class GVNHoist;
  friend class MemorySSAWalker;

  // This function should not be used by new passes.
  static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                  AliasAnalysis &AA);
};

// This pass does eager building and then printing of MemorySSA. It is used by
// the tests to be able to build, dump, and verify Memory SSA.
class MemorySSAPrinterLegacyPass : public FunctionPass {
public:
  MemorySSAPrinterLegacyPass();

  bool runOnFunction(Function &) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  static char ID;
};

/// An analysis that produces \c MemorySSA for a function.
///
class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
  friend AnalysisInfoMixin<MemorySSAAnalysis>;

  static AnalysisKey Key;

public:
  // Wrap MemorySSA result to ensure address stability of internal MemorySSA
  // pointers after construction.  Use a wrapper class instead of plain
  // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
  struct Result {
    Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}

    MemorySSA &getMSSA() { return *MSSA.get(); }

    std::unique_ptr<MemorySSA> MSSA;
  };

  Result run(Function &F, FunctionAnalysisManager &AM);
};
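// Typical new pass manager usage, as a sketch (`MyPass` is a hypothetical
// function pass):
//
//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
//     MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
//     // ... query MSSA ...
//     return PreservedAnalyses::all();
//   }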
/// Printer pass for \c MemorySSA.
class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
  raw_ostream &OS;

public:
  explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Verifier pass for \c MemorySSA.
struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Legacy analysis pass which computes \c MemorySSA.
class MemorySSAWrapperPass : public FunctionPass {
public:
  MemorySSAWrapperPass();

  static char ID;

  bool runOnFunction(Function &) override;
  void releaseMemory() override;
  MemorySSA &getMSSA() { return *MSSA; }
  const MemorySSA &getMSSA() const { return *MSSA; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  void verifyAnalysis() const override;
  void print(raw_ostream &OS, const Module *M = nullptr) const override;

private:
  std::unique_ptr<MemorySSA> MSSA;
};
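// Typical legacy pass manager usage, as a sketch (`MyLegacyPass` is a
// hypothetical FunctionPass):
//
//   void MyLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<MemorySSAWrapperPass>();
//   }
//   bool MyLegacyPass::runOnFunction(Function &F) {
//     MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
//     // ... query MSSA ...
//     return false;
//   }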
/// This is the generic walker interface for walkers of MemorySSA.
/// Walkers are used to be able to further disambiguate the def-use chains
/// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
/// you.
/// In particular, while the def-use chains provide basic information, and are
/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
/// MemoryUse as AliasAnalysis considers it, a user may want better or other
/// information. In particular, they may want to use SCEV info to further
/// disambiguate memory accesses, or they may want the nearest dominating
/// may-aliasing MemoryDef for a call or a store. This API enables a
/// standardized interface to getting and using that info.
class MemorySSAWalker {
public:
  MemorySSAWalker(MemorySSA *);
  virtual ~MemorySSAWalker() = default;

  using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;

  /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
  /// will give you the nearest dominating MemoryAccess that Mod's the location
  /// the instruction accesses (by skipping any def which AA can prove does not
  /// alias the location(s) accessed by the instruction given).
  ///
  /// Note that this will return a single access, and it must dominate the
  /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
  /// this will return the MemoryPhi, not the operand. This means that
  /// given:
  /// if (a) {
  ///   1 = MemoryDef(liveOnEntry)
  ///   store %a
  /// } else {
  ///   2 = MemoryDef(liveOnEntry)
  ///   store %b
  /// }
  /// 3 = MemoryPhi(2, 1)
  /// MemoryUse(3)
  /// load %a
  ///
  /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
  /// in the if (a) branch.
  MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
    MemoryAccess *MA = MSSA->getMemoryAccess(I);
    assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
    return getClobberingMemoryAccess(MA);
  }

  /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
  /// but takes a MemoryAccess instead of an Instruction.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;

  /// Given a potentially clobbering memory access and a new location,
  /// calling this will give you the nearest dominating clobbering MemoryAccess
  /// (by skipping non-aliasing def links).
  ///
  /// This version of the function is mainly used to disambiguate phi translated
  /// pointers, where the value of a pointer may have changed from the initial
  /// memory access. Note that this expects to be handed either a MemoryUse,
  /// or an already potentially clobbering access. Unlike the above API, if
  /// given a MemoryDef that clobbers the pointer as the starting access, it
  /// will return that MemoryDef, whereas the above would return the clobber
  /// starting from the use side of the memory def.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                                  const MemoryLocation &) = 0;

  /// Given a memory access, invalidate anything this walker knows about
  /// that access.
  /// This API is used by walkers that store information to perform basic cache
  /// invalidation.  This will be called by MemorySSA at appropriate times for
  /// the walker it uses or returns.
  virtual void invalidateInfo(MemoryAccess *) {}

  virtual void verify(const MemorySSA *MSSA) { assert(MSSA == this->MSSA); }

protected:
  friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
                          // constructor.
  MemorySSA *MSSA;
};
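// For example, a sketch of finding the nearest dominating clobber of a load
// through the walker (assumes `MSSA` is a built MemorySSA instance and `LI`
// is a LoadInst; both names are hypothetical):
//
//   MemorySSAWalker *Walker = MSSA.getWalker();
//   MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(LI);
//   if (MSSA.isLiveOnEntryDef(Clobber)) {
//     // No access in this function clobbers LI's location.
//   }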
/// A MemorySSAWalker that does no alias queries, or anything else. It
/// simply returns the links as they were constructed by the builder.
class DoNothingMemorySSAWalker final : public MemorySSAWalker {
public:
  // Keep the overrides below from hiding the Instruction overload of
  // getClobberingMemoryAccess.
  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          const MemoryLocation &) override;
};

using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;

/// Iterator base class used to implement const and non-const iterators
/// over the defining accesses of a MemoryAccess.
template <class T>
class memoryaccess_def_iterator_base
    : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
                                  std::forward_iterator_tag, T, ptrdiff_t, T *,
                                  T *> {
  using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;

public:
  memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
  memoryaccess_def_iterator_base() = default;

  bool operator==(const memoryaccess_def_iterator_base &Other) const {
    return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
  }

  // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
  // block from the operand in constant time (In a PHINode, the uselist has
  // both, so it's just subtraction). We provide it as part of the
  // iterator to avoid callers having to linear walk to get the block.
  // If the operation becomes constant time on MemoryPHI's, this bit of
  // abstraction breaking should be removed.
  BasicBlock *getPhiArgBlock() const {
    MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
    assert(MP && "Tried to get phi arg block when not iterating over a PHI");
    return MP->getIncomingBlock(ArgNo);
  }

  typename BaseT::iterator::pointer operator*() const {
    assert(Access && "Tried to access past the end of our iterator");
    // Go to the first argument for phis, and the defining access for everything
    // else.
    if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
      return MP->getIncomingValue(ArgNo);
    return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
  }

  using BaseT::operator++;
  memoryaccess_def_iterator &operator++() {
    assert(Access && "Hit end of iterator");
    if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
      if (++ArgNo >= MP->getNumIncomingValues()) {
        ArgNo = 0;
        Access = nullptr;
      }
    } else {
      Access = nullptr;
    }
    return *this;
  }

private:
  T *Access = nullptr;
  unsigned ArgNo = 0;
};

inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
  return memoryaccess_def_iterator(this);
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
  return const_memoryaccess_def_iterator(this);
}

inline memoryaccess_def_iterator MemoryAccess::defs_end() {
  return memoryaccess_def_iterator();
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
  return const_memoryaccess_def_iterator();
}

/// GraphTraits for a MemoryAccess, which walks defs in the normal case,
/// and uses in the inverse case.
template <> struct GraphTraits<MemoryAccess *> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = memoryaccess_def_iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
};

template <> struct GraphTraits<Inverse<MemoryAccess *>> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = MemoryAccess::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
};
/// Provide an iterator that walks defs, giving both the memory access,
/// and the current pointer location, updating the pointer location as it
/// changes due to phi node translation.
///
/// This iterator, while somewhat specialized, is what most clients actually
/// want when walking upwards through MemorySSA def chains. It takes a pair of
/// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
/// memory location through phi nodes for the user.
class upward_defs_iterator
    : public iterator_facade_base<upward_defs_iterator,
                                  std::forward_iterator_tag,
                                  const MemoryAccessPair> {
  using BaseT = upward_defs_iterator::iterator_facade_base;

public:
  upward_defs_iterator(const MemoryAccessPair &Info)
      : DefIterator(Info.first), Location(Info.second),
        OriginalAccess(Info.first) {
    CurrentPair.first = nullptr;

    WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
    fillInCurrentPair();
  }

  upward_defs_iterator() { CurrentPair.first = nullptr; }

  bool operator==(const upward_defs_iterator &Other) const {
    return DefIterator == Other.DefIterator;
  }

  BaseT::iterator::reference operator*() const {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of our iterator");
    return CurrentPair;
  }

  using BaseT::operator++;
  upward_defs_iterator &operator++() {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of the iterator");
    ++DefIterator;
    if (DefIterator != OriginalAccess->defs_end())
      fillInCurrentPair();
    return *this;
  }

  BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }

private:
  void fillInCurrentPair() {
    CurrentPair.first = *DefIterator;
    if (WalkingPhi && Location.Ptr) {
      PHITransAddr Translator(
          const_cast<Value *>(Location.Ptr),
          OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
      if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
                                        DefIterator.getPhiArgBlock(), nullptr,
                                        false))
        if (Translator.getAddr() != Location.Ptr) {
          CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
          return;
        }
    }
    CurrentPair.second = Location;
  }

  MemoryAccessPair CurrentPair;
  memoryaccess_def_iterator DefIterator;
  MemoryLocation Location;
  MemoryAccess *OriginalAccess = nullptr;
  bool WalkingPhi = false;
};

inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
  return upward_defs_iterator(Pair);
}

inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }

inline iterator_range<upward_defs_iterator>
upward_defs(const MemoryAccessPair &Pair) {
  return make_range(upward_defs_begin(Pair), upward_defs_end());
}
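// Example: enumerating the defining accesses of a MemoryPhi, with the queried
// location phi-translated along each incoming edge (a sketch; `MemoryPhi *MP`
// and `LoadInst *LI` are hypothetical):
//
//   MemoryAccessPair Start(MP, MemoryLocation::get(LI));
//   for (const MemoryAccessPair &P : upward_defs(Start)) {
//     MemoryAccess *IncomingMA = P.first; // one incoming definition
//     const MemoryLocation &Loc = P.second; // location in that predecessor
//   }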
/// Walks the defining accesses of MemoryDefs. Stops after we hit something
/// that has no defining access (e.g. a MemoryPhi or liveOnEntry). Note that,
/// when comparing against a null def_chain_iterator, this will compare equal
/// only after walking said Phi/liveOnEntry.
///
/// The UseOptimizedChain flag specifies whether to walk the clobbering
/// access chain, or all the accesses.
///
/// Normally, MemoryDefs are all just def/use linked together, so a def_chain
/// on a MemoryDef will walk all MemoryDefs above it in the program until it
/// hits a phi node.  The optimized chain walks the clobbering access of a
/// store. So if you are just trying to find, given a store, what the next
/// thing that would clobber the same memory is, you want the optimized chain.
template <class T, bool UseOptimizedChain = false>
struct def_chain_iterator
    : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
                                  std::forward_iterator_tag, MemoryAccess *> {
  def_chain_iterator() : MA(nullptr) {}
  def_chain_iterator(T MA) : MA(MA) {}

  T operator*() const { return MA; }

  def_chain_iterator &operator++() {
    // N.B. liveOnEntry has a null defining access.
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
      if (UseOptimizedChain && MUD->isOptimized())
        MA = MUD->getOptimized();
      else
        MA = MUD->getDefiningAccess();
    } else {
      MA = nullptr;
    }

    return *this;
  }

  bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }

private:
  T MA;
};

template <class T>
inline iterator_range<def_chain_iterator<T>>
def_chain(T MA, MemoryAccess *UpTo = nullptr) {
#ifdef EXPENSIVE_CHECKS
  assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
         "UpTo isn't in the def chain!");
#endif
  return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
}

template <class T>
inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
  return make_range(def_chain_iterator<T, true>(MA),
                    def_chain_iterator<T, true>(nullptr));
}
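// Example: walking every access above a given one until a phi or liveOnEntry
// ends the chain (a sketch; `MemoryAccess *Start` is a hypothetical starting
// access, e.g. a MemoryDef's defining access):
//
//   for (MemoryAccess *MA : def_chain(Start)) {
//     // The final iteration yields the MemoryPhi or liveOnEntry itself;
//     // after that the iterator compares equal to the null end iterator.
//   }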
} // end namespace llvm

#endif // LLVM_ANALYSIS_MEMORYSSA_H