Coverage Report

Created: 2019-07-24 05:18

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
//===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs a simple dominator tree walk that eliminates trivially
// redundant instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/EarlyCSE.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/GuardUtils.h"
#include <cassert>
#include <deque>
#include <memory>
#include <utility>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "early-cse"

STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
STATISTIC(NumCSE,      "Number of instructions CSE'd");
STATISTIC(NumCSECVP,   "Number of compare instructions CVP'd");
STATISTIC(NumCSELoad,  "Number of load instructions CSE'd");
STATISTIC(NumCSECall,  "Number of call instructions CSE'd");
STATISTIC(NumDSE,      "Number of trivial dead stores removed");

DEBUG_COUNTER(CSECounter, "early-cse",
              "Controls which instructions are removed");

static cl::opt<unsigned> EarlyCSEMssaOptCap(
    "earlycse-mssa-optimization-cap", cl::init(500), cl::Hidden,
    cl::desc("Enable imprecision in EarlyCSE in pathological cases, in exchange "
             "for faster compile. Caps the MemorySSA clobbering calls."));

static cl::opt<bool> EarlyCSEDebugHash(
    "earlycse-debug-hash", cl::init(false), cl::Hidden,
    cl::desc("Perform extra assertion checking to verify that SimpleValue's hash "
             "function is well-behaved w.r.t. its isEqual predicate"));

//===----------------------------------------------------------------------===//
// SimpleValue
//===----------------------------------------------------------------------===//

namespace {

/// Struct representing the available values in the scoped hash table.
struct SimpleValue {
  Instruction *Inst;

  SimpleValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // This can only handle non-void readnone functions.
    if (CallInst *CI = dyn_cast<CallInst>(Inst))
      return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
    return isa<CastInst>(Inst) || isa<BinaryOperator>(Inst) ||
           isa<GetElementPtrInst>(Inst) || isa<CmpInst>(Inst) ||
           isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
           isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
           isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
  }
};

} // end anonymous namespace
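
// Illustration (not from the source): canHandle admits pure, non-void
// expressions, so both of these are SimpleValue candidates:
//   %sum = add i32 %a, %b                ; binary operator
//   %r   = call i32 @f(i32 %a) readnone  ; non-void call that reads no memory
// whereas stores, void calls, and memory-writing calls are rejected.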

namespace llvm {

template <> struct DenseMapInfo<SimpleValue> {
  static inline SimpleValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline SimpleValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(SimpleValue Val);
  static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};

} // end namespace llvm

/// Match a 'select' including an optional 'not' of the condition.
static bool matchSelectWithOptionalNotCond(Value *V, Value *&Cond, Value *&A,
                                           Value *&B,
                                           SelectPatternFlavor &Flavor) {
  // Return false if V is not even a select.
  if (!match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))))
    return false;

  // Look through a 'not' of the condition operand by swapping A/B.
  Value *CondNot;
  if (match(Cond, m_Not(m_Value(CondNot)))) {
    Cond = CondNot;
    std::swap(A, B);
  }

  // Set flavor if we find a match, or set it to unknown otherwise; in
  // either case, return true to indicate that this is a select we can
  // process.
  if (auto *CmpI = dyn_cast<ICmpInst>(Cond))
    Flavor = matchDecomposedSelectPattern(CmpI, A, B, A, B).Flavor;
  else
    Flavor = SPF_UNKNOWN;

  return true;
}
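
// Illustration (not from the source): looking through the 'not' makes these
// two selects equivalent for the hashing and equality routines below:
//   %notc = xor i1 %c, true
//   %s1   = select i1 %notc, i32 %x, i32 %y
//   %s2   = select i1 %c,    i32 %y, i32 %x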

static unsigned getHashValueImpl(SimpleValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
      std::swap(LHS, RHS);

    return hash_combine(BinOp->getOpcode(), LHS, RHS);
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
    // Compares can be commuted by swapping the comparands and
    // updating the predicate.  Choose the form that has the
    // comparands in sorted order, or in the case of a tie, the
    // one with the lower predicate.
    Value *LHS = CI->getOperand(0);
    Value *RHS = CI->getOperand(1);
    CmpInst::Predicate Pred = CI->getPredicate();
    CmpInst::Predicate SwappedPred = CI->getSwappedPredicate();
    if (std::tie(LHS, Pred) > std::tie(RHS, SwappedPred)) {
      std::swap(LHS, RHS);
      Pred = SwappedPred;
    }
    return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
  }

  // Hash general selects to allow matching commuted true/false operands.
  SelectPatternFlavor SPF;
  Value *Cond, *A, *B;
  if (matchSelectWithOptionalNotCond(Inst, Cond, A, B, SPF)) {
    // Hash min/max/abs (cmp + select) to allow for commuted operands.
    // Min/max may also have non-canonical compare predicate (eg, the compare for
    // smin may use 'sgt' rather than 'slt'), and non-canonical operands in the
    // compare.
    // TODO: We should also detect FP min/max.
    if (SPF == SPF_SMIN || SPF == SPF_SMAX ||
        SPF == SPF_UMIN || SPF == SPF_UMAX) {
      if (A > B)
        std::swap(A, B);
      return hash_combine(Inst->getOpcode(), SPF, A, B);
    }
    if (SPF == SPF_ABS || SPF == SPF_NABS) {
      // ABS/NABS always puts the input in A and its negation in B.
      return hash_combine(Inst->getOpcode(), SPF, A, B);
    }

    // Hash general selects to allow matching commuted true/false operands.

    // If we do not have a compare as the condition, just hash in the condition.
    CmpInst::Predicate Pred;
    Value *X, *Y;
    if (!match(Cond, m_Cmp(Pred, m_Value(X), m_Value(Y))))
      return hash_combine(Inst->getOpcode(), Cond, A, B);

    // Similar to cmp normalization (above) - canonicalize the predicate value:
    // select (icmp Pred, X, Y), A, B --> select (icmp InvPred, X, Y), B, A
    if (CmpInst::getInversePredicate(Pred) < Pred) {
      Pred = CmpInst::getInversePredicate(Pred);
      std::swap(A, B);
    }
    return hash_combine(Inst->getOpcode(), Pred, X, Y, A, B);
  }

  if (CastInst *CI = dyn_cast<CastInst>(Inst))
    return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
    return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
                        hash_combine_range(EVI->idx_begin(), EVI->idx_end()));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
    return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
                        IVI->getOperand(1),
                        hash_combine_range(IVI->idx_begin(), IVI->idx_end()));

  assert((isa<CallInst>(Inst) || isa<GetElementPtrInst>(Inst) ||
          isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
          isa<ShuffleVectorInst>(Inst)) &&
         "Invalid/unknown instruction");

  // Mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}
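
// Illustration (not from the source): because commutative operands are
// ordered by pointer value before hashing, 'add i32 %a, %b' and
// 'add i32 %b, %a' hash identically, and 'icmp sgt i32 %b, %a' hashes the
// same as 'icmp slt i32 %a, %b' once the predicate is swapped to match.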

unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
#ifndef NDEBUG
  // If -earlycse-debug-hash was specified, return a constant -- this
  // will force all hashing to collide, so we'll exhaustively search
  // the table for a match, and the assertion in isEqual will fire if
  // there's a bug causing equal keys to hash differently.
  if (EarlyCSEDebugHash)
    return 0;
#endif
  return getHashValueImpl(Val);
}
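
// Illustration (not from the source): one plausible way to exercise that
// checking mode in an assertions-enabled build:
//   opt -early-cse -earlycse-debug-hash input.ll -S -o output.ll
// With every key hashing to 0, lookups degrade to exhaustive searches, and
// the assertion in isEqual (below) fires if equal keys hash differently.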

static bool isEqualImpl(SimpleValue LHS, SimpleValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;

  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  if (LHSI->getOpcode() != RHSI->getOpcode())
    return false;
  if (LHSI->isIdenticalToWhenDefined(RHSI))
    return true;

  // If we're not strictly identical, we still might be a commutable instruction
  if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
    if (!LHSBinOp->isCommutative())
      return false;

    assert(isa<BinaryOperator>(RHSI) &&
           "same opcode, but different instruction type?");
    BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);

    // Commuted equality
    return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
           LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
  }
  if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
    assert(isa<CmpInst>(RHSI) &&
           "same opcode, but different instruction type?");
    CmpInst *RHSCmp = cast<CmpInst>(RHSI);
    // Commuted equality
    return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
           LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
           LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
  }

  // Min/max/abs can occur with commuted operands, non-canonical predicates,
  // and/or non-canonical operands.
  // Selects can be non-trivially equivalent via inverted conditions and swaps.
  SelectPatternFlavor LSPF, RSPF;
  Value *CondL, *CondR, *LHSA, *RHSA, *LHSB, *RHSB;
  if (matchSelectWithOptionalNotCond(LHSI, CondL, LHSA, LHSB, LSPF) &&
      matchSelectWithOptionalNotCond(RHSI, CondR, RHSA, RHSB, RSPF)) {
    if (LSPF == RSPF) {
      // TODO: We should also detect FP min/max.
      if (LSPF == SPF_SMIN || LSPF == SPF_SMAX ||
          LSPF == SPF_UMIN || LSPF == SPF_UMAX)
        return ((LHSA == RHSA && LHSB == RHSB) ||
                (LHSA == RHSB && LHSB == RHSA));

      if (LSPF == SPF_ABS || LSPF == SPF_NABS) {
        // Abs results are placed in a defined order by matchSelectPattern.
        return LHSA == RHSA && LHSB == RHSB;
      }

      // select Cond, A, B <--> select not(Cond), B, A
      if (CondL == CondR && LHSA == RHSA && LHSB == RHSB)
        return true;
    }

    // If the true/false operands are swapped and the conditions are compares
    // with inverted predicates, the selects are equal:
    // select (icmp Pred, X, Y), A, B <--> select (icmp InvPred, X, Y), B, A
    //
    // This also handles patterns with a double-negation in the sense of not +
    // inverse, because we looked through a 'not' in the matching function and
    // swapped A/B:
    // select (cmp Pred, X, Y), A, B <--> select (not (cmp InvPred, X, Y)), B, A
    //
    // This intentionally does NOT handle patterns with a double-negation in
    // the sense of not + not, because doing so could result in values
    // comparing as equal that hash differently in the min/max/abs cases like:
    // select (cmp slt, X, Y), X, Y <--> select (not (not (cmp slt, X, Y))), X, Y
    //   ^ hashes as min                  ^ would not hash as min
    // In the context of the EarlyCSE pass, however, such cases never reach
    // this code, as we simplify the double-negation before hashing the second
    // select (and so still succeed at CSEing them).
    if (LHSA == RHSB && LHSB == RHSA) {
      CmpInst::Predicate PredL, PredR;
      Value *X, *Y;
      if (match(CondL, m_Cmp(PredL, m_Value(X), m_Value(Y))) &&
          match(CondR, m_Cmp(PredR, m_Specific(X), m_Specific(Y))) &&
          CmpInst::getInversePredicate(PredL) == PredR)
        return true;
    }
  }

  return false;
}
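
// Illustration (not from the source): together with the hashing above, this
// equality lets EarlyCSE fold commuted pairs such as
//   %c1 = icmp ult i32 %x, %y
//   %c2 = icmp ugt i32 %y, %x   ; equal to %c1 under the swapped predicate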

bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
  // These comparisons are nontrivial, so assert that equality implies
  // hash equality (DenseMap demands this as an invariant).
  bool Result = isEqualImpl(LHS, RHS);
  assert(!Result || (LHS.isSentinel() && LHS.Inst == RHS.Inst) ||
         getHashValueImpl(LHS) == getHashValueImpl(RHS));
  return Result;
}

//===----------------------------------------------------------------------===//
// CallValue
//===----------------------------------------------------------------------===//

namespace {

/// Struct representing the available call values in the scoped hash
/// table.
struct CallValue {
  Instruction *Inst;

  CallValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // Don't value number anything that returns void.
    if (Inst->getType()->isVoidTy())
      return false;

    CallInst *CI = dyn_cast<CallInst>(Inst);
    if (!CI || !CI->onlyReadsMemory())
      return false;
    return true;
  }
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<CallValue> {
  static inline CallValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline CallValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(CallValue Val);
  static bool isEqual(CallValue LHS, CallValue RHS);
};

} // end namespace llvm

unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash all of the operands as pointers and mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}

bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;
  return LHSI->isIdenticalTo(RHSI);
}
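
// Illustration (not from the source): a repeated read-only call with identical
// operands is a CallValue CSE candidate when no write intervenes:
//   %v1 = call i32 @g(i32* %p) readonly
//   %v2 = call i32 @g(i32* %p) readonly   ; replaceable by %v1 if both sit in
//                                         ; the same memory generation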

//===----------------------------------------------------------------------===//
// EarlyCSE implementation
//===----------------------------------------------------------------------===//

namespace {

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
class EarlyCSE {
public:
  const TargetLibraryInfo &TLI;
  const TargetTransformInfo &TTI;
  DominatorTree &DT;
  AssumptionCache &AC;
  const SimplifyQuery SQ;
  MemorySSA *MSSA;
  std::unique_ptr<MemorySSAUpdater> MSSAUpdater;

  using AllocatorTy =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<SimpleValue, Value *>>;
  using ScopedHTType =
      ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
                      AllocatorTy>;

  /// A scoped hash table of the current values of all of our simple
  /// scalar expressions.
  ///
  /// As we walk down the domtree, we look to see if instructions are in this:
  /// if so, we replace them with what we find, otherwise we insert them so
  /// that dominated values can succeed in their lookup.
  ScopedHTType AvailableValues;

  /// A scoped hash table of the current values of previously encountered
  /// memory locations.
  ///
  /// This allows us to get efficient access to dominating loads or stores when
  /// we have a fully redundant load.  In addition to the most recent load, we
  /// keep track of a generation count of the read, which is compared against
  /// the current generation count.  The current generation count is incremented
  /// after every possibly writing memory operation, which ensures that we only
  /// CSE loads with other loads that have no intervening store.  Ordering
  /// events (such as fences or atomic instructions) increment the generation
  /// count as well; essentially, we model these as writes to all possible
  /// locations.  Note that atomic and/or volatile loads and stores can be
  /// present in the table; it is the responsibility of the consumer to inspect
  /// the atomicity/volatility if needed.
  struct LoadValue {
    Instruction *DefInst = nullptr;
    unsigned Generation = 0;
    int MatchingId = -1;
    bool IsAtomic = false;

    LoadValue() = default;
    LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
              bool IsAtomic)
        : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
          IsAtomic(IsAtomic) {}
  };
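
  // Illustration (not from the source): how the generation count gates load
  // CSE in practice:
  //   %v1 = load i32, i32* %p     ; recorded in AvailableLoads at generation g
  //   call void @h()              ; may write memory -> generation becomes g+1
  //   %v2 = load i32, i32* %p     ; generation mismatch, so not CSE'd unless
  //                               ; MemorySSA can prove @h() is no clobber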

  using LoadMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<Value *, LoadValue>>;
  using LoadHTType =
      ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>,
                      LoadMapAllocator>;

  LoadHTType AvailableLoads;

  // A scoped hash table mapping memory locations (represented as typed
  // addresses) to generation numbers at which that memory location became
  // (henceforth indefinitely) invariant.
  using InvariantMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<MemoryLocation, unsigned>>;
  using InvariantHTType =
      ScopedHashTable<MemoryLocation, unsigned, DenseMapInfo<MemoryLocation>,
                      InvariantMapAllocator>;
  InvariantHTType AvailableInvariants;

  /// A scoped hash table of the current values of read-only call
  /// values.
  ///
  /// It uses the same generation count as loads.
  using CallHTType =
      ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>;
  CallHTType AvailableCalls;

  /// This is the current generation of the memory value.
  unsigned CurrentGeneration = 0;

  /// Set up the EarlyCSE runner for a particular function.
  EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI,
           const TargetTransformInfo &TTI, DominatorTree &DT,
           AssumptionCache &AC, MemorySSA *MSSA)
      : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
        MSSAUpdater(llvm::make_unique<MemorySSAUpdater>(MSSA)) {}

  bool run();

private:
  unsigned ClobberCounter = 0;
  // Almost a POD, but needs to call the constructors for the scoped hash
  // tables so that a new scope gets pushed on. These are RAII so that the
  // scope gets popped when the NodeScope is destroyed.
  class NodeScope {
  public:
    NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls)
      : Scope(AvailableValues), LoadScope(AvailableLoads),
        InvariantScope(AvailableInvariants), CallScope(AvailableCalls) {}
    NodeScope(const NodeScope &) = delete;
    NodeScope &operator=(const NodeScope &) = delete;

  private:
    ScopedHTType::ScopeTy Scope;
    LoadHTType::ScopeTy LoadScope;
    InvariantHTType::ScopeTy InvariantScope;
    CallHTType::ScopeTy CallScope;
  };

  // Contains all the needed information to create a stack for doing a depth
  // first traversal of the tree. This includes scopes for values, loads, and
  // calls as well as the generation. There is a child iterator so that the
  // children do not need to be stored separately.
  class StackNode {
  public:
    StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls,
              unsigned cg, DomTreeNode *n, DomTreeNode::iterator child,
              DomTreeNode::iterator end)
        : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
          EndIter(end),
          Scopes(AvailableValues, AvailableLoads, AvailableInvariants,
                 AvailableCalls)
          {}
    StackNode(const StackNode &) = delete;
    StackNode &operator=(const StackNode &) = delete;

    // Accessors.
    unsigned currentGeneration() { return CurrentGeneration; }
    unsigned childGeneration() { return ChildGeneration; }
    void childGeneration(unsigned generation) { ChildGeneration = generation; }
    DomTreeNode *node() { return Node; }
    DomTreeNode::iterator childIter() { return ChildIter; }

    DomTreeNode *nextChild() {
      DomTreeNode *child = *ChildIter;
      ++ChildIter;
      return child;
    }

    DomTreeNode::iterator end() { return EndIter; }
    bool isProcessed() { return Processed; }
    void process() { Processed = true; }

  private:
    unsigned CurrentGeneration;
    unsigned ChildGeneration;
    DomTreeNode *Node;
    DomTreeNode::iterator ChildIter;
    DomTreeNode::iterator EndIter;
    NodeScope Scopes;
    bool Processed = false;
  };
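
  // Illustration (not part of the pass): the RAII pattern above mirrors plain
  // ScopedHashTable usage, e.g.:
  //   ScopedHashTable<int, int> HT;
  //   {
  //     ScopedHashTable<int, int>::ScopeTy Scope(HT);
  //     HT.insert(1, 10);   // visible here and in nested scopes
  //   }                     // popped automatically when Scope is destroyed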

  /// Wrapper class to handle memory instructions, including loads,
  /// stores and intrinsic loads and stores defined by the target.
  class ParseMemoryInst {
  public:
    ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
      : Inst(Inst) {
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
        if (TTI.getTgtMemIntrinsic(II, Info))
          IsTargetMemInst = true;
    }

    bool isLoad() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return isa<LoadInst>(Inst);
    }

    bool isStore() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return isa<StoreInst>(Inst);
    }

    bool isAtomic() const {
      if (IsTargetMemInst)
        return Info.Ordering != AtomicOrdering::NotAtomic;
      return Inst->isAtomic();
    }

    bool isUnordered() const {
      if (IsTargetMemInst)
        return Info.isUnordered();

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isUnordered();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isUnordered();
      }
      // Conservative answer
      return !Inst->isAtomic();
    }

    bool isVolatile() const {
      if (IsTargetMemInst)
        return Info.IsVolatile;

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isVolatile();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isVolatile();
      }
      // Conservative answer
      return true;
    }

    bool isInvariantLoad() const {
      if (auto *LI = dyn_cast<LoadInst>(Inst))
        return LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
      return false;
    }

    bool isMatchingMemLoc(const ParseMemoryInst &Inst) const {
      return (getPointerOperand() == Inst.getPointerOperand() &&
              getMatchingId() == Inst.getMatchingId());
    }

    bool isValid() const { return getPointerOperand() != nullptr; }

    // For regular (non-intrinsic) loads/stores, this is set to -1. For
    // intrinsic loads/stores, the id is retrieved from the corresponding
    // field in the MemIntrinsicInfo structure.  That field contains
    // non-negative values only.
    int getMatchingId() const {
      if (IsTargetMemInst) return Info.MatchingId;
      return -1;
    }

    Value *getPointerOperand() const {
      if (IsTargetMemInst) return Info.PtrVal;
      return getLoadStorePointerOperand(Inst);
    }

    bool mayReadFromMemory() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return Inst->mayReadFromMemory();
    }

    bool mayWriteToMemory() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return Inst->mayWriteToMemory();
    }

  private:
    bool IsTargetMemInst = false;
    MemIntrinsicInfo Info;
    Instruction *Inst;
  };

  bool processNode(DomTreeNode *Node);

  bool handleBranchCondition(Instruction *CondInst, const BranchInst *BI,
                             const BasicBlock *BB, const BasicBlock *Pred);

  Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
    if (auto *LI = dyn_cast<LoadInst>(Inst))
      return LI;
    if (auto *SI = dyn_cast<StoreInst>(Inst))
      return SI->getValueOperand();
    assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
    return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst),
                                                 ExpectedType);
  }

  /// Return true if the instruction is known to only operate on memory
  /// provably invariant in the given "generation".
  bool isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt);

  bool isSameMemGeneration(unsigned EarlierGeneration, unsigned LaterGeneration,
                           Instruction *EarlierInst, Instruction *LaterInst);

  void removeMSSA(Instruction *Inst) {
    if (!MSSA)
      return;
    if (VerifyMemorySSA)
      MSSA->verifyMemorySSA();
    // Removing a store here can leave MemorySSA in an unoptimized state by
    // creating MemoryPhis that have identical arguments and by creating
    // MemoryUses whose defining access is not an actual clobber. The phi case
    // is handled by MemorySSA when passing OptimizePhis = true to
    // removeMemoryAccess.  The non-optimized MemoryUse case is lazily updated
    // by MemorySSA's getClobberingMemoryAccess.
    MSSAUpdater->removeMemoryAccess(Inst, true);
  }
};

} // end anonymous namespace

/// Determine if the memory referenced by LaterInst is from the same heap
/// version as EarlierInst.
/// This is currently called in two scenarios:
///
///   load p
///   ...
///   load p
///
/// and
///
///   x = load p
///   ...
///   store x, p
///
/// in both cases we want to verify that there are no possible writes to the
/// memory referenced by p between the earlier and later instruction.
bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration,
                                   unsigned LaterGeneration,
                                   Instruction *EarlierInst,
                                   Instruction *LaterInst) {
  // Check the simple memory generation tracking first.
  if (EarlierGeneration == LaterGeneration)
    return true;

  if (!MSSA)
    return false;

  // If MemorySSA has determined that one of EarlierInst or LaterInst does not
  // read/write memory, then we can safely return true here.
  // FIXME: We could be more aggressive when checking doesNotAccessMemory(),
  // onlyReadsMemory(), mayReadFromMemory(), and mayWriteToMemory() in this pass
  // by also checking the MemorySSA MemoryAccess on the instruction.  Initial
  // experiments suggest this isn't worthwhile, at least for C/C++ code compiled
  // with the default optimization pipeline.
  auto *EarlierMA = MSSA->getMemoryAccess(EarlierInst);
  if (!EarlierMA)
    return true;
  auto *LaterMA = MSSA->getMemoryAccess(LaterInst);
  if (!LaterMA)
    return true;

  // Since we know LaterDef dominates LaterInst and EarlierInst dominates
  // LaterInst, if LaterDef dominates EarlierInst then it can't occur between
  // EarlierInst and LaterInst and neither can any other write that potentially
  // clobbers LaterInst.
  MemoryAccess *LaterDef;
  if (ClobberCounter < EarlyCSEMssaOptCap) {
    LaterDef = MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
    ClobberCounter++;
  } else
    LaterDef = LaterMA->getDefiningAccess();

  return MSSA->dominates(LaterDef, EarlierMA);
}
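
// Illustration (not from the source): the clobber-walk budget above can be
// adjusted on the command line, e.g.
//   opt -early-cse-memssa -earlycse-mssa-optimization-cap=100 input.ll -S
// after which the remaining queries fall back to the cheaper, less precise
// defining access.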

bool EarlyCSE::isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt) {
  // A location loaded from with an invariant_load is assumed to *never* change
  // within the visible scope of the compilation.
  if (auto *LI = dyn_cast<LoadInst>(I))
    if (LI->getMetadata(LLVMContext::MD_invariant_load))
      return true;

  auto MemLocOpt = MemoryLocation::getOrNone(I);
  if (!MemLocOpt)
    // "target" intrinsic forms of loads aren't currently known to
    // MemoryLocation::get.  TODO
    return false;
  MemoryLocation MemLoc = *MemLocOpt;
  if (!AvailableInvariants.count(MemLoc))
    return false;

  // Is the generation at which this became invariant older than the
  // current one?
  return AvailableInvariants.lookup(MemLoc) <= GenAt;
}
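
// Illustration (not from the source): a load tagged with invariant metadata,
// such as
//   %v = load i32, i32* %p, !invariant.load !0
// is treated as reading memory that never changes again, so the generation
// comparison is bypassed for it.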

bool EarlyCSE::handleBranchCondition(Instruction *CondInst,
                                     const BranchInst *BI, const BasicBlock *BB,
                                     const BasicBlock *Pred) {
  assert(BI->isConditional() && "Should be a conditional branch!");
  assert(BI->getCondition() == CondInst && "Wrong condition?");
  assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB);
  auto *TorF = (BI->getSuccessor(0) == BB)
                   ? ConstantInt::getTrue(BB->getContext())
                   : ConstantInt::getFalse(BB->getContext());
  auto MatchBinOp = [](Instruction *I, unsigned Opcode) {
    if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(I))
      return BOp->getOpcode() == Opcode;
    return false;
  };
  // If the condition is an AND operation, we can propagate its operands into
  // the true branch. If it is an OR operation, we can propagate them into the
  // false branch.
  unsigned PropagateOpcode =
      (BI->getSuccessor(0) == BB) ? Instruction::And : Instruction::Or;

  bool MadeChanges = false;
  SmallVector<Instruction *, 4> WorkList;
  SmallPtrSet<Instruction *, 4> Visited;
  WorkList.push_back(CondInst);
  while (!WorkList.empty()) {
    Instruction *Curr = WorkList.pop_back_val();

    AvailableValues.insert(Curr, TorF);
    LLVM_DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
                      << Curr->getName() << "' as " << *TorF << " in "
                      << BB->getName() << "\n");
    if (!DebugCounter::shouldExecute(CSECounter)) {
      LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
    } else {
      // Replace all dominated uses with the known value.
      if (unsigned Count = replaceDominatedUsesWith(Curr, TorF, DT,
                                                    BasicBlockEdge(Pred, BB))) {
        NumCSECVP += Count;
        MadeChanges = true;
      }
    }

    if (MatchBinOp(Curr, PropagateOpcode))
      for (auto &Op : cast<BinaryOperator>(Curr)->operands())
        if (Instruction *OPI = dyn_cast<Instruction>(Op))
          if (SimpleValue::canHandle(OPI) && Visited.insert(OPI).second)
            WorkList.push_back(OPI);
  }

  return MadeChanges;
}
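
// Illustration (not from the source): given
//   %c = icmp eq i32 %x, 0
//   br i1 %c, label %then, label %else
// the pass records %c -> true while visiting %then (and %c -> false in %else),
// so dominated uses of %c fold to constants; when the condition is an 'and',
// each operand is also known true in the taken branch and is propagated too.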
5.28M
bool EarlyCSE::processNode(DomTreeNode *Node) {
863
5.28M
  bool Changed = false;
864
5.28M
  BasicBlock *BB = Node->getBlock();
865
5.28M
866
5.28M
  // If this block has a single predecessor, then the predecessor is the parent
867
5.28M
  // of the domtree node and all of the live out memory values are still current
868
5.28M
  // in this block.  If this block has multiple predecessors, then they could
869
5.28M
  // have invalidated the live-out memory values of our parent value.  For now,
870
5.28M
  // just be conservative and invalidate memory if this block has multiple
871
5.28M
  // predecessors.
872
5.28M
  if (!BB->getSinglePredecessor())
873
2.65M
    ++CurrentGeneration;
874
5.28M
875
5.28M
  // If this node has a single predecessor which ends in a conditional branch,
876
5.28M
  // we can infer the value of the branch condition given that we took this
877
5.28M
  // path.  We need the single predecessor to ensure there's not another path
878
5.28M
  // which reaches this block where the condition might hold a different
879
5.28M
  // value.  Since we're adding this to the scoped hash table (like any other
880
5.28M
  // def), it will have been popped if we encounter a future merge block.
881
5.28M
  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
882
2.63M
    auto *BI = dyn_cast<BranchInst>(Pred->getTerminator());
883
2.63M
    if (BI && 
BI->isConditional()2.46M
) {
884
2.44M
      auto *CondInst = dyn_cast<Instruction>(BI->getCondition());
885
2.44M
      if (CondInst && 
SimpleValue::canHandle(CondInst)2.39M
)
886
2.29M
        Changed |= handleBranchCondition(CondInst, BI, BB, Pred);
887
2.44M
    }
888
2.63M
  }
889
5.28M
890
5.28M
  /// LastStore - Keep track of the last non-volatile store that we saw... for
891
5.28M
  /// as long as there in no instruction that reads memory.  If we see a store
892
5.28M
  /// to the same location, we delete the dead store.  This zaps trivial dead
893
5.28M
  /// stores which can occur in bitfield code among other things.
894
5.28M
  Instruction *LastStore = nullptr;
895
5.28M
896
5.28M
  // See if any instructions in the block can be eliminated.  If so, do it.  If
897
5.28M
  // not, add them to AvailableValues.
898
35.9M
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
899
30.6M
    Instruction *Inst = &*I++;
900
30.6M
901
30.6M
    // Dead instructions should just be removed.
902
30.6M
    if (isInstructionTriviallyDead(Inst, &TLI)) {
903
1.00M
      LLVM_DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
904
1.00M
      if (!DebugCounter::shouldExecute(CSECounter)) {
905
0
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
906
0
        continue;
907
0
      }
908
1.00M
      if (!salvageDebugInfo(*Inst))
909
1.00M
        replaceDbgUsesWithUndef(Inst);
910
1.00M
      removeMSSA(Inst);
911
1.00M
      Inst->eraseFromParent();
912
1.00M
      Changed = true;
913
1.00M
      ++NumSimplify;
914
1.00M
      continue;
915
1.00M
    }
916
29.6M
917
29.6M
    // Skip assume intrinsics, they don't really have side effects (although
918
29.6M
    // they're marked as such to ensure preservation of control dependencies),
919
29.6M
    // and this pass will not bother with its removal. However, we should mark
920
29.6M
    // its condition as true for all dominated blocks.
921
29.6M
    if (match(Inst, m_Intrinsic<Intrinsic::assume>())) {
922
94
      auto *CondI =
923
94
          dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0));
924
94
      if (CondI && 
SimpleValue::canHandle(CondI)81
) {
925
81
        LLVM_DEBUG(dbgs() << "EarlyCSE considering assumption: " << *Inst
926
81
                          << '\n');
927
81
        AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
928
81
      } else
929
94
        LLVM_DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n');
930
94
      continue;
931
94
    }
932
29.6M
933
29.6M
    // Skip sideeffect intrinsics, for the same reason as assume intrinsics.
934
29.6M
    if (match(Inst, m_Intrinsic<Intrinsic::sideeffect>())) {
935
2
      LLVM_DEBUG(dbgs() << "EarlyCSE skipping sideeffect: " << *Inst << '\n');
936
2
      continue;
937
2
    }
938
29.6M
939
29.6M
    // We can skip all invariant.start intrinsics since they only read memory,
940
29.6M
    // and we can forward values across it. For invariant starts without
941
29.6M
    // invariant ends, we can use the fact that the invariantness never ends to
942
29.6M
    // start a scope in the current generaton which is true for all future
943
29.6M
    // generations.  Also, we dont need to consume the last store since the
944
29.6M
    // semantics of invariant.start allow us to perform   DSE of the last
945
29.6M
    // store, if there was a store following invariant.start. Consider:
946
29.6M
    //
947
29.6M
    // store 30, i8* p
948
29.6M
    // invariant.start(p)
949
29.6M
    // store 40, i8* p
950
29.6M
    // We can DSE the store to 30, since the store 40 to invariant location p
951
29.6M
    // causes undefined behaviour.
952
29.6M
    if (match(Inst, m_Intrinsic<Intrinsic::invariant_start>())) {
953
263
      // If there are any uses, the scope might end.
954
263
      if (!Inst->use_empty())
955
6
        continue;
956
257
      auto *CI = cast<CallInst>(Inst);
957
257
      MemoryLocation MemLoc = MemoryLocation::getForArgument(CI, 1, TLI);
958
257
      // Don't start a scope if we already have a better one pushed
959
257
      if (!AvailableInvariants.count(MemLoc))
960
255
        AvailableInvariants.insert(MemLoc, CurrentGeneration);
961
257
      continue;
962
257
    }
963
29.6M
964
29.6M
    if (isGuard(Inst)) {
965
76
      if (auto *CondI =
966
68
              dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0))) {
967
68
        if (SimpleValue::canHandle(CondI)) {
968
68
          // Do we already know the actual value of this condition?
969
68
          if (auto *KnownCond = AvailableValues.lookup(CondI)) {
970
68
            // Is the condition known to be true?
971
68
            if (isa<ConstantInt>(KnownCond) &&
972
68
                
cast<ConstantInt>(KnownCond)->isOne()42
) {
973
42
              LLVM_DEBUG(dbgs()
974
42
                         << "EarlyCSE removing guard: " << *Inst << '\n');
975
42
              removeMSSA(Inst);
976
42
              Inst->eraseFromParent();
977
42
              Changed = true;
978
42
              continue;
979
42
            } else
980
26
              // Use the known value if it wasn't true.
981
26
              cast<CallInst>(Inst)->setArgOperand(0, KnownCond);
982
68
          }
983
68
          // The condition we're on guarding here is true for all dominated
984
68
          // locations.
985
68
          AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
986
26
        }
987
68
      }
988
76
989
76
      // Guard intrinsics read all memory, but don't write any memory.
990
76
      // Accordingly, don't update the generation but consume the last store (to
991
76
      // avoid an incorrect DSE).
992
76
      LastStore = nullptr;
993
34
      continue;
994
29.6M
    }
995
29.6M
996
29.6M
    // If the instruction can be simplified (e.g. X+0 = X) then replace it with
997
29.6M
    // its simpler value.
998
29.6M
    if (Value *V = SimplifyInstruction(Inst, SQ)) {
999
266k
      LLVM_DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << "  to: " << *V
1000
266k
                        << '\n');
1001
266k
      if (!DebugCounter::shouldExecute(CSECounter)) {
1002
0
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
1003
266k
      } else {
1004
266k
        bool Killed = false;
1005
266k
        if (!Inst->use_empty()) {
1006
266k
          Inst->replaceAllUsesWith(V);
1007
266k
          Changed = true;
1008
266k
        }
1009
266k
        if (isInstructionTriviallyDead(Inst, &TLI)) {
1010
266k
          removeMSSA(Inst);
1011
266k
          Inst->eraseFromParent();
1012
266k
          Changed = true;
1013
266k
          Killed = true;
1014
266k
        }
1015
266k
        if (Changed)
1016
266k
          ++NumSimplify;
1017
266k
        if (Killed)
1018
266k
          continue;
1019
29.3M
      }
1020
266k
    }
1021
29.3M
1022
29.3M
    // If this is a simple instruction that we can value number, process it.
1023
29.3M
    if (SimpleValue::canHandle(Inst)) {
1024
13.4M
      // See if the instruction has an available value.  If so, use it.
1025
13.4M
      if (Value *V = AvailableValues.lookup(Inst)) {
1026
836k
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << "  to: " << *V
1027
836k
                          << '\n');
1028
836k
        if (!DebugCounter::shouldExecute(CSECounter)) {
1029
0
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
1030
0
          continue;
1031
0
        }
1032
836k
        if (auto *I = dyn_cast<Instruction>(V))
1033
828k
          I->andIRFlags(Inst);
1034
836k
        Inst->replaceAllUsesWith(V);
1035
836k
        removeMSSA(Inst);
1036
836k
        Inst->eraseFromParent();
1037
836k
        Changed = true;
1038
836k
        ++NumCSE;
1039
836k
        continue;
1040
836k
      }
1041
12.5M
1042
12.5M
      // Otherwise, just remember that this value is available.
1043
12.5M
      AvailableValues.insert(Inst, Inst);
1044
12.5M
      continue;
1045
12.5M
    }
1046
15.9M
1047
15.9M
    ParseMemoryInst MemInst(Inst, TTI);
1048
15.9M
    // If this is a non-volatile load, process it.
1049
15.9M
    if (MemInst.isValid() && 
MemInst.isLoad()5.34M
) {
1050
3.23M
      // (conservatively) we can't peak past the ordering implied by this
1051
3.23M
      // operation, but we can add this load to our set of available values
1052
3.23M
      if (MemInst.isVolatile() || 
!MemInst.isUnordered()3.18M
) {
1053
47.7k
        LastStore = nullptr;
1054
47.7k
        ++CurrentGeneration;
1055
47.7k
      }
1056
3.23M
1057
3.23M
      if (MemInst.isInvariantLoad()) {
1058
792
        // If we pass an invariant load, we know that memory location is
1059
792
        // indefinitely constant from the moment of first dereferenceability.
1060
792
        // We conservatively treat the invariant_load as that moment.  If we
1061
792
        // pass a invariant load after already establishing a scope, don't
1062
792
        // restart it since we want to preserve the earliest point seen.
1063
792
        auto MemLoc = MemoryLocation::get(Inst);
1064
792
        if (!AvailableInvariants.count(MemLoc))
1065
691
          AvailableInvariants.insert(MemLoc, CurrentGeneration);
1066
792
      }
1067
3.23M
1068
3.23M
      // If we have an available version of this load, and if it is the right
1069
3.23M
      // generation or the load is known to be from an invariant location,
1070
3.23M
      // replace this instruction.
1071
3.23M
      //
1072
3.23M
      // If either the dominating load or the current load are invariant, then
1073
3.23M
      // we can assume the current load loads the same value as the dominating
1074
3.23M
      // load.
1075
3.23M
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
1076
3.23M
      if (InVal.DefInst != nullptr &&
1077
3.23M
          
InVal.MatchingId == MemInst.getMatchingId()657k
&&
1078
3.23M
          // We don't yet handle removing loads with ordering of any kind.
1079
3.23M
          
!MemInst.isVolatile()657k
&&
MemInst.isUnordered()638k
&&
1080
3.23M
          // We can't replace an atomic load with one which isn't also atomic.
1081
3.23M
          
InVal.IsAtomic >= MemInst.isAtomic()638k
&&
1082
3.23M
          
(638k
isOperatingOnInvariantMemAt(Inst, InVal.Generation)638k
||
1083
638k
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
1084
638k
                               InVal.DefInst, Inst))) {
1085
184k
        Value *Op = getOrCreateResult(InVal.DefInst, Inst->getType());
1086
184k
        if (Op != nullptr) {
1087
184k
          LLVM_DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst
1088
184k
                            << "  to: " << *InVal.DefInst << '\n');
1089
184k
          if (!DebugCounter::shouldExecute(CSECounter)) {
1090
0
            LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
1091
0
            continue;
1092
0
          }
1093
184k
          if (!Inst->use_empty())
1094
184k
            Inst->replaceAllUsesWith(Op);
1095
184k
          removeMSSA(Inst);
1096
184k
          Inst->eraseFromParent();
1097
184k
          Changed = true;
1098
184k
          ++NumCSELoad;
1099
184k
          continue;
1100
184k
        }
1101
184k
      }
1102
3.04M
1103
3.04M
      // Otherwise, remember that we have this instruction.
1104
3.04M
      AvailableLoads.insert(
1105
3.04M
          MemInst.getPointerOperand(),
1106
3.04M
          LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
1107
3.04M
                    MemInst.isAtomic()));
1108
3.04M
      LastStore = nullptr;
1109
3.04M
      continue;
1110
3.04M
    }
1111
12.7M
1112
12.7M
    // If this instruction may read from memory or throw (and potentially read
1113
12.7M
    // from memory in the exception handler), forget LastStore.  Load/store
1114
12.7M
    // intrinsics will indicate both a read and a write to memory.  The target
1115
12.7M
    // may override this (e.g. so that a store intrinsic does not read from
1116
12.7M
    // memory, and thus will be treated the same as a regular store for
1117
12.7M
    // commoning purposes).
1118
12.7M
    if ((Inst->mayReadFromMemory() || 
Inst->mayThrow()8.66M
) &&
1119
12.7M
        
!(4.06M
MemInst.isValid()4.06M
&&
!MemInst.mayReadFromMemory()46.0k
))
1120
4.06M
      LastStore = nullptr;
1121
12.7M
1122
12.7M
    // If this is a read-only call, process it.
1123
12.7M
    if (CallValue::canHandle(Inst)) {
1124
46.3k
      // If we have an available version of this call, and if it is the right
1125
46.3k
      // generation, replace this instruction.
1126
46.3k
      std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(Inst);
1127
46.3k
      if (InVal.first != nullptr &&
1128
46.3k
          isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first,
1129
2.02k
                              Inst)) {
1130
99
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst
1131
99
                          << "  to: " << *InVal.first << '\n');
1132
99
        if (!DebugCounter::shouldExecute(CSECounter)) {
1133
0
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
1134
0
          continue;
1135
0
        }
1136
99
        if (!Inst->use_empty())
1137
98
          Inst->replaceAllUsesWith(InVal.first);
1138
99
        removeMSSA(Inst);
1139
99
        Inst->eraseFromParent();
1140
99
        Changed = true;
1141
99
        ++NumCSECall;
1142
99
        continue;
1143
99
      }
1144
46.2k
1145
46.2k
      // Otherwise, remember that we have this instruction.
1146
46.2k
      AvailableCalls.insert(
1147
46.2k
          Inst, std::pair<Instruction *, unsigned>(Inst, CurrentGeneration));
1148
46.2k
      continue;
1149
46.2k
    }
1150
12.6M
1151
12.6M
    // A release fence requires that all stores complete before it, but does
1152
12.6M
    // not prevent the reordering of following loads 'before' the fence.  As a
1153
12.6M
    // result, we don't need to consider it as writing to memory and don't need
1154
12.6M
    // to advance the generation.  We do need to prevent DSE across the fence,
1155
12.6M
    // but that's handled above.
1156
12.6M
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
1157
14.5k
      if (FI->getOrdering() == AtomicOrdering::Release) {
1158
857
        assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above");
1159
857
        continue;
1160
857
      }
1161
12.6M
1162
12.6M
    // write back DSE - If we write back the same value we just loaded from
1163
12.6M
    // the same location and haven't passed any intervening writes or ordering
1164
12.6M
    // operations, we can remove the write.  The primary benefit is in allowing
1165
12.6M
    // the available load table to remain valid and value forward past where
1166
12.6M
    // the store originally was.
1167
12.6M
    if (MemInst.isValid() && 
MemInst.isStore()2.10M
) {
1168
2.10M
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
1169
2.10M
      if (InVal.DefInst &&
1170
2.10M
          
InVal.DefInst == getOrCreateResult(Inst, InVal.DefInst->getType())489k
&&
1171
2.10M
          
InVal.MatchingId == MemInst.getMatchingId()12.6k
&&
1172
2.10M
          // We don't yet handle removing stores with ordering of any kind.
1173
2.10M
          
!MemInst.isVolatile()12.5k
&&
MemInst.isUnordered()12.4k
&&
1174
2.10M
          
(12.4k
isOperatingOnInvariantMemAt(Inst, InVal.Generation)12.4k
||
1175
12.4k
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
1176
12.4k
                               InVal.DefInst, Inst))) {
1177
6.39k
        // It is okay to have a LastStore to a different pointer here if MemorySSA
1178
6.39k
        // tells us that the load and store are from the same memory generation.
1179
6.39k
        // In that case, LastStore should keep its present value since we're
1180
6.39k
        // removing the current store.
1181
6.39k
        assert((!LastStore ||
1182
6.39k
                ParseMemoryInst(LastStore, TTI).getPointerOperand() ==
1183
6.39k
                    MemInst.getPointerOperand() ||
1184
6.39k
                MSSA) &&
1185
6.39k
               "can't have an intervening store if not using MemorySSA!");
1186
6.39k
        LLVM_DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << *Inst << '\n');
1187
6.39k
        if (!DebugCounter::shouldExecute(CSECounter)) {
1188
0
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
1189
0
          continue;
1190
0
        }
1191
6.39k
        removeMSSA(Inst);
1192
6.39k
        Inst->eraseFromParent();
1193
6.39k
        Changed = true;
1194
6.39k
        ++NumDSE;
1195
6.39k
        // We can avoid incrementing the generation count since we were able
1196
6.39k
        // to eliminate this store.
1197
6.39k
        continue;
1198
6.39k
      }
1199
2.10M
    }
1200
12.6M
1201
12.6M
    // Okay, this isn't something we can CSE at all.  Check to see if it is
1202
12.6M
    // something that could modify memory.  If so, our available memory values
1203
12.6M
    // cannot be used so bump the generation count.
1204
12.6M
    if (Inst->mayWriteToMemory()) {
1205
6.06M
      ++CurrentGeneration;
1206
6.06M
1207
6.06M
      if (MemInst.isValid() && 
MemInst.isStore()2.10M
) {
1208
2.10M
        // We do a trivial form of DSE if there are two stores to the same
1209
2.10M
        // location with no intervening loads.  Delete the earlier store.
1210
2.10M
        // At the moment, we don't remove ordered stores, but do remove
1211
2.10M
        // unordered atomic stores.  There's no special requirement (for
1212
2.10M
        // unordered atomics) about removing atomic stores only in favor of
1213
2.10M
        // other atomic stores since we were going to execute the non-atomic
1214
2.10M
        // one anyway and the atomic one might never have become visible.
1215
2.10M
        if (LastStore) {
1216
857k
          ParseMemoryInst LastStoreMemInst(LastStore, TTI);
1217
857k
          assert(LastStoreMemInst.isUnordered() &&
1218
857k
                 !LastStoreMemInst.isVolatile() &&
1219
857k
                 "Violated invariant");
1220
857k
          if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
1221
2.89k
            LLVM_DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
1222
2.89k
                              << "  due to: " << *Inst << '\n');
1223
2.89k
            if (!DebugCounter::shouldExecute(CSECounter)) {
1224
0
              LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
1225
2.89k
            } else {
1226
2.89k
              removeMSSA(LastStore);
1227
2.89k
              LastStore->eraseFromParent();
1228
2.89k
              Changed = true;
1229
2.89k
              ++NumDSE;
1230
2.89k
              LastStore = nullptr;
1231
2.89k
            }
1232
2.89k
          }
1233
857k
          // fallthrough - we can exploit information about this store
1234
857k
        }
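A minimal sketch of the store/store form removed here (illustrative):

// Hypothetical: the first store is dead -- same location, no intervening
// load -- so the remembered LastStore is erased when the second is seen.
void overwrite(int *p) {
  *p = 1;   // deleted as a trivially dead store
  *p = 2;   // survives and becomes the new LastStore candidate
}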
1235
2.10M
1236
2.10M
        // Okay, we just invalidated anything we knew about loaded values.  Try
1237
2.10M
        // to salvage *something* by remembering that the stored value is a live
1238
2.10M
        // version of the pointer.  It is safe to forward from volatile stores
1239
2.10M
        // to non-volatile loads, so we don't have to check for volatility of
1240
2.10M
        // the store.
1241
2.10M
        AvailableLoads.insert(
1242
2.10M
            MemInst.getPointerOperand(),
1243
2.10M
            LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
1244
2.10M
                      MemInst.isAtomic()));
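The insertion above is what enables plain store-to-load forwarding; a minimal sketch (illustrative):

int forward(int *p) {
  *p = 7;      // stored value recorded in AvailableLoads
  return *p;   // load CSE'd to 7 without touching memory
}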
1245
2.10M
1246
2.10M
        // Remember that this was the last unordered store we saw for DSE. We
1247
2.10M
        // don't yet handle DSE on ordered or volatile stores since we don't
1248
2.10M
        // have a good way to model the ordering requirement for following
1249
2.10M
        // passes once the store is removed.  We could insert a fence, but
1250
2.10M
        // since fences are slightly stronger than stores in their ordering,
1251
2.10M
        // it's not clear this is a profitable transform. Another option would
1252
2.10M
        // be to merge the ordering with that of the post dominating store.
1253
2.10M
        if (MemInst.isUnordered() && !MemInst.isVolatile())
1254
2.05M
          LastStore = Inst;
1255
45.4k
        else
1256
45.4k
          LastStore = nullptr;
1257
2.10M
      }
1258
6.06M
    }
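A minimal sketch of why the ++CurrentGeneration at the top of this block matters (illustrative; 'opaque' stands for any may-write call):

void clobber(int *p, void (*opaque)(void)) {
  int a = *p;   // available at the current generation
  opaque();     // may write to memory: the generation is bumped
  int b = *p;   // newer generation: not replaced by 'a'
  (void)a; (void)b;
}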
1259
12.6M
  }
1260
5.28M
1261
5.28M
  return Changed;
1262
5.28M
}
1263
1264
1.15M
bool EarlyCSE::run() {
1265
1.15M
  // Note: deque is being used here because there are significant performance
1266
1.15M
  // gains over vector when the container becomes very large due to the
1267
1.15M
  // specific access patterns. For more information see the mailing list
1268
1.15M
  // discussion on this:
1269
1.15M
  // http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
1270
1.15M
  std::deque<StackNode *> nodesToProcess;
1271
1.15M
1272
1.15M
  bool Changed = false;
1273
1.15M
1274
1.15M
  // Process the root node.
1275
1.15M
  nodesToProcess.push_back(new StackNode(
1276
1.15M
      AvailableValues, AvailableLoads, AvailableInvariants, AvailableCalls,
1277
1.15M
      CurrentGeneration, DT.getRootNode(),
1278
1.15M
      DT.getRootNode()->begin(), DT.getRootNode()->end()));
1279
1.15M
1280
1.15M
  assert(!CurrentGeneration && "Create a new EarlyCSE instance to rerun it.");
1281
1.15M
1282
1.15M
  // Process the stack.
1283
15.8M
  while (!nodesToProcess.empty()) {
1284
14.7M
    // Look at the node on top of the stack. Set the current generation,
1285
14.7M
    // then either process it, descend into the next child, or pop it.
1286
14.7M
    StackNode *NodeToProcess = nodesToProcess.back();
1287
14.7M
1288
14.7M
    // Initialize class members.
1289
14.7M
    CurrentGeneration = NodeToProcess->currentGeneration();
1290
14.7M
1291
14.7M
    // Check if the node needs to be processed.
1292
14.7M
    if (!NodeToProcess->isProcessed()) {
1293
5.28M
      // Process the node.
1294
5.28M
      Changed |= processNode(NodeToProcess->node());
1295
5.28M
      NodeToProcess->childGeneration(CurrentGeneration);
1296
5.28M
      NodeToProcess->process();
1297
9.41M
    } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
1298
4.13M
      // Push the next child onto the stack.
1299
4.13M
      DomTreeNode *child = NodeToProcess->nextChild();
1300
4.13M
      nodesToProcess.push_back(
1301
4.13M
          new StackNode(AvailableValues, AvailableLoads, AvailableInvariants,
1302
4.13M
                        AvailableCalls, NodeToProcess->childGeneration(),
1303
4.13M
                        child, child->begin(), child->end()));
1304
5.28M
    } else {
1305
5.28M
      // It has been processed, and there are no more children to process,
1306
5.28M
      // so delete it and pop it off the stack.
1307
5.28M
      delete NodeToProcess;
1308
5.28M
      nodesToProcess.pop_back();
1309
5.28M
    }
1310
14.7M
  } // while (!nodes...)
1311
1.15M
1312
1.15M
  return Changed;
1313
1.15M
}
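The loop above is a hand-rolled depth-first walk; the same shape in a self-contained sketch (illustrative Node type, not LLVM's DomTreeNode):

#include <cstdio>
#include <deque>
#include <vector>

struct Node { int Id; std::vector<Node *> Children; };

// Each frame is touched once to process its block, revisited to push each
// child in turn, and finally popped -- the point at which EarlyCSE tears
// down the scoped hash tables for that subtree.
void domTreeWalk(Node *Root) {
  struct Frame { Node *N; size_t NextChild = 0; bool Processed = false; };
  std::deque<Frame> Stack;
  Stack.push_back({Root});
  while (!Stack.empty()) {
    Frame &F = Stack.back();
    if (!F.Processed) {
      std::printf("process %d\n", F.N->Id);  // processNode() analogue
      F.Processed = true;
    } else if (F.NextChild != F.N->Children.size()) {
      Stack.push_back({F.N->Children[F.NextChild++]});
    } else {
      Stack.pop_back();  // subtree finished: this node's scope ends
    }
  }
}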
1314
1315
PreservedAnalyses EarlyCSEPass::run(Function &F,
1316
2.40k
                                    FunctionAnalysisManager &AM) {
1317
2.40k
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1318
2.40k
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
1319
2.40k
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
1320
2.40k
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
1321
2.40k
  auto *MSSA =
1322
2.40k
      UseMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() : nullptr;
1323
2.40k
1324
2.40k
  EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);
1325
2.40k
1326
2.40k
  if (!CSE.run())
1327
1.91k
    return PreservedAnalyses::all();
1328
489
1329
489
  PreservedAnalyses PA;
1330
489
  PA.preserveSet<CFGAnalyses>();
1331
489
  PA.preserve<GlobalsAA>();
1332
489
  if (UseMemorySSA)
1333
61
    PA.preserve<MemorySSAAnalysis>();
1334
489
  return PA;
1335
489
}
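For reference, a minimal driver for this entry point (a sketch; runEarlyCSEOnce is a hypothetical helper, with analyses registered the standard way):

#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Transforms/Scalar/EarlyCSE.h"

// Hypothetical helper: build a function analysis stack and run EarlyCSEPass.
static bool runEarlyCSEOnce(llvm::Function &F, bool UseMemorySSA) {
  llvm::PassBuilder PB;
  llvm::FunctionAnalysisManager FAM;
  PB.registerFunctionAnalyses(FAM);  // supplies TLI, TTI, DT, AC, MemorySSA
  llvm::FunctionPassManager FPM;
  FPM.addPass(llvm::EarlyCSEPass(UseMemorySSA));
  return !FPM.run(F, FAM).areAllPreserved();
}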
1336
1337
namespace {
1338
1339
/// A simple and fast domtree-based CSE pass.
1340
///
1341
/// This pass does a simple depth-first walk over the dominator tree,
1342
/// eliminating trivially redundant instructions and using instsimplify to
1343
/// canonicalize things as it goes. It is intended to be fast and catch obvious
1344
/// cases so that instcombine and other passes are more effective. It is
1345
/// expected that a later pass of GVN will catch the interesting/hard cases.
1346
template<bool UseMemorySSA>
1347
class EarlyCSELegacyCommonPass : public FunctionPass {
1348
public:
1349
  static char ID;
1350
1351
36.1k
  EarlyCSELegacyCommonPass() : FunctionPass(ID) {
1352
36.1k
    if (UseMemorySSA)
1353
12.9k
      initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry());
1354
23.1k
    else
1355
23.1k
      initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
1356
36.1k
  }
EarlyCSE.cpp:(anonymous namespace)::EarlyCSELegacyCommonPass<true>::EarlyCSELegacyCommonPass()
Line
Count
Source
1351
12.9k
  EarlyCSELegacyCommonPass() : FunctionPass(ID) {
1352
12.9k
    if (UseMemorySSA)
1353
12.9k
      initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry());
1354
0
    else
1355
0
      initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
1356
12.9k
  }
EarlyCSE.cpp:(anonymous namespace)::EarlyCSELegacyCommonPass<false>::EarlyCSELegacyCommonPass()
Line
Count
Source
1351
23.1k
  EarlyCSELegacyCommonPass() : FunctionPass(ID) {
1352
23.1k
    if (UseMemorySSA)
1353
0
      initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry());
1354
23.1k
    else
1355
23.1k
      initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
1356
23.1k
  }
1357
1358
1.14M
  bool runOnFunction(Function &F) override {
1359
1.14M
    if (skipFunction(F))
1360
120
      return false;
1361
1.14M
1362
1.14M
    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
1363
1.14M
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1364
1.14M
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1365
1.14M
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1366
1.14M
    auto *MSSA =
1367
1.14M
        UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr;
1368
1.14M
1369
1.14M
    EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);
1370
1.14M
1371
1.14M
    return CSE.run();
1372
1.14M
  }
EarlyCSE.cpp:(anonymous namespace)::EarlyCSELegacyCommonPass<true>::runOnFunction(llvm::Function&)
Line
Count
Source
1358
465k
  bool runOnFunction(Function &F) override {
1359
465k
    if (skipFunction(F))
1360
44
      return false;
1361
465k
1362
465k
    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
1363
465k
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1364
465k
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1365
465k
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1366
465k
    auto *MSSA =
1367
18.4E
        UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr;
1368
465k
1369
465k
    EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);
1370
465k
1371
465k
    return CSE.run();
1372
465k
  }
EarlyCSE.cpp:(anonymous namespace)::EarlyCSELegacyCommonPass<false>::runOnFunction(llvm::Function&)
Line
Count
Source
1358
683k
  bool runOnFunction(Function &F) override {
1359
683k
    if (skipFunction(F))
1360
76
      return false;
1361
683k
1362
683k
    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
1363
683k
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1364
683k
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1365
683k
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1366
683k
    auto *MSSA =
1367
683k
        UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr;
1368
683k
1369
683k
    EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);
1370
683k
1371
683k
    return CSE.run();
1372
683k
  }
1373
1374
36.0k
  void getAnalysisUsage(AnalysisUsage &AU) const override {
1375
36.0k
    AU.addRequired<AssumptionCacheTracker>();
1376
36.0k
    AU.addRequired<DominatorTreeWrapperPass>();
1377
36.0k
    AU.addRequired<TargetLibraryInfoWrapperPass>();
1378
36.0k
    AU.addRequired<TargetTransformInfoWrapperPass>();
1379
36.0k
    if (UseMemorySSA) {
1380
12.9k
      AU.addRequired<MemorySSAWrapperPass>();
1381
12.9k
      AU.addPreserved<MemorySSAWrapperPass>();
1382
12.9k
    }
1383
36.0k
    AU.addPreserved<GlobalsAAWrapperPass>();
1384
36.0k
    AU.setPreservesCFG();
1385
36.0k
  }
EarlyCSE.cpp:(anonymous namespace)::EarlyCSELegacyCommonPass<true>::getAnalysisUsage(llvm::AnalysisUsage&) const
Line
Count
Source
1374
12.9k
  void getAnalysisUsage(AnalysisUsage &AU) const override {
1375
12.9k
    AU.addRequired<AssumptionCacheTracker>();
1376
12.9k
    AU.addRequired<DominatorTreeWrapperPass>();
1377
12.9k
    AU.addRequired<TargetLibraryInfoWrapperPass>();
1378
12.9k
    AU.addRequired<TargetTransformInfoWrapperPass>();
1379
12.9k
    if (UseMemorySSA) {
1380
12.9k
      AU.addRequired<MemorySSAWrapperPass>();
1381
12.9k
      AU.addPreserved<MemorySSAWrapperPass>();
1382
12.9k
    }
1383
12.9k
    AU.addPreserved<GlobalsAAWrapperPass>();
1384
12.9k
    AU.setPreservesCFG();
1385
12.9k
  }
EarlyCSE.cpp:(anonymous namespace)::EarlyCSELegacyCommonPass<false>::getAnalysisUsage(llvm::AnalysisUsage&) const
Line
Count
Source
1374
23.0k
  void getAnalysisUsage(AnalysisUsage &AU) const override {
1375
23.0k
    AU.addRequired<AssumptionCacheTracker>();
1376
23.0k
    AU.addRequired<DominatorTreeWrapperPass>();
1377
23.0k
    AU.addRequired<TargetLibraryInfoWrapperPass>();
1378
23.0k
    AU.addRequired<TargetTransformInfoWrapperPass>();
1379
23.0k
    if (UseMemorySSA) {
1380
0
      AU.addRequired<MemorySSAWrapperPass>();
1381
0
      AU.addPreserved<MemorySSAWrapperPass>();
1382
0
    }
1383
23.0k
    AU.addPreserved<GlobalsAAWrapperPass>();
1384
23.0k
    AU.setPreservesCFG();
1385
23.0k
  }
1386
};
1387
1388
} // end anonymous namespace
1389
1390
using EarlyCSELegacyPass = EarlyCSELegacyCommonPass</*UseMemorySSA=*/false>;
1391
1392
template<>
1393
char EarlyCSELegacyPass::ID = 0;
1394
1395
48.4k
INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
1396
48.4k
                      false)
1397
48.4k
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
1398
48.4k
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1399
48.4k
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1400
48.4k
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1401
48.4k
INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)
1402
1403
using EarlyCSEMemSSALegacyPass =
1404
    EarlyCSELegacyCommonPass</*UseMemorySSA=*/true>;
1405
1406
template<>
1407
char EarlyCSEMemSSALegacyPass::ID = 0;
1408
1409
36.0k
FunctionPass *llvm::createEarlyCSEPass(bool UseMemorySSA) {
1410
36.0k
  if (UseMemorySSA)
1411
12.9k
    return new EarlyCSEMemSSALegacyPass();
1412
23.1k
  else
1413
23.1k
    return new EarlyCSELegacyPass();
1414
36.0k
}
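And the matching legacy-pass-manager usage of this factory (a sketch; addEarlyCSE is a hypothetical helper):

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Transforms/Scalar.h"

// true selects EarlyCSEMemSSALegacyPass, false the plain EarlyCSELegacyPass.
void addEarlyCSE(llvm::legacy::FunctionPassManager &FPM, bool UseMemorySSA) {
  FPM.add(llvm::createEarlyCSEPass(UseMemorySSA));
}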
1415
1416
48.6k
INITIALIZE_PASS_BEGIN(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
1417
48.6k
                      "Early CSE w/ MemorySSA", false, false)
1418
48.6k
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
1419
48.6k
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1420
48.6k
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1421
48.6k
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1422
48.6k
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
1423
48.6k
INITIALIZE_PASS_END(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
1424
                    "Early CSE w/ MemorySSA", false, false)