Coverage Report

Created: 2019-07-24 05:18

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
Line
Count
Source
1
//===- InstCombineInternal.h - InstCombine pass internals -------*- C++ -*-===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
/// \file
10
///
11
/// This file provides internal interfaces used to implement the InstCombine pass.
12
//
13
//===----------------------------------------------------------------------===//
14
15
#ifndef LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
16
#define LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
17
18
#include "llvm/ADT/ArrayRef.h"
19
#include "llvm/Analysis/AliasAnalysis.h"
20
#include "llvm/Analysis/InstructionSimplify.h"
21
#include "llvm/Analysis/TargetFolder.h"
22
#include "llvm/Analysis/ValueTracking.h"
23
#include "llvm/IR/Argument.h"
24
#include "llvm/IR/BasicBlock.h"
25
#include "llvm/IR/Constant.h"
26
#include "llvm/IR/Constants.h"
27
#include "llvm/IR/DerivedTypes.h"
28
#include "llvm/IR/IRBuilder.h"
29
#include "llvm/IR/InstVisitor.h"
30
#include "llvm/IR/InstrTypes.h"
31
#include "llvm/IR/Instruction.h"
32
#include "llvm/IR/IntrinsicInst.h"
33
#include "llvm/IR/Intrinsics.h"
34
#include "llvm/IR/PatternMatch.h"
35
#include "llvm/IR/Use.h"
36
#include "llvm/IR/Value.h"
37
#include "llvm/Support/Casting.h"
38
#include "llvm/Support/Compiler.h"
39
#include "llvm/Support/Debug.h"
40
#include "llvm/Support/KnownBits.h"
41
#include "llvm/Support/raw_ostream.h"
42
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
43
#include "llvm/Transforms/Utils/Local.h"
44
#include <cassert>
45
#include <cstdint>
46
47
#define DEBUG_TYPE "instcombine"
48
49
using namespace llvm::PatternMatch;
50
51
namespace llvm {
52
53
class APInt;
54
class AssumptionCache;
55
class BlockFrequencyInfo;
56
class DataLayout;
57
class DominatorTree;
58
class GEPOperator;
59
class GlobalVariable;
60
class LoopInfo;
61
class OptimizationRemarkEmitter;
62
class ProfileSummaryInfo;
63
class TargetLibraryInfo;
64
class User;
65
66
/// Assign a complexity or rank value to LLVM Values. This is used to reduce
67
/// the amount of pattern matching needed for compares and commutative
68
/// instructions. For example, if we have:
69
///   icmp ugt X, Constant
70
/// or
71
///   xor (add X, Constant), cast Z
72
///
73
/// We do not have to consider the commuted variants of these patterns because
74
/// canonicalization based on complexity guarantees the above ordering.
75
///
76
/// This routine maps IR values to various complexity ranks:
77
///   0 -> undef
78
///   1 -> Constants
79
///   2 -> Other non-instructions
80
///   3 -> Arguments
81
///   4 -> Cast and (f)neg/not instructions
82
///   5 -> Other instructions
83
52.4M
static inline unsigned getComplexity(Value *V) {
84
52.4M
  if (isa<Instruction>(V)) {
85
31.9M
    if (isa<CastInst>(V) || match(V, m_Neg(m_Value())) ||   (region counts: 28.9M)
86
31.9M
        match(V, m_Not(m_Value())) || match(V, m_FNeg(m_Value())))   (region counts: 28.8M, 28.7M)
87
3.22M
      return 4;
88
28.7M
    return 5;
89
28.7M
  }
90
20.5M
  if (isa<Argument>(V))
91
2.93M
    return 3;
92
17.5M
  return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;   (region counts: 0 taken 11, 1 taken 17.5M, 2 taken 0)
93
17.5M
}
InstructionCombining.cpp:llvm::getComplexity(llvm::Value*)
Line
Count
Source
83
21.7M
static inline unsigned getComplexity(Value *V) {
84
21.7M
  if (isa<Instruction>(V)) {
85
14.7M
    if (isa<CastInst>(V) || match(V, m_Neg(m_Value())) ||   (region counts: 13.0M)
86
14.7M
        match(V, m_Not(m_Value())) || match(V, m_FNeg(m_Value())))   (region counts: 13.0M, 12.9M)
87
1.86M
      return 4;
88
12.9M
    return 5;
89
12.9M
  }
90
6.96M
  if (isa<Argument>(V))
91
607k
    return 3;
92
18.4E
  return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;   (region counts: isa<Constant> 6.35M, isa<UndefValue> 6.35M, 0 taken 0, 1 taken 6.35M)
93
6.35M
}
Unexecuted instantiation: InstCombineAddSub.cpp:llvm::getComplexity(llvm::Value*)
Unexecuted instantiation: InstCombineAtomicRMW.cpp:llvm::getComplexity(llvm::Value*)
Unexecuted instantiation: InstCombineAndOrXor.cpp:llvm::getComplexity(llvm::Value*)
Unexecuted instantiation: InstCombineCalls.cpp:llvm::getComplexity(llvm::Value*)
Unexecuted instantiation: InstCombineCasts.cpp:llvm::getComplexity(llvm::Value*)
InstCombineCompares.cpp:llvm::getComplexity(llvm::Value*)
Line
Count
Source
83
30.7M
static inline unsigned getComplexity(Value *V) {
84
30.7M
  if (isa<Instruction>(V)) {
85
17.1M
    if (isa<CastInst>(V) || match(V, m_Neg(m_Value())) ||   (region counts: 15.8M)
86
17.1M
        match(V, m_Not(m_Value())) || match(V, m_FNeg(m_Value())))   (region counts: 15.8M, 15.8M)
87
1.36M
      return 4;
88
15.8M
    return 5;
89
15.8M
  }
90
13.5M
  if (isa<Argument>(V))
91
2.32M
    return 3;
92
11.2M
  return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;   (region counts: isa<UndefValue> 11.2M, 0 taken 11, 1 taken 11.2M, 2 taken 1)
93
11.2M
}
Unexecuted instantiation: InstCombineLoadStoreAlloca.cpp:llvm::getComplexity(llvm::Value*)
Unexecuted instantiation: InstCombineMulDivRem.cpp:llvm::getComplexity(llvm::Value*)
Unexecuted instantiation: InstCombinePHI.cpp:llvm::getComplexity(llvm::Value*)
Unexecuted instantiation: InstCombineSelect.cpp:llvm::getComplexity(llvm::Value*)
Unexecuted instantiation: InstCombineShifts.cpp:llvm::getComplexity(llvm::Value*)
Unexecuted instantiation: InstCombineSimplifyDemanded.cpp:llvm::getComplexity(llvm::Value*)
Unexecuted instantiation: InstCombineVectorOps.cpp:llvm::getComplexity(llvm::Value*)
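The ranking above is what lets InstCombine skip the commuted variants: a standalone sketch of the same idea follows (the Kind enum and rankOf() are illustrative stand-ins for llvm::Value, not LLVM API) showing how a commutative fold can assume the higher-ranked operand sits on the left.

#include <cassert>
#include <utility>

enum class Kind { Undef, Constant, OtherNonInst, Argument, CastOrNegNot, OtherInst };

// Mirrors the rank table documented above.
static unsigned rankOf(Kind K) {
  switch (K) {
  case Kind::Undef:        return 0;
  case Kind::Constant:     return 1;
  case Kind::OtherNonInst: return 2;
  case Kind::Argument:     return 3;
  case Kind::CastOrNegNot: return 4;
  case Kind::OtherInst:    return 5;
  }
  return 0;
}

int main() {
  // For a commutative op such as 'add', the higher-ranked operand is placed
  // first, so "add Constant, X" is rewritten as "add X, Constant" and later
  // folds only need to match the constant-on-the-right form.
  Kind LHS = Kind::Constant, RHS = Kind::OtherInst;
  if (rankOf(LHS) < rankOf(RHS))
    std::swap(LHS, RHS);
  assert(LHS == Kind::OtherInst && RHS == Kind::Constant);
  return 0;
}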
94
95
/// Predicate canonicalization reduces the number of patterns that need to be
96
/// matched by other transforms. For example, we may swap the operands of a
97
/// conditional branch or select to create a compare with a canonical (inverted)
98
/// predicate which is then more likely to be matched with other values.
99
12.8M
static inline bool isCanonicalPredicate(CmpInst::Predicate Pred) {
100
12.8M
  switch (Pred) {
101
12.8M
  case CmpInst::ICMP_NE:
102
232k
  case CmpInst::ICMP_ULE:
103
232k
  case CmpInst::ICMP_SLE:
104
232k
  case CmpInst::ICMP_UGE:
105
232k
  case CmpInst::ICMP_SGE:
106
232k
  // TODO: There are 16 FCMP predicates. Should others be (not) canonical?
107
232k
  case CmpInst::FCMP_ONE:
108
232k
  case CmpInst::FCMP_OLE:
109
232k
  case CmpInst::FCMP_OGE:
110
232k
    return false;
111
12.6M
  default:
112
12.6M
    return true;
113
12.8M
  }
114
12.8M
}
InstructionCombining.cpp:llvm::isCanonicalPredicate(llvm::CmpInst::Predicate)
Line
Count
Source
99
12.0M
static inline bool isCanonicalPredicate(CmpInst::Predicate Pred) {
100
12.0M
  switch (Pred) {
101
12.0M
  case CmpInst::ICMP_NE:
102
229k
  case CmpInst::ICMP_ULE:
103
229k
  case CmpInst::ICMP_SLE:
104
229k
  case CmpInst::ICMP_UGE:
105
229k
  case CmpInst::ICMP_SGE:
106
229k
  // TODO: There are 16 FCMP predicates. Should others be (not) canonical?
107
229k
  case CmpInst::FCMP_ONE:
108
229k
  case CmpInst::FCMP_OLE:
109
229k
  case CmpInst::FCMP_OGE:
110
229k
    return false;
111
11.7M
  default:
112
11.7M
    return true;
113
12.0M
  }
114
12.0M
}
Unexecuted instantiation: InstCombineAddSub.cpp:llvm::isCanonicalPredicate(llvm::CmpInst::Predicate)
Unexecuted instantiation: InstCombineAtomicRMW.cpp:llvm::isCanonicalPredicate(llvm::CmpInst::Predicate)
Unexecuted instantiation: InstCombineAndOrXor.cpp:llvm::isCanonicalPredicate(llvm::CmpInst::Predicate)
Unexecuted instantiation: InstCombineCalls.cpp:llvm::isCanonicalPredicate(llvm::CmpInst::Predicate)
Unexecuted instantiation: InstCombineCasts.cpp:llvm::isCanonicalPredicate(llvm::CmpInst::Predicate)
Unexecuted instantiation: InstCombineCompares.cpp:llvm::isCanonicalPredicate(llvm::CmpInst::Predicate)
Unexecuted instantiation: InstCombineLoadStoreAlloca.cpp:llvm::isCanonicalPredicate(llvm::CmpInst::Predicate)
Unexecuted instantiation: InstCombineMulDivRem.cpp:llvm::isCanonicalPredicate(llvm::CmpInst::Predicate)
Unexecuted instantiation: InstCombinePHI.cpp:llvm::isCanonicalPredicate(llvm::CmpInst::Predicate)
InstCombineSelect.cpp:llvm::isCanonicalPredicate(llvm::CmpInst::Predicate)
Line
Count
Source
99
855k
static inline bool isCanonicalPredicate(CmpInst::Predicate Pred) {
100
855k
  switch (Pred) {
101
855k
  case CmpInst::ICMP_NE:
102
2.80k
  case CmpInst::ICMP_ULE:
103
2.80k
  case CmpInst::ICMP_SLE:
104
2.80k
  case CmpInst::ICMP_UGE:
105
2.80k
  case CmpInst::ICMP_SGE:
106
2.80k
  // TODO: There are 16 FCMP predicates. Should others be (not) canonical?
107
2.80k
  case CmpInst::FCMP_ONE:
108
2.80k
  case CmpInst::FCMP_OLE:
109
2.80k
  case CmpInst::FCMP_OGE:
110
2.80k
    return false;
111
852k
  default:
112
852k
    return true;
113
855k
  }
114
855k
}
Unexecuted instantiation: InstCombineShifts.cpp:llvm::isCanonicalPredicate(llvm::CmpInst::Predicate)
Unexecuted instantiation: InstCombineSimplifyDemanded.cpp:llvm::isCanonicalPredicate(llvm::CmpInst::Predicate)
Unexecuted instantiation: InstCombineVectorOps.cpp:llvm::isCanonicalPredicate(llvm::CmpInst::Predicate)
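Each predicate rejected above has a canonical inverse, so a transform can swap select or branch arms and invert the compare instead of matching both forms. A minimal check of that pairing using only the public CmpInst API (a sketch, assuming it is built against LLVM headers and libraries of this era):

#include "llvm/IR/InstrTypes.h"
#include <cassert>
using namespace llvm;

int main() {
  // Non-canonical predicates map to canonical ones under inversion:
  // ule -> ugt, sge -> slt, ole -> ugt (ordered <= inverts to unordered >).
  assert(CmpInst::getInversePredicate(CmpInst::ICMP_ULE) == CmpInst::ICMP_UGT);
  assert(CmpInst::getInversePredicate(CmpInst::ICMP_SGE) == CmpInst::ICMP_SLT);
  assert(CmpInst::getInversePredicate(CmpInst::FCMP_OLE) == CmpInst::FCMP_UGT);
  return 0;
}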
115
116
/// Return the source operand of a potentially bitcasted value while optionally
117
/// checking if it has one use. If there is no bitcast or the one use check is
118
/// not met, return the input value itself.
119
16.2M
static inline Value *peekThroughBitcast(Value *V, bool OneUseOnly = false) {
120
16.2M
  if (auto *BitCast = dyn_cast<BitCastInst>(V))
121
1.46M
    if (!OneUseOnly || BitCast->hasOneUse())   (region counts: 1.26M)
122
1.27M
      return BitCast->getOperand(0);
123
14.9M
124
14.9M
  // V is not a bitcast or V has more than one use and OneUseOnly is true.
125
14.9M
  return V;
126
14.9M
}
Unexecuted instantiation: InstructionCombining.cpp:llvm::peekThroughBitcast(llvm::Value*, bool)
Unexecuted instantiation: InstCombineAddSub.cpp:llvm::peekThroughBitcast(llvm::Value*, bool)
Unexecuted instantiation: InstCombineAtomicRMW.cpp:llvm::peekThroughBitcast(llvm::Value*, bool)
InstCombineAndOrXor.cpp:llvm::peekThroughBitcast(llvm::Value*, bool)
Line
Count
Source
119
444k
static inline Value *peekThroughBitcast(Value *V, bool OneUseOnly = false) {
120
444k
  if (auto *BitCast = dyn_cast<BitCastInst>(V))
121
1.69k
    if (!OneUseOnly || BitCast->hasOneUse())
122
1.69k
      return BitCast->getOperand(0);
123
442k
124
442k
  // V is not a bitcast or V has more than one use and OneUseOnly is true.
125
442k
  return V;
126
442k
}
InstCombineCalls.cpp:llvm::peekThroughBitcast(llvm::Value*, bool)
Line
Count
Source
119
8
static inline Value *peekThroughBitcast(Value *V, bool OneUseOnly = false) {
120
8
  if (auto *BitCast = dyn_cast<BitCastInst>(V))
121
6
    if (!OneUseOnly || BitCast->hasOneUse())   (region counts: 0)
122
6
      return BitCast->getOperand(0);
123
2
124
2
  // V is not a bitcast or V has more than one use and OneUseOnly is true.
125
2
  return V;
126
2
}
Unexecuted instantiation: InstCombineCasts.cpp:llvm::peekThroughBitcast(llvm::Value*, bool)
Unexecuted instantiation: InstCombineCompares.cpp:llvm::peekThroughBitcast(llvm::Value*, bool)
InstCombineLoadStoreAlloca.cpp:llvm::peekThroughBitcast(llvm::Value*, bool)
Line
Count
Source
119
15.8M
static inline Value *peekThroughBitcast(Value *V, bool OneUseOnly = false) {
120
15.8M
  if (auto *BitCast = dyn_cast<BitCastInst>(V))
121
1.46M
    if (!OneUseOnly || BitCast->hasOneUse())   (region counts: 1.26M)
122
1.27M
      return BitCast->getOperand(0);
123
14.5M
124
14.5M
  // V is not a bitcast or V has more than one use and OneUseOnly is true.
125
14.5M
  return V;
126
14.5M
}
Unexecuted instantiation: InstCombineMulDivRem.cpp:llvm::peekThroughBitcast(llvm::Value*, bool)
Unexecuted instantiation: InstCombinePHI.cpp:llvm::peekThroughBitcast(llvm::Value*, bool)
Unexecuted instantiation: InstCombineSelect.cpp:llvm::peekThroughBitcast(llvm::Value*, bool)
Unexecuted instantiation: InstCombineShifts.cpp:llvm::peekThroughBitcast(llvm::Value*, bool)
Unexecuted instantiation: InstCombineSimplifyDemanded.cpp:llvm::peekThroughBitcast(llvm::Value*, bool)
Unexecuted instantiation: InstCombineVectorOps.cpp:llvm::peekThroughBitcast(llvm::Value*, bool)
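A self-contained sketch of the same peeling, applied to IR built with the public IRBuilder API; peekThroughBitcastSketch below re-states the two-line helper for illustration rather than calling the internal one, and all names are hypothetical.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include <cassert>
using namespace llvm;

// Re-statement of the helper above, for demonstration only.
static Value *peekThroughBitcastSketch(Value *V, bool OneUseOnly = false) {
  if (auto *BC = dyn_cast<BitCastInst>(V))
    if (!OneUseOnly || BC->hasOneUse())
      return BC->getOperand(0);
  return V;
}

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  auto *FT = FunctionType::get(Type::getVoidTy(Ctx), {Type::getInt8PtrTy(Ctx)},
                               /*isVarArg=*/false);
  Function *F = Function::Create(FT, Function::ExternalLinkage, "f", &M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  IRBuilder<> B(BB);
  Value *Arg = &*F->arg_begin();
  // %p32 = bitcast i8* %arg to i32*
  Value *Cast = B.CreateBitCast(Arg, Type::getInt32PtrTy(Ctx), "p32");
  B.CreateRetVoid();
  // The bitcast is peeled back to the original i8* argument.
  assert(peekThroughBitcastSketch(Cast) == Arg);
  return 0;
}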
127
128
/// Add one to a Constant
129
1.23k
static inline Constant *AddOne(Constant *C) {
130
1.23k
  return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
131
1.23k
}
Unexecuted instantiation: InstructionCombining.cpp:llvm::AddOne(llvm::Constant*)
InstCombineAddSub.cpp:llvm::AddOne(llvm::Constant*)
Line
Count
Source
129
96
static inline Constant *AddOne(Constant *C) {
130
96
  return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
131
96
}
Unexecuted instantiation: InstCombineAtomicRMW.cpp:llvm::AddOne(llvm::Constant*)
InstCombineAndOrXor.cpp:llvm::AddOne(llvm::Constant*)
Line
Count
Source
129
1.14k
static inline Constant *AddOne(Constant *C) {
130
1.14k
  return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
131
1.14k
}
Unexecuted instantiation: InstCombineCalls.cpp:llvm::AddOne(llvm::Constant*)
Unexecuted instantiation: InstCombineCasts.cpp:llvm::AddOne(llvm::Constant*)
Unexecuted instantiation: InstCombineCompares.cpp:llvm::AddOne(llvm::Constant*)
Unexecuted instantiation: InstCombineLoadStoreAlloca.cpp:llvm::AddOne(llvm::Constant*)
Unexecuted instantiation: InstCombineMulDivRem.cpp:llvm::AddOne(llvm::Constant*)
Unexecuted instantiation: InstCombinePHI.cpp:llvm::AddOne(llvm::Constant*)
Unexecuted instantiation: InstCombineSelect.cpp:llvm::AddOne(llvm::Constant*)
Unexecuted instantiation: InstCombineShifts.cpp:llvm::AddOne(llvm::Constant*)
Unexecuted instantiation: InstCombineSimplifyDemanded.cpp:llvm::AddOne(llvm::Constant*)
Unexecuted instantiation: InstCombineVectorOps.cpp:llvm::AddOne(llvm::Constant*)
132
133
/// Subtract one from a Constant
134
517
static inline Constant *SubOne(Constant *C) {
135
517
  return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
136
517
}
Unexecuted instantiation: InstructionCombining.cpp:llvm::SubOne(llvm::Constant*)
InstCombineAddSub.cpp:llvm::SubOne(llvm::Constant*)
Line
Count
Source
134
176
static inline Constant *SubOne(Constant *C) {
135
176
  return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
136
176
}
Unexecuted instantiation: InstCombineAtomicRMW.cpp:llvm::SubOne(llvm::Constant*)
Unexecuted instantiation: InstCombineAndOrXor.cpp:llvm::SubOne(llvm::Constant*)
Unexecuted instantiation: InstCombineCalls.cpp:llvm::SubOne(llvm::Constant*)
Unexecuted instantiation: InstCombineCasts.cpp:llvm::SubOne(llvm::Constant*)
InstCombineCompares.cpp:llvm::SubOne(llvm::Constant*)
Line
Count
Source
134
341
static inline Constant *SubOne(Constant *C) {
135
341
  return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
136
341
}
Unexecuted instantiation: InstCombineLoadStoreAlloca.cpp:llvm::SubOne(llvm::Constant*)
Unexecuted instantiation: InstCombineMulDivRem.cpp:llvm::SubOne(llvm::Constant*)
Unexecuted instantiation: InstCombinePHI.cpp:llvm::SubOne(llvm::Constant*)
Unexecuted instantiation: InstCombineSelect.cpp:llvm::SubOne(llvm::Constant*)
Unexecuted instantiation: InstCombineShifts.cpp:llvm::SubOne(llvm::Constant*)
Unexecuted instantiation: InstCombineSimplifyDemanded.cpp:llvm::SubOne(llvm::Constant*)
Unexecuted instantiation: InstCombineVectorOps.cpp:llvm::SubOne(llvm::Constant*)
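For plain integer constants the constant-expression interface folds the add/sub immediately, so AddOne and SubOne hand back an ordinary ConstantInt. A small hedged example of that behavior (ConstantExpr::getAdd/getSub as they exist in this era of the API):

#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include <cassert>
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Type *I32 = Type::getInt32Ty(Ctx);
  Constant *C = ConstantInt::get(I32, 41);
  Constant *One = ConstantInt::get(I32, 1);
  // ConstantExpr::get* folds constant integer operands to a plain ConstantInt.
  assert(cast<ConstantInt>(ConstantExpr::getAdd(C, One))->getZExtValue() == 42);
  assert(cast<ConstantInt>(ConstantExpr::getSub(C, One))->getZExtValue() == 40);
  return 0;
}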
137
138
/// Return true if the specified value is free to invert (apply ~ to).
139
/// This happens in cases where the ~ can be eliminated.  If WillInvertAllUses
140
/// is true, work under the assumption that the caller intends to remove all
141
/// uses of V and only keep uses of ~V.
142
78.8k
static inline bool IsFreeToInvert(Value *V, bool WillInvertAllUses) {
143
78.8k
  // ~(~(X)) -> X.
144
78.8k
  if (match(V, m_Not(m_Value())))
145
108
    return true;
146
78.7k
147
78.7k
  // Constants can be considered to be not'ed values.
148
78.7k
  if (match(V, m_AnyIntegralConstant()))
149
145
    return true;
150
78.5k
151
78.5k
  // Compares can be inverted if all of their uses are being modified to use the
152
78.5k
  // ~V.
153
78.5k
  if (isa<CmpInst>(V))
154
10.2k
    return WillInvertAllUses;
155
68.3k
156
68.3k
  // If `V` is of the form `A + Constant` then `-1 - V` can be folded into `(-1
157
68.3k
  // - Constant) - A` if we are willing to invert all of the uses.
158
68.3k
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V))
159
55.2k
    if (BO->getOpcode() == Instruction::Add ||
160
55.2k
        BO->getOpcode() == Instruction::Sub)   (region counts: 49.5k)
161
7.44k
      if (isa<Constant>(BO->getOperand(0)) || isa<Constant>(BO->getOperand(1)))   (region counts: 7.18k)
162
699
        return WillInvertAllUses;
163
67.6k
164
67.6k
  // Selects with invertible operands are freely invertible
165
67.6k
  if (match(V, m_Select(m_Value(), m_Not(m_Value()), m_Not(m_Value()))))
166
3
    return WillInvertAllUses;
167
67.6k
168
67.6k
  return false;
169
67.6k
}
Unexecuted instantiation: InstructionCombining.cpp:llvm::IsFreeToInvert(llvm::Value*, bool)
InstCombineAddSub.cpp:llvm::IsFreeToInvert(llvm::Value*, bool)
Line
Count
Source
142
11
static inline bool IsFreeToInvert(Value *V, bool WillInvertAllUses) {
143
11
  // ~(~(X)) -> X.
144
11
  if (match(V, m_Not(m_Value())))
145
2
    return true;
146
9
147
9
  // Constants can be considered to be not'ed values.
148
9
  if (match(V, m_AnyIntegralConstant()))
149
4
    return true;
150
5
151
5
  // Compares can be inverted if all of their uses are being modified to use the
152
5
  // ~V.
153
5
  if (isa<CmpInst>(V))
154
0
    return WillInvertAllUses;
155
5
156
5
  // If `V` is of the form `A + Constant` then `-1 - V` can be folded into `(-1
157
5
  // - Constant) - A` if we are willing to invert all of the uses.
158
5
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V))
159
0
    if (BO->getOpcode() == Instruction::Add ||
160
0
        BO->getOpcode() == Instruction::Sub)
161
0
      if (isa<Constant>(BO->getOperand(0)) || isa<Constant>(BO->getOperand(1)))
162
0
        return WillInvertAllUses;
163
5
164
5
  // Selects with invertible operands are freely invertible
165
5
  if (match(V, m_Select(m_Value(), m_Not(m_Value()), m_Not(m_Value()))))
166
1
    return WillInvertAllUses;
167
4
168
4
  return false;
169
4
}
Unexecuted instantiation: InstCombineAtomicRMW.cpp:llvm::IsFreeToInvert(llvm::Value*, bool)
InstCombineAndOrXor.cpp:llvm::IsFreeToInvert(llvm::Value*, bool)
Line
Count
Source
142
19.6k
static inline bool IsFreeToInvert(Value *V, bool WillInvertAllUses) {
143
19.6k
  // ~(~(X)) -> X.
144
19.6k
  if (match(V, m_Not(m_Value())))
145
56
    return true;
146
19.5k
147
19.5k
  // Constants can be considered to be not'ed values.
148
19.5k
  if (match(V, m_AnyIntegralConstant()))
149
35
    return true;
150
19.5k
151
19.5k
  // Compares can be inverted if all of their uses are being modified to use the
152
19.5k
  // ~V.
153
19.5k
  if (isa<CmpInst>(V))
154
10.2k
    return WillInvertAllUses;
155
9.25k
156
9.25k
  // If `V` is of the form `A + Constant` then `-1 - V` can be folded into `(-1
157
9.25k
  // - Constant) - A` if we are willing to invert all of the uses.
158
9.25k
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V))
159
4.85k
    if (BO->getOpcode() == Instruction::Add ||
160
4.85k
        BO->getOpcode() == Instruction::Sub)   (region counts: 4.77k)
161
109
      if (isa<Constant>(BO->getOperand(0)) || isa<Constant>(BO->getOperand(1)))   (region counts: 74)
162
73
        return WillInvertAllUses;
163
9.17k
164
9.17k
  // Selects with invertible operands are freely invertible
165
9.17k
  if (match(V, m_Select(m_Value(), m_Not(m_Value()), m_Not(m_Value()))))
166
0
    return WillInvertAllUses;
167
9.17k
168
9.17k
  return false;
169
9.17k
}
Unexecuted instantiation: InstCombineCalls.cpp:llvm::IsFreeToInvert(llvm::Value*, bool)
Unexecuted instantiation: InstCombineCasts.cpp:llvm::IsFreeToInvert(llvm::Value*, bool)
Unexecuted instantiation: InstCombineCompares.cpp:llvm::IsFreeToInvert(llvm::Value*, bool)
Unexecuted instantiation: InstCombineLoadStoreAlloca.cpp:llvm::IsFreeToInvert(llvm::Value*, bool)
Unexecuted instantiation: InstCombineMulDivRem.cpp:llvm::IsFreeToInvert(llvm::Value*, bool)
Unexecuted instantiation: InstCombinePHI.cpp:llvm::IsFreeToInvert(llvm::Value*, bool)
InstCombineSelect.cpp:llvm::IsFreeToInvert(llvm::Value*, bool)
Line
Count
Source
142
59.2k
static inline bool IsFreeToInvert(Value *V, bool WillInvertAllUses) {
143
59.2k
  // ~(~(X)) -> X.
144
59.2k
  if (match(V, m_Not(m_Value())))
145
50
    return true;
146
59.1k
147
59.1k
  // Constants can be considered to be not'ed values.
148
59.1k
  if (match(V, m_AnyIntegralConstant()))
149
106
    return true;
150
59.0k
151
59.0k
  // Compares can be inverted if all of their uses are being modified to use the
152
59.0k
  // ~V.
153
59.0k
  if (isa<CmpInst>(V))
154
0
    return WillInvertAllUses;
155
59.0k
156
59.0k
  // If `V` is of the form `A + Constant` then `-1 - V` can be folded into `(-1
157
59.0k
  // - Constant) - A` if we are willing to invert all of the uses.
158
59.0k
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V))
159
50.4k
    if (BO->getOpcode() == Instruction::Add ||
160
50.4k
        BO->getOpcode() == Instruction::Sub)   (region counts: 44.7k)
161
7.33k
      if (isa<Constant>(BO->getOperand(0)) || isa<Constant>(BO->getOperand(1)))   (region counts: 7.11k)
162
626
        return WillInvertAllUses;
163
58.4k
164
58.4k
  // Selects with invertible operands are freely invertible
165
58.4k
  if (match(V, m_Select(m_Value(), m_Not(m_Value()), m_Not(m_Value()))))
166
2
    return WillInvertAllUses;
167
58.4k
168
58.4k
  return false;
169
58.4k
}
Unexecuted instantiation: InstCombineShifts.cpp:llvm::IsFreeToInvert(llvm::Value*, bool)
Unexecuted instantiation: InstCombineSimplifyDemanded.cpp:llvm::IsFreeToInvert(llvm::Value*, bool)
Unexecuted instantiation: InstCombineVectorOps.cpp:llvm::IsFreeToInvert(llvm::Value*, bool)
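The "A + Constant" case relies on the two's-complement identity ~(A + C) == (-1 - C) - A, which is what lets the not be folded into the constant once every use is rewritten. A plain-arithmetic check of that identity (no LLVM code involved):

#include <cassert>
#include <cstdint>

int main() {
  const int32_t C = 42;
  for (int32_t A : {-7, 0, 5, 123456}) {
    int32_t NotOfAdd = ~(A + C);      // what the IR computes before the fold
    int32_t Folded = (-1 - C) - A;    // what the inverted form computes
    assert(NotOfAdd == Folded);
  }
  return 0;
}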
170
171
/// Some binary operators require special handling to avoid poison and undefined
172
/// behavior. If a constant vector has undef elements, replace those undefs with
173
/// identity constants if possible because those are always safe to execute.
174
/// If no identity constant exists, replace undef with some other safe constant.
175
static inline Constant *getSafeVectorConstantForBinop(
176
29
      BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant) {
177
29
  assert(In->getType()->isVectorTy() && "Not expecting scalars here");
178
29
179
29
  Type *EltTy = In->getType()->getVectorElementType();
180
29
  auto *SafeC = ConstantExpr::getBinOpIdentity(Opcode, EltTy, IsRHSConstant);
181
29
  if (!SafeC) {
182
9
    // TODO: Should this be available as a constant utility function? It is
183
9
    // similar to getBinOpAbsorber().
184
9
    if (IsRHSConstant) {
185
2
      switch (Opcode) {
186
2
      case Instruction::SRem: // X % 1 = 0
187
2
      case Instruction::URem: // X %u 1 = 0
188
2
        SafeC = ConstantInt::get(EltTy, 1);
189
2
        break;
190
2
      case Instruction::FRem: // X % 1.0 (doesn't simplify, but it is safe)
191
0
        SafeC = ConstantFP::get(EltTy, 1.0);
192
0
        break;
193
2
      default:
194
0
        llvm_unreachable("Only rem opcodes have no identity constant for RHS");
195
7
      }
196
7
    } else {
197
7
      switch (Opcode) {
198
7
      case Instruction::Shl:  // 0 << X = 0
199
7
      case Instruction::LShr: // 0 >>u X = 0
200
7
      case Instruction::AShr: // 0 >> X = 0
201
7
      case Instruction::SDiv: // 0 / X = 0
202
7
      case Instruction::UDiv: // 0 /u X = 0
203
7
      case Instruction::SRem: // 0 % X = 0
204
7
      case Instruction::URem: // 0 %u X = 0
205
7
      case Instruction::Sub:  // 0 - X (doesn't simplify, but it is safe)
206
7
      case Instruction::FSub: // 0.0 - X (doesn't simplify, but it is safe)
207
7
      case Instruction::FDiv: // 0.0 / X (doesn't simplify, but it is safe)
208
7
      case Instruction::FRem: // 0.0 % X = 0
209
7
        SafeC = Constant::getNullValue(EltTy);
210
7
        break;
211
7
      default:
212
0
        llvm_unreachable("Expected to find identity constant for opcode");
213
29
      }
214
29
    }
215
9
  }
216
29
  assert(SafeC && "Must have safe constant for binop");
217
29
  unsigned NumElts = In->getType()->getVectorNumElements();
218
29
  SmallVector<Constant *, 16> Out(NumElts);
219
129
  for (unsigned i = 0; i != NumElts; ++i) {   (region counts: ++i 100)
220
100
    Constant *C = In->getAggregateElement(i);
221
100
    Out[i] = isa<UndefValue>(C) ? SafeC : C;   (region counts: SafeC 38, C 62)
222
100
  }
223
29
  return ConstantVector::get(Out);
224
29
}
InstructionCombining.cpp:llvm::getSafeVectorConstantForBinop(llvm::Instruction::BinaryOps, llvm::Constant*, bool)
Line
Count
Source
176
8
      BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant) {
177
8
  assert(In->getType()->isVectorTy() && "Not expecting scalars here");
178
8
179
8
  Type *EltTy = In->getType()->getVectorElementType();
180
8
  auto *SafeC = ConstantExpr::getBinOpIdentity(Opcode, EltTy, IsRHSConstant);
181
8
  if (!SafeC) {
182
2
    // TODO: Should this be available as a constant utility function? It is
183
2
    // similar to getBinOpAbsorber().
184
2
    if (IsRHSConstant) {
185
2
      switch (Opcode) {
186
2
      case Instruction::SRem: // X % 1 = 0
187
2
      case Instruction::URem: // X %u 1 = 0
188
2
        SafeC = ConstantInt::get(EltTy, 1);
189
2
        break;
190
2
      case Instruction::FRem: // X % 1.0 (doesn't simplify, but it is safe)
191
0
        SafeC = ConstantFP::get(EltTy, 1.0);
192
0
        break;
193
2
      default:
194
0
        llvm_unreachable("Only rem opcodes have no identity constant for RHS");
195
0
      }
196
0
    } else {
197
0
      switch (Opcode) {
198
0
      case Instruction::Shl:  // 0 << X = 0
199
0
      case Instruction::LShr: // 0 >>u X = 0
200
0
      case Instruction::AShr: // 0 >> X = 0
201
0
      case Instruction::SDiv: // 0 / X = 0
202
0
      case Instruction::UDiv: // 0 /u X = 0
203
0
      case Instruction::SRem: // 0 % X = 0
204
0
      case Instruction::URem: // 0 %u X = 0
205
0
      case Instruction::Sub:  // 0 - X (doesn't simplify, but it is safe)
206
0
      case Instruction::FSub: // 0.0 - X (doesn't simplify, but it is safe)
207
0
      case Instruction::FDiv: // 0.0 / X (doesn't simplify, but it is safe)
208
0
      case Instruction::FRem: // 0.0 % X = 0
209
0
        SafeC = Constant::getNullValue(EltTy);
210
0
        break;
211
0
      default:
212
0
        llvm_unreachable("Expected to find identity constant for opcode");
213
8
      }
214
8
    }
215
2
  }
216
8
  assert(SafeC && "Must have safe constant for binop");
217
8
  unsigned NumElts = In->getType()->getVectorNumElements();
218
8
  SmallVector<Constant *, 16> Out(NumElts);
219
24
  for (unsigned i = 0; i != NumElts; ++i) {   (region counts: ++i 16)
220
16
    Constant *C = In->getAggregateElement(i);
221
16
    Out[i] = isa<UndefValue>(C) ? SafeC : C;   (region counts: SafeC 7, C 9)
222
16
  }
223
8
  return ConstantVector::get(Out);
224
8
}
Unexecuted instantiation: InstCombineAddSub.cpp:llvm::getSafeVectorConstantForBinop(llvm::Instruction::BinaryOps, llvm::Constant*, bool)
Unexecuted instantiation: InstCombineAtomicRMW.cpp:llvm::getSafeVectorConstantForBinop(llvm::Instruction::BinaryOps, llvm::Constant*, bool)
Unexecuted instantiation: InstCombineAndOrXor.cpp:llvm::getSafeVectorConstantForBinop(llvm::Instruction::BinaryOps, llvm::Constant*, bool)
Unexecuted instantiation: InstCombineCalls.cpp:llvm::getSafeVectorConstantForBinop(llvm::Instruction::BinaryOps, llvm::Constant*, bool)
Unexecuted instantiation: InstCombineCasts.cpp:llvm::getSafeVectorConstantForBinop(llvm::Instruction::BinaryOps, llvm::Constant*, bool)
Unexecuted instantiation: InstCombineCompares.cpp:llvm::getSafeVectorConstantForBinop(llvm::Instruction::BinaryOps, llvm::Constant*, bool)
Unexecuted instantiation: InstCombineLoadStoreAlloca.cpp:llvm::getSafeVectorConstantForBinop(llvm::Instruction::BinaryOps, llvm::Constant*, bool)
Unexecuted instantiation: InstCombineMulDivRem.cpp:llvm::getSafeVectorConstantForBinop(llvm::Instruction::BinaryOps, llvm::Constant*, bool)
Unexecuted instantiation: InstCombinePHI.cpp:llvm::getSafeVectorConstantForBinop(llvm::Instruction::BinaryOps, llvm::Constant*, bool)
Unexecuted instantiation: InstCombineSelect.cpp:llvm::getSafeVectorConstantForBinop(llvm::Instruction::BinaryOps, llvm::Constant*, bool)
Unexecuted instantiation: InstCombineShifts.cpp:llvm::getSafeVectorConstantForBinop(llvm::Instruction::BinaryOps, llvm::Constant*, bool)
Unexecuted instantiation: InstCombineSimplifyDemanded.cpp:llvm::getSafeVectorConstantForBinop(llvm::Instruction::BinaryOps, llvm::Constant*, bool)
InstCombineVectorOps.cpp:llvm::getSafeVectorConstantForBinop(llvm::Instruction::BinaryOps, llvm::Constant*, bool)
Line
Count
Source
176
21
      BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant) {
177
21
  assert(In->getType()->isVectorTy() && "Not expecting scalars here");
178
21
179
21
  Type *EltTy = In->getType()->getVectorElementType();
180
21
  auto *SafeC = ConstantExpr::getBinOpIdentity(Opcode, EltTy, IsRHSConstant);
181
21
  if (!SafeC) {
182
7
    // TODO: Should this be available as a constant utility function? It is
183
7
    // similar to getBinOpAbsorber().
184
7
    if (IsRHSConstant) {
185
0
      switch (Opcode) {
186
0
      case Instruction::SRem: // X % 1 = 0
187
0
      case Instruction::URem: // X %u 1 = 0
188
0
        SafeC = ConstantInt::get(EltTy, 1);
189
0
        break;
190
0
      case Instruction::FRem: // X % 1.0 (doesn't simplify, but it is safe)
191
0
        SafeC = ConstantFP::get(EltTy, 1.0);
192
0
        break;
193
0
      default:
194
0
        llvm_unreachable("Only rem opcodes have no identity constant for RHS");
195
7
      }
196
7
    } else {
197
7
      switch (Opcode) {
198
7
      case Instruction::Shl:  // 0 << X = 0
199
7
      case Instruction::LShr: // 0 >>u X = 0
200
7
      case Instruction::AShr: // 0 >> X = 0
201
7
      case Instruction::SDiv: // 0 / X = 0
202
7
      case Instruction::UDiv: // 0 /u X = 0
203
7
      case Instruction::SRem: // 0 % X = 0
204
7
      case Instruction::URem: // 0 %u X = 0
205
7
      case Instruction::Sub:  // 0 - X (doesn't simplify, but it is safe)
206
7
      case Instruction::FSub: // 0.0 - X (doesn't simplify, but it is safe)
207
7
      case Instruction::FDiv: // 0.0 / X (doesn't simplify, but it is safe)
208
7
      case Instruction::FRem: // 0.0 % X = 0
209
7
        SafeC = Constant::getNullValue(EltTy);
210
7
        break;
211
7
      default:
212
0
        llvm_unreachable("Expected to find identity constant for opcode");
213
21
      }
214
21
    }
215
7
  }
216
21
  assert(SafeC && "Must have safe constant for binop");
217
21
  unsigned NumElts = In->getType()->getVectorNumElements();
218
21
  SmallVector<Constant *, 16> Out(NumElts);
219
105
  for (unsigned i = 0; i != NumElts; ++i) {   (region counts: ++i 84)
220
84
    Constant *C = In->getAggregateElement(i);
221
84
    Out[i] = isa<UndefValue>(C) ? SafeC : C;   (region counts: SafeC 31, C 53)
222
84
  }
223
21
  return ConstantVector::get(Out);
224
21
}
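The helper's fallback cases line up with what ConstantExpr::getBinOpIdentity can and cannot provide. A hedged sketch of that lookup (assumes the LLVM headers and libraries of this era):

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include <cassert>
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Type *I32 = Type::getInt32Ty(Ctx);
  // x + 0 == x: 'add' has an identity on either side, so no fallback is needed.
  Constant *AddId =
      ConstantExpr::getBinOpIdentity(Instruction::Add, I32, /*AllowRHSConstant=*/true);
  assert(AddId && AddId->isNullValue());
  // 'srem' has no RHS identity (x % 1 simplifies to 0, not x), so the helper
  // above substitutes 1 itself as a safe constant.
  assert(!ConstantExpr::getBinOpIdentity(Instruction::SRem, I32, /*AllowRHSConstant=*/true));
  // 'shl' has no LHS identity, so the helper falls back to 0 (0 << x == 0).
  assert(!ConstantExpr::getBinOpIdentity(Instruction::Shl, I32, /*AllowRHSConstant=*/false));
  return 0;
}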
225
226
/// The core instruction combiner logic.
227
///
228
/// This class provides both the logic to recursively visit instructions and
229
/// combine them.
230
class LLVM_LIBRARY_VISIBILITY InstCombiner
231
    : public InstVisitor<InstCombiner, Instruction *> {
232
  // FIXME: These members shouldn't be public.
233
public:
234
  /// A worklist of the instructions that need to be simplified.
235
  InstCombineWorklist &Worklist;
236
237
  /// An IRBuilder that automatically inserts new instructions into the
238
  /// worklist.
239
  using BuilderTy = IRBuilder<TargetFolder, IRBuilderCallbackInserter>;
240
  BuilderTy &Builder;
241
242
private:
243
  // Mode in which we are running the combiner.
244
  const bool MinimizeSize;
245
246
  /// Enable combines that trigger rarely but are costly in compiletime.
247
  const bool ExpensiveCombines;
248
249
  AliasAnalysis *AA;
250
251
  // Required analyses.
252
  AssumptionCache &AC;
253
  TargetLibraryInfo &TLI;
254
  DominatorTree &DT;
255
  const DataLayout &DL;
256
  const SimplifyQuery SQ;
257
  OptimizationRemarkEmitter &ORE;
258
  BlockFrequencyInfo *BFI;
259
  ProfileSummaryInfo *PSI;
260
261
  // Optional analyses. When non-null, these can both be used to do better
262
  // combining and will be updated to reflect any changes.
263
  LoopInfo *LI;
264
265
  bool MadeIRChange = false;
266
267
public:
268
  InstCombiner(InstCombineWorklist &Worklist, BuilderTy &Builder,
269
               bool MinimizeSize, bool ExpensiveCombines, AliasAnalysis *AA,
270
               AssumptionCache &AC, TargetLibraryInfo &TLI, DominatorTree &DT,
271
               OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
272
               ProfileSummaryInfo *PSI, const DataLayout &DL, LoopInfo *LI)
273
      : Worklist(Worklist), Builder(Builder), MinimizeSize(MinimizeSize),
274
        ExpensiveCombines(ExpensiveCombines), AA(AA), AC(AC), TLI(TLI), DT(DT),
275
3.72M
        DL(DL), SQ(DL, &TLI, &DT, &AC), ORE(ORE), BFI(BFI), PSI(PSI), LI(LI) {}
276
277
  /// Run the combiner over the entire worklist until it is empty.
278
  ///
279
  /// \returns true if the IR is changed.
280
  bool run();
281
282
54.4k
  AssumptionCache &getAssumptionCache() const { return AC; }
283
284
21.5M
  const DataLayout &getDataLayout() const { return DL; }
285
286
54.4k
  DominatorTree &getDominatorTree() const { return DT; }
287
288
0
  LoopInfo *getLoopInfo() const { return LI; }
289
290
739
  TargetLibraryInfo &getTargetLibraryInfo() const { return TLI; }
291
292
  // Visitation implementation - Implement instruction combining for different
293
  // instruction types.  The semantics are as follows:
294
  // Return Value:
295
  //    null        - No change was made
296
  //     I          - Change was made, I is still valid, I may be dead though
297
  //   otherwise    - Change was made, replace I with returned instruction
298
  //
299
  Instruction *visitFNeg(UnaryOperator &I);
300
  Instruction *visitAdd(BinaryOperator &I);
301
  Instruction *visitFAdd(BinaryOperator &I);
302
  Value *OptimizePointerDifference(Value *LHS, Value *RHS, Type *Ty);
303
  Instruction *visitSub(BinaryOperator &I);
304
  Instruction *visitFSub(BinaryOperator &I);
305
  Instruction *visitMul(BinaryOperator &I);
306
  Instruction *visitFMul(BinaryOperator &I);
307
  Instruction *visitURem(BinaryOperator &I);
308
  Instruction *visitSRem(BinaryOperator &I);
309
  Instruction *visitFRem(BinaryOperator &I);
310
  bool simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I);
311
  Instruction *commonRemTransforms(BinaryOperator &I);
312
  Instruction *commonIRemTransforms(BinaryOperator &I);
313
  Instruction *commonDivTransforms(BinaryOperator &I);
314
  Instruction *commonIDivTransforms(BinaryOperator &I);
315
  Instruction *visitUDiv(BinaryOperator &I);
316
  Instruction *visitSDiv(BinaryOperator &I);
317
  Instruction *visitFDiv(BinaryOperator &I);
318
  Value *simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted);
319
  Instruction *visitAnd(BinaryOperator &I);
320
  Instruction *visitOr(BinaryOperator &I);
321
  Instruction *visitXor(BinaryOperator &I);
322
  Instruction *visitShl(BinaryOperator &I);
323
  Instruction *visitAShr(BinaryOperator &I);
324
  Instruction *visitLShr(BinaryOperator &I);
325
  Instruction *commonShiftTransforms(BinaryOperator &I);
326
  Instruction *visitFCmpInst(FCmpInst &I);
327
  Instruction *visitICmpInst(ICmpInst &I);
328
  Instruction *FoldShiftByConstant(Value *Op0, Constant *Op1,
329
                                   BinaryOperator &I);
330
  Instruction *commonCastTransforms(CastInst &CI);
331
  Instruction *commonPointerCastTransforms(CastInst &CI);
332
  Instruction *visitTrunc(TruncInst &CI);
333
  Instruction *visitZExt(ZExtInst &CI);
334
  Instruction *visitSExt(SExtInst &CI);
335
  Instruction *visitFPTrunc(FPTruncInst &CI);
336
  Instruction *visitFPExt(CastInst &CI);
337
  Instruction *visitFPToUI(FPToUIInst &FI);
338
  Instruction *visitFPToSI(FPToSIInst &FI);
339
  Instruction *visitUIToFP(CastInst &CI);
340
  Instruction *visitSIToFP(CastInst &CI);
341
  Instruction *visitPtrToInt(PtrToIntInst &CI);
342
  Instruction *visitIntToPtr(IntToPtrInst &CI);
343
  Instruction *visitBitCast(BitCastInst &CI);
344
  Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
345
  Instruction *FoldItoFPtoI(Instruction &FI);
346
  Instruction *visitSelectInst(SelectInst &SI);
347
  Instruction *visitCallInst(CallInst &CI);
348
  Instruction *visitInvokeInst(InvokeInst &II);
349
  Instruction *visitCallBrInst(CallBrInst &CBI);
350
351
  Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
352
  Instruction *visitPHINode(PHINode &PN);
353
  Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
354
  Instruction *visitAllocaInst(AllocaInst &AI);
355
  Instruction *visitAllocSite(Instruction &FI);
356
  Instruction *visitFree(CallInst &FI);
357
  Instruction *visitLoadInst(LoadInst &LI);
358
  Instruction *visitStoreInst(StoreInst &SI);
359
  Instruction *visitAtomicRMWInst(AtomicRMWInst &SI);
360
  Instruction *visitBranchInst(BranchInst &BI);
361
  Instruction *visitFenceInst(FenceInst &FI);
362
  Instruction *visitSwitchInst(SwitchInst &SI);
363
  Instruction *visitReturnInst(ReturnInst &RI);
364
  Instruction *visitInsertValueInst(InsertValueInst &IV);
365
  Instruction *visitInsertElementInst(InsertElementInst &IE);
366
  Instruction *visitExtractElementInst(ExtractElementInst &EI);
367
  Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
368
  Instruction *visitExtractValueInst(ExtractValueInst &EV);
369
  Instruction *visitLandingPadInst(LandingPadInst &LI);
370
  Instruction *visitVAStartInst(VAStartInst &I);
371
  Instruction *visitVACopyInst(VACopyInst &I);
372
373
  /// Specify what to return for unhandled instructions.
374
549k
  Instruction *visitInstruction(Instruction &I) { return nullptr; }
375
376
  /// True when DB dominates all uses of DI except UI.
377
  /// UI must be in the same block as DI.
378
  /// The routine checks that the DI parent and DB are different.
379
  bool dominatesAllUses(const Instruction *DI, const Instruction *UI,
380
                        const BasicBlock *DB) const;
381
382
  /// Try to replace select with select operand SIOpd in SI-ICmp sequence.
383
  bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp,
384
                                 const unsigned SIOpd);
385
386
  /// Try to replace instruction \p I with value \p V which are pointers
387
  /// in different address space.
388
  /// \return true if successful.
389
  bool replacePointer(Instruction &I, Value *V);
390
391
private:
392
  bool shouldChangeType(unsigned FromBitWidth, unsigned ToBitWidth) const;
393
  bool shouldChangeType(Type *From, Type *To) const;
394
  Value *dyn_castNegVal(Value *V) const;
395
  Type *FindElementAtOffset(PointerType *PtrTy, int64_t Offset,
396
                            SmallVectorImpl<Value *> &NewIndices);
397
398
  /// Classify whether a cast is worth optimizing.
399
  ///
400
  /// This is a helper to decide whether the simplification of
401
  /// logic(cast(A), cast(B)) to cast(logic(A, B)) should be performed.
402
  ///
403
  /// \param CI The cast we are interested in.
404
  ///
405
  /// \return true if this cast actually results in any code being generated and
406
  /// if it cannot already be eliminated by some other transformation.
407
  bool shouldOptimizeCast(CastInst *CI);
408
409
  /// Try to optimize a sequence of instructions checking if an operation
410
  /// on LHS and RHS overflows.
411
  ///
412
  /// If this overflow check is done via one of the overflow check intrinsics,
413
  /// then CtxI has to be the call instruction calling that intrinsic.  If this
414
  /// overflow check is done by arithmetic followed by a compare, then CtxI has
415
  /// to be the arithmetic instruction.
416
  ///
417
  /// If a simplification is possible, stores the simplified result of the
418
  /// operation in OperationResult and result of the overflow check in
419
  /// OverflowResult, and return true.  If no simplification is possible,
420
  /// returns false.
421
  bool OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp, bool IsSigned,
422
                             Value *LHS, Value *RHS,
423
                             Instruction &CtxI, Value *&OperationResult,
424
                             Constant *&OverflowResult);
425
426
  Instruction *visitCallBase(CallBase &Call);
427
  Instruction *tryOptimizeCall(CallInst *CI);
428
  bool transformConstExprCastCall(CallBase &Call);
429
  Instruction *transformCallThroughTrampoline(CallBase &Call,
430
                                              IntrinsicInst &Tramp);
431
432
  Value *simplifyMaskedLoad(IntrinsicInst &II);
433
  Instruction *simplifyMaskedStore(IntrinsicInst &II);
434
  Instruction *simplifyMaskedGather(IntrinsicInst &II);
435
  Instruction *simplifyMaskedScatter(IntrinsicInst &II);
436
  
437
  /// Transform (zext icmp) to bitwise / integer operations in order to
438
  /// eliminate it.
439
  ///
440
  /// \param ICI The icmp of the (zext icmp) pair we are interested in.
441
  /// \param CI The zext of the (zext icmp) pair we are interested in.
442
  /// \param DoTransform Pass false to just test whether the given (zext icmp)
443
  /// would be transformed. Pass true to actually perform the transformation.
444
  ///
445
  /// \return null if the transformation cannot be performed. If the
446
  /// transformation can be performed the new instruction that replaces the
447
  /// (zext icmp) pair will be returned (if \p DoTransform is false the
448
  /// unmodified \p ICI will be returned in this case).
449
  Instruction *transformZExtICmp(ICmpInst *ICI, ZExtInst &CI,
450
                                 bool DoTransform = true);
451
452
  Instruction *transformSExtICmp(ICmpInst *ICI, Instruction &CI);
453
454
  bool willNotOverflowSignedAdd(const Value *LHS, const Value *RHS,
455
2.26M
                                const Instruction &CxtI) const {
456
2.26M
    return computeOverflowForSignedAdd(LHS, RHS, &CxtI) ==
457
2.26M
           OverflowResult::NeverOverflows;
458
2.26M
  }
459
460
  bool willNotOverflowUnsignedAdd(const Value *LHS, const Value *RHS,
461
3.89M
                                  const Instruction &CxtI) const {
462
3.89M
    return computeOverflowForUnsignedAdd(LHS, RHS, &CxtI) ==
463
3.89M
           OverflowResult::NeverOverflows;
464
3.89M
  }
465
466
  bool willNotOverflowAdd(const Value *LHS, const Value *RHS,
467
30.3k
                          const Instruction &CxtI, bool IsSigned) const {
468
30.3k
    return IsSigned ? willNotOverflowSignedAdd(LHS, RHS, CxtI)   (region counts: 11.8k)
469
30.3k
                    : willNotOverflowUnsignedAdd(LHS, RHS, CxtI);   (region counts: 18.5k)
470
30.3k
  }
471
472
  bool willNotOverflowSignedSub(const Value *LHS, const Value *RHS,
473
848k
                                const Instruction &CxtI) const {
474
848k
    return computeOverflowForSignedSub(LHS, RHS, &CxtI) ==
475
848k
           OverflowResult::NeverOverflows;
476
848k
  }
477
478
  bool willNotOverflowUnsignedSub(const Value *LHS, const Value *RHS,
479
1.39M
                                  const Instruction &CxtI) const {
480
1.39M
    return computeOverflowForUnsignedSub(LHS, RHS, &CxtI) ==
481
1.39M
           OverflowResult::NeverOverflows;
482
1.39M
  }
483
484
  bool willNotOverflowSub(const Value *LHS, const Value *RHS,
485
24.8k
                          const Instruction &CxtI, bool IsSigned) const {
486
24.8k
    return IsSigned ? willNotOverflowSignedSub(LHS, RHS, CxtI)   (region counts: 6.05k)
487
24.8k
                    : willNotOverflowUnsignedSub(LHS, RHS, CxtI);   (region counts: 18.7k)
488
24.8k
  }
489
490
  bool willNotOverflowSignedMul(const Value *LHS, const Value *RHS,
491
410k
                                const Instruction &CxtI) const {
492
410k
    return computeOverflowForSignedMul(LHS, RHS, &CxtI) ==
493
410k
           OverflowResult::NeverOverflows;
494
410k
  }
495
496
  bool willNotOverflowUnsignedMul(const Value *LHS, const Value *RHS,
497
885k
                                  const Instruction &CxtI) const {
498
885k
    return computeOverflowForUnsignedMul(LHS, RHS, &CxtI) ==
499
885k
           OverflowResult::NeverOverflows;
500
885k
  }
501
502
  bool willNotOverflowMul(const Value *LHS, const Value *RHS,
503
101k
                          const Instruction &CxtI, bool IsSigned) const {
504
101k
    return IsSigned ? willNotOverflowSignedMul(LHS, RHS, CxtI)   (region counts: 93.6k)
505
101k
                    : willNotOverflowUnsignedMul(LHS, RHS, CxtI);   (region counts: 7.44k)
506
101k
  }
507
508
  bool willNotOverflow(BinaryOperator::BinaryOps Opcode, const Value *LHS,
509
                       const Value *RHS, const Instruction &CxtI,
510
156k
                       bool IsSigned) const {
511
156k
    switch (Opcode) {
512
156k
    
case Instruction::Add: return willNotOverflowAdd(LHS, RHS, CxtI, IsSigned)30.3k
;
513
156k
    
case Instruction::Sub: return willNotOverflowSub(LHS, RHS, CxtI, IsSigned)24.8k
;
514
156k
    
case Instruction::Mul: return willNotOverflowMul(LHS, RHS, CxtI, IsSigned)101k
;
515
156k
    
default: 0
llvm_unreachable0
("Unexpected opcode for overflow query");
516
156k
    }
517
156k
  }
518
519
  Value *EmitGEPOffset(User *GEP);
520
  Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
521
  Instruction *foldCastedBitwiseLogic(BinaryOperator &I);
522
  Instruction *narrowBinOp(TruncInst &Trunc);
523
  Instruction *narrowMaskedBinOp(BinaryOperator &And);
524
  Instruction *narrowMathIfNoOverflow(BinaryOperator &I);
525
  Instruction *narrowRotate(TruncInst &Trunc);
526
  Instruction *optimizeBitCastFromPhi(CastInst &CI, PHINode *PN);
527
528
  /// Determine if a pair of casts can be replaced by a single cast.
529
  ///
530
  /// \param CI1 The first of a pair of casts.
531
  /// \param CI2 The second of a pair of casts.
532
  ///
533
  /// \return 0 if the cast pair cannot be eliminated, otherwise returns an
534
  /// Instruction::CastOps value for a cast that can replace the pair, casting
535
  /// CI1->getSrcTy() to CI2->getDstTy().
536
  ///
537
  /// \see CastInst::isEliminableCastPair
538
  Instruction::CastOps isEliminableCastPair(const CastInst *CI1,
539
                                            const CastInst *CI2);
540
541
  Value *foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS, Instruction &CxtI);
542
  Value *foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, Instruction &CxtI);
543
  Value *foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS);
544
545
  /// Optimize (fcmp)&(fcmp) or (fcmp)|(fcmp).
546
  /// NOTE: Unlike most of instcombine, this returns a Value which should
547
  /// already be inserted into the function.
548
  Value *foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd);
549
550
  Value *foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS, ICmpInst *RHS,
551
                                       bool JoinedByAnd, Instruction &CxtI);
552
  Value *matchSelectFromAndOr(Value *A, Value *B, Value *C, Value *D);
553
  Value *getSelectCondition(Value *A, Value *B);
554
555
  Instruction *foldIntrinsicWithOverflowCommon(IntrinsicInst *II);
556
557
public:
558
  /// Inserts an instruction \p New before instruction \p Old
559
  ///
560
  /// Also adds the new instruction to the worklist and returns \p New so that
561
  /// it is suitable for use as the return from the visitation patterns.
562
46.3k
  Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
563
46.3k
    assert(New && !New->getParent() &&
564
46.3k
           "New instruction already inserted into a basic block!");
565
46.3k
    BasicBlock *BB = Old.getParent();
566
46.3k
    BB->getInstList().insert(Old.getIterator(), New); // Insert inst
567
46.3k
    Worklist.Add(New);
568
46.3k
    return New;
569
46.3k
  }
570
571
  /// Same as InsertNewInstBefore, but also sets the debug loc.
572
18.5k
  Instruction *InsertNewInstWith(Instruction *New, Instruction &Old) {
573
18.5k
    New->setDebugLoc(Old.getDebugLoc());
574
18.5k
    return InsertNewInstBefore(New, Old);
575
18.5k
  }
576
577
  /// A combiner-aware RAUW-like routine.
578
  ///
579
  /// This method is to be used when an instruction is found to be dead,
580
  /// replaceable with another preexisting expression. Here we add all uses of
581
  /// I to the worklist, replace all uses of I with the new value, then return
582
  /// I, so that the inst combiner will know that I was modified.
583
647k
  Instruction *replaceInstUsesWith(Instruction &I, Value *V) {
584
647k
    // If there are no uses to replace, then we return nullptr to indicate that
585
647k
    // no changes were made to the program.
586
647k
    if (I.use_empty()) return nullptr;   (region counts: 134)
587
647k
588
647k
    Worklist.AddUsersToWorkList(I); // Add all modified instrs to worklist.
589
647k
590
647k
    // If we are replacing the instruction with itself, this must be in a
591
647k
    // segment of unreachable code, so just clobber the instruction.
592
647k
    if (&I == V)
593
1
      V = UndefValue::get(I.getType());
594
647k
595
647k
    LLVM_DEBUG(dbgs() << "IC: Replacing " << I << "\n"
596
647k
                      << "    with " << *V << '\n');
597
647k
598
647k
    I.replaceAllUsesWith(V);
599
647k
    return &I;
600
647k
  }
601
602
  /// Creates a result tuple for an overflow intrinsic \p II with a given
603
  /// \p Result and a constant \p Overflow value.
604
  Instruction *CreateOverflowTuple(IntrinsicInst *II, Value *Result,
605
328
                                   Constant *Overflow) {
606
328
    Constant *V[] = {UndefValue::get(Result->getType()), Overflow};
607
328
    StructType *ST = cast<StructType>(II->getType());
608
328
    Constant *Struct = ConstantStruct::get(ST, V);
609
328
    return InsertValueInst::Create(Struct, Result, 0);
610
328
  }
611
612
  /// Create and insert the idiom we use to indicate a block is unreachable
613
  /// without having to rewrite the CFG from within InstCombine.
614
16
  void CreateNonTerminatorUnreachable(Instruction *InsertAt) {
615
16
    auto &Ctx = InsertAt->getContext();
616
16
    new StoreInst(ConstantInt::getTrue(Ctx),
617
16
                  UndefValue::get(Type::getInt1PtrTy(Ctx)),
618
16
                  InsertAt);
619
16
  }
620
621
622
  /// Combiner aware instruction erasure.
623
  ///
624
  /// When dealing with an instruction that has side effects or produces a void
625
  /// value, we can't rely on DCE to delete the instruction. Instead, visit
626
  /// methods should return the value returned by this function.
627
2.87M
  Instruction *eraseInstFromFunction(Instruction &I) {
628
2.87M
    LLVM_DEBUG(dbgs() << "IC: ERASE " << I << '\n');
629
2.87M
    assert(I.use_empty() && "Cannot erase instruction that is used!");
630
2.87M
    salvageDebugInfo(I);
631
2.87M
632
2.87M
    // Make sure that we reprocess all operands now that we reduced their
633
2.87M
    // use counts.
634
2.87M
    if (I.getNumOperands() < 8) {
635
2.85M
      for (Use &Operand : I.operands())
636
5.39M
        if (auto *Inst = dyn_cast<Instruction>(Operand))
637
2.64M
          Worklist.Add(Inst);
638
2.85M
    }
639
2.87M
    Worklist.Remove(&I);
640
2.87M
    I.eraseFromParent();
641
2.87M
    MadeIRChange = true;
642
2.87M
    return nullptr; // Don't do anything with FI
643
2.87M
  }
644
645
  void computeKnownBits(const Value *V, KnownBits &Known,
646
44.8M
                        unsigned Depth, const Instruction *CxtI) const {
647
44.8M
    llvm::computeKnownBits(V, Known, DL, Depth, &AC, CxtI, &DT);
648
44.8M
  }
649
650
  KnownBits computeKnownBits(const Value *V, unsigned Depth,
651
60.2M
                             const Instruction *CxtI) const {
652
60.2M
    return llvm::computeKnownBits(V, DL, Depth, &AC, CxtI, &DT);
653
60.2M
  }
654
655
  bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero = false,
656
                              unsigned Depth = 0,
657
79.9k
                              const Instruction *CxtI = nullptr) {
658
79.9k
    return llvm::isKnownToBeAPowerOfTwo(V, DL, OrZero, Depth, &AC, CxtI, &DT);
659
79.9k
  }
660
661
  bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth = 0,
662
2.08M
                         const Instruction *CxtI = nullptr) const {
663
2.08M
    return llvm::MaskedValueIsZero(V, Mask, DL, Depth, &AC, CxtI, &DT);
664
2.08M
  }
665
666
  unsigned ComputeNumSignBits(const Value *Op, unsigned Depth = 0,
667
1.12M
                              const Instruction *CxtI = nullptr) const {
668
1.12M
    return llvm::ComputeNumSignBits(Op, DL, Depth, &AC, CxtI, &DT);
669
1.12M
  }
670
671
  OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
672
                                               const Value *RHS,
673
897k
                                               const Instruction *CxtI) const {
674
897k
    return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
675
897k
  }
676
677
  OverflowResult computeOverflowForSignedMul(const Value *LHS,
678
                                             const Value *RHS,
679
410k
                                             const Instruction *CxtI) const {
680
410k
    return llvm::computeOverflowForSignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
681
410k
  }
682
683
  OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
684
                                               const Value *RHS,
685
3.90M
                                               const Instruction *CxtI) const {
686
3.90M
    return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
687
3.90M
  }
688
689
  OverflowResult computeOverflowForSignedAdd(const Value *LHS,
690
                                             const Value *RHS,
691
2.26M
                                             const Instruction *CxtI) const {
692
2.26M
    return llvm::computeOverflowForSignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
693
2.26M
  }
694
695
  OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
696
                                               const Value *RHS,
697
1.39M
                                               const Instruction *CxtI) const {
698
1.39M
    return llvm::computeOverflowForUnsignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
699
1.39M
  }
700
701
  OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
702
848k
                                             const Instruction *CxtI) const {
703
848k
    return llvm::computeOverflowForSignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
704
848k
  }
705
706
  OverflowResult computeOverflow(
707
      Instruction::BinaryOps BinaryOp, bool IsSigned,
708
      Value *LHS, Value *RHS, Instruction *CxtI) const;
709
710
  /// Maximum size of array considered when transforming.
711
  uint64_t MaxArraySizeForCombine;
712
713
private:
714
  /// Performs a few simplifications for operators which are associative
715
  /// or commutative.
716
  bool SimplifyAssociativeOrCommutative(BinaryOperator &I);
717
718
  /// Tries to simplify binary operations which some other binary
719
  /// operation distributes over.
720
  ///
721
  /// It does this either by factorizing out common terms (eg "(A*B)+(A*C)"
722
  /// -> "A*(B+C)") or expanding out if this results in simplifications (eg: "A
723
  /// & (B | C) -> (A&B) | (A&C)" if this is a win).  Returns the simplified
724
  /// value, or null if it didn't simplify.
725
  Value *SimplifyUsingDistributiveLaws(BinaryOperator &I);
726
727
  /// Tries to simplify add operations using the definition of remainder.
728
  ///
729
  /// The definition of remainder is X % C = X - (X / C ) * C. The add
730
  /// expression X % C0 + (( X / C0 ) % C1) * C0 can be simplified to
731
  /// X % (C0 * C1)
732
  Value *SimplifyAddWithRemainder(BinaryOperator &I);
733
734
  // Binary Op helper for select operations where the expression can be
735
  // efficiently reorganized.
736
  Value *SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS,
737
                                        Value *RHS);
738
739
  /// This tries to simplify binary operations by factorizing out common terms
740
  /// (e. g. "(A*B)+(A*C)" -> "A*(B+C)").
741
  Value *tryFactorization(BinaryOperator &, Instruction::BinaryOps, Value *,
742
                          Value *, Value *, Value *);
743
744
  /// Match a select chain which produces one of three values based on whether
745
  /// the LHS is less than, equal to, or greater than RHS respectively.
746
  /// Return true if we matched a three way compare idiom. The LHS, RHS, Less,
747
  /// Equal and Greater values are saved in the matching process and returned to
748
  /// the caller.
749
  bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS,
750
                               ConstantInt *&Less, ConstantInt *&Equal,
751
                               ConstantInt *&Greater);
752
753
  /// Attempts to replace V with a simpler value based on the demanded
754
  /// bits.
755
  Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask, KnownBits &Known,
756
                                 unsigned Depth, Instruction *CxtI);
757
  bool SimplifyDemandedBits(Instruction *I, unsigned Op,
758
                            const APInt &DemandedMask, KnownBits &Known,
759
                            unsigned Depth = 0);
760
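A small standalone example of the kind of fold that demanded-bits analysis enables (a sketch, not the implementation): when only the low nibble is demanded, an OR that touches only the high bits can be dropped. The mask 0x0F plays the role of DemandedMask here.

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t X = 0; X < 256; ++X) {
    uint32_t before = (X | 0xF0u) & 0x0Fu; // original expression
    uint32_t after  = X & 0x0Fu;           // OR removed: same demanded bits
    assert(before == after);
  }
  return 0;
}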
761
  /// Helper routine of SimplifyDemandedUseBits. It computes KnownZero/KnownOne
762
  /// bits. It also tries to handle simplifications that can be done based on
763
  /// DemandedMask, but without modifying the Instruction.
764
  Value *SimplifyMultipleUseDemandedBits(Instruction *I,
765
                                         const APInt &DemandedMask,
766
                                         KnownBits &Known,
767
                                         unsigned Depth, Instruction *CxtI);
768
769
  /// Helper routine of SimplifyDemandedUseBits. It tries to simplify demanded
770
  /// bits for the "r1 = shr x, c1; r2 = shl r1, c2" instruction sequence.
771
  Value *simplifyShrShlDemandedBits(
772
      Instruction *Shr, const APInt &ShrOp1, Instruction *Shl,
773
      const APInt &ShlOp1, const APInt &DemandedMask, KnownBits &Known);
774
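The simple c1 == c2 case of this pattern can be checked numerically as below; this is only an illustration with an arbitrary shift amount, using a logical shift right, and it shows why the sequence is equivalent to masking off the low bits.

#include <cassert>
#include <cstdint>

int main() {
  const unsigned c = 3; // c1 == c2 == 3, chosen arbitrarily
  for (uint32_t X = 0; X < 1024; ++X) {
    uint32_t shifted = (X >> c) << c;        // r1 = lshr x, 3; r2 = shl r1, 3
    uint32_t masked  = X & ~((1u << c) - 1); // same value as x & 0xFFFFFFF8
    assert(shifted == masked);
  }
  return 0;
}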
775
  /// Tries to simplify operands to an integer instruction based on its
776
  /// demanded bits.
777
  bool SimplifyDemandedInstructionBits(Instruction &Inst);
778
779
  Value *simplifyAMDGCNMemoryIntrinsicDemanded(IntrinsicInst *II,
780
                                               APInt DemandedElts,
781
                                               int DmaskIdx = -1);
782
783
  Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
784
                                    APInt &UndefElts, unsigned Depth = 0);
785
786
  /// Canonicalize the position of binops relative to shufflevector.
787
  Instruction *foldVectorBinop(BinaryOperator &Inst);
788
789
  /// Given a binary operator, cast instruction, or select which has a PHI node
790
  /// as operand #0, see if we can fold the instruction into the PHI (which is
791
  /// only possible if all operands to the PHI are constants).
792
  Instruction *foldOpIntoPhi(Instruction &I, PHINode *PN);
793
794
  /// Given an instruction with a select as one operand and a constant as the
795
  /// other operand, try to fold the binary operator into the select arguments.
796
  /// This also works for Cast instructions, which obviously do not have a
797
  /// second operand.
798
  Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI);
799
800
  /// This is a convenience wrapper function for the above two functions.
801
  Instruction *foldBinOpIntoSelectOrPhi(BinaryOperator &I);
802
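As a plain C++ illustration of folding a binary operator into select arguments (a sketch, not the pass itself; the constants are arbitrary): the add is applied to each constant arm, leaving a bare select.

#include <cassert>

static int before(bool cond) { return (cond ? 4 : 8) + 1; } // add feeds a select of constants
static int after(bool cond)  { return  cond ? 5 : 9; }      // add folded into both arms

int main() {
  assert(before(true) == after(true));
  assert(before(false) == after(false));
  return 0;
}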
803
  Instruction *foldAddWithConstant(BinaryOperator &Add);
804
805
  /// Try to rotate an operation below a PHI node, using PHI nodes for
806
  /// its operands.
807
  Instruction *FoldPHIArgOpIntoPHI(PHINode &PN);
808
  Instruction *FoldPHIArgBinOpIntoPHI(PHINode &PN);
809
  Instruction *FoldPHIArgGEPIntoPHI(PHINode &PN);
810
  Instruction *FoldPHIArgLoadIntoPHI(PHINode &PN);
811
  Instruction *FoldPHIArgZextsIntoPHI(PHINode &PN);
812
813
  /// If an integer-typed PHI has only one use, which is an IntToPtr operation,
814
  /// replace the PHI with an existing pointer-typed PHI if one exists. Otherwise
815
  /// insert a new pointer-typed PHI and replace the original one.
816
  Instruction *FoldIntegerTypedPHI(PHINode &PN);
817
818
  /// Helper function for FoldPHIArgXIntoPHI() to set the debug location for the
819
  /// folded operation.
820
  void PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN);
821
822
  Instruction *foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
823
                           ICmpInst::Predicate Cond, Instruction &I);
824
  Instruction *foldAllocaCmp(ICmpInst &ICI, const AllocaInst *Alloca,
825
                             const Value *Other);
826
  Instruction *foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
827
                                            GlobalVariable *GV, CmpInst &ICI,
828
                                            ConstantInt *AndCst = nullptr);
829
  Instruction *foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
830
                                    Constant *RHSC);
831
  Instruction *foldICmpAddOpConst(Value *X, const APInt &C,
832
                                  ICmpInst::Predicate Pred);
833
  Instruction *foldICmpWithCastAndCast(ICmpInst &ICI);
834
835
  Instruction *foldICmpUsingKnownBits(ICmpInst &Cmp);
836
  Instruction *foldICmpWithDominatingICmp(ICmpInst &Cmp);
837
  Instruction *foldICmpWithConstant(ICmpInst &Cmp);
838
  Instruction *foldICmpInstWithConstant(ICmpInst &Cmp);
839
  Instruction *foldICmpInstWithConstantNotInt(ICmpInst &Cmp);
840
  Instruction *foldICmpBinOp(ICmpInst &Cmp);
841
  Instruction *foldICmpEquality(ICmpInst &Cmp);
842
  Instruction *foldICmpWithZero(ICmpInst &Cmp);
843
844
  Instruction *foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select,
845
                                      ConstantInt *C);
846
  Instruction *foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc,
847
                                     const APInt &C);
848
  Instruction *foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And,
849
                                   const APInt &C);
850
  Instruction *foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor,
851
                                   const APInt &C);
852
  Instruction *foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
853
                                  const APInt &C);
854
  Instruction *foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul,
855
                                   const APInt &C);
856
  Instruction *foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl,
857
                                   const APInt &C);
858
  Instruction *foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr,
859
                                   const APInt &C);
860
  Instruction *foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv,
861
                                    const APInt &C);
862
  Instruction *foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div,
863
                                   const APInt &C);
864
  Instruction *foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub,
865
                                   const APInt &C);
866
  Instruction *foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add,
867
                                   const APInt &C);
868
  Instruction *foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And,
869
                                     const APInt &C1);
870
  Instruction *foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
871
                                const APInt &C1, const APInt &C2);
872
  Instruction *foldICmpShrConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1,
873
                                     const APInt &C2);
874
  Instruction *foldICmpShlConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1,
875
                                     const APInt &C2);
876
877
  Instruction *foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
878
                                                 BinaryOperator *BO,
879
                                                 const APInt &C);
880
  Instruction *foldICmpIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II,
881
                                             const APInt &C);
882
  Instruction *foldICmpEqIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II,
883
                                               const APInt &C);
884
885
  // Helpers of visitSelectInst().
886
  Instruction *foldSelectExtConst(SelectInst &Sel);
887
  Instruction *foldSelectOpOp(SelectInst &SI, Instruction *TI, Instruction *FI);
888
  Instruction *foldSelectIntoOp(SelectInst &SI, Value *, Value *);
889
  Instruction *foldSPFofSPF(Instruction *Inner, SelectPatternFlavor SPF1,
890
                            Value *A, Value *B, Instruction &Outer,
891
                            SelectPatternFlavor SPF2, Value *C);
892
  Instruction *foldSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);
893
894
  Instruction *OptAndOp(BinaryOperator *Op, ConstantInt *OpRHS,
895
                        ConstantInt *AndRHS, BinaryOperator &TheAnd);
896
897
  Value *insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
898
                         bool isSigned, bool Inside);
899
  Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
900
  bool mergeStoreIntoSuccessor(StoreInst &SI);
901
902
  /// Given an 'or' instruction, check to see if it is part of a bswap idiom.
903
  /// If so, return the equivalent bswap intrinsic.
904
  Instruction *matchBSwap(BinaryOperator &Or);
905
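For illustration, one common 32-bit shift-and-or byte-swap idiom is checked below against a byte-by-byte reference swap; this is a standalone sketch, not the matcher itself, and the sample inputs are arbitrary. The final 'or' of the four terms is the instruction that would be replaced by the equivalent bswap intrinsic.

#include <cassert>
#include <cstdint>

static uint32_t bswapIdiom(uint32_t x) {
  // The shift/mask/or pattern the matcher looks for.
  return ((x & 0x000000FFu) << 24) | ((x & 0x0000FF00u) << 8) |
         ((x >> 8) & 0x0000FF00u)  | ((x >> 24) & 0x000000FFu);
}

static uint32_t bswapReference(uint32_t x) {
  uint32_t r = 0;
  for (int i = 0; i < 4; ++i)                        // move byte i to byte 3 - i
    r |= ((x >> (8 * i)) & 0xFFu) << (8 * (3 - i));
  return r;
}

int main() {
  assert(bswapIdiom(0x12345678u) == 0x78563412u);
  assert(bswapIdiom(0xA1B2C3D4u) == bswapReference(0xA1B2C3D4u));
  return 0;
}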
906
  Instruction *SimplifyAnyMemTransfer(AnyMemTransferInst *MI);
907
  Instruction *SimplifyAnyMemSet(AnyMemSetInst *MI);
908
909
  Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);
910
911
  /// Returns a value X such that Val = X * Scale, or null if none.
912
  ///
913
  /// If the multiplication is known not to overflow then NoSignedWrap is set.
914
  Value *Descale(Value *Val, APInt Scale, bool &NoSignedWrap);
915
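A minimal numeric illustration of the contract Val = X * Scale (a sketch only; the factor 12 and Scale = 4 are arbitrary): descaling a value computed as A * 12 by a scale of 4 yields A * 3.

#include <cassert>
#include <cstdint>

int main() {
  const int64_t Scale = 4;
  for (int64_t A = -10; A <= 10; ++A) {
    int64_t Val = A * 12; // original value
    int64_t X   = A * 3;  // a valid descaled value, since Val == X * Scale
    assert(Val == X * Scale);
  }
  return 0;
}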
};
916
917
} // end namespace llvm
918
919
#undef DEBUG_TYPE
920
921
#endif // LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H