Coverage Report

Created: 2017-10-03 07:32

/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/include/llvm/Transforms/Utils/Local.h
Line
Count
Source
1
//===-- Local.h - Functions to perform local transformations ----*- C++ -*-===//
2
//
3
//                     The LLVM Compiler Infrastructure
4
//
5
// This file is distributed under the University of Illinois Open Source
6
// License. See LICENSE.TXT for details.
7
//
8
//===----------------------------------------------------------------------===//
9
//
10
// This family of functions perform various local transformations to the
11
// program.
12
//
13
//===----------------------------------------------------------------------===//
14
15
#ifndef LLVM_TRANSFORMS_UTILS_LOCAL_H
16
#define LLVM_TRANSFORMS_UTILS_LOCAL_H
17
18
#include "llvm/ADT/SmallPtrSet.h"
19
#include "llvm/ADT/TinyPtrVector.h"
20
#include "llvm/Analysis/AliasAnalysis.h"
21
#include "llvm/IR/DataLayout.h"
22
#include "llvm/IR/Dominators.h"
23
#include "llvm/IR/GetElementPtrTypeIterator.h"
24
#include "llvm/IR/IRBuilder.h"
25
#include "llvm/IR/Operator.h"
26
27
namespace llvm {
28
29
class User;
30
class BasicBlock;
31
class Function;
32
class BranchInst;
33
class Instruction;
34
class CallInst;
35
class DbgDeclareInst;
36
class DbgInfoIntrinsic;
37
class DbgValueInst;
38
class StoreInst;
39
class LoadInst;
40
class Value;
41
class PHINode;
42
class AllocaInst;
43
class AssumptionCache;
44
class ConstantExpr;
45
class DataLayout;
46
class TargetLibraryInfo;
47
class TargetTransformInfo;
48
class DIBuilder;
49
class DominatorTree;
50
class LazyValueInfo;
51
52
template<typename T> class SmallVectorImpl;
53
54
/// A set of parameters used to control the transforms in the SimplifyCFG pass.
55
/// Options may change depending on the position in the optimization pipeline.
56
/// For example, canonical form that includes switches and branches may later be
57
/// replaced by lookup tables and selects.
58
struct SimplifyCFGOptions {
59
  int BonusInstThreshold;
60
  bool ConvertSwitchToLookupTable;
61
  bool NeedCanonicalLoop;
62
63
  SimplifyCFGOptions(int BonusThreshold = 1, bool SwitchToLookup = false,
64
                     bool CanonicalLoops = true)
65
      : BonusInstThreshold(BonusThreshold),
66
        ConvertSwitchToLookupTable(SwitchToLookup),
67
4.73M
        NeedCanonicalLoop(CanonicalLoops) {}
68
};
69
70
//===----------------------------------------------------------------------===//
71
//  Local constant propagation.
72
//
73
74
/// If a terminator instruction is predicated on a constant value, convert it
75
/// into an unconditional branch to the constant destination.
76
/// This is a nontrivial operation because the successors of this basic block
77
/// must have their PHI nodes updated.
78
/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
79
/// conditions and indirectbr addresses this might make dead if
80
/// DeleteDeadConditions is true.
81
bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false,
82
                            const TargetLibraryInfo *TLI = nullptr,
83
                            DominatorTree *DT = nullptr);
84
85
//===----------------------------------------------------------------------===//
86
//  Local dead code elimination.
87
//
88
89
/// Return true if the result produced by the instruction is not used, and the
90
/// instruction has no side effects.
91
bool isInstructionTriviallyDead(Instruction *I,
92
                                const TargetLibraryInfo *TLI = nullptr);
93
94
/// Return true if the result produced by the instruction would have no side
95
/// effects if it was not used. This is equivalent to checking whether
96
/// isInstructionTriviallyDead would be true if the use count was 0.
97
bool wouldInstructionBeTriviallyDead(Instruction *I,
98
                                     const TargetLibraryInfo *TLI = nullptr);
99
100
/// If the specified value is a trivially dead instruction, delete it.
101
/// If that makes any of its operands trivially dead, delete them too,
102
/// recursively. Return true if any instructions were deleted.
103
bool RecursivelyDeleteTriviallyDeadInstructions(Value *V,
104
                                        const TargetLibraryInfo *TLI = nullptr);
105
106
/// If the specified value is an effectively dead PHI node, due to being a
107
/// def-use chain of single-use nodes that either forms a cycle or is terminated
108
/// by a trivially dead instruction, delete it. If that makes any of its
109
/// operands trivially dead, delete them too, recursively. Return true if a
110
/// change was made.
111
bool RecursivelyDeleteDeadPHINode(PHINode *PN,
112
                                  const TargetLibraryInfo *TLI = nullptr);
113
114
/// Scan the specified basic block and try to simplify any instructions in it
115
/// and recursively delete dead instructions.
116
///
117
/// This returns true if it changed the code, note that it can delete
118
/// instructions in other blocks as well in this block.
119
bool SimplifyInstructionsInBlock(BasicBlock *BB,
120
                                 const TargetLibraryInfo *TLI = nullptr);
121
122
//===----------------------------------------------------------------------===//
123
//  Control Flow Graph Restructuring.
124
//
125
126
/// Like BasicBlock::removePredecessor, this method is called when we're about
127
/// to delete Pred as a predecessor of BB. If BB contains any PHI nodes, this
128
/// drops the entries in the PHI nodes for Pred.
129
///
130
/// Unlike the removePredecessor method, this attempts to simplify uses of PHI
131
/// nodes that collapse into identity values.  For example, if we have:
132
///   x = phi(1, 0, 0, 0)
133
///   y = and x, z
134
///
135
/// .. and delete the predecessor corresponding to the '1', this will attempt to
136
/// recursively fold the 'and' to 0.
137
void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
138
                                  DominatorTree *DT = nullptr);
139
140
/// BB is a block with one predecessor and its predecessor is known to have one
141
/// successor (BB!). Eliminate the edge between them, moving the instructions in
142
/// the predecessor into BB. This deletes the predecessor block.
143
void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DominatorTree *DT = nullptr);
144
145
/// BB is known to contain an unconditional branch, and contains no instructions
146
/// other than PHI nodes, potential debug intrinsics and the branch. If
147
/// possible, eliminate BB by rewriting all the predecessors to branch to the
148
/// successor block and return true. If we can't transform, return false.
149
bool TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
150
                                             DominatorTree *DT = nullptr);
151
152
/// Check for and eliminate duplicate PHI nodes in this block. This doesn't try
153
/// to be clever about PHI nodes which differ only in the order of the incoming
154
/// values, but instcombine orders them so it usually won't matter.
155
bool EliminateDuplicatePHINodes(BasicBlock *BB);
156
157
/// This function is used to do simplification of a CFG.  For example, it
158
/// adjusts branches to branches to eliminate the extra hop, it eliminates
159
/// unreachable basic blocks, and does other peephole optimization of the CFG.
160
/// It returns true if a modification was made, possibly deleting the basic
161
/// block that was pointed to. LoopHeaders is an optional input parameter
162
/// providing the set of loop headers that SimplifyCFG should not eliminate.
163
bool SimplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
164
                 AssumptionCache *AC = nullptr,
165
                 const SimplifyCFGOptions &Options = {},
166
                 SmallPtrSetImpl<BasicBlock *> *LoopHeaders = nullptr);
167
168
/// This function is used to flatten a CFG. For example, it uses parallel-and
169
/// and parallel-or mode to collapse if-conditions and merge if-regions with
170
/// identical statements.
171
bool FlattenCFG(BasicBlock *BB, AliasAnalysis *AA = nullptr);
172
173
/// If this basic block is ONLY a setcc and a branch, and if a predecessor
174
/// branches to us and one of our successors, fold the setcc into the
175
/// predecessor and use logical operations to pick the right destination.
176
bool FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold = 1);
177
178
/// This function takes a virtual register computed by an Instruction and
179
/// replaces it with a slot in the stack frame, allocated via alloca.
180
/// This allows the CFG to be changed around without fear of invalidating the
181
/// SSA information for the value. It returns the pointer to the alloca inserted
182
/// to create a stack slot for X.
183
AllocaInst *DemoteRegToStack(Instruction &X,
184
                             bool VolatileLoads = false,
185
                             Instruction *AllocaPoint = nullptr);
186
187
/// This function takes a virtual register computed by a phi node and replaces
188
/// it with a slot in the stack frame, allocated via alloca. The phi node is
189
/// deleted and it returns the pointer to the alloca inserted.
190
AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = nullptr);
191
192
/// Try to ensure that the alignment of \p V is at least \p PrefAlign bytes. If
193
/// the owning object can be modified and has an alignment less than \p
194
/// PrefAlign, it will be increased and \p PrefAlign returned. If the alignment
195
/// cannot be increased, the known alignment of the value is returned.
196
///
197
/// It is not always possible to modify the alignment of the underlying object,
198
/// so if alignment is important, a more reliable approach is to simply align
199
/// all global variables and allocation instructions to their preferred
200
/// alignment from the beginning.
201
unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
202
                                    const DataLayout &DL,
203
                                    const Instruction *CxtI = nullptr,
204
                                    AssumptionCache *AC = nullptr,
205
                                    const DominatorTree *DT = nullptr);
206
207
/// Try to infer an alignment for the specified pointer.
208
static inline unsigned getKnownAlignment(Value *V, const DataLayout &DL,
209
                                         const Instruction *CxtI = nullptr,
210
                                         AssumptionCache *AC = nullptr,
211
617k
                                         const DominatorTree *DT = nullptr) {
212
617k
  return getOrEnforceKnownAlignment(V, 0, DL, CxtI, AC, DT);
213
617k
}
Unexecuted instantiation: AMDGPUUnifyDivergentExitNodes.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: SIAnnotateControlFlow.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: HexagonCommonGEP.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: HexagonLoopIdiomRecognition.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: PPCCTRLoops.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: PPCLoopPreIncPrep.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: MemoryBuiltins.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
CodeGenPrepare.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Line
Count
Source
211
1.85k
                                         const DominatorTree *DT = nullptr) {
212
1.85k
  return getOrEnforceKnownAlignment(V, 0, DL, CxtI, AC, DT);
213
1.85k
}
Unexecuted instantiation: DwarfEHPrepare.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: SafeStack.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: SjLjEHPrepare.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: WinEHPrepare.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: ConstantHoisting.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: ConstantProp.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: CorrelatedValuePropagation.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: DCE.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: DeadStoreElimination.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: EarlyCSE.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: FlattenCFGPass.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: GVN.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: GVNHoist.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: GVNSink.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: IndVarSimplify.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: InferAddressSpaces.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: JumpThreading.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: LICM.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: LoopSink.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: LoopDataPrefetch.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: LoopIdiomRecognize.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: LoopInstSimplify.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: LoopRerollPass.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: LoopRotation.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: LoopSimplifyCFG.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: LoopStrengthReduce.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: LoopUnswitch.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: MemCpyOptimizer.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: NaryReassociate.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: NewGVN.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: PlaceSafepoints.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: Reassociate.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: Reg2Mem.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: RewriteStatepointsForGC.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: SCCP.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: SROA.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: SeparateConstOffsetFromGEP.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: SimplifyCFGPass.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: StraightLineStrengthReduce.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: TailRecursionElimination.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: BasicBlockUtils.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: BypassSlowDivision.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: CloneFunction.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: DemoteRegToStack.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: EscapeEnumerator.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: FlattenCFG.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
InlineFunction.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Line
Count
Source
211
18
                                         const DominatorTree *DT = nullptr) {
212
18
  return getOrEnforceKnownAlignment(V, 0, DL, CxtI, AC, DT);
213
18
}
Unexecuted instantiation: Local.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: LoopSimplify.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: LoopUnroll.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: PromoteMemoryToRegister.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: SimplifyCFG.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: SimplifyInstructions.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
SimplifyLibCalls.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Line
Count
Source
211
1.59k
                                         const DominatorTree *DT = nullptr) {
212
1.59k
  return getOrEnforceKnownAlignment(V, 0, DL, CxtI, AC, DT);
213
1.59k
}
Unexecuted instantiation: LoadStoreVectorizer.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: LoopVectorize.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: GlobalOpt.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: Inliner.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: PruneEH.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: InstructionCombining.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: InstCombineAddSub.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: InstCombineAndOrXor.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
InstCombineCalls.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Line
Count
Source
211
614k
                                         const DominatorTree *DT = nullptr) {
212
614k
  return getOrEnforceKnownAlignment(V, 0, DL, CxtI, AC, DT);
213
614k
}
Unexecuted instantiation: InstCombineCasts.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: InstCombineCompares.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: InstCombineLoadStoreAlloca.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: InstCombineMulDivRem.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: InstCombinePHI.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: InstCombineSelect.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: InstCombineShifts.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: InstCombineSimplifyDemanded.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
Unexecuted instantiation: InstCombineVectorOps.cpp:llvm::getKnownAlignment(llvm::Value*, llvm::DataLayout const&, llvm::Instruction const*, llvm::AssumptionCache*, llvm::DominatorTree const*)
214
215
/// Given a getelementptr instruction/constantexpr, emit the code necessary to
216
/// compute the offset from the base pointer (without adding in the base
217
/// pointer). Return the result as a signed integer of intptr size.
218
/// When NoAssumptions is true, no assumptions about index computation not
219
/// overflowing is made.
220
template <typename IRBuilderTy>
221
Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
222
111
                     bool NoAssumptions = false) {
223
111
  GEPOperator *GEPOp = cast<GEPOperator>(GEP);
224
111
  Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
225
111
  Value *Result = Constant::getNullValue(IntPtrTy);
226
111
227
111
  // If the GEP is inbounds, we know that none of the addressing operations will
228
111
  // overflow in an unsigned sense.
229
103
  bool isInBounds = GEPOp->isInBounds() && !NoAssumptions;
230
111
231
111
  // Build a mask for high order bits.
232
111
  unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
233
111
  uint64_t PtrSizeMask = ~0ULL >> (64 - IntPtrWidth);
234
111
235
111
  gep_type_iterator GTI = gep_type_begin(GEP);
236
277
  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
237
166
       
++i, ++GTI166
) {
238
166
    Value *Op = *i;
239
166
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
240
166
    if (Constant *
OpC166
= dyn_cast<Constant>(Op)) {
241
80
      if (OpC->isZeroValue())
242
50
        continue;
243
80
244
80
      // Handle a struct index, which adds its field offset to the pointer.
245
30
      
if (StructType *30
STy30
= GTI.getStructTypeOrNull()) {
246
6
        if (OpC->getType()->isVectorTy())
247
2
          OpC = OpC->getSplatValue();
248
6
249
6
        uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
250
6
        Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
251
6
252
6
        if (Size)
253
6
          Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
254
6
                                      GEP->getName()+".offs");
255
6
        continue;
256
6
      }
257
30
258
24
      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
259
24
      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
260
24
      Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
261
24
      // Emit an add instruction.
262
24
      Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
263
24
      continue;
264
80
    }
265
166
    // Convert to correct type.
266
86
    
if (86
Op->getType() != IntPtrTy86
)
267
1
      Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
268
86
    if (
Size != 186
) {
269
28
      // We'll let instcombine(mul) convert this to a shl if possible.
270
28
      Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
271
28
                              GEP->getName()+".idx", isInBounds /*NUW*/);
272
28
    }
273
166
274
166
    // Emit an add instruction.
275
166
    Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
276
166
  }
277
111
  return Result;
278
111
}
llvm::Value* llvm::EmitGEPOffset<llvm::IRBuilder<llvm::TargetFolder, llvm::IRBuilderDefaultInserter> >(llvm::IRBuilder<llvm::TargetFolder, llvm::IRBuilderDefaultInserter>*, llvm::DataLayout const&, llvm::User*, bool)
Line
Count
Source
222
10
                     bool NoAssumptions = false) {
223
10
  GEPOperator *GEPOp = cast<GEPOperator>(GEP);
224
10
  Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
225
10
  Value *Result = Constant::getNullValue(IntPtrTy);
226
10
227
10
  // If the GEP is inbounds, we know that none of the addressing operations will
228
10
  // overflow in an unsigned sense.
229
10
  bool isInBounds = GEPOp->isInBounds() && !NoAssumptions;
230
10
231
10
  // Build a mask for high order bits.
232
10
  unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
233
10
  uint64_t PtrSizeMask = ~0ULL >> (64 - IntPtrWidth);
234
10
235
10
  gep_type_iterator GTI = gep_type_begin(GEP);
236
22
  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
237
12
       
++i, ++GTI12
) {
238
12
    Value *Op = *i;
239
12
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
240
12
    if (Constant *
OpC12
= dyn_cast<Constant>(Op)) {
241
8
      if (OpC->isZeroValue())
242
2
        continue;
243
8
244
8
      // Handle a struct index, which adds its field offset to the pointer.
245
6
      
if (StructType *6
STy6
= GTI.getStructTypeOrNull()) {
246
0
        if (OpC->getType()->isVectorTy())
247
0
          OpC = OpC->getSplatValue();
248
0
249
0
        uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
250
0
        Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
251
0
252
0
        if (Size)
253
0
          Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
254
0
                                      GEP->getName()+".offs");
255
0
        continue;
256
0
      }
257
6
258
6
      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
259
6
      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
260
6
      Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
261
6
      // Emit an add instruction.
262
6
      Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
263
6
      continue;
264
8
    }
265
12
    // Convert to correct type.
266
4
    
if (4
Op->getType() != IntPtrTy4
)
267
1
      Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
268
4
    if (
Size != 14
) {
269
2
      // We'll let instcombine(mul) convert this to a shl if possible.
270
2
      Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
271
2
                              GEP->getName()+".idx", isInBounds /*NUW*/);
272
2
    }
273
12
274
12
    // Emit an add instruction.
275
12
    Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
276
12
  }
277
10
  return Result;
278
10
}
llvm::Value* llvm::EmitGEPOffset<llvm::IRBuilder<llvm::TargetFolder, llvm::IRBuilderCallbackInserter> >(llvm::IRBuilder<llvm::TargetFolder, llvm::IRBuilderCallbackInserter>*, llvm::DataLayout const&, llvm::User*, bool)
Line
Count
Source
222
101
                     bool NoAssumptions = false) {
223
101
  GEPOperator *GEPOp = cast<GEPOperator>(GEP);
224
101
  Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
225
101
  Value *Result = Constant::getNullValue(IntPtrTy);
226
101
227
101
  // If the GEP is inbounds, we know that none of the addressing operations will
228
101
  // overflow in an unsigned sense.
229
93
  bool isInBounds = GEPOp->isInBounds() && !NoAssumptions;
230
101
231
101
  // Build a mask for high order bits.
232
101
  unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
233
101
  uint64_t PtrSizeMask = ~0ULL >> (64 - IntPtrWidth);
234
101
235
101
  gep_type_iterator GTI = gep_type_begin(GEP);
236
255
  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
237
154
       
++i, ++GTI154
) {
238
154
    Value *Op = *i;
239
154
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
240
154
    if (Constant *
OpC154
= dyn_cast<Constant>(Op)) {
241
72
      if (OpC->isZeroValue())
242
48
        continue;
243
72
244
72
      // Handle a struct index, which adds its field offset to the pointer.
245
24
      
if (StructType *24
STy24
= GTI.getStructTypeOrNull()) {
246
6
        if (OpC->getType()->isVectorTy())
247
2
          OpC = OpC->getSplatValue();
248
6
249
6
        uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
250
6
        Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
251
6
252
6
        if (Size)
253
6
          Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
254
6
                                      GEP->getName()+".offs");
255
6
        continue;
256
6
      }
257
24
258
18
      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
259
18
      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
260
18
      Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
261
18
      // Emit an add instruction.
262
18
      Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
263
18
      continue;
264
72
    }
265
154
    // Convert to correct type.
266
82
    
if (82
Op->getType() != IntPtrTy82
)
267
0
      Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
268
82
    if (
Size != 182
) {
269
26
      // We'll let instcombine(mul) convert this to a shl if possible.
270
26
      Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
271
26
                              GEP->getName()+".idx", isInBounds /*NUW*/);
272
26
    }
273
154
274
154
    // Emit an add instruction.
275
154
    Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
276
154
  }
277
101
  return Result;
278
101
}
279
280
///===---------------------------------------------------------------------===//
281
///  Dbg Intrinsic utilities
282
///
283
284
/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
285
/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
286
void ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
287
                                     StoreInst *SI, DIBuilder &Builder);
288
289
/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
290
/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
291
void ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
292
                                     LoadInst *LI, DIBuilder &Builder);
293
294
/// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
295
/// llvm.dbg.declare or llvm.dbg.addr intrinsic.
296
void ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
297
                                     PHINode *LI, DIBuilder &Builder);
298
299
/// Lowers llvm.dbg.declare intrinsics into appropriate set of
300
/// llvm.dbg.value intrinsics.
301
bool LowerDbgDeclare(Function &F);
302
303
/// Finds all intrinsics declaring local variables as living in the memory that
304
/// 'V' points to. This may include a mix of dbg.declare and
305
/// dbg.addr intrinsics.
306
TinyPtrVector<DbgInfoIntrinsic *> FindDbgAddrUses(Value *V);
307
308
/// Finds the llvm.dbg.value intrinsics describing a value.
309
void findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V);
310
311
/// Replaces llvm.dbg.declare instruction when the address it describes
312
/// is replaced with a new value. If Deref is true, an additional DW_OP_deref is
313
/// prepended to the expression. If Offset is non-zero, a constant displacement
314
/// is added to the expression (after the optional Deref). Offset can be
315
/// negative.
316
bool replaceDbgDeclare(Value *Address, Value *NewAddress,
317
                       Instruction *InsertBefore, DIBuilder &Builder,
318
                       bool Deref, int Offset);
319
320
/// Replaces llvm.dbg.declare instruction when the alloca it describes
321
/// is replaced with a new value. If Deref is true, an additional DW_OP_deref is
322
/// prepended to the expression. If Offset is non-zero, a constant displacement
323
/// is added to the expression (after the optional Deref). Offset can be
324
/// negative. New llvm.dbg.declare is inserted immediately before AI.
325
bool replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
326
                                DIBuilder &Builder, bool Deref, int Offset = 0);
327
328
/// Replaces multiple llvm.dbg.value instructions when the alloca it describes
329
/// is replaced with a new value. If Offset is non-zero, a constant displacement
330
/// is added to the expression (after the mandatory Deref). Offset can be
331
/// negative. New llvm.dbg.value instructions are inserted at the locations of
332
/// the instructions they replace.
333
void replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
334
                              DIBuilder &Builder, int Offset = 0);
335
336
/// Assuming the instruction \p I is going to be deleted, attempt to salvage any
337
/// dbg.value intrinsics referring to \p I by rewriting its effect into a
338
/// DIExpression.
339
void salvageDebugInfo(Instruction &I);
340
341
/// Remove all instructions from a basic block other than its terminator
342
/// and any present EH pad instructions.
343
unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB);
344
345
/// Insert an unreachable instruction before the specified
346
/// instruction, making it and the rest of the code in the block dead.
347
unsigned changeToUnreachable(Instruction *I, bool UseLLVMTrap,
348
                             bool PreserveLCSSA = false,
349
                             DominatorTree *DT = nullptr);
350
351
/// Convert the CallInst to InvokeInst with the specified unwind edge basic
352
/// block.  This also splits the basic block where CI is located, because
353
/// InvokeInst is a terminator instruction.  Returns the newly split basic
354
/// block.
355
BasicBlock *changeToInvokeAndSplitBasicBlock(CallInst *CI,
356
                                             BasicBlock *UnwindEdge);
357
358
/// Replace 'BB's terminator with one that does not have an unwind successor
359
/// block. Rewrites `invoke` to `call`, etc. Updates any PHIs in unwind
360
/// successor.
361
///
362
/// \param BB  Block whose terminator will be replaced.  Its terminator must
363
///            have an unwind successor.
364
void removeUnwindEdge(BasicBlock *BB, DominatorTree *DT = nullptr);
365
366
/// Remove all blocks that cannot be reached from the function's entry.
367
///
368
/// Returns true if any basic block was removed.
369
bool removeUnreachableBlocks(Function &F, LazyValueInfo *LVI = nullptr,
370
                             DominatorTree *DT = nullptr);
371
372
/// Combine the metadata of two instructions so that K can replace J
373
///
374
/// Metadata not listed as known via KnownIDs is removed
375
void combineMetadata(Instruction *K, const Instruction *J, ArrayRef<unsigned> KnownIDs);
376
377
/// Combine the metadata of two instructions so that K can replace J. This
378
/// specifically handles the case of CSE-like transformations.
379
///
380
/// Unknown metadata is removed.
381
void combineMetadataForCSE(Instruction *K, const Instruction *J);
382
383
// Replace each use of 'From' with 'To', if that use does not belong to basic
384
// block where 'From' is defined. Returns the number of replacements made.
385
unsigned replaceNonLocalUsesWith(Instruction *From, Value *To);
386
387
/// Replace each use of 'From' with 'To' if that use is dominated by
388
/// the given edge.  Returns the number of replacements made.
389
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
390
                                  const BasicBlockEdge &Edge);
391
/// Replace each use of 'From' with 'To' if that use is dominated by
392
/// the end of the given BasicBlock. Returns the number of replacements made.
393
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
394
                                  const BasicBlock *BB);
395
396
397
/// Return true if the CallSite CS calls a gc leaf function.
398
///
399
/// A leaf function is a function that does not safepoint the thread during its
400
/// execution.  During a call or invoke to such a function, the callers stack
401
/// does not have to be made parseable.
402
///
403
/// Most passes can and should ignore this information, and it is only used
404
/// during lowering by the GC infrastructure.
405
bool callsGCLeafFunction(ImmutableCallSite CS, const TargetLibraryInfo &TLI);
406
407
/// Copy a nonnull metadata node to a new load instruction.
408
///
409
/// This handles mapping it to range metadata if the new load is an integer
410
/// load instead of a pointer load.
411
void copyNonnullMetadata(const LoadInst &OldLI, MDNode *N, LoadInst &NewLI);
412
413
/// Copy a range metadata node to a new load instruction.
414
///
415
/// This handles mapping it to nonnull metadata if the new load is a pointer
416
/// load instead of an integer load and the range doesn't cover null.
417
void copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI, MDNode *N,
418
                       LoadInst &NewLI);
419
420
//===----------------------------------------------------------------------===//
421
//  Intrinsic pattern matching
422
//
423
424
/// Try and match a bswap or bitreverse idiom.
425
///
426
/// If an idiom is matched, an intrinsic call is inserted before \c I. Any added
427
/// instructions are returned in \c InsertedInsts. They will all have been added
428
/// to a basic block.
429
///
430
/// A bitreverse idiom normally requires around 2*BW nodes to be searched (where
431
/// BW is the bitwidth of the integer type). A bswap idiom requires anywhere up
432
/// to BW / 4 nodes to be searched, so is significantly faster.
433
///
434
/// This function returns true on a successful match or false otherwise.
435
bool recognizeBSwapOrBitReverseIdiom(
436
    Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
437
    SmallVectorImpl<Instruction *> &InsertedInsts);
438
439
//===----------------------------------------------------------------------===//
440
//  Sanitizer utilities
441
//
442
443
/// Given a CallInst, check if it calls a string function known to CodeGen,
444
/// and mark it with NoBuiltin if so.  To be used by sanitizers that intend
445
/// to intercept string functions and want to avoid converting them to target
446
/// specific instructions.
447
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI,
448
                                            const TargetLibraryInfo *TLI);
449
450
//===----------------------------------------------------------------------===//
451
//  Transform predicates
452
//
453
454
/// Given an instruction, is it legal to set operand OpIdx to a non-constant
455
/// value?
456
bool canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx);
457
458
} // End llvm namespace
459
460
#endif