Coverage Report

Created: 2018-02-19 08:21

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/tools/polly/include/polly/Support/ScopHelper.h
Line
Count
Source (jump to first uncovered line)
1
//===------ Support/ScopHelper.h -- Some Helper Functions for Scop. -------===//
2
//
3
//                     The LLVM Compiler Infrastructure
4
//
5
// This file is distributed under the University of Illinois Open Source
6
// License. See LICENSE.TXT for details.
7
//
8
//===----------------------------------------------------------------------===//
9
//
10
// Small functions that help with LLVM-IR.
11
//
12
//===----------------------------------------------------------------------===//
13
14
#ifndef POLLY_SUPPORT_IRHELPER_H
15
#define POLLY_SUPPORT_IRHELPER_H
16
17
#include "llvm/ADT/DenseMap.h"
18
#include "llvm/ADT/SetVector.h"
19
#include "llvm/IR/Instructions.h"
20
#include "llvm/IR/IntrinsicInst.h"
21
#include "llvm/IR/ValueHandle.h"
22
#include <tuple>
23
#include <vector>
24
25
namespace llvm {
26
class LoopInfo;
27
class Loop;
28
class ScalarEvolution;
29
class SCEV;
30
class Region;
31
class Pass;
32
class DominatorTree;
33
class RegionInfo;
34
class GetElementPtrInst;
35
} // namespace llvm
36
37
namespace polly {
38
class Scop;
39
40
/// Type to remap values.
///
/// Maps old values to their replacements; AssertingVH fires an assertion if a
/// still-referenced key or value is deleted.
using ValueMapT = llvm::DenseMap<llvm::AssertingVH<llvm::Value>,
                                 llvm::AssertingVH<llvm::Value>>;

/// Type for a set of invariant loads.
using InvariantLoadsSetTy = llvm::SetVector<llvm::AssertingVH<llvm::LoadInst>>;

/// Set type for parameters.
using ParameterSetTy = llvm::SetVector<const llvm::SCEV *>;

/// Set of loops (used to remember loops in non-affine subregions).
using BoxedLoopsSetTy = llvm::SetVector<const llvm::Loop *>;
52
53
/// Utility proxy to wrap the common members of LoadInst and StoreInst.
54
///
55
/// This works like the LLVM utility class CallSite, ie. it forwards all calls
56
/// to either a LoadInst, StoreInst, MemIntrinsic or MemTransferInst.
57
/// It is similar to LLVM's utility classes IntrinsicInst, MemIntrinsic,
58
/// MemTransferInst, etc. in that it offers a common interface, but does not act
59
/// as a fake base class.
60
/// It is similar to StringRef and ArrayRef in that it holds a pointer to the
61
/// referenced object and should be passed by-value as it is small enough.
62
///
63
/// This proxy can either represent a LoadInst instance, a StoreInst instance,
64
/// a MemIntrinsic instance (memset, memmove, memcpy), a CallInst instance or a
65
/// nullptr (only creatable using the default constructor); never an Instruction
66
/// that is neither of the above mentioned. When representing a nullptr, only
67
/// the following methods are defined:
68
/// isNull(), isInstruction(), isLoad(), isStore(), ..., isMemTransferInst(),
69
/// operator bool(), operator!()
70
///
71
/// The functions isa, cast, cast_or_null, dyn_cast are modeled to resemble
72
/// those from llvm/Support/Casting.h. Partial template function specialization
73
/// is currently not supported in C++ such that those cannot be used directly.
74
/// (llvm::isa could, but then llvm:cast etc. would not have the expected
75
/// behavior)
76
class MemAccInst {
77
private:
78
  llvm::Instruction *I;
79
80
public:
81
4.73k
  MemAccInst() : I(nullptr) {}
82
36.7k
  MemAccInst(const MemAccInst &Inst) : I(Inst.I) {}
83
0
  /* implicit */ MemAccInst(llvm::LoadInst &LI) : I(&LI) {}
84
665
  /* implicit */ MemAccInst(llvm::LoadInst *LI) : I(LI) {}
85
0
  /* implicit */ MemAccInst(llvm::StoreInst &SI) : I(&SI) {}
86
403
  /* implicit */ MemAccInst(llvm::StoreInst *SI) : I(SI) {}
87
0
  /* implicit */ MemAccInst(llvm::MemIntrinsic *MI) : I(MI) {}
88
0
  /* implicit */ MemAccInst(llvm::CallInst *CI) : I(CI) {}
89
15.7k
  explicit MemAccInst(llvm::Instruction &I) : I(&I) { assert(isa(I)); }
90
4.89k
  explicit MemAccInst(llvm::Instruction *I) : I(I) { assert(isa(I)); }
91
92
20.4k
  static bool isa(const llvm::Value &V) {
93
20.4k
    return llvm::isa<llvm::LoadInst>(V) || 
llvm::isa<llvm::StoreInst>(V)12.5k
||
94
20.4k
           
llvm::isa<llvm::CallInst>(V)4.77k
||
llvm::isa<llvm::MemIntrinsic>(V)4.73k
;
95
20.4k
  }
96
1.60k
  static bool isa(const llvm::Value *V) {
97
1.60k
    return llvm::isa<llvm::LoadInst>(V) || 
llvm::isa<llvm::StoreInst>(V)789
||
98
1.60k
           
llvm::isa<llvm::CallInst>(V)3
||
llvm::isa<llvm::MemIntrinsic>(V)1
;
99
1.60k
  }
100
0
  static MemAccInst cast(llvm::Value &V) {
101
0
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
102
0
  }
103
0
  static MemAccInst cast(llvm::Value *V) {
104
0
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
105
0
  }
106
0
  static MemAccInst cast_or_null(llvm::Value &V) {
107
0
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
108
0
  }
109
0
  static MemAccInst cast_or_null(llvm::Value *V) {
110
0
    if (!V)
111
0
      return MemAccInst();
112
0
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
113
0
  }
114
20.4k
  static MemAccInst dyn_cast(llvm::Value &V) {
115
20.4k
    if (isa(V))
116
15.7k
      return MemAccInst(llvm::cast<llvm::Instruction>(V));
117
4.73k
    return MemAccInst();
118
4.73k
  }
119
1.60k
  static MemAccInst dyn_cast(llvm::Value *V) {
120
1.60k
    assert(V);
121
1.60k
    if (isa(V))
122
1.60k
      return MemAccInst(llvm::cast<llvm::Instruction>(V));
123
1
    return MemAccInst();
124
1
  }
125
126
0
  MemAccInst &operator=(const MemAccInst &Inst) {
127
0
    I = Inst.I;
128
0
    return *this;
129
0
  }
130
0
  MemAccInst &operator=(llvm::LoadInst &LI) {
131
0
    I = &LI;
132
0
    return *this;
133
0
  }
134
0
  MemAccInst &operator=(llvm::LoadInst *LI) {
135
0
    I = LI;
136
0
    return *this;
137
0
  }
138
0
  MemAccInst &operator=(llvm::StoreInst &SI) {
139
0
    I = &SI;
140
0
    return *this;
141
0
  }
142
0
  MemAccInst &operator=(llvm::StoreInst *SI) {
143
0
    I = SI;
144
0
    return *this;
145
0
  }
146
0
  MemAccInst &operator=(llvm::MemIntrinsic &MI) {
147
0
    I = &MI;
148
0
    return *this;
149
0
  }
150
0
  MemAccInst &operator=(llvm::MemIntrinsic *MI) {
151
0
    I = MI;
152
0
    return *this;
153
0
  }
154
0
  MemAccInst &operator=(llvm::CallInst &CI) {
155
0
    I = &CI;
156
0
    return *this;
157
0
  }
158
0
  MemAccInst &operator=(llvm::CallInst *CI) {
159
0
    I = CI;
160
0
    return *this;
161
0
  }
162
163
22.5k
  llvm::Instruction *get() const {
164
22.5k
    assert(I && "Unexpected nullptr!");
165
22.5k
    return I;
166
22.5k
  }
167
23.3k
  operator llvm::Instruction *() const { return asInstruction(); }
168
22.5k
  llvm::Instruction *operator->() const { return get(); }
169
170
27.7k
  explicit operator bool() const { return isInstruction(); }
171
1.58k
  bool operator!() const { return isNull(); }
172
173
10.1k
  llvm::Value *getValueOperand() const {
174
10.1k
    if (isLoad())
175
4.99k
      return asLoad();
176
5.16k
    if (isStore())
177
5.14k
      return asStore()->getValueOperand();
178
24
    if (isMemIntrinsic())
179
24
      return nullptr;
180
0
    if (isCallInst())
181
0
      return nullptr;
182
0
    llvm_unreachable("Operation not supported on nullptr");
183
0
  }
184
28.1k
  llvm::Value *getPointerOperand() const {
185
28.1k
    if (isLoad())
186
14.1k
      return asLoad()->getPointerOperand();
187
14.0k
    if (isStore())
188
13.9k
      return asStore()->getPointerOperand();
189
63
    if (isMemIntrinsic())
190
18
      return asMemIntrinsic()->getRawDest();
191
45
    if (isCallInst())
192
45
      return nullptr;
193
0
    llvm_unreachable("Operation not supported on nullptr");
194
0
  }
195
196
5
  unsigned getAlignment() const {
197
5
    if (isLoad())
198
4
      return asLoad()->getAlignment();
199
1
    if (isStore())
200
1
      return asStore()->getAlignment();
201
0
    if (isMemTransferInst())
202
0
      return std::min(asMemTransferInst()->getDestAlignment(),
203
0
                      asMemTransferInst()->getSourceAlignment());
204
0
    if (isMemIntrinsic())
205
0
      return asMemIntrinsic()->getDestAlignment();
206
0
    if (isCallInst())
207
0
      return 0;
208
0
    llvm_unreachable("Operation not supported on nullptr");
209
0
  }
210
0
  bool isVolatile() const {
211
0
    if (isLoad())
212
0
      return asLoad()->isVolatile();
213
0
    if (isStore())
214
0
      return asStore()->isVolatile();
215
0
    if (isMemIntrinsic())
216
0
      return asMemIntrinsic()->isVolatile();
217
0
    if (isCallInst())
218
0
      return false;
219
0
    llvm_unreachable("Operation not supported on nullptr");
220
0
  }
221
12.3k
  bool isSimple() const {
222
12.3k
    if (isLoad())
223
6.43k
      return asLoad()->isSimple();
224
5.93k
    if (isStore())
225
5.93k
      return asStore()->isSimple();
226
0
    if (isMemIntrinsic())
227
0
      return !asMemIntrinsic()->isVolatile();
228
0
    if (isCallInst())
229
0
      return true;
230
0
    llvm_unreachable("Operation not supported on nullptr");
231
0
  }
232
0
  llvm::AtomicOrdering getOrdering() const {
233
0
    if (isLoad())
234
0
      return asLoad()->getOrdering();
235
0
    if (isStore())
236
0
      return asStore()->getOrdering();
237
0
    if (isMemIntrinsic())
238
0
      return llvm::AtomicOrdering::NotAtomic;
239
0
    if (isCallInst())
240
0
      return llvm::AtomicOrdering::NotAtomic;
241
0
    llvm_unreachable("Operation not supported on nullptr");
242
0
  }
243
0
  bool isUnordered() const {
244
0
    if (isLoad())
245
0
      return asLoad()->isUnordered();
246
0
    if (isStore())
247
0
      return asStore()->isUnordered();
248
0
    // Copied from the Load/Store implementation of isUnordered:
249
0
    if (isMemIntrinsic())
250
0
      return !asMemIntrinsic()->isVolatile();
251
0
    if (isCallInst())
252
0
      return true;
253
0
    llvm_unreachable("Operation not supported on nullptr");
254
0
  }
255
256
2.37k
  bool isNull() const { return !I; }
257
27.7k
  bool isInstruction() const { return I; }
258
259
67.1k
  llvm::Instruction *asInstruction() const { return I; }
260
261
private:
262
50.7k
  bool isLoad() const { return I && llvm::isa<llvm::LoadInst>(I); }
263
25.1k
  bool isStore() const { return I && llvm::isa<llvm::StoreInst>(I); }
264
45
  bool isCallInst() const { return I && llvm::isa<llvm::CallInst>(I); }
265
87
  bool isMemIntrinsic() const { return I && llvm::isa<llvm::MemIntrinsic>(I); }
266
0
  bool isMemSetInst() const { return I && llvm::isa<llvm::MemSetInst>(I); }
267
0
  bool isMemTransferInst() const {
268
0
    return I && llvm::isa<llvm::MemTransferInst>(I);
269
0
  }
270
271
25.5k
  llvm::LoadInst *asLoad() const { return llvm::cast<llvm::LoadInst>(I); }
272
25.0k
  llvm::StoreInst *asStore() const { return llvm::cast<llvm::StoreInst>(I); }
273
0
  llvm::CallInst *asCallInst() const { return llvm::cast<llvm::CallInst>(I); }
274
18
  llvm::MemIntrinsic *asMemIntrinsic() const {
275
18
    return llvm::cast<llvm::MemIntrinsic>(I);
276
18
  }
277
0
  llvm::MemSetInst *asMemSetInst() const {
278
0
    return llvm::cast<llvm::MemSetInst>(I);
279
0
  }
280
0
  llvm::MemTransferInst *asMemTransferInst() const {
281
0
    return llvm::cast<llvm::MemTransferInst>(I);
282
0
  }
283
};
284
} // namespace polly
285
286
namespace llvm {
287
/// Specialize simplify_type for MemAccInst to enable dyn_cast and cast
288
///        from a MemAccInst object.
289
template <> struct simplify_type<polly::MemAccInst> {
290
  typedef Instruction *SimpleType;
291
43.8k
  static SimpleType getSimplifiedValue(polly::MemAccInst &I) {
292
43.8k
    return I.asInstruction();
293
43.8k
  }
294
};
295
} // namespace llvm
296
297
namespace polly {
298
299
/// Simplify the region to have a single unconditional entry edge and a
300
/// single exit edge.
301
///
302
/// Although this function allows DT and RI to be null, regions only work
303
/// properly if the DominatorTree (for Region::contains) and RegionInfo are kept
304
/// up-to-date.
305
///
306
/// @param R  The region to be simplified
307
/// @param DT DominatorTree to be updated.
308
/// @param LI LoopInfo to be updated.
309
/// @param RI RegionInfo to be updated.
310
void simplifyRegion(llvm::Region *R, llvm::DominatorTree *DT,
311
                    llvm::LoopInfo *LI, llvm::RegionInfo *RI);
312
313
/// Split the entry block of a function to store the newly inserted
314
///        allocations outside of all Scops.
315
///
316
/// @param EntryBlock The entry block of the current function.
317
/// @param P          The pass that currently running.
318
///
319
void splitEntryBlockForAlloca(llvm::BasicBlock *EntryBlock, llvm::Pass *P);
320
321
/// Split the entry block of a function to store the newly inserted
322
///        allocations outside of all Scops.
323
///
324
/// @param DT DominatorTree to be updated.
325
/// @param LI LoopInfo to be updated.
326
/// @param RI RegionInfo to be updated.
327
void splitEntryBlockForAlloca(llvm::BasicBlock *EntryBlock,
328
                              llvm::DominatorTree *DT, llvm::LoopInfo *LI,
329
                              llvm::RegionInfo *RI);
330
331
/// Wrapper for SCEVExpander extended to all Polly features.
332
///
333
/// This wrapper will internally call the SCEVExpander but also makes sure that
334
/// all additional features not represented in SCEV (e.g., SDiv/SRem are not
335
/// black boxes but can be part of the function) will be expanded correctly.
336
///
337
/// The parameters are the same as for the creation of a SCEVExpander as well
338
/// as the call to SCEVExpander::expandCodeFor:
339
///
340
/// @param S     The current Scop.
341
/// @param SE    The Scalar Evolution pass.
342
/// @param DL    The module data layout.
343
/// @param Name  The suffix added to the new instruction names.
344
/// @param E     The expression for which code is actually generated.
345
/// @param Ty    The type of the resulting code.
346
/// @param IP    The insertion point for the new code.
347
/// @param VMap  A remapping of values used in @p E.
348
/// @param RTCBB The last block of the RTC. Used to insert loop-invariant
349
///              instructions in rare cases.
350
llvm::Value *expandCodeFor(Scop &S, llvm::ScalarEvolution &SE,
351
                           const llvm::DataLayout &DL, const char *Name,
352
                           const llvm::SCEV *E, llvm::Type *Ty,
353
                           llvm::Instruction *IP, ValueMapT *VMap,
354
                           llvm::BasicBlock *RTCBB);
355
356
/// Check if the block is an error block.
357
///
358
/// An error block is currently any block that fulfills at least one of
359
/// the following conditions:
360
///
361
///  - It is terminated by an unreachable instruction
362
///  - It contains a call to a non-pure function that is not immediately
363
///    dominated by a loop header and that does not dominate the region exit.
364
///    This is a heuristic to pick only error blocks that are conditionally
365
///    executed and can be assumed to be not executed at all without the domains
366
///    being available.
367
///
368
/// @param BB The block to check.
369
/// @param R  The analyzed region.
370
/// @param LI The loop info analysis.
371
/// @param DT The dominator tree of the function.
372
///
373
/// @return True if the block is an error block, false otherwise.
374
bool isErrorBlock(llvm::BasicBlock &BB, const llvm::Region &R,
375
                  llvm::LoopInfo &LI, const llvm::DominatorTree &DT);
376
377
/// Return the condition for the terminator @p TI.
378
///
379
/// For unconditional branches the "i1 true" condition will be returned.
380
///
381
/// @param TI The terminator to get the condition from.
382
///
383
/// @return The condition of @p TI and nullptr if none could be extracted.
384
llvm::Value *getConditionFromTerminator(llvm::TerminatorInst *TI);
385
386
/// Check if @p LInst can be hoisted in @p R.
387
///
388
/// @param LInst The load to check.
389
/// @param R     The analyzed region.
390
/// @param LI    The loop info.
391
/// @param SE    The scalar evolution analysis.
392
/// @param DT    The dominator tree of the function.
393
///
394
/// @return True if @p LInst can be hoisted in @p R.
395
bool isHoistableLoad(llvm::LoadInst *LInst, llvm::Region &R, llvm::LoopInfo &LI,
396
                     llvm::ScalarEvolution &SE, const llvm::DominatorTree &DT);
397
398
/// Return true iff @p V is an intrinsic that we ignore during code
399
///        generation.
400
bool isIgnoredIntrinsic(const llvm::Value *V);
401
402
/// Check whether a value can be synthesized by the code generator.
403
///
404
/// Some value will be recalculated only from information that is code generated
405
/// from the polyhedral representation. For such instructions we do not need to
406
/// ensure that their operands are available during code generation.
407
///
408
/// @param V The value to check.
409
/// @param S The current SCoP.
410
/// @param SE The scalar evolution database.
411
/// @param Scope Location where the value would by synthesized.
412
/// @return If the instruction I can be regenerated from its
413
///         scalar evolution representation, return true,
414
///         otherwise return false.
415
bool canSynthesize(const llvm::Value *V, const Scop &S,
416
                   llvm::ScalarEvolution *SE, llvm::Loop *Scope);
417
418
/// Return the block in which a value is used.
419
///
420
/// For normal instructions, this is the instruction's parent block. For PHI
421
/// nodes, this is the incoming block of that use, because this is where the
422
/// operand must be defined (i.e. its definition dominates this block).
423
/// Non-instructions do not use operands at a specific point such that in this
424
/// case this function returns nullptr.
425
llvm::BasicBlock *getUseBlock(const llvm::Use &U);
426
427
/// Derive the individual index expressions from a GEP instruction.
428
///
429
/// This function optimistically assumes the GEP references into a fixed size
430
/// array. If this is actually true, this function returns a list of array
431
/// subscript expressions as SCEV as well as a list of integers describing
432
/// the size of the individual array dimensions. Both lists have either equal
433
/// length or the size list is one element shorter in case there is no known
434
/// size available for the outermost array dimension.
435
///
436
/// @param GEP The GetElementPtr instruction to analyze.
437
///
438
/// @return A tuple with the subscript expressions and the dimension sizes.
439
std::tuple<std::vector<const llvm::SCEV *>, std::vector<int>>
440
getIndexExpressionsFromGEP(llvm::GetElementPtrInst *GEP,
441
                           llvm::ScalarEvolution &SE);
442
443
// If the loop is nonaffine/boxed, return the first non-boxed surrounding loop
444
// for Polly. If the loop is affine, return the loop itself.
445
//
446
// @param L             Pointer to the Loop object to analyze.
447
// @param LI            Reference to the LoopInfo.
448
// @param BoxedLoops    Set of Boxed Loops we get from the SCoP.
449
llvm::Loop *getFirstNonBoxedLoopFor(llvm::Loop *L, llvm::LoopInfo &LI,
450
                                    const BoxedLoopsSetTy &BoxedLoops);
451
452
// If the Basic Block belongs to a loop that is nonaffine/boxed, return the
453
// first non-boxed surrounding loop for Polly. If the loop is affine, return
454
// the loop itself.
455
//
456
// @param BB            Pointer to the Basic Block to analyze.
457
// @param LI            Reference to the LoopInfo.
458
// @param BoxedLoops    Set of Boxed Loops we get from the SCoP.
459
llvm::Loop *getFirstNonBoxedLoopFor(llvm::BasicBlock *BB, llvm::LoopInfo &LI,
460
                                    const BoxedLoopsSetTy &BoxedLoops);
461
} // namespace polly
462
#endif