Coverage Report

Created: 2019-04-21 11:35

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/tools/polly/include/polly/Support/ScopHelper.h
Line
Count
Source (jump to first uncovered line)
1
//===------ Support/ScopHelper.h -- Some Helper Functions for Scop. -------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// Small functions that help with LLVM-IR.
10
//
11
//===----------------------------------------------------------------------===//
12
13
#ifndef POLLY_SUPPORT_IRHELPER_H
14
#define POLLY_SUPPORT_IRHELPER_H
15
16
#include "llvm/ADT/SetVector.h"
17
#include "llvm/IR/Instructions.h"
18
#include "llvm/IR/IntrinsicInst.h"
19
#include "llvm/IR/ValueHandle.h"
20
21
namespace llvm {
22
class LoopInfo;
23
class Loop;
24
class ScalarEvolution;
25
class SCEV;
26
class Region;
27
class Pass;
28
class DominatorTree;
29
class RegionInfo;
30
} // namespace llvm
31
32
namespace polly {
33
class Scop;
34
class ScopStmt;
35
36
/// Type to remap values.
37
using ValueMapT = llvm::DenseMap<llvm::AssertingVH<llvm::Value>,
38
                                 llvm::AssertingVH<llvm::Value>>;
39
40
/// Type for a set of invariant loads.
41
using InvariantLoadsSetTy = llvm::SetVector<llvm::AssertingVH<llvm::LoadInst>>;
42
43
/// Set type for parameters.
44
using ParameterSetTy = llvm::SetVector<const llvm::SCEV *>;
45
46
/// Set of loops (used to remember loops in non-affine subregions).
47
using BoxedLoopsSetTy = llvm::SetVector<const llvm::Loop *>;
48
49
/// Utility proxy to wrap the common members of LoadInst and StoreInst.
50
///
51
/// This works like the LLVM utility class CallSite, i.e. it forwards all calls
52
/// to either a LoadInst, StoreInst, MemIntrinsic or MemTransferInst.
53
/// It is similar to LLVM's utility classes IntrinsicInst, MemIntrinsic,
54
/// MemTransferInst, etc. in that it offers a common interface, but does not act
55
/// as a fake base class.
56
/// It is similar to StringRef and ArrayRef in that it holds a pointer to the
57
/// referenced object and should be passed by-value as it is small enough.
58
///
59
/// This proxy can either represent a LoadInst instance, a StoreInst instance,
60
/// a MemIntrinsic instance (memset, memmove, memcpy), a CallInst instance or a
61
/// nullptr (only creatable using the default constructor); never an Instruction
62
/// that is neither of the above mentioned. When representing a nullptr, only
63
/// the following methods are defined:
64
/// isNull(), isInstruction(), isLoad(), isStore(), ..., isMemTransferInst(),
65
/// operator bool(), operator!()
66
///
67
/// The functions isa, cast, cast_or_null, dyn_cast are modeled to resemble
68
/// those from llvm/Support/Casting.h. Partial template function specialization
69
/// is currently not supported in C++ such that those cannot be used directly.
70
/// (llvm::isa could, but then llvm::cast etc. would not have the expected
71
/// behavior)
72
class MemAccInst {
73
private:
74
  llvm::Instruction *I;
75
76
public:
77
1.76k
  MemAccInst() : I(nullptr) {}
78
14.1k
  MemAccInst(const MemAccInst &Inst) : I(Inst.I) {}
79
0
  /* implicit */ MemAccInst(llvm::LoadInst &LI) : I(&LI) {}
80
220
  /* implicit */ MemAccInst(llvm::LoadInst *LI) : I(LI) {}
81
0
  /* implicit */ MemAccInst(llvm::StoreInst &SI) : I(&SI) {}
82
118
  /* implicit */ MemAccInst(llvm::StoreInst *SI) : I(SI) {}
83
0
  /* implicit */ MemAccInst(llvm::MemIntrinsic *MI) : I(MI) {}
84
0
  /* implicit */ MemAccInst(llvm::CallInst *CI) : I(CI) {}
85
6.74k
  explicit MemAccInst(llvm::Instruction &I) : I(&I) { assert(isa(I)); }
86
1.58k
  explicit MemAccInst(llvm::Instruction *I) : I(I) { assert(isa(I)); }
87
88
8.50k
  static bool isa(const llvm::Value &V) {
89
8.50k
    return llvm::isa<llvm::LoadInst>(V) || 
llvm::isa<llvm::StoreInst>(V)5.15k
||
90
8.50k
           
llvm::isa<llvm::CallInst>(V)1.77k
||
llvm::isa<llvm::MemIntrinsic>(V)1.76k
;
91
8.50k
  }
92
505
  static bool isa(const llvm::Value *V) {
93
505
    return llvm::isa<llvm::LoadInst>(V) || 
llvm::isa<llvm::StoreInst>(V)257
||
94
505
           
llvm::isa<llvm::CallInst>(V)0
||
llvm::isa<llvm::MemIntrinsic>(V)0
;
95
505
  }
96
0
  static MemAccInst cast(llvm::Value &V) {
97
0
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
98
0
  }
99
0
  static MemAccInst cast(llvm::Value *V) {
100
0
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
101
0
  }
102
0
  static MemAccInst cast_or_null(llvm::Value &V) {
103
0
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
104
0
  }
105
0
  static MemAccInst cast_or_null(llvm::Value *V) {
106
0
    if (!V)
107
0
      return MemAccInst();
108
0
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
109
0
  }
110
8.50k
  static MemAccInst dyn_cast(llvm::Value &V) {
111
8.50k
    if (isa(V))
112
6.74k
      return MemAccInst(llvm::cast<llvm::Instruction>(V));
113
1.76k
    return MemAccInst();
114
1.76k
  }
115
505
  static MemAccInst dyn_cast(llvm::Value *V) {
116
505
    assert(V);
117
505
    if (isa(V))
118
505
      return MemAccInst(llvm::cast<llvm::Instruction>(V));
119
0
    return MemAccInst();
120
0
  }
121
122
0
  MemAccInst &operator=(const MemAccInst &Inst) {
123
0
    I = Inst.I;
124
0
    return *this;
125
0
  }
126
0
  MemAccInst &operator=(llvm::LoadInst &LI) {
127
0
    I = &LI;
128
0
    return *this;
129
0
  }
130
0
  MemAccInst &operator=(llvm::LoadInst *LI) {
131
0
    I = LI;
132
0
    return *this;
133
0
  }
134
0
  MemAccInst &operator=(llvm::StoreInst &SI) {
135
0
    I = &SI;
136
0
    return *this;
137
0
  }
138
0
  MemAccInst &operator=(llvm::StoreInst *SI) {
139
0
    I = SI;
140
0
    return *this;
141
0
  }
142
0
  MemAccInst &operator=(llvm::MemIntrinsic &MI) {
143
0
    I = &MI;
144
0
    return *this;
145
0
  }
146
0
  MemAccInst &operator=(llvm::MemIntrinsic *MI) {
147
0
    I = MI;
148
0
    return *this;
149
0
  }
150
0
  MemAccInst &operator=(llvm::CallInst &CI) {
151
0
    I = &CI;
152
0
    return *this;
153
0
  }
154
0
  MemAccInst &operator=(llvm::CallInst *CI) {
155
0
    I = CI;
156
0
    return *this;
157
0
  }
158
159
9.13k
  llvm::Instruction *get() const {
160
9.13k
    assert(I && "Unexpected nullptr!");
161
9.13k
    return I;
162
9.13k
  }
163
9.31k
  operator llvm::Instruction *() const { return asInstruction(); }
164
9.13k
  llvm::Instruction *operator->() const { return get(); }
165
166
11.0k
  explicit operator bool() const { return isInstruction(); }
167
503
  bool operator!() const { return isNull(); }
168
169
3.56k
  llvm::Value *getValueOperand() const {
170
3.56k
    if (isLoad())
171
1.72k
      return asLoad();
172
1.84k
    if (isStore())
173
1.83k
      return asStore()->getValueOperand();
174
8
    if (isMemIntrinsic())
175
8
      return nullptr;
176
0
    if (isCallInst())
177
0
      return nullptr;
178
0
    llvm_unreachable("Operation not supported on nullptr");
179
0
  }
180
10.9k
  llvm::Value *getPointerOperand() const {
181
10.9k
    if (isLoad())
182
5.40k
      return asLoad()->getPointerOperand();
183
5.53k
    if (isStore())
184
5.50k
      return asStore()->getPointerOperand();
185
29
    if (isMemIntrinsic())
186
5
      return asMemIntrinsic()->getRawDest();
187
24
    if (isCallInst())
188
24
      return nullptr;
189
0
    llvm_unreachable("Operation not supported on nullptr");
190
0
  }
191
192
0
  unsigned getAlignment() const {
193
0
    if (isLoad())
194
0
      return asLoad()->getAlignment();
195
0
    if (isStore())
196
0
      return asStore()->getAlignment();
197
0
    if (isMemTransferInst())
198
0
      return std::min(asMemTransferInst()->getDestAlignment(),
199
0
                      asMemTransferInst()->getSourceAlignment());
200
0
    if (isMemIntrinsic())
201
0
      return asMemIntrinsic()->getDestAlignment();
202
0
    if (isCallInst())
203
0
      return 0;
204
0
    llvm_unreachable("Operation not supported on nullptr");
205
0
  }
206
0
  bool isVolatile() const {
207
0
    if (isLoad())
208
0
      return asLoad()->isVolatile();
209
0
    if (isStore())
210
0
      return asStore()->isVolatile();
211
0
    if (isMemIntrinsic())
212
0
      return asMemIntrinsic()->isVolatile();
213
0
    if (isCallInst())
214
0
      return false;
215
0
    llvm_unreachable("Operation not supported on nullptr");
216
0
  }
217
5.57k
  bool isSimple() const {
218
5.57k
    if (isLoad())
219
2.84k
      return asLoad()->isSimple();
220
2.72k
    if (isStore())
221
2.72k
      return asStore()->isSimple();
222
0
    if (isMemIntrinsic())
223
0
      return !asMemIntrinsic()->isVolatile();
224
0
    if (isCallInst())
225
0
      return true;
226
0
    llvm_unreachable("Operation not supported on nullptr");
227
0
  }
228
0
  llvm::AtomicOrdering getOrdering() const {
229
0
    if (isLoad())
230
0
      return asLoad()->getOrdering();
231
0
    if (isStore())
232
0
      return asStore()->getOrdering();
233
0
    if (isMemIntrinsic())
234
0
      return llvm::AtomicOrdering::NotAtomic;
235
0
    if (isCallInst())
236
0
      return llvm::AtomicOrdering::NotAtomic;
237
0
    llvm_unreachable("Operation not supported on nullptr");
238
0
  }
239
0
  bool isUnordered() const {
240
0
    if (isLoad())
241
0
      return asLoad()->isUnordered();
242
0
    if (isStore())
243
0
      return asStore()->isUnordered();
244
0
    // Copied from the Load/Store implementation of isUnordered:
245
0
    if (isMemIntrinsic())
246
0
      return !asMemIntrinsic()->isVolatile();
247
0
    if (isCallInst())
248
0
      return true;
249
0
    llvm_unreachable("Operation not supported on nullptr");
250
0
  }
251
252
730
  bool isNull() const { return !I; }
253
11.0k
  bool isInstruction() const { return I; }
254
255
27.1k
  llvm::Instruction *asInstruction() const { return I; }
256
257
private:
258
20.0k
  bool isLoad() const { return I && llvm::isa<llvm::LoadInst>(I); }
259
10.0k
  bool isStore() const { return I && llvm::isa<llvm::StoreInst>(I); }
260
24
  bool isCallInst() const { return I && llvm::isa<llvm::CallInst>(I); }
261
37
  bool isMemIntrinsic() const { return I && llvm::isa<llvm::MemIntrinsic>(I); }
262
0
  bool isMemSetInst() const { return I && llvm::isa<llvm::MemSetInst>(I); }
263
0
  bool isMemTransferInst() const {
264
0
    return I && llvm::isa<llvm::MemTransferInst>(I);
265
0
  }
266
267
9.97k
  llvm::LoadInst *asLoad() const { return llvm::cast<llvm::LoadInst>(I); }
268
10.0k
  llvm::StoreInst *asStore() const { return llvm::cast<llvm::StoreInst>(I); }
269
0
  llvm::CallInst *asCallInst() const { return llvm::cast<llvm::CallInst>(I); }
270
5
  llvm::MemIntrinsic *asMemIntrinsic() const {
271
5
    return llvm::cast<llvm::MemIntrinsic>(I);
272
5
  }
273
0
  llvm::MemSetInst *asMemSetInst() const {
274
0
    return llvm::cast<llvm::MemSetInst>(I);
275
0
  }
276
0
  llvm::MemTransferInst *asMemTransferInst() const {
277
0
    return llvm::cast<llvm::MemTransferInst>(I);
278
0
  }
279
};
280
} // namespace polly
281
282
namespace llvm {
283
/// Specialize simplify_type for MemAccInst to enable dyn_cast and cast
284
///        from a MemAccInst object.
285
template <> struct simplify_type<polly::MemAccInst> {
286
  typedef Instruction *SimpleType;
287
17.7k
  static SimpleType getSimplifiedValue(polly::MemAccInst &I) {
288
17.7k
    return I.asInstruction();
289
17.7k
  }
290
};
291
} // namespace llvm
292
293
namespace polly {
294
295
/// Simplify the region to have a single unconditional entry edge and a
296
/// single exit edge.
297
///
298
/// Although this function allows DT and RI to be null, regions only work
299
/// properly if the DominatorTree (for Region::contains) and RegionInfo are kept
300
/// up-to-date.
301
///
302
/// @param R  The region to be simplified
303
/// @param DT DominatorTree to be updated.
304
/// @param LI LoopInfo to be updated.
305
/// @param RI RegionInfo to be updated.
306
void simplifyRegion(llvm::Region *R, llvm::DominatorTree *DT,
307
                    llvm::LoopInfo *LI, llvm::RegionInfo *RI);
308
309
/// Split the entry block of a function to store the newly inserted
310
///        allocations outside of all Scops.
311
///
312
/// @param EntryBlock The entry block of the current function.
313
/// @param P          The pass that is currently running.
314
///
315
void splitEntryBlockForAlloca(llvm::BasicBlock *EntryBlock, llvm::Pass *P);
316
317
/// Split the entry block of a function to store the newly inserted
318
///        allocations outside of all Scops.
319
///
320
/// @param DT DominatorTree to be updated.
321
/// @param LI LoopInfo to be updated.
322
/// @param RI RegionInfo to be updated.
323
void splitEntryBlockForAlloca(llvm::BasicBlock *EntryBlock,
324
                              llvm::DominatorTree *DT, llvm::LoopInfo *LI,
325
                              llvm::RegionInfo *RI);
326
327
/// Wrapper for SCEVExpander extended to all Polly features.
328
///
329
/// This wrapper will internally call the SCEVExpander but also makes sure that
330
/// all additional features not represented in SCEV (e.g., SDiv/SRem are not
331
/// black boxes but can be part of the function) will be expanded correctly.
332
///
333
/// The parameters are the same as for the creation of a SCEVExpander as well
334
/// as the call to SCEVExpander::expandCodeFor:
335
///
336
/// @param S     The current Scop.
337
/// @param SE    The Scalar Evolution pass.
338
/// @param DL    The module data layout.
339
/// @param Name  The suffix added to the new instruction names.
340
/// @param E     The expression for which code is actually generated.
341
/// @param Ty    The type of the resulting code.
342
/// @param IP    The insertion point for the new code.
343
/// @param VMap  A remapping of values used in @p E.
344
/// @param RTCBB The last block of the RTC. Used to insert loop-invariant
345
///              instructions in rare cases.
346
llvm::Value *expandCodeFor(Scop &S, llvm::ScalarEvolution &SE,
347
                           const llvm::DataLayout &DL, const char *Name,
348
                           const llvm::SCEV *E, llvm::Type *Ty,
349
                           llvm::Instruction *IP, ValueMapT *VMap,
350
                           llvm::BasicBlock *RTCBB);
351
352
/// Check if the block is an error block.
353
///
354
/// An error block is currently any block that fulfills at least one of
355
/// the following conditions:
356
///
357
///  - It is terminated by an unreachable instruction
358
///  - It contains a call to a non-pure function that is not immediately
359
///    dominated by a loop header and that does not dominate the region exit.
360
///    This is a heuristic to pick only error blocks that are conditionally
361
///    executed and can be assumed to be not executed at all without the domains
362
///    being available.
363
///
364
/// @param BB The block to check.
365
/// @param R  The analyzed region.
366
/// @param LI The loop info analysis.
367
/// @param DT The dominator tree of the function.
368
///
369
/// @return True if the block is an error block, false otherwise.
370
bool isErrorBlock(llvm::BasicBlock &BB, const llvm::Region &R,
371
                  llvm::LoopInfo &LI, const llvm::DominatorTree &DT);
372
373
/// Return the condition for the terminator @p TI.
374
///
375
/// For unconditional branches the "i1 true" condition will be returned.
376
///
377
/// @param TI The terminator to get the condition from.
378
///
379
/// @return The condition of @p TI and nullptr if none could be extracted.
380
llvm::Value *getConditionFromTerminator(llvm::Instruction *TI);
381
382
/// Check if @p LInst can be hoisted in @p R.
383
///
384
/// @param LInst The load to check.
385
/// @param R     The analyzed region.
386
/// @param LI    The loop info.
387
/// @param SE    The scalar evolution analysis.
388
/// @param DT    The dominator tree of the function.
389
/// @param KnownInvariantLoads The invariant load set.
390
///
391
/// @return True if @p LInst can be hoisted in @p R.
392
bool isHoistableLoad(llvm::LoadInst *LInst, llvm::Region &R, llvm::LoopInfo &LI,
393
                     llvm::ScalarEvolution &SE, const llvm::DominatorTree &DT,
394
                     const InvariantLoadsSetTy &KnownInvariantLoads);
395
396
/// Return true iff @p V is an intrinsic that we ignore during code
397
///        generation.
398
bool isIgnoredIntrinsic(const llvm::Value *V);
399
400
/// Check whether a value can be synthesized by the code generator.
401
///
402
/// Some value will be recalculated only from information that is code generated
403
/// from the polyhedral representation. For such instructions we do not need to
404
/// ensure that their operands are available during code generation.
405
///
406
/// @param V The value to check.
407
/// @param S The current SCoP.
408
/// @param SE The scalar evolution database.
409
/// @param Scope Location where the value would by synthesized.
410
/// @return If the instruction I can be regenerated from its
411
///         scalar evolution representation, return true,
412
///         otherwise return false.
413
bool canSynthesize(const llvm::Value *V, const Scop &S,
414
                   llvm::ScalarEvolution *SE, llvm::Loop *Scope);
415
416
/// Return the block in which a value is used.
417
///
418
/// For normal instructions, this is the instruction's parent block. For PHI
419
/// nodes, this is the incoming block of that use, because this is where the
420
/// operand must be defined (i.e. its definition dominates this block).
421
/// Non-instructions do not use operands at a specific point such that in this
422
/// case this function returns nullptr.
423
llvm::BasicBlock *getUseBlock(const llvm::Use &U);
424
425
/// Derive the individual index expressions from a GEP instruction.
426
///
427
/// This function optimistically assumes the GEP references into a fixed size
428
/// array. If this is actually true, this function returns a list of array
429
/// subscript expressions as SCEV as well as a list of integers describing
430
/// the size of the individual array dimensions. Both lists have either equal
431
/// length or the size list is one element shorter in case there is no known
432
/// size available for the outermost array dimension.
433
///
434
/// @param GEP The GetElementPtr instruction to analyze.
435
///
436
/// @return A tuple with the subscript expressions and the dimension sizes.
437
std::tuple<std::vector<const llvm::SCEV *>, std::vector<int>>
438
getIndexExpressionsFromGEP(llvm::GetElementPtrInst *GEP,
439
                           llvm::ScalarEvolution &SE);
440
441
// If the loop is nonaffine/boxed, return the first non-boxed surrounding loop
442
// for Polly. If the loop is affine, return the loop itself.
443
//
444
// @param L             Pointer to the Loop object to analyze.
445
// @param LI            Reference to the LoopInfo.
446
// @param BoxedLoops    Set of Boxed Loops we get from the SCoP.
447
llvm::Loop *getFirstNonBoxedLoopFor(llvm::Loop *L, llvm::LoopInfo &LI,
448
                                    const BoxedLoopsSetTy &BoxedLoops);
449
450
// If the Basic Block belongs to a loop that is nonaffine/boxed, return the
451
// first non-boxed surrounding loop for Polly. If the loop is affine, return
452
// the loop itself.
453
//
454
// @param BB            Pointer to the Basic Block to analyze.
455
// @param LI            Reference to the LoopInfo.
456
// @param BoxedLoops    Set of Boxed Loops we get from the SCoP.
457
llvm::Loop *getFirstNonBoxedLoopFor(llvm::BasicBlock *BB, llvm::LoopInfo &LI,
458
                                    const BoxedLoopsSetTy &BoxedLoops);
459
460
/// Is the given instruction a call to a debug function?
461
///
462
/// A debug function can be used to insert output in Polly-optimized code which
463
/// normally does not allow function calls with side-effects. For instance, a
464
/// printf can be inserted to check whether a value still has the expected value
465
/// after Polly generated code:
466
///
467
///     int sum = 0;
468
///     for (int i = 0; i < 16; i+=1) {
469
///       sum += i;
470
///       printf("The value of sum at i=%d is %d\n", sum, i);
471
///     }
472
bool isDebugCall(llvm::Instruction *Inst);
473
474
/// Does the statement contain a call to a debug function?
475
///
476
/// Such a statement must not be removed, even if has no side-effects.
477
bool hasDebugCall(ScopStmt *Stmt);
478
} // namespace polly
479
#endif