Coverage Report

Created: 2019-07-24 05:18

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/tools/polly/include/polly/Support/ScopHelper.h
//===------ Support/ScopHelper.h -- Some Helper Functions for Scop. -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Small functions that help with LLVM-IR.
//
//===----------------------------------------------------------------------===//

#ifndef POLLY_SUPPORT_IRHELPER_H
#define POLLY_SUPPORT_IRHELPER_H

#include "llvm/ADT/SetVector.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/ValueHandle.h"

namespace llvm {
class LoopInfo;
class Loop;
class ScalarEvolution;
class SCEV;
class Region;
class Pass;
class DominatorTree;
class RegionInfo;
class RegionNode;
} // namespace llvm

namespace polly {
class Scop;
class ScopStmt;

/// Type to remap values.
using ValueMapT = llvm::DenseMap<llvm::AssertingVH<llvm::Value>,
                                 llvm::AssertingVH<llvm::Value>>;

/// Type for a set of invariant loads.
using InvariantLoadsSetTy = llvm::SetVector<llvm::AssertingVH<llvm::LoadInst>>;

/// Set type for parameters.
using ParameterSetTy = llvm::SetVector<const llvm::SCEV *>;

/// Set of loops (used to remember loops in non-affine subregions).
using BoxedLoopsSetTy = llvm::SetVector<const llvm::Loop *>;
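
// Usage sketch: how the container aliases above are typically filled. The
// helper recordInvariantLoad is hypothetical and only illustrates the aliases;
// it is not part of the Polly API.
inline void recordInvariantLoad(llvm::LoadInst *LI, llvm::Value *NewV,
                                InvariantLoadsSetTy &InvariantLoads,
                                ValueMapT &ValueMap) {
  // SetVector keeps insertion order and silently ignores duplicates.
  InvariantLoads.insert(LI);
  // AssertingVH keys and values trigger an assertion if the referenced
  // instruction is deleted while still tracked by the map.
  ValueMap[LI] = NewV;
}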

/// Utility proxy to wrap the common members of LoadInst and StoreInst.
///
/// This works like the LLVM utility class CallSite, i.e. it forwards all calls
/// to either a LoadInst, StoreInst, MemIntrinsic or MemTransferInst.
/// It is similar to LLVM's utility classes IntrinsicInst, MemIntrinsic,
/// MemTransferInst, etc. in that it offers a common interface, but does not act
/// as a fake base class.
/// It is similar to StringRef and ArrayRef in that it holds a pointer to the
/// referenced object and should be passed by-value as it is small enough.
///
/// This proxy can either represent a LoadInst instance, a StoreInst instance,
/// a MemIntrinsic instance (memset, memmove, memcpy), a CallInst instance or a
/// nullptr (only creatable using the default constructor); never an Instruction
/// that is none of the above. When representing a nullptr, only
/// the following methods are defined:
/// isNull(), isInstruction(), isLoad(), isStore(), ..., isMemTransferInst(),
/// operator bool(), operator!()
///
/// The functions isa, cast, cast_or_null and dyn_cast are modeled to resemble
/// those from llvm/Support/Casting.h. Because C++ does not support partial
/// function template specialization, those cannot be used directly.
/// (llvm::isa could, but then llvm::cast etc. would not have the expected
/// behavior)
class MemAccInst {
private:
  llvm::Instruction *I;

public:
  MemAccInst() : I(nullptr) {}
  MemAccInst(const MemAccInst &Inst) : I(Inst.I) {}
  /* implicit */ MemAccInst(llvm::LoadInst &LI) : I(&LI) {}
  /* implicit */ MemAccInst(llvm::LoadInst *LI) : I(LI) {}
  /* implicit */ MemAccInst(llvm::StoreInst &SI) : I(&SI) {}
  /* implicit */ MemAccInst(llvm::StoreInst *SI) : I(SI) {}
  /* implicit */ MemAccInst(llvm::MemIntrinsic *MI) : I(MI) {}
  /* implicit */ MemAccInst(llvm::CallInst *CI) : I(CI) {}
  explicit MemAccInst(llvm::Instruction &I) : I(&I) { assert(isa(I)); }
  explicit MemAccInst(llvm::Instruction *I) : I(I) { assert(isa(I)); }

  static bool isa(const llvm::Value &V) {
    return llvm::isa<llvm::LoadInst>(V) || llvm::isa<llvm::StoreInst>(V) ||
           llvm::isa<llvm::CallInst>(V) || llvm::isa<llvm::MemIntrinsic>(V);
  }
  static bool isa(const llvm::Value *V) {
    return llvm::isa<llvm::LoadInst>(V) || llvm::isa<llvm::StoreInst>(V) ||
           llvm::isa<llvm::CallInst>(V) || llvm::isa<llvm::MemIntrinsic>(V);
  }
  static MemAccInst cast(llvm::Value &V) {
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  }
  static MemAccInst cast(llvm::Value *V) {
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  }
  static MemAccInst cast_or_null(llvm::Value &V) {
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  }
  static MemAccInst cast_or_null(llvm::Value *V) {
    if (!V)
      return MemAccInst();
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  }
  static MemAccInst dyn_cast(llvm::Value &V) {
    if (isa(V))
      return MemAccInst(llvm::cast<llvm::Instruction>(V));
    return MemAccInst();
  }
  static MemAccInst dyn_cast(llvm::Value *V) {
    assert(V);
    if (isa(V))
      return MemAccInst(llvm::cast<llvm::Instruction>(V));
    return MemAccInst();
  }

  MemAccInst &operator=(const MemAccInst &Inst) {
    I = Inst.I;
    return *this;
  }
  MemAccInst &operator=(llvm::LoadInst &LI) {
    I = &LI;
    return *this;
  }
  MemAccInst &operator=(llvm::LoadInst *LI) {
    I = LI;
    return *this;
  }
  MemAccInst &operator=(llvm::StoreInst &SI) {
    I = &SI;
    return *this;
  }
  MemAccInst &operator=(llvm::StoreInst *SI) {
    I = SI;
    return *this;
  }
  MemAccInst &operator=(llvm::MemIntrinsic &MI) {
    I = &MI;
    return *this;
  }
  MemAccInst &operator=(llvm::MemIntrinsic *MI) {
    I = MI;
    return *this;
  }
  MemAccInst &operator=(llvm::CallInst &CI) {
    I = &CI;
    return *this;
  }
  MemAccInst &operator=(llvm::CallInst *CI) {
    I = CI;
    return *this;
  }

  llvm::Instruction *get() const {
    assert(I && "Unexpected nullptr!");
    return I;
  }
  operator llvm::Instruction *() const { return asInstruction(); }
  llvm::Instruction *operator->() const { return get(); }

  explicit operator bool() const { return isInstruction(); }
  bool operator!() const { return isNull(); }

  llvm::Value *getValueOperand() const {
    if (isLoad())
      return asLoad();
    if (isStore())
      return asStore()->getValueOperand();
    if (isMemIntrinsic())
      return nullptr;
    if (isCallInst())
      return nullptr;
    llvm_unreachable("Operation not supported on nullptr");
  }
  llvm::Value *getPointerOperand() const {
    if (isLoad())
      return asLoad()->getPointerOperand();
    if (isStore())
      return asStore()->getPointerOperand();
    if (isMemIntrinsic())
      return asMemIntrinsic()->getRawDest();
    if (isCallInst())
      return nullptr;
    llvm_unreachable("Operation not supported on nullptr");
  }

  unsigned getAlignment() const {
    if (isLoad())
      return asLoad()->getAlignment();
    if (isStore())
      return asStore()->getAlignment();
    if (isMemTransferInst())
      return std::min(asMemTransferInst()->getDestAlignment(),
                      asMemTransferInst()->getSourceAlignment());
    if (isMemIntrinsic())
      return asMemIntrinsic()->getDestAlignment();
    if (isCallInst())
      return 0;
    llvm_unreachable("Operation not supported on nullptr");
  }
  bool isVolatile() const {
    if (isLoad())
      return asLoad()->isVolatile();
    if (isStore())
      return asStore()->isVolatile();
    if (isMemIntrinsic())
      return asMemIntrinsic()->isVolatile();
    if (isCallInst())
      return false;
    llvm_unreachable("Operation not supported on nullptr");
  }
  bool isSimple() const {
    if (isLoad())
      return asLoad()->isSimple();
    if (isStore())
      return asStore()->isSimple();
    if (isMemIntrinsic())
      return !asMemIntrinsic()->isVolatile();
    if (isCallInst())
      return true;
    llvm_unreachable("Operation not supported on nullptr");
  }
  llvm::AtomicOrdering getOrdering() const {
    if (isLoad())
      return asLoad()->getOrdering();
    if (isStore())
      return asStore()->getOrdering();
    if (isMemIntrinsic())
      return llvm::AtomicOrdering::NotAtomic;
    if (isCallInst())
      return llvm::AtomicOrdering::NotAtomic;
    llvm_unreachable("Operation not supported on nullptr");
  }
  bool isUnordered() const {
    if (isLoad())
      return asLoad()->isUnordered();
    if (isStore())
      return asStore()->isUnordered();
    // Copied from the Load/Store implementation of isUnordered:
    if (isMemIntrinsic())
      return !asMemIntrinsic()->isVolatile();
    if (isCallInst())
      return true;
    llvm_unreachable("Operation not supported on nullptr");
  }

  bool isNull() const { return !I; }
  bool isInstruction() const { return I; }

  llvm::Instruction *asInstruction() const { return I; }

private:
  bool isLoad() const { return I && llvm::isa<llvm::LoadInst>(I); }
  bool isStore() const { return I && llvm::isa<llvm::StoreInst>(I); }
  bool isCallInst() const { return I && llvm::isa<llvm::CallInst>(I); }
  bool isMemIntrinsic() const { return I && llvm::isa<llvm::MemIntrinsic>(I); }
  bool isMemSetInst() const { return I && llvm::isa<llvm::MemSetInst>(I); }
  bool isMemTransferInst() const {
    return I && llvm::isa<llvm::MemTransferInst>(I);
  }

  llvm::LoadInst *asLoad() const { return llvm::cast<llvm::LoadInst>(I); }
  llvm::StoreInst *asStore() const { return llvm::cast<llvm::StoreInst>(I); }
  llvm::CallInst *asCallInst() const { return llvm::cast<llvm::CallInst>(I); }
  llvm::MemIntrinsic *asMemIntrinsic() const {
    return llvm::cast<llvm::MemIntrinsic>(I);
  }
  llvm::MemSetInst *asMemSetInst() const {
    return llvm::cast<llvm::MemSetInst>(I);
  }
  llvm::MemTransferInst *asMemTransferInst() const {
    return llvm::cast<llvm::MemTransferInst>(I);
  }
};
} // namespace polly

namespace llvm {
/// Specialize simplify_type for MemAccInst to enable dyn_cast and cast
///        from a MemAccInst object.
template <> struct simplify_type<polly::MemAccInst> {
  typedef Instruction *SimpleType;
  static SimpleType getSimplifiedValue(polly::MemAccInst &I) {
    return I.asInstruction();
  }
};
} // namespace llvm
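
// Usage sketch: typical use of the MemAccInst proxy. The helper
// getAccessedPointer is hypothetical; dyn_cast, isSimple and
// getPointerOperand are the members declared above.
inline llvm::Value *getAccessedPointer(llvm::Instruction *Inst) {
  polly::MemAccInst MA = polly::MemAccInst::dyn_cast(Inst);
  if (!MA) // Not a load, store, call or memory intrinsic.
    return nullptr;
  if (!MA.isSimple()) // Skip volatile and atomic accesses.
    return nullptr;
  // Thanks to the simplify_type specialization above, LLVM's own casting
  // utilities also look through the proxy.
  if (auto *Store = llvm::dyn_cast<llvm::StoreInst>(MA))
    (void)Store->getValueOperand();
  return MA.getPointerOperand(); // Load/store pointer, or raw dest of memset.
}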

namespace polly {

/// Simplify the region to have a single unconditional entry edge and a
/// single exit edge.
///
/// Although this function allows DT and RI to be null, regions only work
/// properly if the DominatorTree (for Region::contains) and RegionInfo are kept
/// up-to-date.
///
/// @param R  The region to be simplified
/// @param DT DominatorTree to be updated.
/// @param LI LoopInfo to be updated.
/// @param RI RegionInfo to be updated.
void simplifyRegion(llvm::Region *R, llvm::DominatorTree *DT,
                    llvm::LoopInfo *LI, llvm::RegionInfo *RI);

/// Split the entry block of a function to store the newly inserted
///        allocations outside of all Scops.
///
/// @param EntryBlock The entry block of the current function.
/// @param P          The pass that is currently running.
///
void splitEntryBlockForAlloca(llvm::BasicBlock *EntryBlock, llvm::Pass *P);

/// Split the entry block of a function to store the newly inserted
///        allocations outside of all Scops.
///
/// @param DT DominatorTree to be updated.
/// @param LI LoopInfo to be updated.
/// @param RI RegionInfo to be updated.
void splitEntryBlockForAlloca(llvm::BasicBlock *EntryBlock,
                              llvm::DominatorTree *DT, llvm::LoopInfo *LI,
                              llvm::RegionInfo *RI);
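
// Usage sketch: how a pass that keeps its analyses up to date might combine
// the two helpers above. prepareRegionForScop is a hypothetical name; DT, LI
// and RI are assumed to come from the pass's analysis results.
inline void prepareRegionForScop(llvm::Region &R, llvm::Function &F,
                                 llvm::DominatorTree &DT, llvm::LoopInfo &LI,
                                 llvm::RegionInfo &RI) {
  // Canonicalize the region to a single entry edge and a single exit edge.
  simplifyRegion(&R, &DT, &LI, &RI);
  // Make room in the entry block for allocas created outside of all Scops.
  splitEntryBlockForAlloca(&F.getEntryBlock(), &DT, &LI, &RI);
}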

/// Wrapper for SCEVExpander extended to all Polly features.
///
/// This wrapper will internally call the SCEVExpander but also makes sure that
/// all additional features not represented in SCEV (e.g., SDiv/SRem are not
/// black boxes but can be part of the function) will be expanded correctly.
///
/// The parameters are the same as for the creation of a SCEVExpander as well
/// as the call to SCEVExpander::expandCodeFor:
///
/// @param S     The current Scop.
/// @param SE    The Scalar Evolution pass.
/// @param DL    The module data layout.
/// @param Name  The suffix added to the new instruction names.
/// @param E     The expression for which code is actually generated.
/// @param Ty    The type of the resulting code.
/// @param IP    The insertion point for the new code.
/// @param VMap  A remapping of values used in @p E.
/// @param RTCBB The last block of the RTC. Used to insert loop-invariant
///              instructions in rare cases.
llvm::Value *expandCodeFor(Scop &S, llvm::ScalarEvolution &SE,
                           const llvm::DataLayout &DL, const char *Name,
                           const llvm::SCEV *E, llvm::Type *Ty,
                           llvm::Instruction *IP, ValueMapT *VMap,
                           llvm::BasicBlock *RTCBB);
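
// Usage sketch: one possible call to expandCodeFor. expandParameter and the
// "polly" name suffix are made up for illustration; S, SE, DL, InsertBefore
// and RTCBB are assumed to be provided by the surrounding code generator.
inline llvm::Value *expandParameter(Scop &S, llvm::ScalarEvolution &SE,
                                    const llvm::DataLayout &DL,
                                    const llvm::SCEV *Param,
                                    llvm::Instruction *InsertBefore,
                                    llvm::BasicBlock *RTCBB) {
  ValueMapT EmptyMap; // No value remapping is needed in this sketch.
  return expandCodeFor(S, SE, DL, "polly", Param, Param->getType(),
                       InsertBefore, &EmptyMap, RTCBB);
}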

/// Check if the block is an error block.
///
/// An error block is currently any block that fulfills at least one of
/// the following conditions:
///
///  - It is terminated by an unreachable instruction
///  - It contains a call to a non-pure function that is not immediately
///    dominated by a loop header and that does not dominate the region exit.
///    This is a heuristic to pick only error blocks that are conditionally
///    executed and can be assumed to be not executed at all without the domains
///    being available.
///
/// @param BB The block to check.
/// @param R  The analyzed region.
/// @param LI The loop info analysis.
/// @param DT The dominator tree of the function.
///
/// @return True if the block is an error block, false otherwise.
bool isErrorBlock(llvm::BasicBlock &BB, const llvm::Region &R,
                  llvm::LoopInfo &LI, const llvm::DominatorTree &DT);
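
// Usage sketch: error blocks are usually skipped when scanning the blocks of
// a region. countModeledBlocks is a hypothetical helper; LI and DT are
// assumed to come from the caller.
inline unsigned countModeledBlocks(llvm::Region &R, llvm::LoopInfo &LI,
                                   const llvm::DominatorTree &DT) {
  unsigned NumBlocks = 0;
  for (llvm::BasicBlock *BB : R.blocks())
    if (!isErrorBlock(*BB, R, LI, DT))
      ++NumBlocks;
  return NumBlocks;
}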

/// Return the condition for the terminator @p TI.
///
/// For unconditional branches the "i1 true" condition will be returned.
///
/// @param TI The terminator to get the condition from.
///
/// @return The condition of @p TI, or nullptr if none could be extracted.
llvm::Value *getConditionFromTerminator(llvm::Instruction *TI);

/// Get the smallest loop that contains @p S but is not in @p S.
llvm::Loop *getLoopSurroundingScop(Scop &S, llvm::LoopInfo &LI);

/// Get the number of blocks in @p L.
///
/// The number of blocks in a loop is the number of basic blocks actually
/// belonging to the loop, as well as all single basic blocks that the loop
/// exits to and which terminate in an unreachable instruction. We do not
/// allow such basic blocks in the exit of a scop, hence they belong to the
/// scop and represent run-time conditions which we want to model and
/// subsequently speculate away.
///
/// @see getRegionNodeLoop for additional details.
unsigned getNumBlocksInLoop(llvm::Loop *L);

/// Get the number of blocks in @p RN.
unsigned getNumBlocksInRegionNode(llvm::RegionNode *RN);

/// Return the smallest loop surrounding @p RN.
llvm::Loop *getRegionNodeLoop(llvm::RegionNode *RN, llvm::LoopInfo &LI);

/// Check if @p LInst can be hoisted in @p R.
///
/// @param LInst The load to check.
/// @param R     The analyzed region.
/// @param LI    The loop info.
/// @param SE    The scalar evolution analysis.
/// @param DT    The dominator tree of the function.
/// @param KnownInvariantLoads The invariant load set.
///
/// @return True if @p LInst can be hoisted in @p R.
bool isHoistableLoad(llvm::LoadInst *LInst, llvm::Region &R, llvm::LoopInfo &LI,
                     llvm::ScalarEvolution &SE, const llvm::DominatorTree &DT,
                     const InvariantLoadsSetTy &KnownInvariantLoads);
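
// Usage sketch: collecting all hoistable loads of a region into an
// InvariantLoadsSetTy. collectHoistableLoads is a hypothetical helper; the
// analyses are assumed to come from the caller.
inline void collectHoistableLoads(llvm::Region &R, llvm::LoopInfo &LI,
                                  llvm::ScalarEvolution &SE,
                                  const llvm::DominatorTree &DT,
                                  InvariantLoadsSetTy &Result) {
  for (llvm::BasicBlock *BB : R.blocks())
    for (llvm::Instruction &Inst : *BB)
      if (auto *Load = llvm::dyn_cast<llvm::LoadInst>(&Inst))
        // Loads already proven invariant are passed in as known context.
        if (isHoistableLoad(Load, R, LI, SE, DT, Result))
          Result.insert(Load);
}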

/// Return true iff @p V is an intrinsic that we ignore during code
///        generation.
bool isIgnoredIntrinsic(const llvm::Value *V);

/// Check whether a value can be synthesized by the code generator.
///
/// Some values will be recalculated only from information that is code
/// generated from the polyhedral representation. For such instructions we do
/// not need to ensure that their operands are available during code generation.
///
/// @param V The value to check.
/// @param S The current SCoP.
/// @param SE The scalar evolution database.
/// @param Scope Location where the value would be synthesized.
/// @return If the instruction I can be regenerated from its
///         scalar evolution representation, return true,
///         otherwise return false.
bool canSynthesize(const llvm::Value *V, const Scop &S,
                   llvm::ScalarEvolution *SE, llvm::Loop *Scope);

/// Return the block in which a value is used.
///
/// For normal instructions, this is the instruction's parent block. For PHI
/// nodes, this is the incoming block of that use, because this is where the
/// operand must be defined (i.e. its definition dominates this block).
/// Non-instructions do not use operands at a specific point, so for them this
/// function returns nullptr.
llvm::BasicBlock *getUseBlock(const llvm::Use &U);
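
// Usage sketch: getUseBlock matters mostly for PHI operands, whose effective
// use point is the incoming edge's block rather than the PHI's parent.
// definitionDominatesUseBlock is a hypothetical helper for illustration.
inline bool definitionDominatesUseBlock(llvm::Instruction *Def,
                                        const llvm::Use &U,
                                        const llvm::DominatorTree &DT) {
  llvm::BasicBlock *UseBB = getUseBlock(U);
  if (!UseBB) // The user is not an instruction.
    return false;
  // Block-level check only; uses in Def's own block need a finer-grained test.
  return DT.dominates(Def->getParent(), UseBB);
}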

/// Derive the individual index expressions from a GEP instruction.
///
/// This function optimistically assumes the GEP references into a fixed size
/// array. If this is actually true, this function returns a list of array
/// subscript expressions as SCEV as well as a list of integers describing
/// the size of the individual array dimensions. Both lists have either equal
/// length or the size list is one element shorter in case there is no known
/// size available for the outermost array dimension.
///
/// @param GEP The GetElementPtr instruction to analyze.
///
/// @return A tuple with the subscript expressions and the dimension sizes.
std::tuple<std::vector<const llvm::SCEV *>, std::vector<int>>
getIndexExpressionsFromGEP(llvm::GetElementPtrInst *GEP,
                           llvm::ScalarEvolution &SE);
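
// Usage sketch: unpacking the returned tuple. Note that Sizes may be one
// element shorter than Subscripts when the outermost dimension has no known
// size. dumpGEPIndexExpressions is a hypothetical helper; SCEV::dump() is
// only available in builds with dump support.
inline void dumpGEPIndexExpressions(llvm::GetElementPtrInst *GEP,
                                    llvm::ScalarEvolution &SE) {
  std::vector<const llvm::SCEV *> Subscripts;
  std::vector<int> Sizes;
  std::tie(Subscripts, Sizes) = getIndexExpressionsFromGEP(GEP, SE);
  for (const llvm::SCEV *Subscript : Subscripts)
    Subscript->dump(); // Print each subscript expression to stderr.
}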

// If the loop is nonaffine/boxed, return the first non-boxed surrounding loop
// for Polly. If the loop is affine, return the loop itself.
//
// @param L             Pointer to the Loop object to analyze.
// @param LI            Reference to the LoopInfo.
// @param BoxedLoops    Set of Boxed Loops we get from the SCoP.
llvm::Loop *getFirstNonBoxedLoopFor(llvm::Loop *L, llvm::LoopInfo &LI,
                                    const BoxedLoopsSetTy &BoxedLoops);

// If the Basic Block belongs to a loop that is nonaffine/boxed, return the
// first non-boxed surrounding loop for Polly. If the loop is affine, return
// the loop itself.
//
// @param BB            Pointer to the Basic Block to analyze.
// @param LI            Reference to the LoopInfo.
// @param BoxedLoops    Set of Boxed Loops we get from the SCoP.
llvm::Loop *getFirstNonBoxedLoopFor(llvm::BasicBlock *BB, llvm::LoopInfo &LI,
                                    const BoxedLoopsSetTy &BoxedLoops);

/// Is the given instruction a call to a debug function?
///
/// A debug function can be used to insert output in Polly-optimized code which
/// normally does not allow function calls with side-effects. For instance, a
/// printf can be inserted to check whether a value still has the expected value
/// after Polly generated code:
///
///     int sum = 0;
///     for (int i = 0; i < 16; i+=1) {
///       sum += i;
///       printf("The value of sum at i=%d is %d\n", sum, i);
///     }
bool isDebugCall(llvm::Instruction *Inst);

/// Does the statement contain a call to a debug function?
///
/// Such a statement must not be removed, even if it has no side-effects.
bool hasDebugCall(ScopStmt *Stmt);
} // namespace polly
#endif