Coverage Report

Created: 2018-12-09 11:54

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/tools/polly/include/polly/ScopBuilder.h
//===- polly/ScopBuilder.h --------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Create a polyhedral description for a static control flow region.
//
// The pass creates a polyhedral description of the Scops detected by the SCoP
// detection, derived from their LLVM-IR code.
//
//===----------------------------------------------------------------------===//

#ifndef POLLY_SCOPBUILDER_H
#define POLLY_SCOPBUILDER_H

#include "polly/ScopInfo.h"
#include "polly/Support/ScopHelper.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include <algorithm>
#include <memory>
#include <utility>

namespace llvm {

class AssumptionCache;
class BasicBlock;
class DataLayout;
class DominatorTree;
class Instruction;
class LoopInfo;
class PassRegistry;
class PHINode;
class Region;
class ScalarEvolution;
class SCEV;
class Type;
class Value;

void initializeScopInfoRegionPassPass(PassRegistry &);
void initializeScopInfoWrapperPassPass(PassRegistry &);
} // end namespace llvm

namespace polly {

class ScopDetection;

/// Command line switch whether to model read-only accesses.
extern bool ModelReadOnlyScalars;

/// Build the Polly IR (Scop and ScopStmt) on a Region.
class ScopBuilder {
  /// The AliasAnalysis to build AliasSetTracker.
  AliasAnalysis &AA;

  /// Target data for element size computing.
  const DataLayout &DL;

  /// DominatorTree to reason about guaranteed execution.
  DominatorTree &DT;

  /// LoopInfo for information about loops.
  LoopInfo &LI;

  /// Valid regions for the SCoP.
  ScopDetection &SD;

  /// The ScalarEvolution to help build the Scop.
  ScalarEvolution &SE;

  /// Set of instructions that might read any memory location.
  SmallVector<std::pair<ScopStmt *, Instruction *>, 16> GlobalReads;

  /// Set of all accessed array base pointers.
  SmallSetVector<Value *, 16> ArrayBasePointers;

  // The Scop
  std::unique_ptr<Scop> scop;

  // Methods for pattern matching against Fortran code generated by dragonegg.
  // @{

  /// Try to match the descriptor of a Fortran array whose allocation
  /// is not visible. That is, we can see the load/store into the memory, but
  /// we don't actually know where the memory is allocated. If ALLOCATE had been
  /// called on the Fortran array, then we will see the lowered malloc() call.
  /// If not, this is dubbed an "invisible allocation".
  ///
  /// "<descriptor>" is the descriptor of the Fortran array.
  ///
  /// Pattern match for "@descriptor":
  ///  1. %mem = load double*, double** bitcast (%"struct.array1_real(kind=8)"*
  ///    <descriptor> to double**), align 32
  ///
  ///  2. [%slot = getelementptr inbounds i8, i8* %mem, i64 <index>]
  ///  2 is optional because if you are writing to the 0th index, you don't
  ///     need a GEP.
  ///
  ///  3.1 store/load <memtype> <val>, <memtype>* %slot
  ///  3.2 store/load <memtype> <val>, <memtype>* %mem
  ///
  /// @see polly::MemoryAccess, polly::ScopArrayInfo
  ///
  /// @note assumes -polly-canonicalize has been run.
  ///
  /// @param Inst The LoadInst/StoreInst that accesses the memory.
  ///
  /// @returns Reference to <descriptor> on success, nullptr on failure.
  Value *findFADAllocationInvisible(MemAccInst Inst);

  /// Try to match the descriptor of a Fortran array whose allocation
  /// call is visible, i.e., where we can see the lowered ALLOCATE call.
  /// ALLOCATE is materialized as a malloc(...) which we pattern match for.
  ///
  /// Pattern match for "%untypedmem":
  ///  1. %untypedmem = i8* @malloc(...)
  ///
  ///  2. %typedmem = bitcast i8* %untypedmem to <memtype>
  ///
  ///  3. [%slot = getelementptr inbounds i8, i8* %typedmem, i64 <index>]
  ///  3 is optional because if you are writing to the 0th index, you don't
  ///     need a GEP.
  ///
  ///  4.1 store/load <memtype> <val>, <memtype>* %slot, align 8
  ///  4.2 store/load <memtype> <val>, <memtype>* %mem, align 8
  ///
  /// @see polly::MemoryAccess, polly::ScopArrayInfo
  ///
  /// @note assumes -polly-canonicalize has been run.
  ///
  /// @param Inst The LoadInst/StoreInst that accesses the memory.
  ///
  /// @returns Reference to %untypedmem on success, nullptr on failure.
  Value *findFADAllocationVisible(MemAccInst Inst);

  // @}
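
  // For illustration only (an assumed layout, not something this header
  // defines): after dragonegg lowering, a Fortran array descriptor roughly
  // corresponds to a struct whose first field is the data pointer, which is
  // what pattern 1 above loads through the bitcast:
  //
  //   struct array1_real {   // %"struct.array1_real(kind=8)"
  //     double *base;        // the %mem loaded in pattern 1
  //     // offset, dtype, and per-dimension bounds follow (layout assumed)
  //   };
  //
  // The matched load/store then accesses the array data through that base
  // pointer, optionally offset by a GEP.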

  // Build the SCoP for Region @p R.
  void buildScop(Region &R, AssumptionCache &AC,
                 OptimizationRemarkEmitter &ORE);

  /// Try to build a multi-dimensional fixed sized MemoryAccess from the
  /// Load/Store instruction.
  ///
  /// @param Inst       The Load/Store instruction that accesses the memory
  /// @param Stmt       The parent statement of the instruction
  ///
  /// @returns True if the access could be built, False otherwise.
  bool buildAccessMultiDimFixed(MemAccInst Inst, ScopStmt *Stmt);

  /// Try to build a multi-dimensional parametric sized MemoryAccess
  /// from the Load/Store instruction.
  ///
  /// @param Inst       The Load/Store instruction that accesses the memory
  /// @param Stmt       The parent statement of the instruction
  ///
  /// @returns True if the access could be built, False otherwise.
  bool buildAccessMultiDimParam(MemAccInst Inst, ScopStmt *Stmt);
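
  // Illustrative sketch (example code, not part of this interface): for a
  // fixed-size array the inner dimension sizes are compile-time constants,
  // whereas a parametric-size access keeps them as SCEV parameters and must
  // be delinearized.
  //
  //   void fixed(double A[][64], long i, long j)    { A[i][j] = 0.0; }
  //   void param(double *B, long m, long i, long j) { B[i * m + j] = 0.0; }
  //
  // An access like the one in fixed() is a candidate for
  // buildAccessMultiDimFixed; the one in param() for buildAccessMultiDimParam.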

  /// Try to build a MemoryAccess for a memory intrinsic.
  ///
  /// @param Inst       The instruction that accesses the memory
  /// @param Stmt       The parent statement of the instruction
  ///
  /// @returns True if the access could be built, False otherwise.
  bool buildAccessMemIntrinsic(MemAccInst Inst, ScopStmt *Stmt);
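
  // Illustrative sketch (example code, not from this header): memory
  // intrinsics such as memset/memcpy touch a whole byte range, e.g.
  //
  //   void clear_row(double *A, long m, long i) {
  //     memset(A + i * m, 0, m * sizeof(double));   // write of m*8 bytes
  //   }
  //
  // buildAccessMemIntrinsic creates the corresponding (possibly
  // variable-length) MemoryAccess for such calls.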

  /// Try to build a MemoryAccess for a call instruction.
  ///
  /// @param Inst       The call instruction that accesses the memory
  /// @param Stmt       The parent statement of the instruction
  ///
  /// @returns True if the access could be built, False otherwise.
  bool buildAccessCallInst(MemAccInst Inst, ScopStmt *Stmt);

  /// Build a single-dimensional parametric sized MemoryAccess
  /// from the Load/Store instruction.
  ///
  /// @param Inst       The Load/Store instruction that accesses the memory
  /// @param Stmt       The parent statement of the instruction
  void buildAccessSingleDim(MemAccInst Inst, ScopStmt *Stmt);

  /// Build an instance of MemoryAccess from the Load/Store instruction.
  ///
  /// @param Inst       The Load/Store instruction that accesses the memory
  /// @param Stmt       The parent statement of the instruction
  void buildMemoryAccess(MemAccInst Inst, ScopStmt *Stmt);

  /// Analyze and extract the cross-BB scalar dependences (or, dataflow
  /// dependencies) of an instruction.
  ///
  /// @param UserStmt The statement @p Inst resides in.
  /// @param Inst     The instruction to be analyzed.
  void buildScalarDependences(ScopStmt *UserStmt, Instruction *Inst);
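
  // Illustrative sketch (example code, assuming the two lines end up in
  // different ScopStmts): a value computed in one statement and used in
  // another is a cross-statement scalar dependence.
  //
  //   double t = A[i] * 2.0;   // S1 defines t
  //   B[i] = t + 1.0;          // S2 uses t
  //
  // buildScalarDependences detects the use in S2; ensureValueWrite and
  // ensureValueRead (below) then add the matching scalar write and read
  // accesses.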

  /// Build the escaping dependences for @p Inst.
  ///
  /// Search for uses of the llvm::Value defined by @p Inst that are not
  /// within the SCoP. If there is such a use, add a SCALAR WRITE access so
  /// that the value is available after the SCoP as an escaping value.
  ///
  /// @param Inst The instruction to be analyzed.
  void buildEscapingDependences(Instruction *Inst);
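
  // Illustrative sketch (example code, not from this header): a value that is
  // still used after the region "escapes" the SCoP.
  //
  //   double last = 0.0;
  //   for (long i = 0; i < n; i++)
  //     last = A[i];           // definition inside the SCoP
  //   return last;             // user outside the SCoP
  //
  // The definition of last inside the region gets a SCALAR write access so
  // its final value is available once the SCoP has finished executing.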

  /// Create MemoryAccesses for the given PHI node in the given region.
  ///
  /// @param PHIStmt            The statement @p PHI resides in.
  /// @param PHI                The PHI node to be handled.
  /// @param NonAffineSubRegion The non-affine subregion @p PHI is in.
  /// @param IsExitBlock        Flag to indicate that @p PHI is in the exit BB.
  void buildPHIAccesses(ScopStmt *PHIStmt, PHINode *PHI,
                        Region *NonAffineSubRegion, bool IsExitBlock = false);

  /// Build the access functions for all statements in the SCoP's region.
  void buildAccessFunctions();

  /// Should an instruction be modeled in a ScopStmt?
  ///
  /// @param Inst The instruction to check.
  /// @param L    The loop in which context the instruction is looked at.
  ///
  /// @returns True if the instruction should be modeled.
  bool shouldModelInst(Instruction *Inst, Loop *L);

  /// Create one or more ScopStmts for @p BB.
  ///
  /// Consecutive instructions are associated with the same statement until a
  /// separator is found.
  void buildSequentialBlockStmts(BasicBlock *BB, bool SplitOnStore = false);

  /// Create one or more ScopStmts for @p BB using equivalence classes.
  ///
  /// Instructions of a basic block that belong to the same equivalence class
  /// are added to the same statement.
  void buildEqivClassBlockStmts(BasicBlock *BB);

  /// Create ScopStmts for all BBs and non-affine subregions of @p SR.
  ///
  /// @param SR A subregion of @p R.
  ///
  /// Some of the statements might be optimized away later when they do not
  /// access any memory and thus have no effect.
  void buildStmts(Region &SR);

  /// Build the access functions for the statement @p Stmt in or represented by
  /// @p BB.
  ///
  /// @param Stmt               Statement to add MemoryAccesses to.
  /// @param BB                 A basic block in @p R.
  /// @param NonAffineSubRegion The non-affine subregion @p BB is in.
  void buildAccessFunctions(ScopStmt *Stmt, BasicBlock &BB,
                            Region *NonAffineSubRegion = nullptr);

  /// Create a new MemoryAccess object and add it to #AccFuncMap.
  ///
  /// @param Stmt        The statement where the access takes place.
  /// @param Inst        The instruction doing the access. It is not necessarily
  ///                    inside @p BB.
  /// @param AccType     The kind of access.
  /// @param BaseAddress The accessed array's base address.
  /// @param ElemType    The type of the accessed array elements.
  /// @param Affine      Whether all subscripts are affine expressions.
  /// @param AccessValue Value read or written.
  /// @param Subscripts  Access subscripts per dimension.
  /// @param Sizes       The array dimension's sizes.
  /// @param Kind        The kind of memory accessed.
  ///
  /// @return The created MemoryAccess, or nullptr if the access is not within
  ///         the SCoP.
  MemoryAccess *addMemoryAccess(ScopStmt *Stmt, Instruction *Inst,
                                MemoryAccess::AccessType AccType,
                                Value *BaseAddress, Type *ElemType, bool Affine,
                                Value *AccessValue,
                                ArrayRef<const SCEV *> Subscripts,
                                ArrayRef<const SCEV *> Sizes, MemoryKind Kind);

  /// Create a MemoryAccess that represents either a LoadInst or
  /// StoreInst.
  ///
  /// @param Stmt        The statement to add the MemoryAccess to.
  /// @param MemAccInst  The LoadInst or StoreInst.
  /// @param AccType     The kind of access.
  /// @param BaseAddress The accessed array's base address.
  /// @param ElemType    The type of the accessed array elements.
  /// @param IsAffine    Whether all subscripts are affine expressions.
  /// @param Subscripts  Access subscripts per dimension.
  /// @param Sizes       The array dimension's sizes.
  /// @param AccessValue Value read or written.
  ///
  /// @see MemoryKind
  void addArrayAccess(ScopStmt *Stmt, MemAccInst MemAccInst,
                      MemoryAccess::AccessType AccType, Value *BaseAddress,
                      Type *ElemType, bool IsAffine,
                      ArrayRef<const SCEV *> Subscripts,
                      ArrayRef<const SCEV *> Sizes, Value *AccessValue);

  /// Create a MemoryAccess for writing an llvm::Instruction.
  ///
  /// The access will be created at the position of @p Inst.
  ///
  /// @param Inst The instruction to be written.
  ///
  /// @see ensureValueRead()
  /// @see MemoryKind
  void ensureValueWrite(Instruction *Inst);

  /// Ensure an llvm::Value is available in the BB's statement, creating a
  /// MemoryAccess for reloading it if necessary.
  ///
  /// @param V        The value expected to be loaded.
  /// @param UserStmt Where to reload the value.
  ///
  /// @see ensureValueStore()
  /// @see MemoryKind
  void ensureValueRead(Value *V, ScopStmt *UserStmt);

  /// Create a write MemoryAccess for the incoming block of a phi node.
  ///
  /// Each incoming block writes its incoming value, to be picked up in the
  /// phi's block.
  ///
  /// @param PHI           PHINode under consideration.
  /// @param IncomingStmt  The statement to add the MemoryAccess to.
  /// @param IncomingBlock Some predecessor block.
  /// @param IncomingValue @p PHI's value when coming from @p IncomingBlock.
  /// @param IsExitBlock   When true, uses the .s2a alloca instead of the
  ///                      .phiops one. Required for values escaping through a
  ///                      PHINode in the SCoP region's exit block.
  /// @see addPHIReadAccess()
  /// @see MemoryKind
  void ensurePHIWrite(PHINode *PHI, ScopStmt *IncomingStmt,
                      BasicBlock *IncomingBlock, Value *IncomingValue,
                      bool IsExitBlock);

  /// Create a MemoryAccess for reading the value of a phi.
  ///
  /// The modeling assumes that all incoming blocks write their incoming value
  /// to the same location. Thus, this access will read the incoming block's
  /// value as instructed by this @p PHI.
  ///
  /// @param PHIStmt Statement @p PHI resides in.
  /// @param PHI     PHINode under consideration; the READ access will be added
  ///                here.
  ///
  /// @see ensurePHIWrite()
  /// @see MemoryKind
  void addPHIReadAccess(ScopStmt *PHIStmt, PHINode *PHI);
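
  // Illustrative sketch (example code, not from this header): a PHI node
  // merging values from two predecessors.
  //
  //   if (c)          // BB1: x = a
  //     x = a;
  //   else            // BB2: x = b
  //     x = b;
  //   use(x);         // BB3: x = phi [a, BB1], [b, BB2]
  //
  // ensurePHIWrite adds a .phiops write of a in BB1's statement and of b in
  // BB2's statement; addPHIReadAccess adds the matching read in BB3's
  // statement.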

  /// Build the domain of @p Stmt.
  void buildDomain(ScopStmt &Stmt);

  /// Fill NestLoops with loops surrounding @p Stmt.
  void collectSurroundingLoops(ScopStmt &Stmt);

  /// Check for reductions in @p Stmt.
  ///
  /// Iterate over all store memory accesses and check for valid binary
  /// reduction-like chains. For all candidates we check if they have the same
  /// base address and there are no other accesses which overlap with them. The
  /// base address check rules out impossible reduction candidates early. The
  /// overlap check, together with the "only one user" check in
  /// collectCandidateReductionLoads, guarantees that none of the intermediate
  /// results will escape during execution of the loop nest. We basically check
  /// here that no other memory access can access the same memory as the
  /// potential reduction.
  void checkForReductions(ScopStmt &Stmt);

  /// Collect loads which might form a reduction chain with @p StoreMA.
  ///
  /// Check if the stored value for @p StoreMA is a binary operator with one or
  /// two loads as operands. If the binary operator is commutative and
  /// associative, used only once (by @p StoreMA) and its load operands are
  /// also used only once, we have found a possible reduction chain. It starts
  /// at an operand load and includes the binary operator and @p StoreMA.
  ///
  /// Note: We allow only one use to ensure the load and binary operator cannot
  ///       escape this block or flow into any other store except @p StoreMA.
  void collectCandidateReductionLoads(MemoryAccess *StoreMA,
                                      SmallVectorImpl<MemoryAccess *> &Loads);
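
  // Illustrative sketch (example code, not from this header): a typical
  // binary reduction chain as detected by the two methods above.
  //
  //   for (long i = 0; i < n; i++)
  //     sum += A[i];   // load sum, load A[i], add, store sum
  //
  // The load of sum, the addition, and the store back to sum form the
  // candidate chain; the base-address and overlap checks make sure no other
  // access in the statement touches sum.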

  /// Build the access relation of all memory accesses of @p Stmt.
  void buildAccessRelations(ScopStmt &Stmt);

public:
  explicit ScopBuilder(Region *R, AssumptionCache &AC, AliasAnalysis &AA,
                       const DataLayout &DL, DominatorTree &DT, LoopInfo &LI,
                       ScopDetection &SD, ScalarEvolution &SE,
                       OptimizationRemarkEmitter &ORE);
  ScopBuilder(const ScopBuilder &) = delete;
  ScopBuilder &operator=(const ScopBuilder &) = delete;
  ~ScopBuilder() = default;                                   // coverage count: 1.19k

  /// Try to build the Polly IR of the static control part on the current
  /// SESE region.
  ///
  /// @returns The scop object, giving up ownership of the static control part
  ///          built for the region.
  std::unique_ptr<Scop> getScop() { return std::move(scop); } // coverage count: 1.19k
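
  // Minimal usage sketch (mirrors how the ScopInfo passes are expected to
  // drive the builder; the analysis objects AC, AA, DL, DT, LI, SD, SE and
  // ORE are assumed to be obtained from the enclosing pass):
  //
  //   ScopBuilder SB(&R, AC, AA, DL, DT, LI, SD, SE, ORE);
  //   std::unique_ptr<Scop> S = SB.getScop();  // may be empty if no valid
  //                                            // SCoP could be built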
};
} // end namespace polly

#endif // POLLY_SCOPBUILDER_H