Coverage Report

Created: 2019-02-23 12:57

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/tools/polly/include/polly/CodeGen/BlockGenerators.h
Line | Count | Source
1
//===-BlockGenerators.h - Helper to generate code for statements-*- C++ -*-===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// This file defines the BlockGenerator and VectorBlockGenerator classes, which
10
// generate sequential code and vectorized code for a polyhedral statement,
11
// respectively.
12
//
13
//===----------------------------------------------------------------------===//
14
15
#ifndef POLLY_BLOCK_GENERATORS_H
16
#define POLLY_BLOCK_GENERATORS_H
17
18
#include "polly/CodeGen/IRBuilder.h"
19
#include "polly/Support/GICHelper.h"
20
#include "polly/Support/ScopHelper.h"
21
#include "llvm/ADT/MapVector.h"
22
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
23
#include "isl/map.h"
24
25
struct isl_ast_build;
26
struct isl_id_to_ast_expr;
27
28
namespace llvm {
29
class Pass;
30
class Region;
31
class ScalarEvolution;
32
} // namespace llvm
33
34
namespace polly {
35
using namespace llvm;
36
class ScopStmt;
37
class MemoryAccess;
38
class ScopArrayInfo;
39
class IslExprBuilder;
40
41
/// Generate a new basic block for a polyhedral statement.
42
class BlockGenerator {
43
public:
44
  typedef llvm::SmallVector<ValueMapT, 8> VectorValueMapT;
45
46
  /// Map types to resolve scalar dependences.
47
  ///
48
  ///@{
49
  using AllocaMapTy = DenseMap<const ScopArrayInfo *, AssertingVH<AllocaInst>>;
50
51
  /// Simple vector of instructions to store escape users.
52
  using EscapeUserVectorTy = SmallVector<Instruction *, 4>;
53
54
  /// Map type to resolve escaping users for scalar instructions.
55
  ///
56
  /// @see The EscapeMap member.
57
  using EscapeUsersAllocaMapTy =
58
      MapVector<Instruction *,
59
                std::pair<AssertingVH<Value>, EscapeUserVectorTy>>;
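  //
  // Editor's sketch (not part of the original header): what one EscapeMap
  // entry is assumed to represent. For an instruction computed inside the
  // SCoP but used after it, the map pairs the stack slot its value is
  // demoted to with the outside users that must later be rewired to a
  // reload from that slot.
  //
  //   for (auto &Entry : EscapeMap) {
  //     Instruction *ScopInst = Entry.first;
  //     Value *EscapeAddr = Entry.second.first;           // demoted stack slot
  //     EscapeUserVectorTy &Users = Entry.second.second;  // users after the SCoP
  //   }
  //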
60
61
  ///@}
62
63
  /// Create a generator for basic blocks.
64
  ///
65
  /// @param Builder     The LLVM-IR Builder used to generate the statement. The
66
  ///                    code is generated at the location the Builder points
67
  ///                    to.
68
  /// @param LI          The loop info for the current function.
69
  /// @param SE          The scalar evolution info for the current function.
70
  /// @param DT          The dominator tree of this function.
71
  /// @param ScalarMap   Map from scalars to their demoted location.
72
  /// @param EscapeMap   Map from scalars to their escape users and locations.
73
  /// @param GlobalMap   A mapping from llvm::Values used in the original scop
74
  ///                    region to a new set of llvm::Values. Each reference to
75
  ///                    an original value appearing in this mapping is replaced
76
  ///                    with the new value it is mapped to.
77
  /// @param ExprBuilder An expression builder to generate new access functions.
78
  /// @param StartBlock  The first basic block after the RTC.
79
  BlockGenerator(PollyIRBuilder &Builder, LoopInfo &LI, ScalarEvolution &SE,
80
                 DominatorTree &DT, AllocaMapTy &ScalarMap,
81
                 EscapeUsersAllocaMapTy &EscapeMap, ValueMapT &GlobalMap,
82
                 IslExprBuilder *ExprBuilder, BasicBlock *StartBlock);
83
84
  /// Copy the basic block.
85
  ///
86
  /// This copies the entire basic block and updates references to old values
87
  /// with references to new values, as defined by GlobalMap.
88
  ///
89
  /// @param Stmt        The block statement to code generate.
90
  /// @param LTS         A map from old loops to new induction variables as
91
  ///                    SCEVs.
92
  /// @param NewAccesses A map from memory access ids to new ast expressions,
93
  ///                    which may contain new access expressions for certain
94
  ///                    memory accesses.
95
  void copyStmt(ScopStmt &Stmt, LoopToScevMapT &LTS,
96
                isl_id_to_ast_expr *NewAccesses);
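  //
  // Editor's sketch (assumption, not from the original header): the shape of
  // a typical call sequence from a driver such as Polly's IslNodeBuilder.
  // The analyses, maps and the per-statement LTS/NewAccesses values are
  // assumed to be prepared by that surrounding code generator.
  //
  //   BlockGenerator BlockGen(Builder, LI, SE, DT, ScalarMap, EscapeMap,
  //                           GlobalMap, &ExprBuilder, StartBlock);
  //   // ...for every statement instance reached while walking the isl AST:
  //   BlockGen.copyStmt(Stmt, LTS, NewAccesses);
  //   // ...once the whole SCoP has been emitted:
  //   BlockGen.finalizeSCoP(S);
  //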
97
98
  /// Remove a ScopArrayInfo's allocation from the ScalarMap.
99
  ///
100
  /// This function allows removing values from the ScalarMap. This is useful
101
  /// if the corresponding alloca instruction will be deleted (or moved into
102
  /// another module), as without removing these values the underlying
103
  /// AssertingVH will trigger due to us still keeping a reference to this
104
  /// scalar.
105
  ///
106
  /// @param Array The array for which the alloca was generated.
107
0
  void freeScalarAlloc(ScopArrayInfo *Array) { ScalarMap.erase(Array); }
108
109
  /// Return the alloca for @p Access.
110
  ///
111
  /// If no alloca was mapped for @p Access a new one is created.
112
  ///
113
  /// @param Access    The memory access for which to generate the alloca.
114
  ///
115
  /// @returns The alloca for @p Access or a replacement value taken from
116
  ///          GlobalMap.
117
  Value *getOrCreateAlloca(const MemoryAccess &Access);
118
119
  /// Return the alloca for @p Array.
120
  ///
121
  /// If no alloca was mapped for @p Array a new one is created.
122
  ///
123
  /// @param Array The array for which to generate the alloca.
124
  ///
125
  /// @returns The alloca for @p Array or a replacement value taken from
126
  ///          GlobalMap.
127
  Value *getOrCreateAlloca(const ScopArrayInfo *Array);
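  //
  // Editor's sketch (assumed behaviour, simplified): the lookup these two
  // helpers suggest. The exact insertion point, naming and GlobalMap handling
  // live in the implementation and may differ.
  //
  //   if (Value *Repl = GlobalMap.lookup(Array->getBasePtr()))
  //     return Repl;                          // demoted slot was replaced
  //   AssertingVH<AllocaInst> &Addr = ScalarMap[Array];
  //   if (!Addr)
  //     Addr = new AllocaInst(Array->getElementType(), 0 /*AddrSpace*/,
  //                           Array->getName() + ".s2a", &EntryBB->front());
  //   return Addr;
  //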
128
129
  /// Finalize the code generation for the SCoP @p S.
130
  ///
131
  /// This will initialize and finalize the scalar variables we demoted during
132
  /// the code generation.
133
  ///
134
  /// @see createScalarInitialization(Scop &)
135
  /// @see createScalarFinalization(Region &)
136
  void finalizeSCoP(Scop &S);
137
138
  /// An empty destructor
139
606
  virtual ~BlockGenerator() {}
140
141
313
  BlockGenerator(const BlockGenerator &) = default;
142
143
protected:
144
  PollyIRBuilder &Builder;
145
  LoopInfo &LI;
146
  ScalarEvolution &SE;
147
  IslExprBuilder *ExprBuilder;
148
149
  /// The dominator tree of this function.
150
  DominatorTree &DT;
151
152
  /// The entry block of the current function.
153
  BasicBlock *EntryBB;
154
155
  /// Map to resolve scalar dependences for PHI operands and scalars.
156
  ///
157
  /// When translating code that contains inter-block scalar dependences
158
  /// (including the use of data-carrying PHI nodes), we do not directly
159
  /// regenerate in-register SSA code, but instead allocate stack memory
160
  /// through which these scalar values are passed.
161
  /// Only a later pass of -mem2reg will then (re)introduce in-register
162
  /// computations.
163
  ///
164
  /// To keep track of the memory location(s) used to store the data computed by
165
  /// a given SSA instruction, we use the map 'ScalarMap'. ScalarMap maps a
166
  /// given ScopArrayInfo to the chunk of stack-allocated memory that is
167
  /// used for code generation.
168
  ///
169
  /// Up to two different ScopArrayInfo objects are associated with each
170
  /// llvm::Value:
171
  ///
172
  /// MemoryType::Value objects are used for normal scalar dependences that go
173
  /// from a scalar definition to its use. Such dependences are lowered by
174
  /// directly writing the value an instruction computes into the corresponding
175
  /// chunk of memory and reading it back from this chunk of memory right before
176
  /// every use of this original scalar value. The memory allocations for
177
  /// MemoryType::Value objects end with '.s2a'.
178
  ///
179
  /// MemoryType::PHI (and MemoryType::ExitPHI) objects are used to model PHI
180
  /// nodes. For each PHI node we introduce, besides the Array of type
181
  /// MemoryType::Value, a second chunk of memory into which we write, at the end
182
  /// of each basic block preceding the PHI instruction, the value passed
183
  /// through this basic block. At the place where the PHI node is executed, we
184
  /// replace the PHI node with a load from the corresponding MemoryType::PHI
185
  /// memory location. The memory allocations for MemoryType::PHI end with
186
  /// '.phiops'.
187
  ///
188
  /// Example:
189
  ///
190
  ///                              Input C Code
191
  ///                              ============
192
  ///
193
  ///                 S1:      x1 = ...
194
  ///                          for (i=0...N) {
195
  ///                 S2:           x2 = phi(x1, add)
196
  ///                 S3:           add = x2 + 42;
197
  ///                          }
198
  ///                 S4:      print(x1)
199
  ///                          print(x2)
200
  ///                          print(add)
201
  ///
202
  ///
203
  ///        Unmodified IR                         IR After expansion
204
  ///        =============                         ==================
205
  ///
206
  /// S1:   x1 = ...                     S1:    x1 = ...
207
  ///                                           x1.s2a = s1
208
  ///                                           x2.phiops = s1
209
  ///        |                                    |
210
  ///        |   <--<--<--<--<                    |   <--<--<--<--<
211
  ///        | /              \                   | /              \     .
212
  ///        V V               \                  V V               \    .
213
  /// S2:  x2 = phi (x1, add)   |        S2:    x2 = x2.phiops       |
214
  ///                           |               x2.s2a = x2          |
215
  ///                           |                                    |
216
  /// S3:  add = x2 + 42        |        S3:    add = x2 + 42        |
217
  ///                           |               add.s2a = add        |
218
  ///                           |               x2.phiops = add      |
219
  ///        | \               /                  | \               /
220
  ///        |  \             /                   |  \             /
221
  ///        |   >-->-->-->-->                    |   >-->-->-->-->
222
  ///        V                                    V
223
  ///
224
  ///                                    S4:    x1 = x1.s2a
225
  /// S4:  ... = x1                             ... = x1
226
  ///                                           x2 = x2.s2a
227
  ///      ... = x2                             ... = x2
228
  ///                                           add = add.s2a
229
  ///      ... = add                            ... = add
230
  ///
231
  ///      ScalarMap = { x1:Value -> x1.s2a, x2:Value -> x2.s2a,
232
  ///                    add:Value -> add.s2a, x2:PHI -> x2.phiops }
233
  ///
234
  ///  ??? Why does a PHI-node require two memory chunks ???
235
  ///
236
  ///  One may wonder why a PHI node requires two memory chunks instead of
237
  ///  storing all data in a single location. The following example tries
238
  ///  to store all data in .s2a and drops the .phiops location:
239
  ///
240
  ///      S1:    x1 = ...
241
  ///             x1.s2a = s1
242
  ///             x2.s2a = s1             // use .s2a instead of .phiops
243
  ///               |
244
  ///               |   <--<--<--<--<
245
  ///               | /              \    .
246
  ///               V V               \   .
247
  ///      S2:    x2 = x2.s2a          |  // value is same as above, but read
248
  ///                                  |  // from .s2a
249
  ///                                  |
250
  ///             x2.s2a = x2          |  // store into .s2a as normal
251
  ///                                  |
252
  ///      S3:    add = x2 + 42        |
253
  ///             add.s2a = add        |
254
  ///             x2.s2a = add         |  // use s2a instead of .phiops
255
  ///               | \               /   // !!! This is wrong, as x2.s2a now
256
  ///               |   >-->-->-->-->     // contains add instead of x2.
257
  ///               V
258
  ///
259
  ///      S4:    x1 = x1.s2a
260
  ///             ... = x1
261
  ///             x2 = x2.s2a             // !!! We now read 'add' instead of
262
  ///             ... = x2                // 'x2'
263
  ///             add = add.s2a
264
  ///             ... = add
265
  ///
266
  ///  As visible in the example, the SSA value of the PHI node may still be
267
  ///  needed _after_ the basic block, which could conceptually branch to the
268
  ///  PHI node, has been run and has overwritten the PHI's old value. Hence, a
269
  ///  single memory location is not enough to code-generate a PHI node.
270
  ///
271
  /// Memory locations used for the special PHI node modeling.
272
  AllocaMapTy &ScalarMap;
273
274
  /// Map from instructions to their escape users as well as the alloca.
275
  EscapeUsersAllocaMapTy &EscapeMap;
276
277
  /// A map from llvm::Values referenced in the old code to a new set of
278
  /// llvm::Values, which is used to replace these old values during
279
  /// code generation.
280
  ValueMapT &GlobalMap;
281
282
  /// The first basic block after the RTC.
283
  BasicBlock *StartBlock;
284
285
  /// Split @p BB to create a new one we can use to clone @p BB in.
286
  BasicBlock *splitBB(BasicBlock *BB);
287
288
  /// Copy the given basic block.
289
  ///
290
  /// @param Stmt      The statement to code generate.
291
  /// @param BB        The basic block to code generate.
292
  /// @param BBMap     A mapping from old values to their new values in this
293
  /// block.
294
  /// @param LTS         A map from old loops to new induction variables as
295
  ///                    SCEVs.
296
  /// @param NewAccesses A map from memory access ids to new ast expressions,
297
  ///                    which may contain new access expressions for certain
298
  ///                    memory accesses.
299
  ///
300
  /// @returns The copy of the basic block.
301
  BasicBlock *copyBB(ScopStmt &Stmt, BasicBlock *BB, ValueMapT &BBMap,
302
                     LoopToScevMapT &LTS, isl_id_to_ast_expr *NewAccesses);
303
304
  /// Copy the given basic block.
305
  ///
306
  /// @param Stmt      The statement to code generate.
307
  /// @param BB        The basic block to code generate.
308
  /// @param BBCopy    The new basic block to generate code in.
309
  /// @param BBMap     A mapping from old values to their new values in this
310
  ///                  block.
311
  /// @param LTS         A map from old loops to new induction variables as
312
  ///                    SCEVs.
313
  /// @param NewAccesses A map from memory access ids to new ast expressions,
314
  ///                    which may contain new access expressions for certain
315
  ///                    memory accesses.
316
  void copyBB(ScopStmt &Stmt, BasicBlock *BB, BasicBlock *BBCopy,
317
              ValueMapT &BBMap, LoopToScevMapT &LTS,
318
              isl_id_to_ast_expr *NewAccesses);
319
320
  /// Generate reload of scalars demoted to memory and needed by @p Stmt.
321
  ///
322
  /// @param Stmt  The statement we generate code for.
323
  /// @param LTS   A mapping from loops' virtual canonical induction
324
  ///              variables to their new values.
325
  /// @param BBMap A mapping from old values to their new values in this block.
326
  /// @param NewAccesses A map from memory access ids to new ast expressions.
327
  void generateScalarLoads(ScopStmt &Stmt, LoopToScevMapT &LTS,
328
                           ValueMapT &BBMap,
329
                           __isl_keep isl_id_to_ast_expr *NewAccesses);
330
331
  /// When statement tracing is enabled, build the print instructions for
332
  /// printing the current statement instance.
333
  ///
334
  /// The printed output looks like:
335
  ///
336
  ///     Stmt1(0)
337
  ///
338
  /// If printing of scalars is enabled, it also appends the value of each
339
  /// scalar to the line:
340
  ///
341
  ///     Stmt1(0) %i=1 %sum=5
342
  ///
343
  /// @param Stmt  The statement we generate code for.
344
  /// @param LTS   A mapping from loops' virtual canonical induction
345
  ///              variables to their new values.
346
  /// @param BBMap A mapping from old values to their new values in this block.
347
  void generateBeginStmtTrace(ScopStmt &Stmt, LoopToScevMapT &LTS,
348
                              ValueMapT &BBMap);
349
350
  /// Generate instructions that compute whether one instance of @p Subdomain is
351
  /// executed.
352
  ///
353
  /// @param Stmt      The statement we generate code for.
354
  /// @param Subdomain A set in the space of @p Stmt's domain. Elements not in
355
  ///                  @p Stmt's domain are ignored.
356
  ///
357
  /// @return An expression of type i1, generated into the current builder
358
  ///         position, that evaluates to 1 if the executed instance is part of
359
  ///         @p Subdomain.
360
  Value *buildContainsCondition(ScopStmt &Stmt, const isl::set &Subdomain);
361
362
  /// Generate code that executes in a subset of @p Stmt's domain.
363
  ///
364
  /// @param Stmt        The statement we generate code for.
365
  /// @param Subdomain   The condition for some code to be executed.
366
  /// @param Subject     A name for the code that is executed
367
  ///                    conditionally. Used to name new basic blocks and
368
  ///                    instructions.
369
  /// @param GenThenFunc Callback which generates the code to be executed
370
  ///                    when the currently executed instance is in @p Subdomain. The
371
  ///                    IRBuilder's position is moved to within the block that
372
  ///                    executes conditionally for this callback.
373
  void generateConditionalExecution(ScopStmt &Stmt, const isl::set &Subdomain,
374
                                    StringRef Subject,
375
                                    const std::function<void()> &GenThenFunc);
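  //
  // Editor's sketch (assumption): how a callback-based helper like this is
  // typically used, here for a write that must only happen for the instances
  // in a hypothetical subdomain AccessDom. Val and Addr stand for values
  // produced earlier by the block generator.
  //
  //   generateConditionalExecution(Stmt, AccessDom, "partial.write", [&]() {
  //     // The Builder now points into the block that is only executed when
  //     // the current instance lies in AccessDom.
  //     Builder.CreateStore(Val, Addr);
  //   });
  //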
376
377
  /// Generate the scalar stores for the given statement.
378
  ///
379
  /// After the statement @p Stmt was copied, all inner-SCoP scalar dependences
380
  /// starting in @p Stmt (hence all scalar write accesses in @p Stmt) need to
381
  /// be demoted to memory.
382
  ///
383
  /// @param Stmt  The statement we generate code for.
384
  /// @param LTS   A mapping from loops' virtual canonical induction
385
  ///              variables to their new values
386
  ///              (for values recalculated in the new ScoP, but not
387
  ///               within this basic block)
388
  /// @param BBMap A mapping from old values to their new values in this block.
389
  /// @param NewAccesses A map from memory access ids to new ast expressions.
390
  virtual void generateScalarStores(ScopStmt &Stmt, LoopToScevMapT &LTS,
391
                                    ValueMapT &BBMap,
392
                                    __isl_keep isl_id_to_ast_expr *NewAccesses);
393
394
  /// Handle users of @p Array outside the SCoP.
395
  ///
396
  /// @param S         The current SCoP.
397
  /// @param Array     The ScopArrayInfo to handle.
398
  void handleOutsideUsers(const Scop &S, ScopArrayInfo *Array);
399
400
  /// Find scalar statements that have outside users.
401
  ///
402
  /// We register these scalar values to later update subsequent scalar uses of
403
  /// these values to either use the newly computed value from within the scop
404
  /// (if the scop was executed) or the unchanged original code (if the run-time
405
  /// check failed).
406
  ///
407
  /// @param S The scop for which to find the outside users.
408
  void findOutsideUsers(Scop &S);
409
410
  /// Initialize the memory of demoted scalars.
411
  ///
412
  /// @param S The scop for which to generate the scalar initializers.
413
  void createScalarInitialization(Scop &S);
414
415
  /// Create exit PHI node merges for PHI nodes with more than two edges
416
  /// from inside the scop.
417
  ///
418
  /// For scops which have a PHI node in the exit block that has more than two
419
  /// incoming edges from inside the scop region, we require some special
420
  /// handling to understand which of the possible values will be passed to the
421
  /// PHI node from inside the optimized version of the scop. To do so, ScopInfo
422
  /// models the possible incoming values as write accesses of the ScopStmts.
423
  ///
424
  /// This function creates corresponding code to reload the computed outgoing
425
  /// value from the stack slot it has been stored into and to pass it on to the
426
  /// PHI node in the original exit block.
427
  ///
428
  /// @param S The scop for which to generate the exiting PHI nodes.
429
  void createExitPHINodeMerges(Scop &S);
430
431
  /// Promote the values of demoted scalars after the SCoP.
432
  ///
433
  /// If a scalar value was used outside the SCoP, we need to promote the value
434
  /// stored in the memory cell allocated for that scalar and combine it with
435
  /// the original value in the non-optimized SCoP.
436
  void createScalarFinalization(Scop &S);
437
438
  /// Try to synthesize a new value
439
  ///
440
  /// Given an old value, we try to synthesize it in a new context from its
441
  /// original SCEV expression. We start from the original SCEV expression,
442
  /// then replace outdated parameter and loop references, and finally
443
  /// expand it to code that computes this updated expression.
444
  ///
445
  /// @param Stmt      The statement to code generate
446
  /// @param Old       The old Value
447
  /// @param BBMap     A mapping from old values to their new values
448
  ///                  (for values recalculated within this basic block)
449
  /// @param LTS       A mapping from loops' virtual canonical induction
450
  ///                  variables to their new values
451
  ///                  (for values recalculated in the new ScoP, but not
452
  ///                   within this basic block)
453
  /// @param L         The loop that surrounded the instruction that referenced
454
  ///                  this value in the original code. This loop is used to
455
  ///                  evaluate the scalar evolution at the right scope.
456
  ///
457
  /// @returns  o A newly synthesized value.
458
  ///           o NULL, if synthesizing the value failed.
459
  Value *trySynthesizeNewValue(ScopStmt &Stmt, Value *Old, ValueMapT &BBMap,
460
                               LoopToScevMapT &LTS, Loop *L) const;
461
462
  /// Get the new version of a value.
463
  ///
464
  /// Given an old value, we first check if a new version of this value is
465
  /// available in the BBMap or GlobalMap. In case it is not and the value can
466
  /// be recomputed using SCEV, we do so. If we cannot recompute a value
467
  /// using SCEV, but we understand that the value is constant within the scop,
468
  /// we return the old value. If the value still cannot be derived, this
469
  /// function will assert.
470
  ///
471
  /// @param Stmt      The statement to code generate.
472
  /// @param Old       The old Value.
473
  /// @param BBMap     A mapping from old values to their new values
474
  ///                  (for values recalculated within this basic block).
475
  /// @param LTS       A mapping from loops' virtual canonical induction
476
  ///                  variables to their new values
477
  ///                  (for values recalculated in the new ScoP, but not
478
  ///                   within this basic block).
479
  /// @param L         The loop that surrounded the instruction that referenced
480
  ///                  this value in the original code. This loop is used to
481
  ///                  evaluate the scalar evolution at the right scope.
482
  ///
483
  /// @returns  o The old value, if it is still valid.
484
  ///           o The new value, if available.
485
  ///           o NULL, if no value is found.
486
  Value *getNewValue(ScopStmt &Stmt, Value *Old, ValueMapT &BBMap,
487
                     LoopToScevMapT &LTS, Loop *L) const;
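  //
  // Editor's sketch (informal, derived from the description above): the
  // documented resolution order, written out as pseudo-C++. The helpers
  // lookupInBBMapOrGlobalMap and isInvariantInScop are hypothetical and only
  // keep the sketch short; the real implementation may differ in details.
  //
  //   if (Value *New = lookupInBBMapOrGlobalMap(Old))
  //     return New;
  //   if (Value *New = trySynthesizeNewValue(Stmt, Old, BBMap, LTS, L))
  //     return New;                  // recomputed from its SCEV expression
  //   if (isInvariantInScop(Old))    // value is constant within the scop
  //     return Old;
  //   llvm_unreachable("Unable to derive the new value of an old one");
  //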
488
489
  void copyInstScalar(ScopStmt &Stmt, Instruction *Inst, ValueMapT &BBMap,
490
                      LoopToScevMapT &LTS);
491
492
  /// Get the innermost loop that surrounds the statement @p Stmt.
493
  Loop *getLoopForStmt(const ScopStmt &Stmt) const;
494
495
  /// Generate the operand address
496
  /// @param NewAccesses A map from memory access ids to new ast expressions,
497
  ///                    which may contain new access expressions for certain
498
  ///                    memory accesses.
499
  Value *generateLocationAccessed(ScopStmt &Stmt, MemAccInst Inst,
500
                                  ValueMapT &BBMap, LoopToScevMapT &LTS,
501
                                  isl_id_to_ast_expr *NewAccesses);
502
503
  /// Generate the operand address.
504
  ///
505
  /// @param Stmt         The statement to generate code for.
506
  /// @param L            The innermost loop that surrounds the statement.
507
  /// @param Pointer      If the access expression is not changed (i.e., not found
508
  ///                     in @p LTS), use this Pointer from the original code
509
  ///                     instead.
510
  /// @param BBMap        A mapping from old values to their new values.
511
  /// @param LTS          A mapping from loops' virtual canonical induction
512
  ///                     variables to their new values.
513
  /// @param NewAccesses  Ahead-of-time generated access expressions.
514
  /// @param Id           Identifier of the MemoryAccess to generate.
515
  /// @param ExpectedType The type the returned value should have.
516
  ///
517
  /// @return The generated address.
518
  Value *generateLocationAccessed(ScopStmt &Stmt, Loop *L, Value *Pointer,
519
                                  ValueMapT &BBMap, LoopToScevMapT &LTS,
520
                                  isl_id_to_ast_expr *NewAccesses,
521
                                  __isl_take isl_id *Id, Type *ExpectedType);
522
523
  /// Generate the pointer value that is accessed by @p Access.
524
  ///
525
  /// For write accesses, generate the target address. For read accesses,
526
  /// generate the source address.
527
  /// The access can be either an array access or a scalar access. In the first
528
  /// case, the returned address will point to an element of that array. In
529
  /// the scalar case, an alloca is used.
530
  /// If a new AccessRelation is set for the MemoryAccess, the new relation will
531
  /// be used.
532
  ///
533
  /// @param Access      The access to generate a pointer for.
534
  /// @param L           The innermost loop that surrounds the statement.
535
  /// @param LTS         A mapping from loops' virtual canonical induction
536
  ///                    variables to their new values.
537
  /// @param BBMap       A mapping from old values to their new values.
538
  /// @param NewAccesses A map from memory access ids to new ast expressions.
539
  ///
540
  /// @return The generated address.
541
  Value *getImplicitAddress(MemoryAccess &Access, Loop *L, LoopToScevMapT &LTS,
542
                            ValueMapT &BBMap,
543
                            __isl_keep isl_id_to_ast_expr *NewAccesses);
544
545
  /// @param NewAccesses A map from memory access ids to new ast expressions,
546
  ///                    which may contain new access expressions for certain
547
  ///                    memory accesses.
548
  Value *generateArrayLoad(ScopStmt &Stmt, LoadInst *load, ValueMapT &BBMap,
549
                           LoopToScevMapT &LTS,
550
                           isl_id_to_ast_expr *NewAccesses);
551
552
  /// @param NewAccesses A map from memory access ids to new ast expressions,
553
  ///                    which may contain new access expressions for certain
554
  ///                    memory accesses.
555
  void generateArrayStore(ScopStmt &Stmt, StoreInst *store, ValueMapT &BBMap,
556
                          LoopToScevMapT &LTS, isl_id_to_ast_expr *NewAccesses);
557
558
  /// Copy a single PHI instruction.
559
  ///
560
  /// The implementation in the BlockGenerator is trivial; however, it allows
561
  /// subclasses to handle PHIs differently.
562
  virtual void copyPHIInstruction(ScopStmt &, PHINode *, ValueMapT &,
563
37
                                  LoopToScevMapT &) {}
564
565
  /// Copy a single Instruction.
566
  ///
567
  /// This copies a single Instruction and updates references to old values
568
  /// with references to new values, as defined by GlobalMap and BBMap.
569
  ///
570
  /// @param Stmt        The statement to code generate.
571
  /// @param Inst        The instruction to copy.
572
  /// @param BBMap       A mapping from old values to their new values
573
  ///                    (for values recalculated within this basic block).
574
  /// @param GlobalMap   A mapping from old values to their new values
575
  ///                    (for values recalculated in the new ScoP, but not
576
  ///                    within this basic block).
577
  /// @param LTS         A mapping from loops' virtual canonical induction
578
  ///                    variables to their new values
579
  ///                    (for values recalculated in the new ScoP, but not
580
  ///                     within this basic block).
581
  /// @param NewAccesses A map from memory access ids to new ast expressions,
582
  ///                    which may contain new access expressions for certain
583
  ///                    memory accesses.
584
  void copyInstruction(ScopStmt &Stmt, Instruction *Inst, ValueMapT &BBMap,
585
                       LoopToScevMapT &LTS, isl_id_to_ast_expr *NewAccesses);
586
587
  /// Helper to determine if @p Inst can be synthesized in @p Stmt.
588
  ///
589
  /// @returns false, iff @p Inst can be synthesized in @p Stmt.
590
  bool canSyntheziseInStmt(ScopStmt &Stmt, Instruction *Inst);
591
592
  /// Remove dead instructions generated for @p BB.
593
  ///
594
  /// @param BB    The basic block for which code has been generated.
595
  /// @param BBMap A local map from old to new instructions.
596
  void removeDeadInstructions(BasicBlock *BB, ValueMapT &BBMap);
597
598
  /// Invalidate the scalar evolution expressions for a scop.
599
  ///
600
  /// This function invalidates the scalar evolution results for all
601
  /// instructions that are part of a given scop, and the loops
602
  /// surrounding the users of merge blocks. This is necessary to ensure that
603
  /// later scops do not obtain scalar evolution expressions that reference
604
  /// values that earlier dominated the later scop, but have been moved into
605
  /// the conditional part of an earlier scop and consequently no longer
606
  /// dominate the later scop.
607
  ///
608
  /// @param S The scop to invalidate.
609
  void invalidateScalarEvolution(Scop &S);
610
};
611
612
/// Generate a new vector basic block for a polyhedral statement.
613
///
614
/// The only public function exposed is generate().
615
class VectorBlockGenerator : BlockGenerator {
616
public:
617
  /// Generate a new vector basic block for a ScopStmt.
618
  ///
619
  /// This code generation is similar to the normal, scalar code generation,
620
  /// except that each instruction is code generated for several vector lanes
621
  /// at a time. If possible, instructions are issued as actual vector
622
  /// instructions, but e.g. for address calculation instructions we currently
623
  /// generate scalar instructions for each vector lane.
624
  ///
625
  /// @param BlockGen    A block generator object used as parent.
626
  /// @param Stmt        The statement to code generate.
627
  /// @param VLTS        A mapping from loops' virtual canonical induction
628
  ///                    variables to their new values
629
  ///                    (for values recalculated in the new ScoP, but not
630
  ///                     within this basic block), one for each lane.
631
  /// @param Schedule    A map from the statement to a schedule where the
632
  ///                    innermost dimension is the dimension of the innermost
633
  ///                    loop containing the statement.
634
  /// @param NewAccesses A map from memory access ids to new ast expressions,
635
  ///                    which may contain new access expressions for certain
636
  ///                    memory accesses.
637
  static void generate(BlockGenerator &BlockGen, ScopStmt &Stmt,
638
                       std::vector<LoopToScevMapT> &VLTS,
639
                       __isl_keep isl_map *Schedule,
640
20
                       __isl_keep isl_id_to_ast_expr *NewAccesses) {
641
20
    VectorBlockGenerator Generator(BlockGen, VLTS, Schedule);
642
20
    Generator.copyStmt(Stmt, NewAccesses);
643
20
  }
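  //
  // Editor's sketch (assumed call site): the vectorizing generator is driven
  // through this single static entry point. PerLaneLTS, Schedule and
  // NewAccesses are assumed to be prepared by the surrounding AST builder,
  // with one loop-to-SCEV map per vector lane.
  //
  //   std::vector<LoopToScevMapT> PerLaneLTS(VF);
  //   // ...fill PerLaneLTS[lane] for lane = 0 .. VF-1...
  //   VectorBlockGenerator::generate(BlockGen, Stmt, PerLaneLTS, Schedule,
  //                                  NewAccesses);
  //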
644
645
private:
646
  // This is a vector of loop->scev maps.  The first map is used for the first
647
  // vector lane, ...
648
  // Each map contains information about Instructions in the old ScoP, which
649
  // are recalculated in the new SCoP. When copying the basic block, we replace
650
  // all references to the old instructions with their recalculated values.
651
  //
652
  // For example, when the code generator produces this AST:
653
  //
654
  //   for (int c1 = 0; c1 <= 1023; c1 += 1)
655
  //     for (int c2 = 0; c2 <= 1023; c2 += VF)
656
  //       for (int lane = 0; lane < VF; lane += 1)
657
  //         Stmt(c2 + lane + 3, c1);
658
  //
659
  // VLTS[lane] contains a map:
660
  //   "outer loop in the old loop nest" -> SCEV("c2 + lane + 3"),
661
  //   "inner loop in the old loop nest" -> SCEV("c1").
662
  std::vector<LoopToScevMapT> &VLTS;
663
664
  // A map from the statement to a schedule where the innermost dimension is the
665
  // dimension of the innermost loop containing the statement.
666
  isl_map *Schedule;
667
668
  VectorBlockGenerator(BlockGenerator &BlockGen,
669
                       std::vector<LoopToScevMapT> &VLTS,
670
                       __isl_keep isl_map *Schedule);
671
672
  int getVectorWidth();
673
674
  Value *getVectorValue(ScopStmt &Stmt, Value *Old, ValueMapT &VectorMap,
675
                        VectorValueMapT &ScalarMaps, Loop *L);
676
677
  Type *getVectorPtrTy(const Value *V, int Width);
678
679
  /// Load a vector from a set of adjacent scalars
680
  ///
681
  /// In case a set of scalars is known to be next to each other in memory,
682
  /// create a vector load that loads those scalars
683
  ///
684
  /// %vector_ptr = bitcast double* %p to <4 x double>*
685
  /// %vec_full = load <4 x double>* %vector_ptr
686
  ///
687
  /// @param Stmt           The statement to code generate.
688
  /// @param NegativeStride This is used to indicate a -1 stride. In such
689
  ///                       a case we load the end of a base address and
690
  ///                       shuffle the accesses in reverse order into the
691
  ///                       vector. By default, we handle only positive
692
  ///                       strides.
693
  ///
694
  /// @param NewAccesses    A map from memory access ids to new ast
695
  ///                       expressions, which may contain new access
696
  ///                       expressions for certain memory accesses.
697
  Value *generateStrideOneLoad(ScopStmt &Stmt, LoadInst *Load,
698
                               VectorValueMapT &ScalarMaps,
699
                               __isl_keep isl_id_to_ast_expr *NewAccesses,
700
                               bool NegativeStride);
701
702
  /// Load a vector initialized from a single scalar in memory
703
  ///
704
  /// In case all elements of a vector are initialized to the same
705
  /// scalar value, this value is loaded and shuffled into all elements
706
  /// of the vector.
707
  ///
708
  /// %splat_one = load <1 x double>* %p
709
  /// %splat = shufflevector <1 x double> %splat_one, <1 x
710
  ///       double> %splat_one, <4 x i32> zeroinitializer
711
  ///
712
  /// @param NewAccesses A map from memory access ids to new ast expressions,
713
  ///                    which may contain new access expressions for certain
714
  ///                    memory accesses.
715
  Value *generateStrideZeroLoad(ScopStmt &Stmt, LoadInst *Load,
716
                                ValueMapT &BBMap,
717
                                __isl_keep isl_id_to_ast_expr *NewAccesses);
718
719
  /// Load a vector from scalars distributed in memory
720
  ///
721
  /// In case some scalars are distributed randomly in memory, create a vector
722
  /// by loading each scalar and by inserting one after the other into the
723
  /// vector.
724
  ///
725
  /// %scalar_1 = load double* %p_1
726
  /// %vec_1 = insertelement <2 x double> undef, double %scalar_1, i32 0
727
  /// %scalar_2 = load double* %p_2
728
  /// %vec_2 = insertelement <2 x double> %vec_1, double %scalar_2, i32 1
729
  ///
730
  /// @param NewAccesses A map from memory access ids to new ast expressions,
731
  ///                    which may contain new access expressions for certain
732
  ///                    memory accesses.
733
  Value *generateUnknownStrideLoad(ScopStmt &Stmt, LoadInst *Load,
734
                                   VectorValueMapT &ScalarMaps,
735
                                   __isl_keep isl_id_to_ast_expr *NewAccesses);
736
737
  /// @param NewAccesses A map from memory access ids to new ast expressions,
738
  ///                    which may contain new access expressions for certain
739
  ///                    memory accesses.
740
  void generateLoad(ScopStmt &Stmt, LoadInst *Load, ValueMapT &VectorMap,
741
                    VectorValueMapT &ScalarMaps,
742
                    __isl_keep isl_id_to_ast_expr *NewAccesses);
743
744
  void copyUnaryInst(ScopStmt &Stmt, UnaryInstruction *Inst,
745
                     ValueMapT &VectorMap, VectorValueMapT &ScalarMaps);
746
747
  void copyBinaryInst(ScopStmt &Stmt, BinaryOperator *Inst,
748
                      ValueMapT &VectorMap, VectorValueMapT &ScalarMaps);
749
750
  /// @param NewAccesses A map from memory access ids to new ast expressions,
751
  ///                    which may contain new access expressions for certain
752
  ///                    memory accesses.
753
  void copyStore(ScopStmt &Stmt, StoreInst *Store, ValueMapT &VectorMap,
754
                 VectorValueMapT &ScalarMaps,
755
                 __isl_keep isl_id_to_ast_expr *NewAccesses);
756
757
  /// @param NewAccesses A map from memory access ids to new ast expressions,
758
  ///                    which may contain new access expressions for certain
759
  ///                    memory accesses.
760
  void copyInstScalarized(ScopStmt &Stmt, Instruction *Inst,
761
                          ValueMapT &VectorMap, VectorValueMapT &ScalarMaps,
762
                          __isl_keep isl_id_to_ast_expr *NewAccesses);
763
764
  bool extractScalarValues(const Instruction *Inst, ValueMapT &VectorMap,
765
                           VectorValueMapT &ScalarMaps);
766
767
  bool hasVectorOperands(const Instruction *Inst, ValueMapT &VectorMap);
768
769
  /// Generate vector loads for scalars.
770
  ///
771
  /// @param Stmt           The scop statement for which to generate the loads.
772
  /// @param VectorBlockMap A map that will be updated to relate the original
773
  ///                       values with the newly generated vector loads.
774
  void generateScalarVectorLoads(ScopStmt &Stmt, ValueMapT &VectorBlockMap);
775
776
  /// Verify absence of scalar stores.
777
  ///
778
  /// @param Stmt The scop statement to check for scalar stores.
779
  void verifyNoScalarStores(ScopStmt &Stmt);
780
781
  /// @param NewAccesses A map from memory access ids to new ast expressions,
782
  ///                    which may contain new access expressions for certain
783
  ///                    memory accesses.
784
  void copyInstruction(ScopStmt &Stmt, Instruction *Inst, ValueMapT &VectorMap,
785
                       VectorValueMapT &ScalarMaps,
786
                       __isl_keep isl_id_to_ast_expr *NewAccesses);
787
788
  /// @param NewAccesses A map from memory access ids to new ast expressions,
789
  ///                    which may contain new access expressions for certain
790
  ///                    memory accesses.
791
  void copyStmt(ScopStmt &Stmt, __isl_keep isl_id_to_ast_expr *NewAccesses);
792
};
793
794
/// Generator for new versions of polyhedral region statements.
795
class RegionGenerator : public BlockGenerator {
796
public:
797
  /// Create a generator for regions.
798
  ///
799
  /// @param BlockGen A generator for basic blocks.
800
293
  RegionGenerator(BlockGenerator &BlockGen) : BlockGenerator(BlockGen) {}
801
802
293
  virtual ~RegionGenerator() {}
803
804
  /// Copy the region statement @p Stmt.
805
  ///
806
  /// This copies the entire region represented by @p Stmt and updates
807
  /// references to old values with references to new values, as defined by
808
  /// GlobalMap.
809
  ///
810
  /// @param Stmt      The statement to code generate.
811
  /// @param LTS       A map from old loops to new induction variables as SCEVs.
812
  void copyStmt(ScopStmt &Stmt, LoopToScevMapT &LTS,
813
                __isl_keep isl_id_to_ast_expr *IdToAstExp);
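  //
  // Editor's sketch (assumption): a RegionGenerator is created on top of an
  // existing BlockGenerator and used in the same way, one call per non-affine
  // region statement.
  //
  //   RegionGenerator RegionGen(BlockGen);
  //   RegionGen.copyStmt(Stmt, LTS, IdToAstExp);
  //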
814
815
private:
816
  /// A map from old to the first new block in the region that was created to
817
  /// model the old basic block.
818
  DenseMap<BasicBlock *, BasicBlock *> StartBlockMap;
819
820
  /// A map from old to the last new block in the region that was created to
821
  /// model the old basic block.
822
  DenseMap<BasicBlock *, BasicBlock *> EndBlockMap;
823
824
  /// The "BBMaps" for the whole region (one for each block). In case a basic
825
  /// block is code generated to multiple basic blocks (e.g., for partial
826
  /// writes), the start block is used as the index into RegionMaps.
827
  DenseMap<BasicBlock *, ValueMapT> RegionMaps;
828
829
  /// Mapping to remember PHI nodes that still need incoming values.
830
  using PHINodePairTy = std::pair<PHINode *, PHINode *>;
831
  DenseMap<BasicBlock *, SmallVector<PHINodePairTy, 4>> IncompletePHINodeMap;
832
833
  /// Repair the dominance tree after we created a copy block for @p BB.
834
  ///
835
  /// @returns The immediate dominator in the DT for @p BBCopy if in the region.
836
  BasicBlock *repairDominance(BasicBlock *BB, BasicBlock *BBCopy);
837
838
  /// Add the new operand from the copy of @p IncomingBB to @p PHICopy.
839
  ///
840
  /// PHI nodes, which may have (multiple) edges that enter from outside the
841
  /// non-affine subregion and even from outside the scop, are code generated as
842
  /// follows:
843
  ///
844
  /// # Original
845
  ///
846
  ///   Region: %A-> %exit
847
  ///   NonAffine Stmt: %nonaffB -> %D (includes %nonaffB, %nonaffC)
848
  ///
849
  ///     pre:
850
  ///       %val = add i64 1, 1
851
  ///
852
  ///     A:
853
  ///      br label %nonaffB
854
  ///
855
  ///     nonaffB:
856
  ///       %phi = phi i64 [%val, %A], [%valC, %nonAffC], [%valD, %D]
857
  ///       %cmp = <nonaff>
858
  ///       br i1 %cmp, label %C, label %nonaffC
859
  ///
860
  ///     nonaffC:
861
  ///       %valC = add i64 1, 1
862
  ///       br i1 undef, label %D, label %nonaffB
863
  ///
864
  ///     D:
865
  ///       %valD = ...
866
  ///       %exit_cond = <loopexit>
867
  ///       br i1 %exit_cond, label %nonaffB, label %exit
868
  ///
869
  ///     exit:
870
  ///       ...
871
  ///
872
  ///  - %A and %D enter from outside the non-affine region.
873
  ///  - %nonaffC enters from within the non-affine region.
874
  ///
875
  ///  # New
876
  ///
877
  ///    polly.A:
878
  ///       store i64 %val, i64* %phi.phiops
879
  ///       br label %polly.nonaffA.entry
880
  ///
881
  ///    polly.nonaffB.entry:
882
  ///       %phi.phiops.reload = load i64, i64* %phi.phiops
883
  ///       br label %nonaffB
884
  ///
885
  ///    polly.nonaffB:
886
  ///       %polly.phi = [%phi.phiops.reload, %nonaffB.entry],
887
  ///                    [%p.valC, %polly.nonaffC]
888
  ///
889
  ///    polly.nonaffC:
890
  ///       %p.valC = add i64 1, 1
891
  ///       br i1 undef, label %polly.D, label %polly.nonaffB
892
  ///
893
  ///    polly.D:
894
  ///        %p.valD = ...
895
  ///        store i64 %p.valD, i64* %phi.phiops
896
  ///        %p.exit_cond = <loopexit>
897
  ///        br i1 %p.exit_cond, label %polly.nonaffB, label %exit
898
  ///
899
  /// Values that enter the PHI from outside the non-affine region are stored
900
  /// into the stack slot %phi.phiops by statements %polly.A and %polly.D and
901
  /// reloaded in %polly.nonaffB.entry, a basic block generated before the
902
  /// actual non-affine region.
903
  ///
904
  /// When generating the PHI node of the non-affine region in %polly.nonaffB,
905
  /// incoming edges from outside the region are combined into a single branch
906
  /// from %polly.nonaffB.entry which has as incoming value the value reloaded
907
  /// from the %phi.phiops stack slot. Incoming edges from within the region
908
  /// refer to the copied instructions (%p.valC) and basic blocks
909
  /// (%polly.nonaffC) of the non-affine region.
910
  ///
911
  /// @param Stmt       The statement to code generate.
912
  /// @param PHI        The original PHI we copy.
913
  /// @param PHICopy    The copy of @p PHI.
914
  /// @param IncomingBB An incoming block of @p PHI.
915
  /// @param LTS        A map from old loops to new induction variables as
916
  /// SCEVs.
917
  void addOperandToPHI(ScopStmt &Stmt, PHINode *PHI, PHINode *PHICopy,
918
                       BasicBlock *IncomingBB, LoopToScevMapT &LTS);
919
920
  /// Create a PHI that combines the incoming values from all incoming blocks
921
  /// that are in the subregion.
922
  ///
923
  /// PHIs in the subregion's exit block can have incoming edges from within and
924
  /// outside the subregion. This function combines the incoming values from
925
  /// within the subregion to appear as if there is only one incoming edge from
926
  /// the subregion (an additional exit block is created by RegionGenerator).
927
  /// This avoids a value being written to the .phiops location without
928
  /// leaving the subregion because the exiting block has an edge back into the
929
  /// subregion.
930
  ///
931
  /// @param MA    The WRITE of MemoryKind::PHI/MemoryKind::ExitPHI for a PHI in
932
  ///              the subregion's exit block.
933
  /// @param LTS   Virtual induction variable mapping.
934
  /// @param BBMap A mapping from old values to their new values in this block.
935
  /// @param L     Loop surrounding this region statement.
936
  ///
937
  /// @returns The constructed PHI node.
938
  PHINode *buildExitPHI(MemoryAccess *MA, LoopToScevMapT &LTS, ValueMapT &BBMap,
939
                        Loop *L);
940
941
  /// Return the new value of a scalar write, creating a PHINode if
942
  /// necessary.
943
  ///
944
  /// @param MA    A scalar WRITE MemoryAccess.
945
  /// @param LTS   Virtual induction variable mapping.
946
  /// @param BBMap A mapping from old values to their new values in this block.
947
  ///
948
  /// @returns The effective value of @p MA's written value when leaving the
949
  ///          subregion.
950
  /// @see buildExitPHI
951
  Value *getExitScalar(MemoryAccess *MA, LoopToScevMapT &LTS, ValueMapT &BBMap);
952
953
  /// Generate the scalar stores for the given statement.
954
  ///
955
  /// After the statement @p Stmt was copied, all inner-SCoP scalar dependences
956
  /// starting in @p Stmt (hence all scalar write accesses in @p Stmt) need to
957
  /// be demoted to memory.
958
  ///
959
  /// @param Stmt  The statement we generate code for.
960
  /// @param LTS   A mapping from loops' virtual canonical induction variables to
961
  ///              their new values (for values recalculated in the new ScoP,
962
  ///              but not within this basic block)
963
  /// @param BBMap A mapping from old values to their new values in this block.
964
966
  virtual void
967
  generateScalarStores(ScopStmt &Stmt, LoopToScevMapT &LTS, ValueMapT &BBMap,
968
                       __isl_keep isl_id_to_ast_expr *NewAccesses) override;
969
970
  /// Copy a single PHI instruction.
971
  ///
972
  /// This copies a single PHI instruction and updates references to old values
973
  /// with references to new values, as defined by GlobalMap and BBMap.
974
  ///
975
  /// @param Stmt      The statement to code generate.
976
  /// @param Inst      The PHI instruction to copy.
977
  /// @param BBMap     A mapping from old values to their new values
978
  ///                  (for values recalculated within this basic block).
979
  /// @param LTS       A map from old loops to new induction variables as SCEVs.
980
  virtual void copyPHIInstruction(ScopStmt &Stmt, PHINode *Inst,
981
                                  ValueMapT &BBMap,
982
                                  LoopToScevMapT &LTS) override;
983
};
984
} // namespace polly
985
#endif