Coverage Report

Created: 2017-08-21 19:50

/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/tools/polly/lib/Transform/ScheduleOptimizer.cpp
//===- ScheduleOptimizer.cpp - Calculate an optimized schedule -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass generates an entirely new schedule tree from the data dependences
// and iteration domains. The new schedule tree is computed in two steps:
//
// 1) The isl scheduling optimizer is run
//
// The isl scheduling optimizer creates a new schedule tree that maximizes
// parallelism and tileability and minimizes data-dependence distances. The
// algorithm used is a modified version of the ``Pluto'' algorithm:
//
//   U. Bondhugula, A. Hartono, J. Ramanujam, and P. Sadayappan.
//   A Practical Automatic Polyhedral Parallelizer and Locality Optimizer.
//   In Proceedings of the 2008 ACM SIGPLAN Conference On Programming Language
//   Design and Implementation, PLDI ’08, pages 101–113. ACM, 2008.
//
// 2) A set of post-scheduling transformations is applied on the schedule tree.
//
// These optimizations include:
//
//  - Tiling of the innermost tilable bands
//  - Prevectorization - The choice of a possible outer loop that is strip-mined
//                       to the innermost level to enable inner-loop
//                       vectorization.
//  - Some optimizations for spatial locality are also planned.
//
// For a detailed description of the schedule tree itself please see section 6
// of:
//
// Polyhedral AST generation is more than scanning polyhedra
// Tobias Grosser, Sven Verdoolaege, Albert Cohen
// ACM Transactions on Programming Languages and Systems (TOPLAS),
// 37(4), July 2015
// http://www.grosser.es/#pub-polyhedral-AST-generation
//
// This publication also contains a detailed discussion of the different options
// for polyhedral loop unrolling, full/partial tile separation and other uses
// of the schedule tree.
//
//===----------------------------------------------------------------------===//

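// For illustration of the tiling step described above (a generic sketch, not
// the exact output of this pass): with a tile size of 32, the loop nest
//
//   for (i = 0; i < N; i++)
//     for (j = 0; j < M; j++)
//       S(i, j);
//
// is rewritten into tile loops that enumerate blocks and point loops that
// enumerate the iterations within each block:
//
//   for (it = 0; it < N; it += 32)
//     for (jt = 0; jt < M; jt += 32)
//       for (i = it; i < min(it + 32, N); i++)
//         for (j = jt; j < min(jt + 32, M); j++)
//           S(i, j);
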
#include "polly/ScheduleOptimizer.h"
50
#include "polly/CodeGen/CodeGeneration.h"
51
#include "polly/DependenceInfo.h"
52
#include "polly/LinkAllPasses.h"
53
#include "polly/Options.h"
54
#include "polly/ScopInfo.h"
55
#include "polly/Simplify.h"
56
#include "polly/Support/GICHelper.h"
57
#include "polly/Support/ISLOStream.h"
58
#include "llvm/Analysis/TargetTransformInfo.h"
59
#include "llvm/Support/Debug.h"
60
#include "isl/aff.h"
61
#include "isl/band.h"
62
#include "isl/constraint.h"
63
#include "isl/map.h"
64
#include "isl/options.h"
65
#include "isl/printer.h"
66
#include "isl/schedule.h"
67
#include "isl/schedule_node.h"
68
#include "isl/space.h"
69
#include "isl/union_map.h"
70
#include "isl/union_set.h"
71
72
using namespace llvm;
73
using namespace polly;
74
75
#define DEBUG_TYPE "polly-opt-isl"
76
77
static cl::opt<std::string>
78
    OptimizeDeps("polly-opt-optimize-only",
79
                 cl::desc("Only a certain kind of dependences (all/raw)"),
80
                 cl::Hidden, cl::init("all"), cl::ZeroOrMore,
81
                 cl::cat(PollyCategory));
82
83
static cl::opt<std::string>
84
    SimplifyDeps("polly-opt-simplify-deps",
85
                 cl::desc("Dependences should be simplified (yes/no)"),
86
                 cl::Hidden, cl::init("yes"), cl::ZeroOrMore,
87
                 cl::cat(PollyCategory));
88
89
static cl::opt<int> MaxConstantTerm(
90
    "polly-opt-max-constant-term",
91
    cl::desc("The maximal constant term allowed (-1 is unlimited)"), cl::Hidden,
92
    cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory));
93
94
static cl::opt<int> MaxCoefficient(
95
    "polly-opt-max-coefficient",
96
    cl::desc("The maximal coefficient allowed (-1 is unlimited)"), cl::Hidden,
97
    cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory));
98
99
static cl::opt<std::string> FusionStrategy(
100
    "polly-opt-fusion", cl::desc("The fusion strategy to choose (min/max)"),
101
    cl::Hidden, cl::init("min"), cl::ZeroOrMore, cl::cat(PollyCategory));
102
103
static cl::opt<std::string>
104
    MaximizeBandDepth("polly-opt-maximize-bands",
105
                      cl::desc("Maximize the band depth (yes/no)"), cl::Hidden,
106
                      cl::init("yes"), cl::ZeroOrMore, cl::cat(PollyCategory));
107
108
static cl::opt<std::string> OuterCoincidence(
109
    "polly-opt-outer-coincidence",
110
    cl::desc("Try to construct schedules where the outer member of each band "
111
             "satisfies the coincidence constraints (yes/no)"),
112
    cl::Hidden, cl::init("no"), cl::ZeroOrMore, cl::cat(PollyCategory));
113
114
static cl::opt<int> PrevectorWidth(
115
    "polly-prevect-width",
116
    cl::desc(
117
        "The number of loop iterations to strip-mine for pre-vectorization"),
118
    cl::Hidden, cl::init(4), cl::ZeroOrMore, cl::cat(PollyCategory));
119
120
static cl::opt<bool> FirstLevelTiling("polly-tiling",
121
                                      cl::desc("Enable loop tiling"),
122
                                      cl::init(true), cl::ZeroOrMore,
123
                                      cl::cat(PollyCategory));
124
125
static cl::opt<int> LatencyVectorFma(
126
    "polly-target-latency-vector-fma",
127
    cl::desc("The minimal number of cycles between issuing two "
128
             "dependent consecutive vector fused multiply-add "
129
             "instructions."),
130
    cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));
131
132
static cl::opt<int> ThroughputVectorFma(
133
    "polly-target-throughput-vector-fma",
134
    cl::desc("A throughput of the processor floating-point arithmetic units "
135
             "expressed in the number of vector fused multiply-add "
136
             "instructions per clock cycle."),
137
    cl::Hidden, cl::init(1), cl::ZeroOrMore, cl::cat(PollyCategory));
138
139
// This option, along with --polly-target-2nd-cache-level-associativity,
140
// --polly-target-1st-cache-level-size, and --polly-target-2st-cache-level-size
141
// represent the parameters of the target cache, which do not have typical
142
// values that can be used by default. However, to apply the pattern matching
143
// optimizations, we use the values of the parameters of Intel Core i7-3820
144
// SandyBridge in case the parameters are not specified. Such an approach helps
145
// also to attain the high-performance on IBM POWER System S822 and IBM Power
146
// 730 Express server.
147
static cl::opt<int> FirstCacheLevelAssociativity(
148
    "polly-target-1st-cache-level-associativity",
149
    cl::desc("The associativity of the first cache level."), cl::Hidden,
150
    cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));
151
152
static cl::opt<int> SecondCacheLevelAssociativity(
153
    "polly-target-2nd-cache-level-associativity",
154
    cl::desc("The associativity of the second cache level."), cl::Hidden,
155
    cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));
156
157
static cl::opt<int> FirstCacheLevelSize(
158
    "polly-target-1st-cache-level-size",
159
    cl::desc("The size of the first cache level specified in bytes."),
160
    cl::Hidden, cl::init(32768), cl::ZeroOrMore, cl::cat(PollyCategory));
161
162
static cl::opt<int> SecondCacheLevelSize(
163
    "polly-target-2nd-cache-level-size",
164
    cl::desc("The size of the second level specified in bytes."), cl::Hidden,
165
    cl::init(262144), cl::ZeroOrMore, cl::cat(PollyCategory));
166
167
static cl::opt<int> VectorRegisterBitwidth(
168
    "polly-target-vector-register-bitwidth",
169
    cl::desc("The size in bits of a vector register (if not set, this "
170
             "information is taken from LLVM's target information."),
171
    cl::Hidden, cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));
172
173
static cl::opt<int> FirstLevelDefaultTileSize(
174
    "polly-default-tile-size",
175
    cl::desc("The default tile size (if not enough were provided by"
176
             " --polly-tile-sizes)"),
177
    cl::Hidden, cl::init(32), cl::ZeroOrMore, cl::cat(PollyCategory));
178
179
static cl::list<int>
180
    FirstLevelTileSizes("polly-tile-sizes",
181
                        cl::desc("A tile size for each loop dimension, filled "
182
                                 "with --polly-default-tile-size"),
183
                        cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
184
                        cl::cat(PollyCategory));
185
186
static cl::opt<bool>
187
    SecondLevelTiling("polly-2nd-level-tiling",
188
                      cl::desc("Enable a 2nd level loop of loop tiling"),
189
                      cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
190
191
static cl::opt<int> SecondLevelDefaultTileSize(
192
    "polly-2nd-level-default-tile-size",
193
    cl::desc("The default 2nd-level tile size (if not enough were provided by"
194
             " --polly-2nd-level-tile-sizes)"),
195
    cl::Hidden, cl::init(16), cl::ZeroOrMore, cl::cat(PollyCategory));
196
197
static cl::list<int>
198
    SecondLevelTileSizes("polly-2nd-level-tile-sizes",
199
                         cl::desc("A tile size for each loop dimension, filled "
200
                                  "with --polly-default-tile-size"),
201
                         cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
202
                         cl::cat(PollyCategory));
203
204
static cl::opt<bool> RegisterTiling("polly-register-tiling",
205
                                    cl::desc("Enable register tiling"),
206
                                    cl::init(false), cl::ZeroOrMore,
207
                                    cl::cat(PollyCategory));
208
209
static cl::opt<int> RegisterDefaultTileSize(
210
    "polly-register-tiling-default-tile-size",
211
    cl::desc("The default register tile size (if not enough were provided by"
212
             " --polly-register-tile-sizes)"),
213
    cl::Hidden, cl::init(2), cl::ZeroOrMore, cl::cat(PollyCategory));
214
215
static cl::opt<int> PollyPatternMatchingNcQuotient(
216
    "polly-pattern-matching-nc-quotient",
217
    cl::desc("Quotient that is obtained by dividing Nc, the parameter of the"
218
             "macro-kernel, by Nr, the parameter of the micro-kernel"),
219
    cl::Hidden, cl::init(256), cl::ZeroOrMore, cl::cat(PollyCategory));
220
221
static cl::list<int>
222
    RegisterTileSizes("polly-register-tile-sizes",
223
                      cl::desc("A tile size for each loop dimension, filled "
224
                               "with --polly-register-tile-size"),
225
                      cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
226
                      cl::cat(PollyCategory));
227
228
static cl::opt<bool>
229
    PMBasedOpts("polly-pattern-matching-based-opts",
230
                cl::desc("Perform optimizations based on pattern matching"),
231
                cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory));
232
233
static cl::opt<bool> OptimizedScops(
234
    "polly-optimized-scops",
235
    cl::desc("Polly - Dump polyhedral description of Scops optimized with "
236
             "the isl scheduling optimizer and the set of post-scheduling "
237
             "transformations is applied on the schedule tree"),
238
    cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
239
240
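// The options above are ordinary LLVM command-line flags. For illustration
// only (the exact invocation depends on how Polly is built and linked into
// the toolchain), they are typically exercised through 'opt' or via clang's
// -mllvm forwarding, e.g.:
//
//   opt -polly-opt-isl -polly-tiling -polly-tile-sizes=64,64 \
//       -polly-pattern-matching-based-opts=true input.ll -S
//
// This is a sketch of common usage, not a command line taken from the Polly
// documentation.
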
/// Create an isl::union_set, which describes the isolate option based on
/// IsolateDomain.
///
/// @param IsolateDomain An isl::set whose @p OutDimsNum last dimensions should
///                      belong to the current band node.
/// @param OutDimsNum    A number of dimensions that should belong to
///                      the current band node.
static isl::union_set getIsolateOptions(isl::set IsolateDomain,
                                        unsigned OutDimsNum) {
  unsigned Dims = IsolateDomain.dim(isl::dim::set);
  assert(OutDimsNum <= Dims &&
         "The isl::set IsolateDomain is used to describe the range of schedule "
         "dimension values, which should be isolated. Consequently, the "
         "number of its dimensions should be greater than or equal to the "
         "number of the schedule dimensions.");
  isl::map IsolateRelation = isl::map::from_domain(IsolateDomain);
  IsolateRelation = IsolateRelation.move_dims(isl::dim::out, 0, isl::dim::in,
                                              Dims - OutDimsNum, OutDimsNum);
  isl::set IsolateOption = IsolateRelation.wrap();
  isl::id Id = isl::id::alloc(IsolateOption.get_ctx(), "isolate", nullptr);
  IsolateOption = IsolateOption.set_tuple_id(Id);
  return isl::union_set(IsolateOption);
}

/// Create an isl::union_set, which describes the atomic option for the
/// dimension of the current node.
///
/// It may help to reduce the size of generated code.
///
/// @param Ctx An isl::ctx, which is used to create the isl::union_set.
static isl::union_set getAtomicOptions(isl::ctx Ctx) {
  isl::space Space(Ctx, 0, 1);
  isl::set AtomicOption = isl::set::universe(Space);
  isl::id Id = isl::id::alloc(Ctx, "atomic", nullptr);
  AtomicOption = AtomicOption.set_tuple_id(Id);
  return isl::union_set(AtomicOption);
}

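// For illustration (assuming a band with three schedule dimensions of which
// the last one is isolated, i.e. OutDimsNum == 1), getIsolateOptions yields an
// AST build option of the shape
//
//   { isolate[[i0, i1] -> [i2]] : <constraints inherited from IsolateDomain> }
//
// and getAtomicOptions yields
//
//   { atomic[x] }
//
// Both sets are later attached to a band node via
// band_set_ast_build_options (see isolateFullPartialTiles below).
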
/// Create an isl::union_set, which describes the option of the form
/// [isolate[] -> unroll[x]].
///
/// @param Ctx An isl::ctx, which is used to create the isl::union_set.
static isl::union_set getUnrollIsolatedSetOptions(isl::ctx Ctx) {
  isl::space Space = isl::space(Ctx, 0, 0, 1);
  isl::map UnrollIsolatedSetOption = isl::map::universe(Space);
  isl::id DimInId = isl::id::alloc(Ctx, "isolate", nullptr);
  isl::id DimOutId = isl::id::alloc(Ctx, "unroll", nullptr);
  UnrollIsolatedSetOption =
      UnrollIsolatedSetOption.set_tuple_id(isl::dim::in, DimInId);
  UnrollIsolatedSetOption =
      UnrollIsolatedSetOption.set_tuple_id(isl::dim::out, DimOutId);
  return UnrollIsolatedSetOption.wrap();
}

/// Make the last dimension of @p Set take values from 0 to VectorWidth - 1.
///
/// @param Set         A set, which should be modified.
/// @param VectorWidth A parameter, which determines the constraint.
static isl::set addExtentConstraints(isl::set Set, int VectorWidth) {
  unsigned Dims = Set.dim(isl::dim::set);
  isl::space Space = Set.get_space();
  isl::local_space LocalSpace = isl::local_space(Space);
  isl::constraint ExtConstr = isl::constraint::alloc_inequality(LocalSpace);
  ExtConstr = ExtConstr.set_constant_si(0);
  ExtConstr = ExtConstr.set_coefficient_si(isl::dim::set, Dims - 1, 1);
  Set = Set.add_constraint(ExtConstr);
  ExtConstr = isl::constraint::alloc_inequality(LocalSpace);
  ExtConstr = ExtConstr.set_constant_si(VectorWidth - 1);
  ExtConstr = ExtConstr.set_coefficient_si(isl::dim::set, Dims - 1, -1);
  return Set.add_constraint(ExtConstr);
}

isl::set getPartialTilePrefixes(isl::set ScheduleRange, int VectorWidth) {
  unsigned Dims = ScheduleRange.dim(isl::dim::set);
  isl::set LoopPrefixes =
      ScheduleRange.drop_constraints_involving_dims(isl::dim::set, Dims - 1, 1);
  auto ExtentPrefixes = addExtentConstraints(LoopPrefixes, VectorWidth);
  isl::set BadPrefixes = ExtentPrefixes.subtract(ScheduleRange);
  BadPrefixes = BadPrefixes.project_out(isl::dim::set, Dims - 1, 1);
  LoopPrefixes = LoopPrefixes.project_out(isl::dim::set, Dims - 1, 1);
  return LoopPrefixes.subtract(BadPrefixes);
}

isl::schedule_node
ScheduleTreeOptimizer::isolateFullPartialTiles(isl::schedule_node Node,
                                               int VectorWidth) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
  Node = Node.child(0).child(0);
  isl::union_map SchedRelUMap = Node.get_prefix_schedule_relation();
  isl::map ScheduleRelation = isl::map::from_union_map(SchedRelUMap);
  isl::set ScheduleRange = ScheduleRelation.range();
  isl::set IsolateDomain = getPartialTilePrefixes(ScheduleRange, VectorWidth);
  isl::union_set AtomicOption = getAtomicOptions(IsolateDomain.get_ctx());
  isl::union_set IsolateOption = getIsolateOptions(IsolateDomain, 1);
  Node = Node.parent().parent();
  isl::union_set Options = IsolateOption.unite(AtomicOption);
  Node = Node.band_set_ast_build_options(Options);
  return Node;
}

isl::schedule_node ScheduleTreeOptimizer::prevectSchedBand(
    isl::schedule_node Node, unsigned DimToVectorize, int VectorWidth) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);

  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto ScheduleDimensions = Space.dim(isl::dim::set);
  assert(DimToVectorize < ScheduleDimensions);

  if (DimToVectorize > 0) {
    Node = isl::manage(
        isl_schedule_node_band_split(Node.release(), DimToVectorize));
    Node = Node.child(0);
  }
  if (DimToVectorize < ScheduleDimensions - 1)
    Node = isl::manage(isl_schedule_node_band_split(Node.release(), 1));
  Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto Sizes = isl::multi_val::zero(Space);
  Sizes = Sizes.set_val(0, isl::val(Node.get_ctx(), VectorWidth));
  Node =
      isl::manage(isl_schedule_node_band_tile(Node.release(), Sizes.release()));
  Node = isolateFullPartialTiles(Node, VectorWidth);
  Node = Node.child(0);
  // Make sure the "trivially vectorizable loop" is not unrolled. Otherwise,
  // we will have trouble matching it in the backend.
  Node = Node.band_set_ast_build_options(
      isl::union_set(Node.get_ctx(), "{ unroll[x]: 1 = 0 }"));
  Node = isl::manage(isl_schedule_node_band_sink(Node.release()));
  Node = Node.child(0);
  if (isl_schedule_node_get_type(Node.get()) == isl_schedule_node_leaf)
    Node = Node.parent();
  auto LoopMarker = isl::id::alloc(Node.get_ctx(), "SIMD", nullptr);
  return Node.insert_mark(LoopMarker);
}

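// For illustration (a sketch, not the exact generated AST): with the default
// PrevectorWidth of 4, prevectSchedBand strip-mines the chosen parallel
// dimension
//
//   for (i = 0; i < N; i++)
//     S(i);
//
// into an outer loop over chunks of four iterations and an innermost loop
// that is sunk to the innermost level and marked "SIMD", so that it can later
// be turned into vector code:
//
//   for (it = 0; it < N; it += 4)
//     for (i = it; i < min(it + 4, N); i++)   // SIMD
//       S(i);
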
isl::schedule_node ScheduleTreeOptimizer::tileNode(isl::schedule_node Node,
                                                   const char *Identifier,
                                                   ArrayRef<int> TileSizes,
                                                   int DefaultTileSize) {
  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto Dims = Space.dim(isl::dim::set);
  auto Sizes = isl::multi_val::zero(Space);
  std::string IdentifierString(Identifier);
  for (unsigned i = 0; i < Dims; i++) {
    auto tileSize = i < TileSizes.size() ? TileSizes[i] : DefaultTileSize;
    Sizes = Sizes.set_val(i, isl::val(Node.get_ctx(), tileSize));
  }
  auto TileLoopMarkerStr = IdentifierString + " - Tiles";
  auto TileLoopMarker =
      isl::id::alloc(Node.get_ctx(), TileLoopMarkerStr.c_str(), nullptr);
  Node = Node.insert_mark(TileLoopMarker);
  Node = Node.child(0);
  Node =
      isl::manage(isl_schedule_node_band_tile(Node.release(), Sizes.release()));
  Node = Node.child(0);
  auto PointLoopMarkerStr = IdentifierString + " - Points";
  auto PointLoopMarker =
      isl::id::alloc(Node.get_ctx(), PointLoopMarkerStr.c_str(), nullptr);
  Node = Node.insert_mark(PointLoopMarker);
  return Node.child(0);
}

isl::schedule_node
ScheduleTreeOptimizer::applyRegisterTiling(isl::schedule_node Node,
                                           llvm::ArrayRef<int> TileSizes,
                                           int DefaultTileSize) {
  Node = tileNode(Node, "Register tiling", TileSizes, DefaultTileSize);
  auto Ctx = Node.get_ctx();
  return Node.band_set_ast_build_options(isl::union_set(Ctx, "{unroll[x]}"));
}

namespace {
bool isSimpleInnermostBand(const isl::schedule_node &Node) {
  assert(isl_schedule_node_get_type(Node.keep()) == isl_schedule_node_band);
  assert(isl_schedule_node_n_children(Node.keep()) == 1);

  auto ChildType = isl_schedule_node_get_type(Node.child(0).keep());

  if (ChildType == isl_schedule_node_leaf)
    return true;

  if (ChildType != isl_schedule_node_sequence)
    return false;

  auto Sequence = Node.child(0);

  for (int c = 0, nc = isl_schedule_node_n_children(Sequence.keep()); c < nc;
       ++c) {
    auto Child = Sequence.child(c);
    if (isl_schedule_node_get_type(Child.keep()) != isl_schedule_node_filter)
      return false;
    if (isl_schedule_node_get_type(Child.child(0).keep()) !=
        isl_schedule_node_leaf)
      return false;
  }
  return true;
}
} // namespace

bool ScheduleTreeOptimizer::isTileableBandNode(isl::schedule_node Node) {
  if (isl_schedule_node_get_type(Node.get()) != isl_schedule_node_band)
    return false;

  if (isl_schedule_node_n_children(Node.get()) != 1)
    return false;

  if (!isl_schedule_node_band_get_permutable(Node.get()))
    return false;

  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto Dims = Space.dim(isl::dim::set);

  if (Dims <= 1)
    return false;

  return isSimpleInnermostBand(Node);
}

__isl_give isl::schedule_node
ScheduleTreeOptimizer::standardBandOpts(isl::schedule_node Node, void *User) {
  if (FirstLevelTiling)
    Node = tileNode(Node, "1st level tiling", FirstLevelTileSizes,
                    FirstLevelDefaultTileSize);

  if (SecondLevelTiling)
    Node = tileNode(Node, "2nd level tiling", SecondLevelTileSizes,
                    SecondLevelDefaultTileSize);

  if (RegisterTiling)
    Node =
        applyRegisterTiling(Node, RegisterTileSizes, RegisterDefaultTileSize);

  if (PollyVectorizerChoice == VECTORIZER_NONE)
    return Node;

  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto Dims = Space.dim(isl::dim::set);

  for (int i = Dims - 1; i >= 0; i--)
    if (Node.band_member_get_coincident(i)) {
      Node = prevectSchedBand(Node, i, PrevectorWidth);
      break;
    }

  return Node;
}

/// Permute the two dimensions of the isl map.
///
/// Permute @p DstPos and @p SrcPos dimensions of the isl map @p Map that
/// have type @p DimType.
///
/// @param Map     The isl map to be modified.
/// @param DimType The type of the dimensions.
/// @param DstPos  The first dimension.
/// @param SrcPos  The second dimension.
/// @return        The modified map.
isl::map permuteDimensions(isl::map Map, isl::dim DimType, unsigned DstPos,
                           unsigned SrcPos) {
  assert(DstPos < Map.dim(DimType) && SrcPos < Map.dim(DimType));
  if (DstPos == SrcPos)
    return Map;
  isl::id DimId;
  if (Map.has_tuple_id(DimType))
    DimId = Map.get_tuple_id(DimType);
  auto FreeDim = DimType == isl::dim::in ? isl::dim::out : isl::dim::in;
  isl::id FreeDimId;
  if (Map.has_tuple_id(FreeDim))
    FreeDimId = Map.get_tuple_id(FreeDim);
  auto MaxDim = std::max(DstPos, SrcPos);
  auto MinDim = std::min(DstPos, SrcPos);
  Map = Map.move_dims(FreeDim, 0, DimType, MaxDim, 1);
  Map = Map.move_dims(FreeDim, 0, DimType, MinDim, 1);
  Map = Map.move_dims(DimType, MinDim, FreeDim, 1, 1);
  Map = Map.move_dims(DimType, MaxDim, FreeDim, 0, 1);
  if (DimId)
    Map = Map.set_tuple_id(DimType, DimId);
  if (FreeDimId)
    Map = Map.set_tuple_id(FreeDim, FreeDimId);
  return Map;
}

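// For example (illustrative only): applying
// permuteDimensions(Map, isl::dim::out, 0, 2) to
//
//   { S[i, j, k] -> [i, j, k] }
//
// swaps the first and the third output dimensions and yields
//
//   { S[i, j, k] -> [k, j, i] }
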
/// Check the form of the access relation.
///
/// Check that the access relation @p AccMap has the form M[i][j], where i
/// corresponds to @p FirstPos and j corresponds to @p SecondPos.
///
/// @param Domain    The domain of the SCoP statement.
/// @param AccMap    The access relation to be checked.
/// @param FirstPos  The index of the input dimension that is mapped to
///                  the first output dimension.
/// @param SecondPos The index of the input dimension that is mapped to the
///                  second output dimension.
/// @return          True in case @p AccMap has the expected form and false,
///                  otherwise.
static bool isMatMulOperandAcc(isl::set Domain, isl::map AccMap, int &FirstPos,
                               int &SecondPos) {

  isl::space Space = AccMap.get_space();
  isl::map Universe = isl::map::universe(Space);

  if (Space.dim(isl::dim::out) != 2)
    return false;

  // MatMul has the form:
  // for (i = 0; i < N; i++)
  //   for (j = 0; j < M; j++)
  //     for (k = 0; k < P; k++)
  //       C[i, j] += A[i, k] * B[k, j]
  //
  // Permutation of three outer loops: 3! = 6 possibilities.
  int FirstDims[] = {0, 0, 1, 1, 2, 2};
  int SecondDims[] = {1, 2, 2, 0, 0, 1};
  for (int i = 0; i < 6; i += 1) {
    auto PossibleMatMul =
        Universe.equate(isl::dim::in, FirstDims[i], isl::dim::out, 0)
            .equate(isl::dim::in, SecondDims[i], isl::dim::out, 1);

    AccMap = AccMap.intersect_domain(Domain);
    PossibleMatMul = PossibleMatMul.intersect_domain(Domain);

    // If AccMap spans entire domain (Non-partial write),
    // compute FirstPos and SecondPos.
    // If AccMap != PossibleMatMul here (the two maps have been gisted at
    // this point), it means that the writes are not complete, or in other
    // words, it is a Partial write and Partial writes must be rejected.
    if (AccMap.is_equal(PossibleMatMul)) {
      if (FirstPos != -1 && FirstPos != FirstDims[i])
        continue;
      FirstPos = FirstDims[i];
      if (SecondPos != -1 && SecondPos != SecondDims[i])
        continue;
      SecondPos = SecondDims[i];
      return true;
    }
  }

  return false;
}

/// Does the memory access represent a non-scalar operand of the matrix
/// multiplication?
///
/// Check that the memory access @p MemAccess is the read access to a non-scalar
/// operand of the matrix multiplication or its result.
///
/// @param MemAccess The memory access to be checked.
/// @param MMI       Parameters of the matrix multiplication operands.
/// @return          True in case the memory access represents the read access
///                  to a non-scalar operand of the matrix multiplication and
///                  false, otherwise.
static bool isMatMulNonScalarReadAccess(MemoryAccess *MemAccess,
                                        MatMulInfoTy &MMI) {
  if (!MemAccess->isLatestArrayKind() || !MemAccess->isRead())
    return false;
  auto AccMap = MemAccess->getLatestAccessRelation();
  isl::set StmtDomain = MemAccess->getStatement()->getDomain();
  if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.i, MMI.j) && !MMI.ReadFromC) {
    MMI.ReadFromC = MemAccess;
    return true;
  }
  if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.i, MMI.k) && !MMI.A) {
    MMI.A = MemAccess;
    return true;
  }
  if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.k, MMI.j) && !MMI.B) {
    MMI.B = MemAccess;
    return true;
  }
  return false;
}

/// Check accesses to operands of the matrix multiplication.
///
/// Check that accesses of the SCoP statement, which corresponds to
/// the partial schedule @p PartialSchedule, are scalar in terms of loops
/// containing the matrix multiplication, in case they do not represent
/// accesses to the non-scalar operands of the matrix multiplication or
/// its result.
///
/// @param  PartialSchedule The partial schedule of the SCoP statement.
/// @param  MMI             Parameters of the matrix multiplication operands.
/// @return                 True in case the corresponding SCoP statement
///                         represents matrix multiplication and false,
///                         otherwise.
static bool containsOnlyMatrMultAcc(isl::map PartialSchedule,
                                    MatMulInfoTy &MMI) {
  auto InputDimId = PartialSchedule.get_tuple_id(isl::dim::in);
  auto *Stmt = static_cast<ScopStmt *>(InputDimId.get_user());
  unsigned OutDimNum = PartialSchedule.dim(isl::dim::out);
  assert(OutDimNum > 2 && "In case of the matrix multiplication the loop nest "
                          "and, consequently, the corresponding scheduling "
                          "functions have at least three dimensions.");
  auto MapI =
      permuteDimensions(PartialSchedule, isl::dim::out, MMI.i, OutDimNum - 1);
  auto MapJ =
      permuteDimensions(PartialSchedule, isl::dim::out, MMI.j, OutDimNum - 1);
  auto MapK =
      permuteDimensions(PartialSchedule, isl::dim::out, MMI.k, OutDimNum - 1);

  auto Accesses = getAccessesInOrder(*Stmt);
  for (auto *MemA = Accesses.begin(); MemA != Accesses.end() - 1; MemA++) {
    auto *MemAccessPtr = *MemA;
    if (MemAccessPtr->isLatestArrayKind() && MemAccessPtr != MMI.WriteToC &&
        !isMatMulNonScalarReadAccess(MemAccessPtr, MMI) &&
        !(MemAccessPtr->isStrideZero(MapI)) &&
        MemAccessPtr->isStrideZero(MapJ) && MemAccessPtr->isStrideZero(MapK))
      return false;
  }
  return true;
}

/// Check for dependencies corresponding to the matrix multiplication.
///
/// Check that there is only a true dependence of the form
/// S(..., k, ...) -> S(..., k + 1, …), where S is the SCoP statement
/// represented by @p Schedule and k is @p Pos. Such a dependence corresponds
/// to the dependency produced by the matrix multiplication.
///
/// @param  Schedule The schedule of the SCoP statement.
/// @param  D The SCoP dependencies.
/// @param  Pos The parameter to describe an acceptable true dependence.
///             In case it has a negative value, try to determine its
///             acceptable value.
/// @return True in case dependencies correspond to the matrix multiplication
///         and false, otherwise.
static bool containsOnlyMatMulDep(isl::map Schedule, const Dependences *D,
                                  int &Pos) {
  auto Dep = isl::manage(D->getDependences(Dependences::TYPE_RAW));
  auto Red = isl::manage(D->getDependences(Dependences::TYPE_RED));
  if (Red)
    Dep = Dep.unite(Red);
  auto DomainSpace = Schedule.get_space().domain();
  auto Space = DomainSpace.map_from_domain_and_range(DomainSpace);
  auto Deltas = Dep.extract_map(Space).deltas();
  int DeltasDimNum = Deltas.dim(isl::dim::set);
  for (int i = 0; i < DeltasDimNum; i++) {
    auto Val = Deltas.plain_get_val_if_fixed(isl::dim::set, i);
    Pos = Pos < 0 && Val.is_one() ? i : Pos;
    if (Val.is_nan() || !(Val.is_zero() || (i == Pos && Val.is_one())))
      return false;
  }
  if (DeltasDimNum == 0 || Pos < 0)
    return false;
  return true;
}

/// Check if the SCoP statement could probably be optimized with analytical
/// modeling.
///
/// containsMatrMult tries to determine whether the following conditions
/// are true:
/// 1. The last memory access modeling an array, MA1, represents writing to
///    memory and has the form S(..., i1, ..., i2, ...) -> M(i1, i2) or
///    S(..., i2, ..., i1, ...) -> M(i1, i2), where S is the SCoP statement
///    under consideration.
/// 2. There is only one loop-carried true dependency, and it has the
///    form S(..., i3, ...) -> S(..., i3 + 1, ...), and there are no other
///    loop-carried or anti dependencies.
/// 3. SCoP contains three access relations, MA2, MA3, and MA4 that represent
///    reading from memory and have the form S(..., i3, ...) -> M(i1, i3),
///    S(..., i3, ...) -> M(i3, i2), S(...) -> M(i1, i2), respectively,
///    and all memory accesses of the SCoP that are different from MA1, MA2,
///    MA3, and MA4 have stride 0, if the innermost loop is exchanged with any
///    of loops i1, i2 and i3.
///
/// @param PartialSchedule The PartialSchedule that contains a SCoP statement
///        to check.
/// @param D   The SCoP dependencies.
/// @param MMI Parameters of the matrix multiplication operands.
static bool containsMatrMult(isl::map PartialSchedule, const Dependences *D,
                             MatMulInfoTy &MMI) {
  auto InputDimsId = PartialSchedule.get_tuple_id(isl::dim::in);
  auto *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user());
  if (Stmt->size() <= 1)
    return false;

  auto Accesses = getAccessesInOrder(*Stmt);
  for (auto *MemA = Accesses.end() - 1; MemA != Accesses.begin(); MemA--) {
    auto *MemAccessPtr = *MemA;
    if (!MemAccessPtr->isLatestArrayKind())
      continue;
    if (!MemAccessPtr->isWrite())
      return false;
    auto AccMap = MemAccessPtr->getLatestAccessRelation();
    if (!isMatMulOperandAcc(Stmt->getDomain(), AccMap, MMI.i, MMI.j))
      return false;
    MMI.WriteToC = MemAccessPtr;
    break;
  }

  if (!containsOnlyMatMulDep(PartialSchedule, D, MMI.k))
    return false;

  if (!MMI.WriteToC || !containsOnlyMatrMultAcc(PartialSchedule, MMI))
    return false;

  if (!MMI.A || !MMI.B || !MMI.ReadFromC)
    return false;
  return true;
}

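// Schematically, for the canonical kernel from the comment above,
//
//   for (i = 0; i < N; i++)
//     for (j = 0; j < M; j++)
//       for (k = 0; k < P; k++)
//         C[i, j] += A[i, k] * B[k, j]
//
// containsMatrMult expects to find the access relations
//
//   S[i, j, k] -> C[i, j]   (MMI.WriteToC and MMI.ReadFromC)
//   S[i, j, k] -> A[i, k]   (MMI.A)
//   S[i, j, k] -> B[k, j]   (MMI.B)
//
// and records the detected loop dimensions in MMI.i, MMI.j and MMI.k.
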
/// Permute two dimensions of the band node.
///
/// Permute FirstDim and SecondDim dimensions of the Node.
///
/// @param Node The band node to be modified.
/// @param FirstDim The first dimension to be permuted.
/// @param SecondDim The second dimension to be permuted.
static isl::schedule_node permuteBandNodeDimensions(isl::schedule_node Node,
                                                    unsigned FirstDim,
                                                    unsigned SecondDim) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band &&
         isl_schedule_node_band_n_member(Node.get()) >
             std::max(FirstDim, SecondDim));
  auto PartialSchedule =
      isl::manage(isl_schedule_node_band_get_partial_schedule(Node.get()));
  auto PartialScheduleFirstDim = PartialSchedule.get_union_pw_aff(FirstDim);
  auto PartialScheduleSecondDim = PartialSchedule.get_union_pw_aff(SecondDim);
  PartialSchedule =
      PartialSchedule.set_union_pw_aff(SecondDim, PartialScheduleFirstDim);
  PartialSchedule =
      PartialSchedule.set_union_pw_aff(FirstDim, PartialScheduleSecondDim);
  Node = isl::manage(isl_schedule_node_delete(Node.release()));
  return Node.insert_partial_schedule(PartialSchedule);
}

isl::schedule_node ScheduleTreeOptimizer::createMicroKernel(
    isl::schedule_node Node, MicroKernelParamsTy MicroKernelParams) {
  Node = applyRegisterTiling(Node, {MicroKernelParams.Mr, MicroKernelParams.Nr},
                             1);
  Node = Node.parent().parent();
  return permuteBandNodeDimensions(Node, 0, 1).child(0).child(0);
}

isl::schedule_node ScheduleTreeOptimizer::createMacroKernel(
    isl::schedule_node Node, MacroKernelParamsTy MacroKernelParams) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
  if (MacroKernelParams.Mc == 1 && MacroKernelParams.Nc == 1 &&
      MacroKernelParams.Kc == 1)
    return Node;
  int DimOutNum = isl_schedule_node_band_n_member(Node.get());
  std::vector<int> TileSizes(DimOutNum, 1);
  TileSizes[DimOutNum - 3] = MacroKernelParams.Mc;
  TileSizes[DimOutNum - 2] = MacroKernelParams.Nc;
  TileSizes[DimOutNum - 1] = MacroKernelParams.Kc;
  Node = tileNode(Node, "1st level tiling", TileSizes, 1);
  Node = Node.parent().parent();
  Node = permuteBandNodeDimensions(Node, DimOutNum - 2, DimOutNum - 1);
  Node = permuteBandNodeDimensions(Node, DimOutNum - 3, DimOutNum - 1);
  return Node.child(0).child(0);
}

/// Get the size of the widest type of the matrix multiplication operands
/// in bytes, including alignment padding.
///
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The size of the widest type of the matrix multiplication operands
///         in bytes, including alignment padding.
static uint64_t getMatMulAlignTypeSize(MatMulInfoTy MMI) {
  auto *S = MMI.A->getStatement()->getParent();
  auto &DL = S->getFunction().getParent()->getDataLayout();
  auto ElementSizeA = DL.getTypeAllocSize(MMI.A->getElementType());
  auto ElementSizeB = DL.getTypeAllocSize(MMI.B->getElementType());
  auto ElementSizeC = DL.getTypeAllocSize(MMI.WriteToC->getElementType());
  return std::max({ElementSizeA, ElementSizeB, ElementSizeC});
}

/// Get the size of the widest type of the matrix multiplication operands
/// in bits.
///
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The size of the widest type of the matrix multiplication operands
///         in bits.
static uint64_t getMatMulTypeSize(MatMulInfoTy MMI) {
  auto *S = MMI.A->getStatement()->getParent();
  auto &DL = S->getFunction().getParent()->getDataLayout();
  auto ElementSizeA = DL.getTypeSizeInBits(MMI.A->getElementType());
  auto ElementSizeB = DL.getTypeSizeInBits(MMI.B->getElementType());
  auto ElementSizeC = DL.getTypeSizeInBits(MMI.WriteToC->getElementType());
  return std::max({ElementSizeA, ElementSizeB, ElementSizeC});
}

/// Get parameters of the BLIS micro kernel.
///
/// We choose the Mr and Nr parameters of the micro kernel to be large enough
/// such that no stalls caused by the combination of latencies and dependencies
/// are introduced during the updates of the resulting matrix of the matrix
/// multiplication. However, they should also be as small as possible to
/// release more registers for entries of multiplied matrices.
///
/// @param TTI Target Transform Info.
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The structure of type MicroKernelParamsTy.
/// @see MicroKernelParamsTy
static struct MicroKernelParamsTy
getMicroKernelParams(const llvm::TargetTransformInfo *TTI, MatMulInfoTy MMI) {
  assert(TTI && "The target transform info should be provided.");

  // Nvec - Number of double-precision floating-point numbers that can be held
  // by a vector register. Use 2 by default.
  long RegisterBitwidth = VectorRegisterBitwidth;

  if (RegisterBitwidth == -1)
    RegisterBitwidth = TTI->getRegisterBitWidth(true);
  auto ElementSize = getMatMulTypeSize(MMI);
  assert(ElementSize > 0 && "The element size of the matrix multiplication "
                            "operands should be greater than zero.");
  auto Nvec = RegisterBitwidth / ElementSize;
  if (Nvec == 0)
    Nvec = 2;
  int Nr =
      ceil(sqrt(Nvec * LatencyVectorFma * ThroughputVectorFma) / Nvec) * Nvec;
  int Mr = ceil(Nvec * LatencyVectorFma * ThroughputVectorFma / Nr);
  return {Mr, Nr};
}

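// Worked example (an illustration that assumes a 256-bit vector register and
// double-precision operands, i.e. ElementSize == 64 bits, combined with the
// default LatencyVectorFma == 8 and ThroughputVectorFma == 1):
//
//   Nvec = 256 / 64 = 4
//   Nr   = ceil(sqrt(4 * 8 * 1) / 4) * 4 = ceil(5.66 / 4) * 4 = 2 * 4 = 8
//   Mr   = ceil((4 * 8 * 1) / 8) = 4
//
// i.e. a 4 x 8 block of C is kept in vector registers by the micro-kernel.
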
/// Get parameters of the BLIS macro kernel.
///
/// During the computation of matrix multiplication, blocks of partitioned
/// matrices are mapped to different layers of the memory hierarchy.
/// To optimize data reuse, blocks should be ideally kept in cache between
/// iterations. Since parameters of the macro kernel determine sizes of these
/// blocks, there are upper and lower bounds on these parameters.
///
/// @param MicroKernelParams Parameters of the micro-kernel
///                          to be taken into account.
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The structure of type MacroKernelParamsTy.
/// @see MacroKernelParamsTy
/// @see MicroKernelParamsTy
static struct MacroKernelParamsTy
getMacroKernelParams(const MicroKernelParamsTy &MicroKernelParams,
                     MatMulInfoTy MMI) {
  // According to www.cs.utexas.edu/users/flame/pubs/TOMS-BLIS-Analytical.pdf,
  // it requires information about the first two levels of a cache to determine
  // all the parameters of a macro-kernel. It also checks that an associativity
  // degree of a cache level is greater than two. Otherwise, another algorithm
  // for determination of the parameters should be used.
  if (!(MicroKernelParams.Mr > 0 && MicroKernelParams.Nr > 0 &&
        FirstCacheLevelSize > 0 && SecondCacheLevelSize > 0 &&
        FirstCacheLevelAssociativity > 2 && SecondCacheLevelAssociativity > 2))
    return {1, 1, 1};
  // The quotient should be greater than zero.
  if (PollyPatternMatchingNcQuotient <= 0)
    return {1, 1, 1};
  int Car = floor(
      (FirstCacheLevelAssociativity - 1) /
      (1 + static_cast<double>(MicroKernelParams.Nr) / MicroKernelParams.Mr));

  // Car can be computed to be zero since it is the result of an integer floor
  // division. On Mac OS, division by 0 does not raise a signal, which causes
  // negative tile sizes to be computed. Prevent the later division by
  // Cac == 0 by returning early if this happens.
  if (Car == 0)
    return {1, 1, 1};

  auto ElementSize = getMatMulAlignTypeSize(MMI);
  assert(ElementSize > 0 && "The element size of the matrix multiplication "
                            "operands should be greater than zero.");
  int Kc = (Car * FirstCacheLevelSize) /
           (MicroKernelParams.Mr * FirstCacheLevelAssociativity * ElementSize);
  double Cac =
      static_cast<double>(Kc * ElementSize * SecondCacheLevelAssociativity) /
      SecondCacheLevelSize;
  int Mc = floor((SecondCacheLevelAssociativity - 2) / Cac);
  int Nc = PollyPatternMatchingNcQuotient * MicroKernelParams.Nr;

  assert(Mc > 0 && Nc > 0 && Kc > 0 &&
         "Matrix block sizes should be greater than zero");
  return {Mc, Nc, Kc};
}

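// Worked example, continuing the micro-kernel example above (Mr == 4, Nr == 8,
// double-precision data, so the aligned element size is 8 bytes) with the
// default cache parameters defined in this file (32 KiB, 8-way first level;
// 256 KiB, 8-way second level; PollyPatternMatchingNcQuotient == 256):
//
//   Car = floor((8 - 1) / (1 + 8 / 4))   = floor(7 / 3)     = 2
//   Kc  = (2 * 32768) / (4 * 8 * 8)      = 65536 / 256      = 256
//   Cac = (256 * 8 * 8) / 262144         = 16384 / 262144   = 0.0625
//   Mc  = floor((8 - 2) / 0.0625)        = 96
//   Nc  = 256 * 8                        = 2048
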
/// Create an access relation that is specific to
///        the matrix multiplication pattern.
///
/// Create an access relation of the following form:
/// [O0, O1, O2, O3, O4, O5, O6, O7, O8] -> [OI, O5, OJ]
/// where I is @p FirstDim, J is @p SecondDim.
///
/// It can be used, for example, to create relations that help to consecutively
/// access elements of operands of a matrix multiplication after creation of
/// the BLIS micro and macro kernels.
///
/// @see ScheduleTreeOptimizer::createMicroKernel
/// @see ScheduleTreeOptimizer::createMacroKernel
///
/// Subsequently, the described access relation is applied to the range of
/// @p MapOldIndVar, that is used to map original induction variables to
/// the ones, which are produced by schedule transformations. It helps to
/// define relations using a new space and, at the same time, keep them
/// in the original one.
///
/// @param MapOldIndVar The relation, which maps original induction variables
///                     to the ones, which are produced by schedule
///                     transformations.
/// @param FirstDim, SecondDim The input dimensions that are used to define
///        the specified access relation.
/// @return The specified access relation.
isl::map getMatMulAccRel(isl::map MapOldIndVar, unsigned FirstDim,
                         unsigned SecondDim) {
  auto AccessRelSpace = isl::space(MapOldIndVar.get_ctx(), 0, 9, 3);
  auto AccessRel = isl::map::universe(AccessRelSpace);
  AccessRel = AccessRel.equate(isl::dim::in, FirstDim, isl::dim::out, 0);
  AccessRel = AccessRel.equate(isl::dim::in, 5, isl::dim::out, 1);
  AccessRel = AccessRel.equate(isl::dim::in, SecondDim, isl::dim::out, 2);
  return MapOldIndVar.apply_range(AccessRel);
}

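// For illustration: optimizeDataLayoutMatrMulPattern below calls
// getMatMulAccRel(MapOldIndVar, 3, 7) and getMatMulAccRel(MapOldIndVar, 4, 6),
// i.e. the created relations have the shapes
//
//   [O0, ..., O8] -> [O3, O5, O7]   // used to index Packed_B
//   [O0, ..., O8] -> [O4, O5, O6]   // used to index Packed_A
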
isl::schedule_node createExtensionNode(isl::schedule_node Node,
948
24
                                       isl::map ExtensionMap) {
949
24
  auto Extension = isl::union_map(ExtensionMap);
950
24
  auto NewNode = isl::schedule_node::from_extension(Extension);
951
24
  return Node.graft_before(NewNode);
952
24
}
953
954
/// Apply the packing transformation.
955
///
956
/// The packing transformation can be described as a data-layout
957
/// transformation that requires to introduce a new array, copy data
958
/// to the array, and change memory access locations to reference the array.
959
/// It can be used to ensure that elements of the new array are read in-stride
960
/// access, aligned to cache lines boundaries, and preloaded into certain cache
961
/// levels.
962
///
963
/// As an example let us consider the packing of the array A that would help
964
/// to read its elements with in-stride access. An access to the array A
965
/// is represented by an access relation that has the form
966
/// S[i, j, k] -> A[i, k]. The scheduling function of the SCoP statement S has
967
/// the form S[i,j, k] -> [floor((j mod Nc) / Nr), floor((i mod Mc) / Mr),
968
/// k mod Kc, j mod Nr, i mod Mr].
969
///
970
/// To ensure that elements of the array A are read in-stride access, we add
971
/// a new array Packed_A[Mc/Mr][Kc][Mr] to the SCoP, using
972
/// Scop::createScopArrayInfo, change the access relation
973
/// S[i, j, k] -> A[i, k] to
974
/// S[i, j, k] -> Packed_A[floor((i mod Mc) / Mr), k mod Kc, i mod Mr], using
975
/// MemoryAccess::setNewAccessRelation, and copy the data to the array, using
976
/// the copy statement created by Scop::addScopStmt.
977
///
978
/// @param Node The schedule node to be optimized.
979
/// @param MapOldIndVar The relation, which maps original induction variables
980
///                     to the ones, which are produced by schedule
981
///                     transformations.
982
/// @param MicroParams, MacroParams Parameters of the BLIS kernel
983
///                                 to be taken into account.
984
/// @param MMI Parameters of the matrix multiplication operands.
985
/// @return The optimized schedule node.
986
static isl::schedule_node
987
optimizeDataLayoutMatrMulPattern(isl::schedule_node Node, isl::map MapOldIndVar,
988
                                 MicroKernelParamsTy MicroParams,
989
                                 MacroKernelParamsTy MacroParams,
990
12
                                 MatMulInfoTy &MMI) {
991
12
  auto InputDimsId = MapOldIndVar.get_tuple_id(isl::dim::in);
992
12
  auto *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user());
993
12
994
12
  // Create a copy statement that corresponds to the memory access to the
995
12
  // matrix B, the second operand of the matrix multiplication.
996
12
  Node = Node.parent().parent().parent().parent().parent();
997
12
  Node = isl::manage(isl_schedule_node_band_split(Node.release(), 2)).child(0);
998
12
  auto AccRel = getMatMulAccRel(isl::manage(MapOldIndVar.copy()), 3, 7);
999
12
  unsigned FirstDimSize = MacroParams.Nc / MicroParams.Nr;
1000
12
  unsigned SecondDimSize = MacroParams.Kc;
1001
12
  unsigned ThirdDimSize = MicroParams.Nr;
1002
12
  auto *SAI = Stmt->getParent()->createScopArrayInfo(
1003
12
      MMI.B->getElementType(), "Packed_B",
1004
12
      {FirstDimSize, SecondDimSize, ThirdDimSize});
1005
12
  AccRel = AccRel.set_tuple_id(isl::dim::out, SAI->getBasePtrId());
1006
12
  auto OldAcc = MMI.B->getLatestAccessRelation();
1007
12
  MMI.B->setNewAccessRelation(AccRel);
1008
12
  auto ExtMap = MapOldIndVar.project_out(isl::dim::out, 2,
1009
12
                                         MapOldIndVar.dim(isl::dim::out) - 2);
1010
12
  ExtMap = ExtMap.reverse();
1011
12
  ExtMap = ExtMap.fix_si(isl::dim::out, MMI.i, 0);
1012
12
  auto Domain = Stmt->getDomain();
1013
12
1014
12
  // Restrict the domains of the copy statements to only execute when also its
1015
12
  // originating statement is executed.
1016
12
  auto DomainId = Domain.get_tuple_id();
1017
12
  auto *NewStmt = Stmt->getParent()->addScopStmt(
1018
12
      OldAcc, MMI.B->getLatestAccessRelation(), Domain);
1019
12
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, isl::manage(DomainId.copy()));
1020
12
  ExtMap = ExtMap.intersect_range(isl::manage(Domain.copy()));
1021
12
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, NewStmt->getDomainId());
1022
12
  Node = createExtensionNode(Node, ExtMap);
1023
12
1024
12
  // Create a copy statement that corresponds to the memory access
1025
12
  // to the matrix A, the first operand of the matrix multiplication.
1026
12
  Node = Node.child(0);
1027
12
  AccRel = getMatMulAccRel(isl::manage(MapOldIndVar.copy()), 4, 6);
1028
12
  FirstDimSize = MacroParams.Mc / MicroParams.Mr;
1029
12
  ThirdDimSize = MicroParams.Mr;
1030
12
  SAI = Stmt->getParent()->createScopArrayInfo(
1031
12
      MMI.A->getElementType(), "Packed_A",
1032
12
      {FirstDimSize, SecondDimSize, ThirdDimSize});
1033
12
  AccRel = AccRel.set_tuple_id(isl::dim::out, SAI->getBasePtrId());
1034
12
  OldAcc = MMI.A->getLatestAccessRelation();
1035
12
  MMI.A->setNewAccessRelation(AccRel);
1036
12
  ExtMap = MapOldIndVar.project_out(isl::dim::out, 3,
1037
12
                                    MapOldIndVar.dim(isl::dim::out) - 3);
1038
12
  ExtMap = ExtMap.reverse();
1039
12
  ExtMap = ExtMap.fix_si(isl::dim::out, MMI.j, 0);
1040
12
  NewStmt = Stmt->getParent()->addScopStmt(
1041
12
      OldAcc, MMI.A->getLatestAccessRelation(), Domain);
1042
12
1043
12
  // Restrict the domains of the copy statements to only execute when also its
1044
12
  // originating statement is executed.
1045
12
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, DomainId);
1046
12
  ExtMap = ExtMap.intersect_range(Domain);
1047
12
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, NewStmt->getDomainId());
1048
12
  Node = createExtensionNode(Node, ExtMap);
1049
12
  return Node.child(0).child(0).child(0).child(0);
1050
12
}
1051
1052
/// Get a relation mapping induction variables produced by schedule
1053
/// transformations to the original ones.
1054
///
1055
/// @param Node The schedule node produced as the result of creation
1056
///        of the BLIS kernels.
1057
/// @param MicroKernelParams, MacroKernelParams Parameters of the BLIS kernel
1058
///                                             to be taken into account.
1059
/// @return  The relation mapping original induction variables to the ones
1060
///          produced by schedule transformation.
1061
/// @see ScheduleTreeOptimizer::createMicroKernel
1062
/// @see ScheduleTreeOptimizer::createMacroKernel
1063
/// @see getMacroKernelParams
1064
isl::map
1065
getInductionVariablesSubstitution(isl::schedule_node Node,
1066
                                  MicroKernelParamsTy MicroKernelParams,
1067
12
                                  MacroKernelParamsTy MacroKernelParams) {
1068
12
  auto Child = Node.child(0);
1069
12
  auto UnMapOldIndVar = Child.get_prefix_schedule_union_map();
1070
12
  auto MapOldIndVar = isl::map::from_union_map(UnMapOldIndVar);
1071
12
  if (MapOldIndVar.dim(isl::dim::out) > 9)
1072
12
    return MapOldIndVar.project_out(isl::dim::out, 0,
1073
12
                                    MapOldIndVar.dim(isl::dim::out) - 9);
1074
12
  return MapOldIndVar;
1075
12
}
1076
1077
/// Isolate a set of partial tile prefixes and unroll the isolated part.
1078
///
1079
/// The set should ensure that it contains only partial tile prefixes that have
1080
/// exactly Mr x Nr iterations of the two innermost loops produced by
1081
/// the optimization of the matrix multiplication. Mr and Nr are parameters of
1082
/// the micro-kernel.
1083
///
1084
/// In case of parametric bounds, this helps to auto-vectorize the unrolled
1085
/// innermost loops, using the SLP vectorizer.
1086
///
1087
/// @param Node              The schedule node to be modified.
1088
/// @param MicroKernelParams Parameters of the micro-kernel
1089
///                          to be taken into account.
1090
/// @return The modified isl_schedule_node.
1091
static isl::schedule_node
1092
isolateAndUnrollMatMulInnerLoops(isl::schedule_node Node,
1093
12
                                 struct MicroKernelParamsTy MicroKernelParams) {
1094
12
  isl::schedule_node Child = Node.get_child(0);
1095
12
  isl::union_map UnMapOldIndVar = Child.get_prefix_schedule_relation();
1096
12
  isl::set Prefix = isl::map::from_union_map(UnMapOldIndVar).range();
1097
12
  unsigned Dims = Prefix.dim(isl::dim::set);
1098
12
  Prefix = Prefix.project_out(isl::dim::set, Dims - 1, 1);
1099
12
  Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Nr);
1100
12
  Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Mr);
1101
12
1102
12
  isl::union_set IsolateOption =
1103
12
      getIsolateOptions(Prefix.add_dims(isl::dim::set, 3), 3);
1104
12
  isl::ctx Ctx = Node.get_ctx();
1105
12
  isl::union_set AtomicOption = getAtomicOptions(Ctx);
1106
12
  isl::union_set Options = IsolateOption.unite(AtomicOption);
1107
12
  Options = Options.unite(getUnrollIsolatedSetOptions(Ctx));
1108
12
  Node = Node.band_set_ast_build_options(Options);
1109
12
  Node = Node.parent().parent();
1110
12
  IsolateOption = getIsolateOptions(Prefix, 3);
1111
12
  Options = IsolateOption.unite(AtomicOption);
1112
12
  Node = Node.band_set_ast_build_options(Options);
1113
12
  Node = Node.child(0).child(0);
1114
12
  return Node;
1115
12
}
1116
1117
/// Mark @p BasePtr with "Inter iteration alias-free" mark node.
1118
///
1119
/// @param Node The child of the mark node to be inserted.
1120
/// @param BasePtr The pointer to be marked.
1121
/// @return The modified isl_schedule_node.
1122
static isl::schedule_node markInterIterationAliasFree(isl::schedule_node Node,
1123
14
                                                      llvm::Value *BasePtr) {
1124
14
  if (!BasePtr)
1125
0
    return Node;
1126
14
1127
14
  auto Id =
1128
14
      isl::id::alloc(Node.get_ctx(), "Inter iteration alias-free", BasePtr);
1129
14
  return Node.insert_mark(Id).child(0);
1130
14
}
1131
1132
/// Restore the initial ordering of dimensions of the band node
1133
///
1134
/// In case the band node represents all the dimensions of the iteration
1135
/// domain, recreate the band node to restore the initial ordering of the
1136
/// dimensions.
1137
///
1138
/// @param Node The band node to be modified.
1139
/// @return The modified schedule node.
1140
namespace {
1141
14
isl::schedule_node getBandNodeWithOriginDimOrder(isl::schedule_node Node) {
1142
14
  assert(isl_schedule_node_get_type(Node.keep()) == isl_schedule_node_band);
1143
14
  if (isl_schedule_node_get_type(Node.child(0).keep()) !=
1144
14
      isl_schedule_node_leaf)
1145
0
    return Node;
1146
14
  auto Domain = Node.get_universe_domain();
1147
14
  assert(isl_union_set_n_set(Domain.keep()) == 1);
1148
14
  if (Node.get_schedule_depth() != 0 ||
1149
14
      (isl::set(isl::manage(Domain.copy())).dim(isl::dim::set) !=
1150
14
       isl_schedule_node_band_n_member(Node.keep())))
1151
0
    return Node;
1152
14
  Node = isl::manage(isl_schedule_node_delete(Node.take()));
1153
14
  auto PartialSchedulePwAff = Domain.identity_union_pw_multi_aff();
1154
14
  auto PartialScheduleMultiPwAff =
1155
14
      isl::multi_union_pw_aff(PartialSchedulePwAff);
1156
14
  PartialScheduleMultiPwAff =
1157
14
      PartialScheduleMultiPwAff.reset_tuple_id(isl::dim::set);
1158
14
  return Node.insert_partial_schedule(PartialScheduleMultiPwAff);
1159
14
}
1160
} // namespace
1161
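To make the effect concrete: for a single statement S[i, j, k] whose band had been permuted to the partial schedule { S[i, j, k] -> [k, i, j] }, the band is deleted and the identity schedule of the domain dimensions is re-inserted, so the later permuteBandNodeDimensions calls start from a known (i, j, k) order. A minimal standalone sketch of the identity-schedule construction, assuming the same isl C++ bindings (including a string constructor for isl::union_set); the function name and the toy domain are hypothetical:

static isl::multi_union_pw_aff makeIdentityPartialSchedule(isl::ctx Ctx) {
  isl::union_set Domain(
      Ctx, "{ S[i, j, k] : 0 <= i < 32 and 0 <= j < 32 and 0 <= k < 32 }");
  // Identity mapping of the domain dimensions; suitable as the argument of
  // insert_partial_schedule on a delete'd band position.
  isl::union_pw_multi_aff Identity = Domain.identity_union_pw_multi_aff();
  return isl::multi_union_pw_aff(Identity);
}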
1162
isl::schedule_node ScheduleTreeOptimizer::optimizeMatMulPattern(
1163
    isl::schedule_node Node, const llvm::TargetTransformInfo *TTI,
1164
14
    MatMulInfoTy &MMI) {
1165
14
  assert(TTI && "The target transform info should be provided.");
1166
14
  Node = markInterIterationAliasFree(
1167
14
      Node, MMI.WriteToC->getLatestScopArrayInfo()->getBasePtr());
1168
14
  int DimOutNum = isl_schedule_node_band_n_member(Node.get());
1169
14
  assert(DimOutNum > 2 && "In case of the matrix multiplication the loop nest "
1170
14
                          "and, consequently, the corresponding scheduling "
1171
14
                          "functions have at least three dimensions.");
1172
14
  Node = getBandNodeWithOriginDimOrder(Node);
1173
14
  Node = permuteBandNodeDimensions(Node, MMI.i, DimOutNum - 3);
1174
14
  int NewJ = MMI.j == DimOutNum - 3 ? MMI.i : MMI.j;
1175
14
  int NewK = MMI.k == DimOutNum - 3 ? MMI.i : MMI.k;
1176
14
  Node = permuteBandNodeDimensions(Node, NewJ, DimOutNum - 2);
1177
14
  NewK = NewK == DimOutNum - 2 ? NewJ : NewK;
1178
14
  Node = permuteBandNodeDimensions(Node, NewK, DimOutNum - 1);
1179
14
  auto MicroKernelParams = getMicroKernelParams(TTI, MMI);
1180
14
  auto MacroKernelParams = getMacroKernelParams(MicroKernelParams, MMI);
1181
14
  Node = createMacroKernel(Node, MacroKernelParams);
1182
14
  Node = createMicroKernel(Node, MicroKernelParams);
1183
14
  if (MacroKernelParams.Mc == 1 || MacroKernelParams.Nc == 1 ||
1184
12
      MacroKernelParams.Kc == 1)
1185
2
    return Node;
1186
14
  auto MapOldIndVar = getInductionVariablesSubstitution(Node, MicroKernelParams,
1187
12
                                                        MacroKernelParams);
1188
12
  if (!MapOldIndVar)
1189
0
    return Node;
1190
12
  Node = isolateAndUnrollMatMulInnerLoops(Node, MicroKernelParams);
1191
12
  return optimizeDataLayoutMatrMulPattern(Node, MapOldIndVar, MicroKernelParams,
1192
12
                                          MacroKernelParams, MMI);
1193
14
}
1194
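For orientation, the loop structure that createMacroKernel and createMicroKernel aim to produce corresponds to the classical blocked matrix multiplication sketched below in plain C++ (illustration only, not code emitted by Polly): the outer loops are blocked by the macro-kernel parameters Mc, Nc and Kc, and the innermost Mr x Nr block corresponds to the micro-kernel that is subsequently isolated and unrolled. The real transformation additionally packs the operands into cache-friendly buffers in optimizeDataLayoutMatrMulPattern. The sketch assumes M, N and K are multiples of the tile sizes.

static void blockedMatMulSketch(int M, int N, int K, int Mc, int Nc, int Kc,
                                int Mr, int Nr, const double *A,
                                const double *B, double *C) {
  // C (MxN) += A (MxK) * B (KxN), row-major.
  for (int jc = 0; jc < N; jc += Nc)              // macro-kernel: column panels
    for (int pc = 0; pc < K; pc += Kc)            //               K panels
      for (int ic = 0; ic < M; ic += Mc)          //               row panels
        for (int jr = jc; jr < jc + Nc; jr += Nr) // micro-kernel: Nr columns
          for (int ir = ic; ir < ic + Mc; ir += Mr) //             Mr rows
            for (int p = pc; p < pc + Kc; ++p)
              for (int i = ir; i < ir + Mr; ++i)
                for (int j = jr; j < jr + Nr; ++j)
                  C[i * N + j] += A[i * K + p] * B[p * N + j];
}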
1195
bool ScheduleTreeOptimizer::isMatrMultPattern(isl::schedule_node Node,
1196
                                              const Dependences *D,
1197
35
                                              MatMulInfoTy &MMI) {
1198
35
  auto PartialSchedule = isl::manage(
1199
35
      isl_schedule_node_band_get_partial_schedule_union_map(Node.get()));
1200
35
  Node = Node.child(0);
1201
35
  auto LeafType = isl_schedule_node_get_type(Node.get());
1202
35
  Node = Node.parent();
1203
35
  if (LeafType != isl_schedule_node_leaf ||
1204
34
      isl_schedule_node_band_n_member(Node.get()) < 3 ||
1205
15
      Node.get_schedule_depth() != 0 ||
1206
15
      isl_union_map_n_map(PartialSchedule.get()) != 1)
1207
20
    return false;
1208
35
  auto NewPartialSchedule = isl::map::from_union_map(PartialSchedule);
1209
15
  if (containsMatrMult(NewPartialSchedule, D, MMI))
1210
14
    return true;
1211
1
  return false;
1212
35
}
1213
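The kind of kernel this detection targets is, schematically, a plain triple loop of the form below (illustration only; containsMatrMult performs the actual checks on the access relations and dependences, and the band must cover all three dimensions at schedule depth zero):

static void gemmKernel(int M, int N, int K, double *C, const double *A,
                       const double *B) {
  // C(i, j) += A(i, k) * B(k, j) over a full rectangular iteration space.
  for (int i = 0; i < M; ++i)
    for (int j = 0; j < N; ++j)
      for (int k = 0; k < K; ++k)
        C[i * N + j] += A[i * K + k] * B[k * N + j];
}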
1214
__isl_give isl_schedule_node *
1215
ScheduleTreeOptimizer::optimizeBand(__isl_take isl_schedule_node *Node,
1216
492
                                    void *User) {
1217
492
  if (!isTileableBandNode(isl::manage(isl_schedule_node_copy(Node))))
1218
448
    return Node;
1219
492
1220
492
  const OptimizerAdditionalInfoTy *OAI =
1221
44
      static_cast<const OptimizerAdditionalInfoTy *>(User);
1222
44
1223
44
  MatMulInfoTy MMI;
1224
44
  if (PMBasedOpts && User &&
1225
44
      isMatrMultPattern(isl::manage(isl_schedule_node_copy(Node)), OAI->D,
1226
44
                        MMI)) {
1227
14
    DEBUG(dbgs() << "The matrix multiplication pattern was detected\n");
1228
14
    return optimizeMatMulPattern(isl::manage(Node), OAI->TTI, MMI).release();
1229
44
  }
1230
44
1231
30
  return standardBandOpts(isl::manage(Node), User).release();
1232
492
}
1233
1234
isl::schedule
1235
ScheduleTreeOptimizer::optimizeSchedule(isl::schedule Schedule,
1236
37
                                        const OptimizerAdditionalInfoTy *OAI) {
1237
37
  auto Root = Schedule.get_root();
1238
37
  Root = optimizeScheduleNode(Root, OAI);
1239
37
  return Root.get_schedule();
1240
37
}
1241
1242
isl::schedule_node ScheduleTreeOptimizer::optimizeScheduleNode(
1243
37
    isl::schedule_node Node, const OptimizerAdditionalInfoTy *OAI) {
1244
37
  Node = isl::manage(isl_schedule_node_map_descendant_bottom_up(
1245
37
      Node.release(), optimizeBand,
1246
37
      const_cast<void *>(static_cast<const void *>(OAI))));
1247
37
  return Node;
1248
37
}
1249
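The traversal above relies on the isl callback convention that optimizeBand also follows: the callback takes ownership of each node, is invoked bottom-up, and returns the node (possibly rewritten) to put in its place. A minimal, hypothetical callback that merely counts band nodes could look like this:

static __isl_give isl_schedule_node *
countBands(__isl_take isl_schedule_node *Node, void *User) {
  if (isl_schedule_node_get_type(Node) == isl_schedule_node_band)
    ++*static_cast<unsigned *>(User);
  return Node; // Leave the tree unchanged.
}

// Possible use, mirroring optimizeScheduleNode:
//   unsigned NumBands = 0;
//   Root = isl::manage(isl_schedule_node_map_descendant_bottom_up(
//       Root.release(), countBands, &NumBands));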
1250
bool ScheduleTreeOptimizer::isProfitableSchedule(Scop &S,
1251
37
                                                 isl::schedule NewSchedule) {
1252
37
  // To understand if the schedule has been optimized we check if the schedule
1253
37
  // has changed at all.
1254
37
  // TODO: We can improve this by tracking if any necessarily beneficial
1255
37
  // transformations have been performed. This can e.g. be tiling, loop
1256
37
  // interchange, or ...) We can track this either at the place where the
1257
37
  // transformation has been performed or, in case of automatic ILP based
1258
37
  // optimizations, by comparing (yet to be defined) performance metrics
1259
37
  // before/after the scheduling optimizer
1260
37
  // (e.g., #stride-one accesses)
1261
37
  if (S.containsExtensionNode(NewSchedule.get()))
1262
12
    return true;
1263
37
  auto NewScheduleMap = NewSchedule.get_map();
1264
25
  auto OldSchedule = S.getSchedule();
1265
25
  assert(OldSchedule && "Only IslScheduleOptimizer can insert extension nodes "
1266
25
                        "that make Scop::getSchedule() return nullptr.");
1267
25
  bool changed = !OldSchedule.is_equal(NewScheduleMap);
1268
37
  return changed;
1269
37
}
1270
1271
namespace {
1272
class IslScheduleOptimizer : public ScopPass {
1273
public:
1274
  static char ID;
1275
39
  explicit IslScheduleOptimizer() : ScopPass(ID) { LastSchedule = nullptr; }
1276
1277
39
  ~IslScheduleOptimizer() { isl_schedule_free(LastSchedule); }
1278
1279
  /// Optimize the schedule of the SCoP @p S.
1280
  bool runOnScop(Scop &S) override;
1281
1282
  /// Print the new schedule for the SCoP @p S.
1283
  void printScop(raw_ostream &OS, Scop &S) const override;
1284
1285
  /// Register all analyses and transformation required.
1286
  void getAnalysisUsage(AnalysisUsage &AU) const override;
1287
1288
  /// Release the internal memory.
1289
171
  void releaseMemory() override {
1290
171
    isl_schedule_free(LastSchedule);
1291
171
    LastSchedule = nullptr;
1292
171
  }
1293
1294
private:
1295
  isl_schedule *LastSchedule;
1296
};
1297
} // namespace
1298
1299
char IslScheduleOptimizer::ID = 0;
1300
1301
38
bool IslScheduleOptimizer::runOnScop(Scop &S) {
1302
38
1303
38
  // Skip SCoPs in case they're already optimised by PPCGCodeGeneration
1304
38
  if (S.isToBeSkipped())
1305
0
    return false;
1306
38
1307
38
  // Skip empty SCoPs but still allow code generation as it will delete the
1308
38
  // loops present but not needed.
1309
38
  if (S.getSize() == 0) {
1310
0
    S.markAsOptimized();
1311
0
    return false;
1312
38
  }
1313
38
1314
38
  const Dependences &D =
1315
38
      getAnalysis<DependenceInfo>().getDependences(Dependences::AL_Statement);
1316
38
1317
38
  if (!D.hasValidDependences())
1318
1
    return false;
1319
38
1320
38
  isl_schedule_free(LastSchedule);
1321
37
  LastSchedule = nullptr;
1322
37
1323
37
  // Build input data.
1324
37
  int ValidityKinds =
1325
37
      Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
1326
37
  int ProximityKinds;
1327
37
1328
37
  if (OptimizeDeps == "all")
1329
37
    ProximityKinds =
1330
37
        Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
1331
0
  else if (OptimizeDeps == "raw")
1332
0
    ProximityKinds = Dependences::TYPE_RAW;
1333
0
  else {
1334
0
    errs() << "Do not know how to optimize for '" << OptimizeDeps << "'"
1335
0
           << " Falling back to optimizing all dependences.\n";
1336
0
    ProximityKinds =
1337
0
        Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
1338
37
  }
1339
37
1340
37
  isl::union_set Domain = S.getDomains();
1341
37
1342
37
  if (!Domain)
1343
0
    return false;
1344
37
1345
37
  isl::union_map Validity = give(D.getDependences(ValidityKinds));
1346
37
  isl::union_map Proximity = give(D.getDependences(ProximityKinds));
1347
37
1348
37
  // Simplify the dependences by removing the constraints introduced by the
1349
37
  // domains. This can speed up the scheduling time significantly, as large
1350
37
  // constant coefficients will be removed from the dependences. The
1351
37
  // introduction of some additional dependences reduces the possible
1352
37
  // transformations, but in most cases, such transformation do not seem to be
1353
37
  // interesting anyway. In some cases this option may stop the scheduler to
1354
37
  // find any schedule.
1355
37
  if (SimplifyDeps == "yes") {
1356
37
    Validity = Validity.gist_domain(Domain);
1357
37
    Validity = Validity.gist_range(Domain);
1358
37
    Proximity = Proximity.gist_domain(Domain);
1359
37
    Proximity = Proximity.gist_range(Domain);
1360
37
  } else if (SimplifyDeps != "no") {
1361
0
    errs() << "warning: Option -polly-opt-simplify-deps should either be 'yes' "
1362
0
              "or 'no'. Falling back to default: 'yes'\n";
1363
37
  }
1364
37
1365
37
  DEBUG(dbgs() << "\n\nCompute schedule from: ");
1366
37
  DEBUG(dbgs() << "Domain := " << Domain << ";\n");
1367
37
  DEBUG(dbgs() << "Proximity := " << Proximity << ";\n");
1368
37
  DEBUG(dbgs() << "Validity := " << Validity << ";\n");
1369
37
1370
37
  unsigned IslSerializeSCCs;
1371
37
1372
37
  if (FusionStrategy == "max") {
1373
2
    IslSerializeSCCs = 0;
1374
37
  } else if (FusionStrategy == "min") {
1375
35
    IslSerializeSCCs = 1;
1376
35
  } else {
1377
0
    errs() << "warning: Unknown fusion strategy. Falling back to maximal "
1378
0
              "fusion.\n";
1379
0
    IslSerializeSCCs = 0;
1380
37
  }
1381
37
1382
37
  int IslMaximizeBands;
1383
37
1384
37
  if (MaximizeBandDepth == "yes") {
1385
37
    IslMaximizeBands = 1;
1386
37
  } else if (MaximizeBandDepth == "no") {
1387
0
    IslMaximizeBands = 0;
1388
0
  } else {
1389
0
    errs() << "warning: Option -polly-opt-maximize-bands should either be 'yes'"
1390
0
              " or 'no'. Falling back to default: 'yes'\n";
1391
0
    IslMaximizeBands = 1;
1392
37
  }
1393
37
1394
37
  int IslOuterCoincidence;
1395
37
1396
37
  if (OuterCoincidence == "yes") {
1397
1
    IslOuterCoincidence = 1;
1398
37
  } else if (OuterCoincidence == "no") {
1399
36
    IslOuterCoincidence = 0;
1400
36
  } else {
1401
0
    errs() << "warning: Option -polly-opt-outer-coincidence should either be "
1402
0
              "'yes' or 'no'. Falling back to default: 'no'\n";
1403
0
    IslOuterCoincidence = 0;
1404
37
  }
1405
37
1406
37
  isl_ctx *Ctx = S.getIslCtx();
1407
37
1408
37
  isl_options_set_schedule_outer_coincidence(Ctx, IslOuterCoincidence);
1409
37
  isl_options_set_schedule_serialize_sccs(Ctx, IslSerializeSCCs);
1410
37
  isl_options_set_schedule_maximize_band_depth(Ctx, IslMaximizeBands);
1411
37
  isl_options_set_schedule_max_constant_term(Ctx, MaxConstantTerm);
1412
37
  isl_options_set_schedule_max_coefficient(Ctx, MaxCoefficient);
1413
37
  isl_options_set_tile_scale_tile_loops(Ctx, 0);
1414
37
1415
37
  auto OnErrorStatus = isl_options_get_on_error(Ctx);
1416
37
  isl_options_set_on_error(Ctx, ISL_ON_ERROR_CONTINUE);
1417
37
1418
37
  auto SC = isl::schedule_constraints::on_domain(Domain);
1419
37
  SC = SC.set_proximity(Proximity);
1420
37
  SC = SC.set_validity(Validity);
1421
37
  SC = SC.set_coincidence(Validity);
1422
37
  auto Schedule = SC.compute_schedule();
1423
37
  isl_options_set_on_error(Ctx, OnErrorStatus);
1424
37
1425
37
  // In cases the scheduler is not able to optimize the code, we just do not
1426
37
  // touch the schedule.
1427
37
  if (!Schedule)
1428
0
    return false;
1429
37
1430
37
  DEBUG({
1431
37
    auto *P = isl_printer_to_str(Ctx);
1432
37
    P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK);
1433
37
    P = isl_printer_print_schedule(P, Schedule.get());
1434
37
    auto *str = isl_printer_get_str(P);
1435
37
    dbgs() << "NewScheduleTree: \n" << str << "\n";
1436
37
    free(str);
1437
37
    isl_printer_free(P);
1438
37
  });
1439
37
1440
37
  Function &F = S.getFunction();
1441
37
  auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1442
37
  const OptimizerAdditionalInfoTy OAI = {TTI, const_cast<Dependences *>(&D)};
1443
37
  auto NewSchedule = ScheduleTreeOptimizer::optimizeSchedule(Schedule, &OAI);
1444
37
1445
37
  if (!ScheduleTreeOptimizer::isProfitableSchedule(S, NewSchedule))
1446
4
    return false;
1447
37
1448
37
  S.setScheduleTree(NewSchedule.release());
1449
33
  S.markAsOptimized();
1450
33
1451
33
  if (OptimizedScops)
1452
1
    errs() << S;
1453
33
1454
37
  return false;
1455
38
}
1456
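Stripped of the pass plumbing, the scheduling step above reduces to building isl schedule constraints from a domain and dependence relations and asking isl for a schedule. The toy example below shows that core, assuming the same isl C++ bindings as this file (including string constructors for isl::union_set and isl::union_map); the function and the toy SCoP are hypothetical, real inputs come from ScopInfo and DependenceInfo, and the isl_ctx options set above still influence the result:

static isl::schedule computeToySchedule(isl::ctx Ctx) {
  // Two statements of 100 iterations each; S[i] must execute before T[i].
  isl::union_set Domain(Ctx, "{ S[i] : 0 <= i < 100; T[i] : 0 <= i < 100 }");
  isl::union_map Validity(Ctx, "{ S[i] -> T[i] }");
  isl::schedule_constraints SC = isl::schedule_constraints::on_domain(Domain);
  SC = SC.set_validity(Validity);
  SC = SC.set_proximity(Validity);
  SC = SC.set_coincidence(Validity);
  return SC.compute_schedule();
}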
1457
29
void IslScheduleOptimizer::printScop(raw_ostream &OS, Scop &) const {
1458
29
  isl_printer *p;
1459
29
  char *ScheduleStr;
1460
29
1461
29
  OS << "Calculated schedule:\n";
1462
29
1463
29
  if (!LastSchedule) {
1464
29
    OS << "n/a\n";
1465
29
    return;
1466
29
  }
1467
29
1468
29
  p = isl_printer_to_str(isl_schedule_get_ctx(LastSchedule));
1469
0
  p = isl_printer_print_schedule(p, LastSchedule);
1470
0
  ScheduleStr = isl_printer_get_str(p);
1471
0
  isl_printer_free(p);
1472
0
1473
0
  OS << ScheduleStr << "\n";
1474
29
}
1475
1476
39
void IslScheduleOptimizer::getAnalysisUsage(AnalysisUsage &AU) const {
1477
39
  ScopPass::getAnalysisUsage(AU);
1478
39
  AU.addRequired<DependenceInfo>();
1479
39
  AU.addRequired<TargetTransformInfoWrapperPass>();
1480
39
}
1481
1482
0
Pass *polly::createIslScheduleOptimizerPass() {
1483
0
  return new IslScheduleOptimizer();
1484
0
}
1485
1486
41.9k
INITIALIZE_PASS_BEGIN(IslScheduleOptimizer, "polly-opt-isl",
1487
41.9k
                      "Polly - Optimize schedule of SCoP", false, false);
1488
41.9k
INITIALIZE_PASS_DEPENDENCY(DependenceInfo);
1489
41.9k
INITIALIZE_PASS_DEPENDENCY(ScopInfoRegionPass);
1490
41.9k
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass);
1491
41.9k
INITIALIZE_PASS_END(IslScheduleOptimizer, "polly-opt-isl",
1492
                    "Polly - Optimize schedule of SCoP", false, false)