Coverage Report

Created: 2017-11-21 16:49

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/tools/polly/lib/Transform/ScheduleOptimizer.cpp
Line
Count
Source (jump to first uncovered line)
1
//===- Schedule.cpp - Calculate an optimized schedule ---------------------===//
2
//
3
//                     The LLVM Compiler Infrastructure
4
//
5
// This file is distributed under the University of Illinois Open Source
6
// License. See LICENSE.TXT for details.
7
//
8
//===----------------------------------------------------------------------===//
9
//
10
// This pass generates an entirely new schedule tree from the data dependences
11
// and iteration domains. The new schedule tree is computed in two steps:
12
//
13
// 1) The isl scheduling optimizer is run
14
//
15
// The isl scheduling optimizer creates a new schedule tree that maximizes
16
// parallelism and tileability and minimizes data-dependence distances. The
17
// algorithm used is a modified version of the ``Pluto'' algorithm:
18
//
19
//   U. Bondhugula, A. Hartono, J. Ramanujam, and P. Sadayappan.
20
//   A Practical Automatic Polyhedral Parallelizer and Locality Optimizer.
21
//   In Proceedings of the 2008 ACM SIGPLAN Conference On Programming Language
22
//   Design and Implementation, PLDI ’08, pages 101–113. ACM, 2008.
23
//
24
// 2) A set of post-scheduling transformations is applied on the schedule tree.
25
//
26
// These optimizations include:
27
//
28
//  - Tiling of the innermost tilable bands
29
//  - Prevectorization - The choice of a possible outer loop that is strip-mined
30
//                       to the innermost level to enable inner-loop
31
//                       vectorization.
32
//  - Some optimizations for spatial locality are also planned.
33
//
34
// For a detailed description of the schedule tree itself please see section 6
35
// of:
36
//
37
// Polyhedral AST generation is more than scanning polyhedra
38
// Tobias Grosser, Sven Verdoolaege, Albert Cohen
39
// ACM Transactions on Programming Languages and Systems (TOPLAS),
40
// 37(4), July 2015
41
// http://www.grosser.es/#pub-polyhedral-AST-generation
42
//
43
// This publication also contains a detailed discussion of the different options
44
// for polyhedral loop unrolling, full/partial tile separation and other uses
45
// of the schedule tree.
46
//
47
//===----------------------------------------------------------------------===//
48
49
#include "polly/ScheduleOptimizer.h"
50
#include "polly/CodeGen/CodeGeneration.h"
51
#include "polly/DependenceInfo.h"
52
#include "polly/LinkAllPasses.h"
53
#include "polly/Options.h"
54
#include "polly/ScopInfo.h"
55
#include "polly/ScopPass.h"
56
#include "polly/Simplify.h"
57
#include "polly/Support/GICHelper.h"
58
#include "polly/Support/ISLOStream.h"
59
#include "llvm/ADT/Statistic.h"
60
#include "llvm/Analysis/TargetTransformInfo.h"
61
#include "llvm/IR/Function.h"
62
#include "llvm/Pass.h"
63
#include "llvm/Support/CommandLine.h"
64
#include "llvm/Support/Debug.h"
65
#include "llvm/Support/raw_ostream.h"
66
#include "isl/constraint.h"
67
#include "isl/ctx.h"
68
#include "isl/map.h"
69
#include "isl/options.h"
70
#include "isl/printer.h"
71
#include "isl/schedule.h"
72
#include "isl/schedule_node.h"
73
#include "isl/space.h"
74
#include "isl/union_map.h"
75
#include "isl/union_set.h"
76
#include <algorithm>
77
#include <cassert>
78
#include <cmath>
79
#include <cstdint>
80
#include <cstdlib>
81
#include <string>
82
#include <vector>
83
84
using namespace llvm;
85
using namespace polly;
86
87
#define DEBUG_TYPE "polly-opt-isl"
88
89
static cl::opt<std::string>
90
    OptimizeDeps("polly-opt-optimize-only",
91
                 cl::desc("Only a certain kind of dependences (all/raw)"),
92
                 cl::Hidden, cl::init("all"), cl::ZeroOrMore,
93
                 cl::cat(PollyCategory));
94
95
static cl::opt<std::string>
96
    SimplifyDeps("polly-opt-simplify-deps",
97
                 cl::desc("Dependences should be simplified (yes/no)"),
98
                 cl::Hidden, cl::init("yes"), cl::ZeroOrMore,
99
                 cl::cat(PollyCategory));
100
101
static cl::opt<int> MaxConstantTerm(
102
    "polly-opt-max-constant-term",
103
    cl::desc("The maximal constant term allowed (-1 is unlimited)"), cl::Hidden,
104
    cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory));
105
106
static cl::opt<int> MaxCoefficient(
107
    "polly-opt-max-coefficient",
108
    cl::desc("The maximal coefficient allowed (-1 is unlimited)"), cl::Hidden,
109
    cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory));
110
111
static cl::opt<std::string> FusionStrategy(
112
    "polly-opt-fusion", cl::desc("The fusion strategy to choose (min/max)"),
113
    cl::Hidden, cl::init("min"), cl::ZeroOrMore, cl::cat(PollyCategory));
114
115
static cl::opt<std::string>
116
    MaximizeBandDepth("polly-opt-maximize-bands",
117
                      cl::desc("Maximize the band depth (yes/no)"), cl::Hidden,
118
                      cl::init("yes"), cl::ZeroOrMore, cl::cat(PollyCategory));
119
120
static cl::opt<std::string> OuterCoincidence(
121
    "polly-opt-outer-coincidence",
122
    cl::desc("Try to construct schedules where the outer member of each band "
123
             "satisfies the coincidence constraints (yes/no)"),
124
    cl::Hidden, cl::init("no"), cl::ZeroOrMore, cl::cat(PollyCategory));
125
126
static cl::opt<int> PrevectorWidth(
127
    "polly-prevect-width",
128
    cl::desc(
129
        "The number of loop iterations to strip-mine for pre-vectorization"),
130
    cl::Hidden, cl::init(4), cl::ZeroOrMore, cl::cat(PollyCategory));
131
132
static cl::opt<bool> FirstLevelTiling("polly-tiling",
133
                                      cl::desc("Enable loop tiling"),
134
                                      cl::init(true), cl::ZeroOrMore,
135
                                      cl::cat(PollyCategory));
136
137
static cl::opt<int> LatencyVectorFma(
138
    "polly-target-latency-vector-fma",
139
    cl::desc("The minimal number of cycles between issuing two "
140
             "dependent consecutive vector fused multiply-add "
141
             "instructions."),
142
    cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));
143
144
static cl::opt<int> ThroughputVectorFma(
145
    "polly-target-throughput-vector-fma",
146
    cl::desc("A throughput of the processor floating-point arithmetic units "
147
             "expressed in the number of vector fused multiply-add "
148
             "instructions per clock cycle."),
149
    cl::Hidden, cl::init(1), cl::ZeroOrMore, cl::cat(PollyCategory));
150
151
// This option, along with --polly-target-2nd-cache-level-associativity,
152
// --polly-target-1st-cache-level-size, and --polly-target-2st-cache-level-size
153
// represent the parameters of the target cache, which do not have typical
154
// values that can be used by default. However, to apply the pattern matching
155
// optimizations, we use the values of the parameters of Intel Core i7-3820
156
// SandyBridge in case the parameters are not specified or not provided by the
157
// TargetTransformInfo.
158
static cl::opt<int> FirstCacheLevelAssociativity(
159
    "polly-target-1st-cache-level-associativity",
160
    cl::desc("The associativity of the first cache level."), cl::Hidden,
161
    cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));
162
163
static cl::opt<int> FirstCacheLevelDefaultAssociativity(
164
    "polly-target-1st-cache-level-default-associativity",
165
    cl::desc("The default associativity of the first cache level"
166
             " (if not enough were provided by the TargetTransformInfo)."),
167
    cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));
168
169
static cl::opt<int> SecondCacheLevelAssociativity(
170
    "polly-target-2nd-cache-level-associativity",
171
    cl::desc("The associativity of the second cache level."), cl::Hidden,
172
    cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));
173
174
static cl::opt<int> SecondCacheLevelDefaultAssociativity(
175
    "polly-target-2nd-cache-level-default-associativity",
176
    cl::desc("The default associativity of the second cache level"
177
             " (if not enough were provided by the TargetTransformInfo)."),
178
    cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));
179
180
static cl::opt<int> FirstCacheLevelSize(
181
    "polly-target-1st-cache-level-size",
182
    cl::desc("The size of the first cache level specified in bytes."),
183
    cl::Hidden, cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));
184
185
static cl::opt<int> FirstCacheLevelDefaultSize(
186
    "polly-target-1st-cache-level-default-size",
187
    cl::desc("The default size of the first cache level specified in bytes"
188
             " (if not enough were provided by the TargetTransformInfo)."),
189
    cl::Hidden, cl::init(32768), cl::ZeroOrMore, cl::cat(PollyCategory));
190
191
static cl::opt<int> SecondCacheLevelSize(
192
    "polly-target-2nd-cache-level-size",
193
    cl::desc("The size of the second level specified in bytes."), cl::Hidden,
194
    cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));
195
196
static cl::opt<int> SecondCacheLevelDefaultSize(
197
    "polly-target-2nd-cache-level-default-size",
198
    cl::desc("The default size of the second cache level specified in bytes"
199
             " (if not enough were provided by the TargetTransformInfo)."),
200
    cl::Hidden, cl::init(262144), cl::ZeroOrMore, cl::cat(PollyCategory));
201
202
static cl::opt<int> VectorRegisterBitwidth(
203
    "polly-target-vector-register-bitwidth",
204
    cl::desc("The size in bits of a vector register (if not set, this "
205
             "information is taken from LLVM's target information."),
206
    cl::Hidden, cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));
207
208
static cl::opt<int> FirstLevelDefaultTileSize(
209
    "polly-default-tile-size",
210
    cl::desc("The default tile size (if not enough were provided by"
211
             " --polly-tile-sizes)"),
212
    cl::Hidden, cl::init(32), cl::ZeroOrMore, cl::cat(PollyCategory));
213
214
static cl::list<int>
215
    FirstLevelTileSizes("polly-tile-sizes",
216
                        cl::desc("A tile size for each loop dimension, filled "
217
                                 "with --polly-default-tile-size"),
218
                        cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
219
                        cl::cat(PollyCategory));
220
221
static cl::opt<bool>
222
    SecondLevelTiling("polly-2nd-level-tiling",
223
                      cl::desc("Enable a 2nd level loop of loop tiling"),
224
                      cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
225
226
static cl::opt<int> SecondLevelDefaultTileSize(
227
    "polly-2nd-level-default-tile-size",
228
    cl::desc("The default 2nd-level tile size (if not enough were provided by"
229
             " --polly-2nd-level-tile-sizes)"),
230
    cl::Hidden, cl::init(16), cl::ZeroOrMore, cl::cat(PollyCategory));
231
232
static cl::list<int>
233
    SecondLevelTileSizes("polly-2nd-level-tile-sizes",
234
                         cl::desc("A tile size for each loop dimension, filled "
235
                                  "with --polly-default-tile-size"),
236
                         cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
237
                         cl::cat(PollyCategory));
238
239
static cl::opt<bool> RegisterTiling("polly-register-tiling",
240
                                    cl::desc("Enable register tiling"),
241
                                    cl::init(false), cl::ZeroOrMore,
242
                                    cl::cat(PollyCategory));
243
244
static cl::opt<int> RegisterDefaultTileSize(
245
    "polly-register-tiling-default-tile-size",
246
    cl::desc("The default register tile size (if not enough were provided by"
247
             " --polly-register-tile-sizes)"),
248
    cl::Hidden, cl::init(2), cl::ZeroOrMore, cl::cat(PollyCategory));
249
250
static cl::opt<int> PollyPatternMatchingNcQuotient(
251
    "polly-pattern-matching-nc-quotient",
252
    cl::desc("Quotient that is obtained by dividing Nc, the parameter of the"
253
             "macro-kernel, by Nr, the parameter of the micro-kernel"),
254
    cl::Hidden, cl::init(256), cl::ZeroOrMore, cl::cat(PollyCategory));
255
256
static cl::list<int>
257
    RegisterTileSizes("polly-register-tile-sizes",
258
                      cl::desc("A tile size for each loop dimension, filled "
259
                               "with --polly-register-tile-size"),
260
                      cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
261
                      cl::cat(PollyCategory));
262
263
static cl::opt<bool>
264
    PMBasedOpts("polly-pattern-matching-based-opts",
265
                cl::desc("Perform optimizations based on pattern matching"),
266
                cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory));
267
268
static cl::opt<bool> OptimizedScops(
269
    "polly-optimized-scops",
270
    cl::desc("Polly - Dump polyhedral description of Scops optimized with "
271
             "the isl scheduling optimizer and the set of post-scheduling "
272
             "transformations is applied on the schedule tree"),
273
    cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
274
275
STATISTIC(ScopsProcessed, "Number of scops processed");
276
STATISTIC(ScopsRescheduled, "Number of scops rescheduled");
277
STATISTIC(ScopsOptimized, "Number of scops optimized");
278
279
STATISTIC(NumAffineLoopsOptimized, "Number of affine loops optimized");
280
STATISTIC(NumBoxedLoopsOptimized, "Number of boxed loops optimized");
281
282
#define THREE_STATISTICS(VARNAME, DESC)                                        \
283
  static Statistic VARNAME[3] = {                                              \
284
      {DEBUG_TYPE, #VARNAME "0", DESC " (original)", {0}, false},              \
285
      {DEBUG_TYPE, #VARNAME "1", DESC " (after scheduler)", {0}, false},       \
286
      {DEBUG_TYPE, #VARNAME "2", DESC " (after optimizer)", {0}, false}}
287
288
THREE_STATISTICS(NumBands, "Number of bands");
289
THREE_STATISTICS(NumBandMembers, "Number of band members");
290
THREE_STATISTICS(NumCoincident, "Number of coincident band members");
291
THREE_STATISTICS(NumPermutable, "Number of permutable bands");
292
THREE_STATISTICS(NumFilters, "Number of filter nodes");
293
THREE_STATISTICS(NumExtension, "Number of extension nodes");
294
295
STATISTIC(FirstLevelTileOpts, "Number of first level tiling applied");
296
STATISTIC(SecondLevelTileOpts, "Number of second level tiling applied");
297
STATISTIC(RegisterTileOpts, "Number of register tiling applied");
298
STATISTIC(PrevectOpts, "Number of strip-mining for prevectorization applied");
299
STATISTIC(MatMulOpts,
300
          "Number of matrix multiplication patterns detected and optimized");
301
302
/// Create an isl::union_set, which describes the isolate option based on
303
/// IsolateDomain.
304
///
305
/// @param IsolateDomain An isl::set whose @p OutDimsNum last dimensions should
306
///                      belong to the current band node.
307
/// @param OutDimsNum    A number of dimensions that should belong to
308
///                      the current band node.
309
static isl::union_set getIsolateOptions(isl::set IsolateDomain,
310
15
                                        unsigned OutDimsNum) {
311
15
  unsigned Dims = IsolateDomain.dim(isl::dim::set);
312
15
  assert(OutDimsNum <= Dims &&
313
15
         "The isl::set IsolateDomain is used to describe the range of schedule "
314
15
         "dimensions values, which should be isolated. Consequently, the "
315
15
         "number of its dimensions should be greater than or equal to the "
316
15
         "number of the schedule dimensions.");
317
15
  isl::map IsolateRelation = isl::map::from_domain(IsolateDomain);
318
15
  IsolateRelation = IsolateRelation.move_dims(isl::dim::out, 0, isl::dim::in,
319
15
                                              Dims - OutDimsNum, OutDimsNum);
320
15
  isl::set IsolateOption = IsolateRelation.wrap();
321
15
  isl::id Id = isl::id::alloc(IsolateOption.get_ctx(), "isolate", nullptr);
322
15
  IsolateOption = IsolateOption.set_tuple_id(Id);
323
15
  return isl::union_set(IsolateOption);
324
15
}
325
326
namespace {
327
/// Create an isl::union_set, which describes the specified option for the
328
/// dimension of the current node.
329
///
330
/// @param Ctx    An isl::ctx, which is used to create the isl::union_set.
331
/// @param Option The name of the option.
332
15
isl::union_set getDimOptions(isl::ctx Ctx, const char *Option) {
333
15
  isl::space Space(Ctx, 0, 1);
334
15
  auto DimOption = isl::set::universe(Space);
335
15
  auto Id = isl::id::alloc(Ctx, Option, nullptr);
336
15
  DimOption = DimOption.set_tuple_id(Id);
337
15
  return isl::union_set(DimOption);
338
15
}
339
} // namespace
340
341
/// Create an isl::union_set, which describes the option of the form
342
/// [isolate[] -> unroll[x]].
343
///
344
/// @param Ctx An isl::ctx, which is used to create the isl::union_set.
345
6
static isl::union_set getUnrollIsolatedSetOptions(isl::ctx Ctx) {
346
6
  isl::space Space = isl::space(Ctx, 0, 0, 1);
347
6
  isl::map UnrollIsolatedSetOption = isl::map::universe(Space);
348
6
  isl::id DimInId = isl::id::alloc(Ctx, "isolate", nullptr);
349
6
  isl::id DimOutId = isl::id::alloc(Ctx, "unroll", nullptr);
350
6
  UnrollIsolatedSetOption =
351
6
      UnrollIsolatedSetOption.set_tuple_id(isl::dim::in, DimInId);
352
6
  UnrollIsolatedSetOption =
353
6
      UnrollIsolatedSetOption.set_tuple_id(isl::dim::out, DimOutId);
354
6
  return UnrollIsolatedSetOption.wrap();
355
6
}
356
357
/// Make the last dimension of Set to take values from 0 to VectorWidth - 1.
358
///
359
/// @param Set         A set, which should be modified.
360
/// @param VectorWidth A parameter, which determines the constraint.
361
18
static isl::set addExtentConstraints(isl::set Set, int VectorWidth) {
362
18
  unsigned Dims = Set.dim(isl::dim::set);
363
18
  isl::space Space = Set.get_space();
364
18
  isl::local_space LocalSpace = isl::local_space(Space);
365
18
  isl::constraint ExtConstr = isl::constraint::alloc_inequality(LocalSpace);
366
18
  ExtConstr = ExtConstr.set_constant_si(0);
367
18
  ExtConstr = ExtConstr.set_coefficient_si(isl::dim::set, Dims - 1, 1);
368
18
  Set = Set.add_constraint(ExtConstr);
369
18
  ExtConstr = isl::constraint::alloc_inequality(LocalSpace);
370
18
  ExtConstr = ExtConstr.set_constant_si(VectorWidth - 1);
371
18
  ExtConstr = ExtConstr.set_coefficient_si(isl::dim::set, Dims - 1, -1);
372
18
  return Set.add_constraint(ExtConstr);
373
18
}
374
375
18
isl::set getPartialTilePrefixes(isl::set ScheduleRange, int VectorWidth) {
376
18
  unsigned Dims = ScheduleRange.dim(isl::dim::set);
377
18
  isl::set LoopPrefixes =
378
18
      ScheduleRange.drop_constraints_involving_dims(isl::dim::set, Dims - 1, 1);
379
18
  auto ExtentPrefixes = addExtentConstraints(LoopPrefixes, VectorWidth);
380
18
  isl::set BadPrefixes = ExtentPrefixes.subtract(ScheduleRange);
381
18
  BadPrefixes = BadPrefixes.project_out(isl::dim::set, Dims - 1, 1);
382
18
  LoopPrefixes = LoopPrefixes.project_out(isl::dim::set, Dims - 1, 1);
383
18
  return LoopPrefixes.subtract(BadPrefixes);
384
18
}
385
386
isl::schedule_node
387
ScheduleTreeOptimizer::isolateFullPartialTiles(isl::schedule_node Node,
388
3
                                               int VectorWidth) {
389
3
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
390
3
  Node = Node.child(0).child(0);
391
3
  isl::union_map SchedRelUMap = Node.get_prefix_schedule_relation();
392
3
  isl::map ScheduleRelation = isl::map::from_union_map(SchedRelUMap);
393
3
  isl::set ScheduleRange = ScheduleRelation.range();
394
3
  isl::set IsolateDomain = getPartialTilePrefixes(ScheduleRange, VectorWidth);
395
3
  auto AtomicOption = getDimOptions(IsolateDomain.get_ctx(), "atomic");
396
3
  isl::union_set IsolateOption = getIsolateOptions(IsolateDomain, 1);
397
3
  Node = Node.parent().parent();
398
3
  isl::union_set Options = IsolateOption.unite(AtomicOption);
399
3
  Node = Node.band_set_ast_build_options(Options);
400
3
  return Node;
401
3
}
402
403
isl::schedule_node ScheduleTreeOptimizer::prevectSchedBand(
404
3
    isl::schedule_node Node, unsigned DimToVectorize, int VectorWidth) {
405
3
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
406
3
407
3
  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
408
3
  auto ScheduleDimensions = Space.dim(isl::dim::set);
409
3
  assert(DimToVectorize < ScheduleDimensions);
410
3
411
3
  if (DimToVectorize > 0) {
412
2
    Node = isl::manage(
413
2
        isl_schedule_node_band_split(Node.release(), DimToVectorize));
414
2
    Node = Node.child(0);
415
2
  }
416
3
  if (DimToVectorize < ScheduleDimensions - 1)
417
2
    Node = isl::manage(isl_schedule_node_band_split(Node.release(), 1));
418
3
  Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
419
3
  auto Sizes = isl::multi_val::zero(Space);
420
3
  Sizes = Sizes.set_val(0, isl::val(Node.get_ctx(), VectorWidth));
421
3
  Node =
422
3
      isl::manage(isl_schedule_node_band_tile(Node.release(), Sizes.release()));
423
3
  Node = isolateFullPartialTiles(Node, VectorWidth);
424
3
  Node = Node.child(0);
425
3
  // Make sure the "trivially vectorizable loop" is not unrolled. Otherwise,
426
3
  // we will have troubles to match it in the backend.
427
3
  Node = Node.band_set_ast_build_options(
428
3
      isl::union_set(Node.get_ctx(), "{ unroll[x]: 1 = 0 }"));
429
3
  Node = isl::manage(isl_schedule_node_band_sink(Node.release()));
430
3
  Node = Node.child(0);
431
3
  if (isl_schedule_node_get_type(Node.get()) == isl_schedule_node_leaf)
432
1
    Node = Node.parent();
433
3
  auto LoopMarker = isl::id::alloc(Node.get_ctx(), "SIMD", nullptr);
434
3
  PrevectOpts++;
435
3
  return Node.insert_mark(LoopMarker);
436
3
}
437
438
isl::schedule_node ScheduleTreeOptimizer::tileNode(isl::schedule_node Node,
439
                                                   const char *Identifier,
440
                                                   ArrayRef<int> TileSizes,
441
24
                                                   int DefaultTileSize) {
442
24
  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
443
24
  auto Dims = Space.dim(isl::dim::set);
444
24
  auto Sizes = isl::multi_val::zero(Space);
445
24
  std::string IdentifierString(Identifier);
446
86
  for (unsigned i = 0; i < Dims; 
i++62
) {
447
62
    auto tileSize = i < TileSizes.size() ? 
TileSizes[i]38
:
DefaultTileSize24
;
448
62
    Sizes = Sizes.set_val(i, isl::val(Node.get_ctx(), tileSize));
449
62
  }
450
24
  auto TileLoopMarkerStr = IdentifierString + " - Tiles";
451
24
  auto TileLoopMarker =
452
24
      isl::id::alloc(Node.get_ctx(), TileLoopMarkerStr, nullptr);
453
24
  Node = Node.insert_mark(TileLoopMarker);
454
24
  Node = Node.child(0);
455
24
  Node =
456
24
      isl::manage(isl_schedule_node_band_tile(Node.release(), Sizes.release()));
457
24
  Node = Node.child(0);
458
24
  auto PointLoopMarkerStr = IdentifierString + " - Points";
459
24
  auto PointLoopMarker =
460
24
      isl::id::alloc(Node.get_ctx(), PointLoopMarkerStr, nullptr);
461
24
  Node = Node.insert_mark(PointLoopMarker);
462
24
  return Node.child(0);
463
24
}
464
465
isl::schedule_node ScheduleTreeOptimizer::applyRegisterTiling(
466
8
    isl::schedule_node Node, ArrayRef<int> TileSizes, int DefaultTileSize) {
467
8
  Node = tileNode(Node, "Register tiling", TileSizes, DefaultTileSize);
468
8
  auto Ctx = Node.get_ctx();
469
8
  return Node.band_set_ast_build_options(isl::union_set(Ctx, "{unroll[x]}"));
470
8
}
471
472
27
static bool isSimpleInnermostBand(const isl::schedule_node &Node) {
473
27
  assert(isl_schedule_node_get_type(Node.keep()) == isl_schedule_node_band);
474
27
  assert(isl_schedule_node_n_children(Node.keep()) == 1);
475
27
476
27
  auto ChildType = isl_schedule_node_get_type(Node.child(0).keep());
477
27
478
27
  if (ChildType == isl_schedule_node_leaf)
479
16
    return true;
480
11
481
11
  if (ChildType != isl_schedule_node_sequence)
482
11
    return false;
483
0
484
0
  auto Sequence = Node.child(0);
485
0
486
0
  for (int c = 0, nc = isl_schedule_node_n_children(Sequence.keep()); c < nc;
487
0
       ++c) {
488
0
    auto Child = Sequence.child(c);
489
0
    if (isl_schedule_node_get_type(Child.keep()) != isl_schedule_node_filter)
490
0
      return false;
491
0
    if (isl_schedule_node_get_type(Child.child(0).keep()) !=
492
0
        isl_schedule_node_leaf)
493
0
      return false;
494
0
  }
495
0
  return true;
496
27
}
497
498
199
bool ScheduleTreeOptimizer::isTileableBandNode(isl::schedule_node Node) {
499
199
  if (isl_schedule_node_get_type(Node.get()) != isl_schedule_node_band)
500
145
    return false;
501
54
502
54
  if (isl_schedule_node_n_children(Node.get()) != 1)
503
0
    return false;
504
54
505
54
  if (!isl_schedule_node_band_get_permutable(Node.get()))
506
19
    return false;
507
35
508
35
  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
509
35
  auto Dims = Space.dim(isl::dim::set);
510
35
511
35
  if (Dims <= 1)
512
8
    return false;
513
27
514
27
  return isSimpleInnermostBand(Node);
515
27
}
516
517
__isl_give isl::schedule_node
518
9
ScheduleTreeOptimizer::standardBandOpts(isl::schedule_node Node, void *User) {
519
9
  if (FirstLevelTiling) {
520
9
    Node = tileNode(Node, "1st level tiling", FirstLevelTileSizes,
521
9
                    FirstLevelDefaultTileSize);
522
9
    FirstLevelTileOpts++;
523
9
  }
524
9
525
9
  if (SecondLevelTiling) {
526
1
    Node = tileNode(Node, "2nd level tiling", SecondLevelTileSizes,
527
1
                    SecondLevelDefaultTileSize);
528
1
    SecondLevelTileOpts++;
529
1
  }
530
9
531
9
  if (RegisterTiling) {
532
1
    Node =
533
1
        applyRegisterTiling(Node, RegisterTileSizes, RegisterDefaultTileSize);
534
1
    RegisterTileOpts++;
535
1
  }
536
9
537
9
  if (PollyVectorizerChoice == VECTORIZER_NONE)
538
6
    return Node;
539
3
540
3
  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
541
3
  auto Dims = Space.dim(isl::dim::set);
542
3
543
5
  for (int i = Dims - 1; i >= 0; 
i--2
)
544
5
    if (Node.band_member_get_coincident(i)) {
545
3
      Node = prevectSchedBand(Node, i, PrevectorWidth);
546
3
      break;
547
3
    }
548
9
549
9
  return Node;
550
9
}
551
552
/// Permute the two dimensions of the isl map.
553
///
554
/// Permute @p DstPos and @p SrcPos dimensions of the isl map @p Map that
555
/// have type @p DimType.
556
///
557
/// @param Map     The isl map to be modified.
558
/// @param DimType The type of the dimensions.
559
/// @param DstPos  The first dimension.
560
/// @param SrcPos  The second dimension.
561
/// @return        The modified map.
562
isl::map permuteDimensions(isl::map Map, isl::dim DimType, unsigned DstPos,
563
21
                           unsigned SrcPos) {
564
21
  assert(DstPos < Map.dim(DimType) && SrcPos < Map.dim(DimType));
565
21
  if (DstPos == SrcPos)
566
7
    return Map;
567
14
  isl::id DimId;
568
14
  if (Map.has_tuple_id(DimType))
569
0
    DimId = Map.get_tuple_id(DimType);
570
14
  auto FreeDim = DimType == isl::dim::in ? 
isl::dim::out0
: isl::dim::in;
571
14
  isl::id FreeDimId;
572
14
  if (Map.has_tuple_id(FreeDim))
573
14
    FreeDimId = Map.get_tuple_id(FreeDim);
574
14
  auto MaxDim = std::max(DstPos, SrcPos);
575
14
  auto MinDim = std::min(DstPos, SrcPos);
576
14
  Map = Map.move_dims(FreeDim, 0, DimType, MaxDim, 1);
577
14
  Map = Map.move_dims(FreeDim, 0, DimType, MinDim, 1);
578
14
  Map = Map.move_dims(DimType, MinDim, FreeDim, 1, 1);
579
14
  Map = Map.move_dims(DimType, MaxDim, FreeDim, 0, 1);
580
14
  if (DimId)
581
0
    Map = Map.set_tuple_id(DimType, DimId);
582
14
  if (FreeDimId)
583
14
    Map = Map.set_tuple_id(FreeDim, FreeDimId);
584
21
  return Map;
585
21
}
586
587
/// Check the form of the access relation.
588
///
589
/// Check that the access relation @p AccMap has the form M[i][j], where i
590
/// is a @p FirstPos and j is a @p SecondPos.
591
///
592
/// @param AccMap    The access relation to be checked.
593
/// @param FirstPos  The index of the input dimension that is mapped to
594
///                  the first output dimension.
595
/// @param SecondPos The index of the input dimension that is mapped to the
596
///                  second output dimension.
597
/// @return          True in case @p AccMap has the expected form and false,
598
///                  otherwise.
599
static bool isMatMulOperandAcc(isl::set Domain, isl::map AccMap, int &FirstPos,
600
52
                               int &SecondPos) {
601
52
  isl::space Space = AccMap.get_space();
602
52
  isl::map Universe = isl::map::universe(Space);
603
52
604
52
  if (Space.dim(isl::dim::out) != 2)
605
3
    return false;
606
49
607
49
  // MatMul has the form:
608
49
  // for (i = 0; i < N; i++)
609
49
  //   for (j = 0; j < M; j++)
610
49
  //     for (k = 0; k < P; k++)
611
49
  //       C[i, j] += A[i, k] * B[k, j]
612
49
  //
613
49
  // Permutation of three outer loops: 3! = 6 possibilities.
614
49
  int FirstDims[] = {0, 0, 1, 1, 2, 2};
615
49
  int SecondDims[] = {1, 2, 2, 0, 0, 1};
616
217
  for (int i = 0; i < 6; 
i += 1168
) {
617
196
    auto PossibleMatMul =
618
196
        Universe.equate(isl::dim::in, FirstDims[i], isl::dim::out, 0)
619
196
            .equate(isl::dim::in, SecondDims[i], isl::dim::out, 1);
620
196
621
196
    AccMap = AccMap.intersect_domain(Domain);
622
196
    PossibleMatMul = PossibleMatMul.intersect_domain(Domain);
623
196
624
196
    // If AccMap spans entire domain (Non-partial write),
625
196
    // compute FirstPos and SecondPos.
626
196
    // If AccMap != PossibleMatMul here (the two maps have been gisted at
627
196
    // this point), it means that the writes are not complete, or in other
628
196
    // words, it is a Partial write and Partial writes must be rejected.
629
196
    if (AccMap.is_equal(PossibleMatMul)) {
630
49
      if (FirstPos != -1 && 
FirstPos != FirstDims[i]42
)
631
14
        continue;
632
35
      FirstPos = FirstDims[i];
633
35
      if (SecondPos != -1 && 
SecondPos != SecondDims[i]28
)
634
7
        continue;
635
28
      SecondPos = SecondDims[i];
636
28
      return true;
637
28
    }
638
196
  }
639
49
640
49
  
return false21
;
641
52
}
642
643
/// Does the memory access represent a non-scalar operand of the matrix
644
/// multiplication.
645
///
646
/// Check that the memory access @p MemAccess is the read access to a non-scalar
647
/// operand of the matrix multiplication or its result.
648
///
649
/// @param MemAccess The memory access to be checked.
650
/// @param MMI       Parameters of the matrix multiplication operands.
651
/// @return          True in case the memory access represents the read access
652
///                  to a non-scalar operand of the matrix multiplication and
653
///                  false, otherwise.
654
static bool isMatMulNonScalarReadAccess(MemoryAccess *MemAccess,
655
22
                                        MatMulInfoTy &MMI) {
656
22
  if (!MemAccess->isLatestArrayKind() || !MemAccess->isRead())
657
0
    return false;
658
22
  auto AccMap = MemAccess->getLatestAccessRelation();
659
22
  isl::set StmtDomain = MemAccess->getStatement()->getDomain();
660
22
  if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.i, MMI.j) && 
!MMI.ReadFromC7
) {
661
7
    MMI.ReadFromC = MemAccess;
662
7
    return true;
663
7
  }
664
15
  if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.i, MMI.k) && 
!MMI.A7
) {
665
7
    MMI.A = MemAccess;
666
7
    return true;
667
7
  }
668
8
  if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.k, MMI.j) && 
!MMI.B7
) {
669
7
    MMI.B = MemAccess;
670
7
    return true;
671
7
  }
672
1
  return false;
673
1
}
674
675
/// Check accesses to operands of the matrix multiplication.
676
///
677
/// Check that accesses of the SCoP statement, which corresponds to
678
/// the partial schedule @p PartialSchedule, are scalar in terms of loops
679
/// containing the matrix multiplication, in case they do not represent
680
/// accesses to the non-scalar operands of the matrix multiplication or
681
/// its result.
682
///
683
/// @param  PartialSchedule The partial schedule of the SCoP statement.
684
/// @param  MMI             Parameters of the matrix multiplication operands.
685
/// @return                 True in case the corresponding SCoP statement
686
///                         represents matrix multiplication and false,
687
///                         otherwise.
688
static bool containsOnlyMatrMultAcc(isl::map PartialSchedule,
689
7
                                    MatMulInfoTy &MMI) {
690
7
  auto InputDimId = PartialSchedule.get_tuple_id(isl::dim::in);
691
7
  auto *Stmt = static_cast<ScopStmt *>(InputDimId.get_user());
692
7
  unsigned OutDimNum = PartialSchedule.dim(isl::dim::out);
693
7
  assert(OutDimNum > 2 && "In case of the matrix multiplication the loop nest "
694
7
                          "and, consequently, the corresponding scheduling "
695
7
                          "functions have at least three dimensions.");
696
7
  auto MapI =
697
7
      permuteDimensions(PartialSchedule, isl::dim::out, MMI.i, OutDimNum - 1);
698
7
  auto MapJ =
699
7
      permuteDimensions(PartialSchedule, isl::dim::out, MMI.j, OutDimNum - 1);
700
7
  auto MapK =
701
7
      permuteDimensions(PartialSchedule, isl::dim::out, MMI.k, OutDimNum - 1);
702
7
703
7
  auto Accesses = getAccessesInOrder(*Stmt);
704
32
  for (auto *MemA = Accesses.begin(); MemA != Accesses.end() - 1; 
MemA++25
) {
705
25
    auto *MemAccessPtr = *MemA;
706
25
    if (MemAccessPtr->isLatestArrayKind() && 
MemAccessPtr != MMI.WriteToC22
&&
707
25
        
!isMatMulNonScalarReadAccess(MemAccessPtr, MMI)22
&&
708
25
        
!(MemAccessPtr->isStrideZero(MapI))1
&&
709
25
        
MemAccessPtr->isStrideZero(MapJ)0
&&
MemAccessPtr->isStrideZero(MapK)0
)
710
0
      return false;
711
25
  }
712
7
  return true;
713
7
}
714
715
/// Check for dependencies corresponding to the matrix multiplication.
716
///
717
/// Check that there is only true dependence of the form
718
/// S(..., k, ...) -> S(..., k + 1, …), where S is the SCoP statement
719
/// represented by @p Schedule and k is @p Pos. Such a dependence corresponds
720
/// to the dependency produced by the matrix multiplication.
721
///
722
/// @param  Schedule The schedule of the SCoP statement.
723
/// @param  D The SCoP dependencies.
724
/// @param  Pos The parameter to describe an acceptable true dependence.
725
///             In case it has a negative value, try to determine its
726
///             acceptable value.
727
/// @return True in case dependencies correspond to the matrix multiplication
728
///         and false, otherwise.
729
static bool containsOnlyMatMulDep(isl::map Schedule, const Dependences *D,
730
7
                                  int &Pos) {
731
7
  auto Dep = isl::manage(D->getDependences(Dependences::TYPE_RAW));
732
7
  auto Red = isl::manage(D->getDependences(Dependences::TYPE_RED));
733
7
  if (Red)
734
7
    Dep = Dep.unite(Red);
735
7
  auto DomainSpace = Schedule.get_space().domain();
736
7
  auto Space = DomainSpace.map_from_domain_and_range(DomainSpace);
737
7
  auto Deltas = Dep.extract_map(Space).deltas();
738
7
  int DeltasDimNum = Deltas.dim(isl::dim::set);
739
28
  for (int i = 0; i < DeltasDimNum; 
i++21
) {
740
21
    auto Val = Deltas.plain_get_val_if_fixed(isl::dim::set, i);
741
21
    Pos = Pos < 0 && Val.is_one() ? 
i7
:
Pos14
;
742
21
    if (Val.is_nan() || !(Val.is_zero() || 
(7
i == Pos7
&&
Val.is_one()7
)))
743
0
      return false;
744
21
  }
745
7
  if (DeltasDimNum == 0 || Pos < 0)
746
0
    return false;
747
7
  return true;
748
7
}
749
750
/// Check if the SCoP statement could probably be optimized with analytical
751
/// modeling.
752
///
753
/// containsMatrMult tries to determine whether the following conditions
754
/// are true:
755
/// 1. The last memory access modeling an array, MA1, represents writing to
756
///    memory and has the form S(..., i1, ..., i2, ...) -> M(i1, i2) or
757
///    S(..., i2, ..., i1, ...) -> M(i1, i2), where S is the SCoP statement
758
///    under consideration.
759
/// 2. There is only one loop-carried true dependency, and it has the
760
///    form S(..., i3, ...) -> S(..., i3 + 1, ...), and there are no
761
///    loop-carried or anti dependencies.
762
/// 3. SCoP contains three access relations, MA2, MA3, and MA4 that represent
763
///    reading from memory and have the form S(..., i3, ...) -> M(i1, i3),
764
///    S(..., i3, ...) -> M(i3, i2), S(...) -> M(i1, i2), respectively,
765
///    and all memory accesses of the SCoP that are different from MA1, MA2,
766
///    MA3, and MA4 have stride 0, if the innermost loop is exchanged with any
767
///    of loops i1, i2 and i3.
768
///
769
/// @param PartialSchedule The PartialSchedule that contains a SCoP statement
770
///        to check.
771
/// @D     The SCoP dependencies.
772
/// @MMI   Parameters of the matrix multiplication operands.
773
static bool containsMatrMult(isl::map PartialSchedule, const Dependences *D,
774
7
                             MatMulInfoTy &MMI) {
775
7
  auto InputDimsId = PartialSchedule.get_tuple_id(isl::dim::in);
776
7
  auto *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user());
777
7
  if (Stmt->size() <= 1)
778
0
    return false;
779
7
780
7
  auto Accesses = getAccessesInOrder(*Stmt);
781
7
  for (auto *MemA = Accesses.end() - 1; MemA != Accesses.begin(); 
MemA--0
) {
782
7
    auto *MemAccessPtr = *MemA;
783
7
    if (!MemAccessPtr->isLatestArrayKind())
784
0
      continue;
785
7
    if (!MemAccessPtr->isWrite())
786
0
      return false;
787
7
    auto AccMap = MemAccessPtr->getLatestAccessRelation();
788
7
    if (!isMatMulOperandAcc(Stmt->getDomain(), AccMap, MMI.i, MMI.j))
789
0
      return false;
790
7
    MMI.WriteToC = MemAccessPtr;
791
7
    break;
792
7
  }
793
7
794
7
  if (!containsOnlyMatMulDep(PartialSchedule, D, MMI.k))
795
0
    return false;
796
7
797
7
  if (!MMI.WriteToC || !containsOnlyMatrMultAcc(PartialSchedule, MMI))
798
0
    return false;
799
7
800
7
  if (!MMI.A || !MMI.B || !MMI.ReadFromC)
801
0
    return false;
802
7
  return true;
803
7
}
804
805
/// Permute two dimensions of the band node.
806
///
807
/// Permute FirstDim and SecondDim dimensions of the Node.
808
///
809
/// @param Node The band node to be modified.
810
/// @param FirstDim The first dimension to be permuted.
811
/// @param SecondDim The second dimension to be permuted.
812
static isl::schedule_node permuteBandNodeDimensions(isl::schedule_node Node,
813
                                                    unsigned FirstDim,
814
40
                                                    unsigned SecondDim) {
815
40
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band &&
816
40
         isl_schedule_node_band_n_member(Node.get()) >
817
40
             std::max(FirstDim, SecondDim));
818
40
  auto PartialSchedule =
819
40
      isl::manage(isl_schedule_node_band_get_partial_schedule(Node.get()));
820
40
  auto PartialScheduleFirstDim = PartialSchedule.get_union_pw_aff(FirstDim);
821
40
  auto PartialScheduleSecondDim = PartialSchedule.get_union_pw_aff(SecondDim);
822
40
  PartialSchedule =
823
40
      PartialSchedule.set_union_pw_aff(SecondDim, PartialScheduleFirstDim);
824
40
  PartialSchedule =
825
40
      PartialSchedule.set_union_pw_aff(FirstDim, PartialScheduleSecondDim);
826
40
  Node = isl::manage(isl_schedule_node_delete(Node.release()));
827
40
  return Node.insert_partial_schedule(PartialSchedule);
828
40
}
829
830
isl::schedule_node ScheduleTreeOptimizer::createMicroKernel(
831
7
    isl::schedule_node Node, MicroKernelParamsTy MicroKernelParams) {
832
7
  Node = applyRegisterTiling(Node, {MicroKernelParams.Mr, MicroKernelParams.Nr},
833
7
                             1);
834
7
  Node = Node.parent().parent();
835
7
  return permuteBandNodeDimensions(Node, 0, 1).child(0).child(0);
836
7
}
837
838
isl::schedule_node ScheduleTreeOptimizer::createMacroKernel(
839
7
    isl::schedule_node Node, MacroKernelParamsTy MacroKernelParams) {
840
7
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
841
7
  if (MacroKernelParams.Mc == 1 && 
MacroKernelParams.Nc == 11
&&
842
7
      
MacroKernelParams.Kc == 11
)
843
1
    return Node;
844
6
  int DimOutNum = isl_schedule_node_band_n_member(Node.get());
845
6
  std::vector<int> TileSizes(DimOutNum, 1);
846
6
  TileSizes[DimOutNum - 3] = MacroKernelParams.Mc;
847
6
  TileSizes[DimOutNum - 2] = MacroKernelParams.Nc;
848
6
  TileSizes[DimOutNum - 1] = MacroKernelParams.Kc;
849
6
  Node = tileNode(Node, "1st level tiling", TileSizes, 1);
850
6
  Node = Node.parent().parent();
851
6
  Node = permuteBandNodeDimensions(Node, DimOutNum - 2, DimOutNum - 1);
852
6
  Node = permuteBandNodeDimensions(Node, DimOutNum - 3, DimOutNum - 1);
853
6
  return Node.child(0).child(0);
854
6
}
855
856
/// Get the size of the widest type of the matrix multiplication operands
857
/// in bytes, including alignment padding.
858
///
859
/// @param MMI Parameters of the matrix multiplication operands.
860
/// @return The size of the widest type of the matrix multiplication operands
861
///         in bytes, including alignment padding.
862
6
static uint64_t getMatMulAlignTypeSize(MatMulInfoTy MMI) {
863
6
  auto *S = MMI.A->getStatement()->getParent();
864
6
  auto &DL = S->getFunction().getParent()->getDataLayout();
865
6
  auto ElementSizeA = DL.getTypeAllocSize(MMI.A->getElementType());
866
6
  auto ElementSizeB = DL.getTypeAllocSize(MMI.B->getElementType());
867
6
  auto ElementSizeC = DL.getTypeAllocSize(MMI.WriteToC->getElementType());
868
6
  return std::max({ElementSizeA, ElementSizeB, ElementSizeC});
869
6
}
870
871
/// Get the size of the widest type of the matrix multiplication operands
872
/// in bits.
873
///
874
/// @param MMI Parameters of the matrix multiplication operands.
875
/// @return The size of the widest type of the matrix multiplication operands
876
///         in bits.
877
7
static uint64_t getMatMulTypeSize(MatMulInfoTy MMI) {
878
7
  auto *S = MMI.A->getStatement()->getParent();
879
7
  auto &DL = S->getFunction().getParent()->getDataLayout();
880
7
  auto ElementSizeA = DL.getTypeSizeInBits(MMI.A->getElementType());
881
7
  auto ElementSizeB = DL.getTypeSizeInBits(MMI.B->getElementType());
882
7
  auto ElementSizeC = DL.getTypeSizeInBits(MMI.WriteToC->getElementType());
883
7
  return std::max({ElementSizeA, ElementSizeB, ElementSizeC});
884
7
}
885
886
/// Get parameters of the BLIS micro kernel.
887
///
888
/// We choose the Mr and Nr parameters of the micro kernel to be large enough
889
/// such that no stalls caused by the combination of latencies and dependencies
890
/// are introduced during the updates of the resulting matrix of the matrix
891
/// multiplication. However, they should also be as small as possible to
892
/// release more registers for entries of multiplied matrices.
893
///
894
/// @param TTI Target Transform Info.
895
/// @param MMI Parameters of the matrix multiplication operands.
896
/// @return The structure of type MicroKernelParamsTy.
897
/// @see MicroKernelParamsTy
898
static struct MicroKernelParamsTy
899
7
getMicroKernelParams(const TargetTransformInfo *TTI, MatMulInfoTy MMI) {
900
7
  assert(TTI && "The target transform info should be provided.");
901
7
902
7
  // Nvec - Number of double-precision floating-point numbers that can be hold
903
7
  // by a vector register. Use 2 by default.
904
7
  long RegisterBitwidth = VectorRegisterBitwidth;
905
7
906
7
  if (RegisterBitwidth == -1)
907
0
    RegisterBitwidth = TTI->getRegisterBitWidth(true);
908
7
  auto ElementSize = getMatMulTypeSize(MMI);
909
7
  assert(ElementSize > 0 && "The element size of the matrix multiplication "
910
7
                            "operands should be greater than zero.");
911
7
  auto Nvec = RegisterBitwidth / ElementSize;
912
7
  if (Nvec == 0)
913
0
    Nvec = 2;
914
7
  int Nr =
915
7
      ceil(sqrt(Nvec * LatencyVectorFma * ThroughputVectorFma) / Nvec) * Nvec;
916
7
  int Mr = ceil(Nvec * LatencyVectorFma * ThroughputVectorFma / Nr);
917
7
  return {Mr, Nr};
918
7
}
919
920
namespace {
921
/// Determine parameters of the target cache.
922
///
923
/// @param TTI Target Transform Info.
924
7
void getTargetCacheParameters(const llvm::TargetTransformInfo *TTI) {
925
7
  auto L1DCache = llvm::TargetTransformInfo::CacheLevel::L1D;
926
7
  auto L2DCache = llvm::TargetTransformInfo::CacheLevel::L2D;
927
7
  if (FirstCacheLevelSize == -1) {
928
1
    if (TTI->getCacheSize(L1DCache).hasValue())
929
0
      FirstCacheLevelSize = TTI->getCacheSize(L1DCache).getValue();
930
1
    else
931
1
      FirstCacheLevelSize = static_cast<int>(FirstCacheLevelDefaultSize);
932
1
  }
933
7
  if (SecondCacheLevelSize == -1) {
934
1
    if (TTI->getCacheSize(L2DCache).hasValue())
935
0
      SecondCacheLevelSize = TTI->getCacheSize(L2DCache).getValue();
936
1
    else
937
1
      SecondCacheLevelSize = static_cast<int>(SecondCacheLevelDefaultSize);
938
1
  }
939
7
  if (FirstCacheLevelAssociativity == -1) {
940
0
    if (TTI->getCacheAssociativity(L1DCache).hasValue())
941
0
      FirstCacheLevelAssociativity =
942
0
          TTI->getCacheAssociativity(L1DCache).getValue();
943
0
    else
944
0
      FirstCacheLevelAssociativity =
945
0
          static_cast<int>(FirstCacheLevelDefaultAssociativity);
946
0
  }
947
7
  if (SecondCacheLevelAssociativity == -1) {
948
1
    if (TTI->getCacheAssociativity(L2DCache).hasValue())
949
0
      SecondCacheLevelAssociativity =
950
0
          TTI->getCacheAssociativity(L2DCache).getValue();
951
1
    else
952
1
      SecondCacheLevelAssociativity =
953
1
          static_cast<int>(SecondCacheLevelDefaultAssociativity);
954
1
  }
955
7
}
956
} // namespace
957
958
/// Get parameters of the BLIS macro kernel.
959
///
960
/// During the computation of matrix multiplication, blocks of partitioned
961
/// matrices are mapped to different layers of the memory hierarchy.
962
/// To optimize data reuse, blocks should be ideally kept in cache between
963
/// iterations. Since parameters of the macro kernel determine sizes of these
964
/// blocks, there are upper and lower bounds on these parameters.
965
///
966
/// @param TTI Target Transform Info.
967
/// @param MicroKernelParams Parameters of the micro-kernel
968
///                          to be taken into account.
969
/// @param MMI Parameters of the matrix multiplication operands.
970
/// @return The structure of type MacroKernelParamsTy.
971
/// @see MacroKernelParamsTy
972
/// @see MicroKernelParamsTy
973
static struct MacroKernelParamsTy
974
getMacroKernelParams(const llvm::TargetTransformInfo *TTI,
975
                     const MicroKernelParamsTy &MicroKernelParams,
976
7
                     MatMulInfoTy MMI) {
977
7
  getTargetCacheParameters(TTI);
978
7
  // According to www.cs.utexas.edu/users/flame/pubs/TOMS-BLIS-Analytical.pdf,
979
7
  // it requires information about the first two levels of a cache to determine
980
7
  // all the parameters of a macro-kernel. It also checks that an associativity
981
7
  // degree of a cache level is greater than two. Otherwise, another algorithm
982
7
  // for determination of the parameters should be used.
983
7
  if (!(MicroKernelParams.Mr > 0 && MicroKernelParams.Nr > 0 &&
984
7
        FirstCacheLevelSize > 0 && SecondCacheLevelSize > 0 &&
985
7
        FirstCacheLevelAssociativity > 2 && SecondCacheLevelAssociativity > 2))
986
0
    return {1, 1, 1};
987
7
  // The quotient should be greater than zero.
988
7
  if (PollyPatternMatchingNcQuotient <= 0)
989
0
    return {1, 1, 1};
990
7
  int Car = floor(
991
7
      (FirstCacheLevelAssociativity - 1) /
992
7
      (1 + static_cast<double>(MicroKernelParams.Nr) / MicroKernelParams.Mr));
993
7
994
7
  // Car can be computed to be zero since it is floor to int.
995
7
  // On Mac OS, division by 0 does not raise a signal. This causes negative
996
7
  // tile sizes to be computed. Prevent division by Cac==0 by early returning
997
7
  // if this happens.
998
7
  if (Car == 0)
999
1
    return {1, 1, 1};
1000
6
1001
6
  auto ElementSize = getMatMulAlignTypeSize(MMI);
1002
6
  assert(ElementSize > 0 && "The element size of the matrix multiplication "
1003
6
                            "operands should be greater than zero.");
1004
6
  int Kc = (Car * FirstCacheLevelSize) /
1005
6
           (MicroKernelParams.Mr * FirstCacheLevelAssociativity * ElementSize);
1006
6
  double Cac =
1007
6
      static_cast<double>(Kc * ElementSize * SecondCacheLevelAssociativity) /
1008
6
      SecondCacheLevelSize;
1009
6
  int Mc = floor((SecondCacheLevelAssociativity - 2) / Cac);
1010
6
  int Nc = PollyPatternMatchingNcQuotient * MicroKernelParams.Nr;
1011
6
1012
6
  assert(Mc > 0 && Nc > 0 && Kc > 0 &&
1013
6
         "Matrix block sizes should be  greater than zero");
1014
6
  return {Mc, Nc, Kc};
1015
6
}
1016
1017
/// Create an access relation that is specific to
1018
///        the matrix multiplication pattern.
1019
///
1020
/// Create an access relation of the following form:
1021
/// [O0, O1, O2, O3, O4, O5, O6, O7, O8] -> [OI, O5, OJ]
1022
/// where I is @p FirstDim, J is @p SecondDim.
1023
///
1024
/// It can be used, for example, to create relations that helps to consequently
1025
/// access elements of operands of a matrix multiplication after creation of
1026
/// the BLIS micro and macro kernels.
1027
///
1028
/// @see ScheduleTreeOptimizer::createMicroKernel
1029
/// @see ScheduleTreeOptimizer::createMacroKernel
1030
///
1031
/// Subsequently, the described access relation is applied to the range of
1032
/// @p MapOldIndVar, that is used to map original induction variables to
1033
/// the ones, which are produced by schedule transformations. It helps to
1034
/// define relations using a new space and, at the same time, keep them
1035
/// in the original one.
1036
///
1037
/// @param MapOldIndVar The relation, which maps original induction variables
1038
///                     to the ones, which are produced by schedule
1039
///                     transformations.
1040
/// @param FirstDim, SecondDim The input dimensions that are used to define
1041
///        the specified access relation.
1042
/// @return The specified access relation.
1043
isl::map getMatMulAccRel(isl::map MapOldIndVar, unsigned FirstDim,
1044
12
                         unsigned SecondDim) {
1045
12
  auto AccessRelSpace = isl::space(MapOldIndVar.get_ctx(), 0, 9, 3);
1046
12
  auto AccessRel = isl::map::universe(AccessRelSpace);
1047
12
  AccessRel = AccessRel.equate(isl::dim::in, FirstDim, isl::dim::out, 0);
1048
12
  AccessRel = AccessRel.equate(isl::dim::in, 5, isl::dim::out, 1);
1049
12
  AccessRel = AccessRel.equate(isl::dim::in, SecondDim, isl::dim::out, 2);
1050
12
  return MapOldIndVar.apply_range(AccessRel);
1051
12
}
1052
1053
isl::schedule_node createExtensionNode(isl::schedule_node Node,
1054
12
                                       isl::map ExtensionMap) {
1055
12
  auto Extension = isl::union_map(ExtensionMap);
1056
12
  auto NewNode = isl::schedule_node::from_extension(Extension);
1057
12
  return Node.graft_before(NewNode);
1058
12
}
1059
1060
/// Apply the packing transformation.
1061
///
1062
/// The packing transformation can be described as a data-layout
1063
/// transformation that requires to introduce a new array, copy data
1064
/// to the array, and change memory access locations to reference the array.
1065
/// It can be used to ensure that elements of the new array are read in-stride
1066
/// access, aligned to cache lines boundaries, and preloaded into certain cache
1067
/// levels.
1068
///
1069
/// As an example let us consider the packing of the array A that would help
1070
/// to read its elements with in-stride access. An access to the array A
1071
/// is represented by an access relation that has the form
1072
/// S[i, j, k] -> A[i, k]. The scheduling function of the SCoP statement S has
1073
/// the form S[i,j, k] -> [floor((j mod Nc) / Nr), floor((i mod Mc) / Mr),
1074
/// k mod Kc, j mod Nr, i mod Mr].
1075
///
1076
/// To ensure that elements of the array A are read in-stride access, we add
1077
/// a new array Packed_A[Mc/Mr][Kc][Mr] to the SCoP, using
1078
/// Scop::createScopArrayInfo, change the access relation
1079
/// S[i, j, k] -> A[i, k] to
1080
/// S[i, j, k] -> Packed_A[floor((i mod Mc) / Mr), k mod Kc, i mod Mr], using
1081
/// MemoryAccess::setNewAccessRelation, and copy the data to the array, using
1082
/// the copy statement created by Scop::addScopStmt.
1083
///
1084
/// @param Node The schedule node to be optimized.
1085
/// @param MapOldIndVar The relation, which maps original induction variables
1086
///                     to the ones, which are produced by schedule
1087
///                     transformations.
1088
/// @param MicroParams, MacroParams Parameters of the BLIS kernel
1089
///                                 to be taken into account.
1090
/// @param MMI Parameters of the matrix multiplication operands.
1091
/// @return The optimized schedule node.
1092
static isl::schedule_node
1093
optimizeDataLayoutMatrMulPattern(isl::schedule_node Node, isl::map MapOldIndVar,
1094
                                 MicroKernelParamsTy MicroParams,
1095
                                 MacroKernelParamsTy MacroParams,
1096
6
                                 MatMulInfoTy &MMI) {
1097
6
  auto InputDimsId = MapOldIndVar.get_tuple_id(isl::dim::in);
1098
6
  auto *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user());
1099
6
1100
6
  // Create a copy statement that corresponds to the memory access to the
1101
6
  // matrix B, the second operand of the matrix multiplication.
1102
6
  Node = Node.parent().parent().parent().parent().parent().parent();
1103
6
  Node = isl::manage(isl_schedule_node_band_split(Node.release(), 2)).child(0);
1104
6
  auto AccRel = getMatMulAccRel(isl::manage(MapOldIndVar.copy()), 3, 7);
1105
6
  unsigned FirstDimSize = MacroParams.Nc / MicroParams.Nr;
1106
6
  unsigned SecondDimSize = MacroParams.Kc;
1107
6
  unsigned ThirdDimSize = MicroParams.Nr;
1108
6
  auto *SAI = Stmt->getParent()->createScopArrayInfo(
1109
6
      MMI.B->getElementType(), "Packed_B",
1110
6
      {FirstDimSize, SecondDimSize, ThirdDimSize});
1111
6
  AccRel = AccRel.set_tuple_id(isl::dim::out, SAI->getBasePtrId());
1112
6
  auto OldAcc = MMI.B->getLatestAccessRelation();
1113
6
  MMI.B->setNewAccessRelation(AccRel);
1114
6
  auto ExtMap = MapOldIndVar.project_out(isl::dim::out, 2,
1115
6
                                         MapOldIndVar.dim(isl::dim::out) - 2);
1116
6
  ExtMap = ExtMap.reverse();
1117
6
  ExtMap = ExtMap.fix_si(isl::dim::out, MMI.i, 0);
1118
6
  auto Domain = Stmt->getDomain();
1119
6
1120
6
  // Restrict the domains of the copy statements to only execute when also its
1121
6
  // originating statement is executed.
1122
6
  auto DomainId = Domain.get_tuple_id();
1123
6
  auto *NewStmt = Stmt->getParent()->addScopStmt(
1124
6
      OldAcc, MMI.B->getLatestAccessRelation(), Domain);
1125
6
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, isl::manage(DomainId.copy()));
1126
6
  ExtMap = ExtMap.intersect_range(isl::manage(Domain.copy()));
1127
6
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, NewStmt->getDomainId());
1128
6
  Node = createExtensionNode(Node, ExtMap);
1129
6
1130
6
  // Create a copy statement that corresponds to the memory access
1131
6
  // to the matrix A, the first operand of the matrix multiplication.
1132
6
  Node = Node.child(0);
1133
6
  AccRel = getMatMulAccRel(isl::manage(MapOldIndVar.copy()), 4, 6);
1134
6
  FirstDimSize = MacroParams.Mc / MicroParams.Mr;
1135
6
  ThirdDimSize = MicroParams.Mr;
1136
6
  SAI = Stmt->getParent()->createScopArrayInfo(
1137
6
      MMI.A->getElementType(), "Packed_A",
1138
6
      {FirstDimSize, SecondDimSize, ThirdDimSize});
1139
6
  AccRel = AccRel.set_tuple_id(isl::dim::out, SAI->getBasePtrId());
1140
6
  OldAcc = MMI.A->getLatestAccessRelation();
1141
6
  MMI.A->setNewAccessRelation(AccRel);
1142
6
  ExtMap = MapOldIndVar.project_out(isl::dim::out, 3,
1143
6
                                    MapOldIndVar.dim(isl::dim::out) - 3);
1144
6
  ExtMap = ExtMap.reverse();
1145
6
  ExtMap = ExtMap.fix_si(isl::dim::out, MMI.j, 0);
1146
6
  NewStmt = Stmt->getParent()->addScopStmt(
1147
6
      OldAcc, MMI.A->getLatestAccessRelation(), Domain);
1148
6
1149
6
  // Restrict the domains of the copy statements to only execute when also its
1150
6
  // originating statement is executed.
1151
6
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, DomainId);
1152
6
  ExtMap = ExtMap.intersect_range(Domain);
1153
6
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, NewStmt->getDomainId());
1154
6
  Node = createExtensionNode(Node, ExtMap);
1155
6
  return Node.child(0).child(0).child(0).child(0).child(0);
1156
6
}
1157
1158
/// Get a relation mapping induction variables produced by schedule
1159
/// transformations to the original ones.
1160
///
1161
/// @param Node The schedule node produced as the result of creation
1162
///        of the BLIS kernels.
1163
/// @param MicroKernelParams, MacroKernelParams Parameters of the BLIS kernel
1164
///                                             to be taken into account.
1165
/// @return  The relation mapping original induction variables to the ones
1166
///          produced by schedule transformation.
1167
/// @see ScheduleTreeOptimizer::createMicroKernel
1168
/// @see ScheduleTreeOptimizer::createMacroKernel
1169
/// @see getMacroKernelParams
1170
isl::map
1171
getInductionVariablesSubstitution(isl::schedule_node Node,
1172
                                  MicroKernelParamsTy MicroKernelParams,
1173
6
                                  MacroKernelParamsTy MacroKernelParams) {
1174
6
  auto Child = Node.child(0);
1175
6
  auto UnMapOldIndVar = Child.get_prefix_schedule_union_map();
1176
6
  auto MapOldIndVar = isl::map::from_union_map(UnMapOldIndVar);
1177
6
  if (MapOldIndVar.dim(isl::dim::out) > 9)
1178
0
    return MapOldIndVar.project_out(isl::dim::out, 0,
1179
0
                                    MapOldIndVar.dim(isl::dim::out) - 9);
1180
6
  return MapOldIndVar;
1181
6
}
1182
1183
/// Isolate a set of partial tile prefixes and unroll the isolated part.
1184
///
1185
/// The set should ensure that it contains only partial tile prefixes that have
1186
/// exactly Mr x Nr iterations of the two innermost loops produced by
1187
/// the optimization of the matrix multiplication. Mr and Nr are parameters of
1188
/// the micro-kernel.
1189
///
1190
/// In case of parametric bounds, this helps to auto-vectorize the unrolled
1191
/// innermost loops, using the SLP vectorizer.
1192
///
1193
/// @param Node              The schedule node to be modified.
1194
/// @param MicroKernelParams Parameters of the micro-kernel
1195
///                          to be taken into account.
1196
/// @return The modified isl_schedule_node.
1197
static isl::schedule_node
1198
isolateAndUnrollMatMulInnerLoops(isl::schedule_node Node,
1199
6
                                 struct MicroKernelParamsTy MicroKernelParams) {
1200
6
  isl::schedule_node Child = Node.get_child(0);
1201
6
  isl::union_map UnMapOldIndVar = Child.get_prefix_schedule_relation();
1202
6
  isl::set Prefix = isl::map::from_union_map(UnMapOldIndVar).range();
1203
6
  unsigned Dims = Prefix.dim(isl::dim::set);
1204
6
  Prefix = Prefix.project_out(isl::dim::set, Dims - 1, 1);
1205
6
  Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Nr);
1206
6
  Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Mr);
1207
6
1208
6
  isl::union_set IsolateOption =
1209
6
      getIsolateOptions(Prefix.add_dims(isl::dim::set, 3), 3);
1210
6
  isl::ctx Ctx = Node.get_ctx();
1211
6
  auto Options = IsolateOption.unite(getDimOptions(Ctx, "unroll"));
1212
6
  Options = Options.unite(getUnrollIsolatedSetOptions(Ctx));
1213
6
  Node = Node.band_set_ast_build_options(Options);
1214
6
  Node = Node.parent().parent().parent();
1215
6
  IsolateOption = getIsolateOptions(Prefix, 3);
1216
6
  Options = IsolateOption.unite(getDimOptions(Ctx, "separate"));
1217
6
  Node = Node.band_set_ast_build_options(Options);
1218
6
  Node = Node.child(0).child(0).child(0);
1219
6
  return Node;
1220
6
}
1221
1222
/// Mark @p BasePtr with "Inter iteration alias-free" mark node.
1223
///
1224
/// @param Node The child of the mark node to be inserted.
1225
/// @param BasePtr The pointer to be marked.
1226
/// @return The modified isl_schedule_node.
1227
static isl::schedule_node markInterIterationAliasFree(isl::schedule_node Node,
1228
7
                                                      Value *BasePtr) {
1229
7
  if (!BasePtr)
1230
0
    return Node;
1231
7
1232
7
  auto Id =
1233
7
      isl::id::alloc(Node.get_ctx(), "Inter iteration alias-free", BasePtr);
1234
7
  return Node.insert_mark(Id).child(0);
1235
7
}
1236
1237
/// Insert "Loop Vectorizer Disabled" mark node.
1238
///
1239
/// @param Node The child of the mark node to be inserted.
1240
/// @return The modified isl_schedule_node.
1241
6
static isl::schedule_node markLoopVectorizerDisabled(isl::schedule_node Node) {
1242
6
  auto Id = isl::id::alloc(Node.get_ctx(), "Loop Vectorizer Disabled", nullptr);
1243
6
  return Node.insert_mark(Id).child(0);
1244
6
}
1245
1246
/// Restore the initial ordering of dimensions of the band node
1247
///
1248
/// In case the band node represents all the dimensions of the iteration
1249
/// domain, recreate the band node to restore the initial ordering of the
1250
/// dimensions.
1251
///
1252
/// @param Node The band node to be modified.
1253
/// @return The modified schedule node.
1254
static isl::schedule_node
1255
7
getBandNodeWithOriginDimOrder(isl::schedule_node Node) {
1256
7
  assert(isl_schedule_node_get_type(Node.keep()) == isl_schedule_node_band);
1257
7
  if (isl_schedule_node_get_type(Node.child(0).keep()) !=
1258
7
      isl_schedule_node_leaf)
1259
0
    return Node;
1260
7
  auto Domain = Node.get_universe_domain();
1261
7
  assert(isl_union_set_n_set(Domain.keep()) == 1);
1262
7
  if (Node.get_schedule_depth() != 0 ||
1263
7
      (isl::set(isl::manage(Domain.copy())).dim(isl::dim::set) !=
1264
7
       isl_schedule_node_band_n_member(Node.keep())))
1265
0
    return Node;
1266
7
  Node = isl::manage(isl_schedule_node_delete(Node.take()));
1267
7
  auto PartialSchedulePwAff = Domain.identity_union_pw_multi_aff();
1268
7
  auto PartialScheduleMultiPwAff =
1269
7
      isl::multi_union_pw_aff(PartialSchedulePwAff);
1270
7
  PartialScheduleMultiPwAff =
1271
7
      PartialScheduleMultiPwAff.reset_tuple_id(isl::dim::set);
1272
7
  return Node.insert_partial_schedule(PartialScheduleMultiPwAff);
1273
7
}
1274
1275
isl::schedule_node
1276
ScheduleTreeOptimizer::optimizeMatMulPattern(isl::schedule_node Node,
1277
                                             const TargetTransformInfo *TTI,
1278
7
                                             MatMulInfoTy &MMI) {
1279
7
  assert(TTI && "The target transform info should be provided.");
1280
7
  Node = markInterIterationAliasFree(
1281
7
      Node, MMI.WriteToC->getLatestScopArrayInfo()->getBasePtr());
1282
7
  int DimOutNum = isl_schedule_node_band_n_member(Node.get());
1283
7
  assert(DimOutNum > 2 && "In case of the matrix multiplication the loop nest "
1284
7
                          "and, consequently, the corresponding scheduling "
1285
7
                          "functions have at least three dimensions.");
1286
7
  Node = getBandNodeWithOriginDimOrder(Node);
1287
7
  Node = permuteBandNodeDimensions(Node, MMI.i, DimOutNum - 3);
1288
7
  int NewJ = MMI.j == DimOutNum - 3 ? 
MMI.i0
: MMI.j;
1289
7
  int NewK = MMI.k == DimOutNum - 3 ? 
MMI.i0
: MMI.k;
1290
7
  Node = permuteBandNodeDimensions(Node, NewJ, DimOutNum - 2);
1291
7
  NewK = NewK == DimOutNum - 2 ? 
NewJ0
: NewK;
1292
7
  Node = permuteBandNodeDimensions(Node, NewK, DimOutNum - 1);
1293
7
  auto MicroKernelParams = getMicroKernelParams(TTI, MMI);
1294
7
  auto MacroKernelParams = getMacroKernelParams(TTI, MicroKernelParams, MMI);
1295
7
  Node = createMacroKernel(Node, MacroKernelParams);
1296
7
  Node = createMicroKernel(Node, MicroKernelParams);
1297
7
  if (MacroKernelParams.Mc == 1 || 
MacroKernelParams.Nc == 16
||
1298
7
      
MacroKernelParams.Kc == 16
)
1299
1
    return Node;
1300
6
  auto MapOldIndVar = getInductionVariablesSubstitution(Node, MicroKernelParams,
1301
6
                                                        MacroKernelParams);
1302
6
  if (!MapOldIndVar)
1303
0
    return Node;
1304
6
  Node = markLoopVectorizerDisabled(Node.parent()).child(0);
1305
6
  Node = isolateAndUnrollMatMulInnerLoops(Node, MicroKernelParams);
1306
6
  return optimizeDataLayoutMatrMulPattern(Node, MapOldIndVar, MicroKernelParams,
1307
6
                                          MacroKernelParams, MMI);
1308
6
}
1309
1310
bool ScheduleTreeOptimizer::isMatrMultPattern(isl::schedule_node Node,
1311
                                              const Dependences *D,
1312
14
                                              MatMulInfoTy &MMI) {
1313
14
  auto PartialSchedule = isl::manage(
1314
14
      isl_schedule_node_band_get_partial_schedule_union_map(Node.get()));
1315
14
  Node = Node.child(0);
1316
14
  auto LeafType = isl_schedule_node_get_type(Node.get());
1317
14
  Node = Node.parent();
1318
14
  if (LeafType != isl_schedule_node_leaf ||
1319
14
      isl_schedule_node_band_n_member(Node.get()) < 3 ||
1320
14
      
Node.get_schedule_depth() != 07
||
1321
14
      
isl_union_map_n_map(PartialSchedule.get()) != 17
)
1322
7
    return false;
1323
7
  auto NewPartialSchedule = isl::map::from_union_map(PartialSchedule);
1324
7
  if (containsMatrMult(NewPartialSchedule, D, MMI))
1325
7
    return true;
1326
0
  return false;
1327
0
}
1328
1329
__isl_give isl_schedule_node *
1330
ScheduleTreeOptimizer::optimizeBand(__isl_take isl_schedule_node *Node,
1331
199
                                    void *User) {
1332
199
  if (!isTileableBandNode(isl::manage(isl_schedule_node_copy(Node))))
1333
183
    return Node;
1334
16
1335
16
  const OptimizerAdditionalInfoTy *OAI =
1336
16
      static_cast<const OptimizerAdditionalInfoTy *>(User);
1337
16
1338
16
  MatMulInfoTy MMI;
1339
16
  if (PMBasedOpts && 
User14
&&
1340
16
      isMatrMultPattern(isl::manage(isl_schedule_node_copy(Node)), OAI->D,
1341
14
                        MMI)) {
1342
7
    DEBUG(dbgs() << "The matrix multiplication pattern was detected\n");
1343
7
    MatMulOpts++;
1344
7
    return optimizeMatMulPattern(isl::manage(Node), OAI->TTI, MMI).release();
1345
7
  }
1346
9
1347
9
  return standardBandOpts(isl::manage(Node), User).release();
1348
9
}
1349
1350
isl::schedule
1351
ScheduleTreeOptimizer::optimizeSchedule(isl::schedule Schedule,
1352
12
                                        const OptimizerAdditionalInfoTy *OAI) {
1353
12
  auto Root = Schedule.get_root();
1354
12
  Root = optimizeScheduleNode(Root, OAI);
1355
12
  return Root.get_schedule();
1356
12
}
1357
1358
isl::schedule_node ScheduleTreeOptimizer::optimizeScheduleNode(
1359
12
    isl::schedule_node Node, const OptimizerAdditionalInfoTy *OAI) {
1360
12
  Node = isl::manage(isl_schedule_node_map_descendant_bottom_up(
1361
12
      Node.release(), optimizeBand,
1362
12
      const_cast<void *>(static_cast<const void *>(OAI))));
1363
12
  return Node;
1364
12
}
1365
1366
bool ScheduleTreeOptimizer::isProfitableSchedule(Scop &S,
1367
12
                                                 isl::schedule NewSchedule) {
1368
12
  // To understand if the schedule has been optimized we check if the schedule
1369
12
  // has changed at all.
1370
12
  // TODO: We can improve this by tracking if any necessarily beneficial
1371
12
  // transformations have been performed. This can e.g. be tiling, loop
1372
12
  // interchange, or ...) We can track this either at the place where the
1373
12
  // transformation has been performed or, in case of automatic ILP based
1374
12
  // optimizations, by comparing (yet to be defined) performance metrics
1375
12
  // before/after the scheduling optimizer
1376
12
  // (e.g., #stride-one accesses)
1377
12
  if (S.containsExtensionNode(NewSchedule))
1378
6
    return true;
1379
6
  auto NewScheduleMap = NewSchedule.get_map();
1380
6
  auto OldSchedule = S.getSchedule();
1381
6
  assert(OldSchedule && "Only IslScheduleOptimizer can insert extension nodes "
1382
6
                        "that make Scop::getSchedule() return nullptr.");
1383
6
  bool changed = !OldSchedule.is_equal(NewScheduleMap);
1384
6
  return changed;
1385
6
}
1386
1387
namespace {
1388
1389
class IslScheduleOptimizer : public ScopPass {
1390
public:
1391
  static char ID;
1392
1393
12
  explicit IslScheduleOptimizer() : ScopPass(ID) {}
1394
1395
12
  ~IslScheduleOptimizer() override { isl_schedule_free(LastSchedule); }
1396
1397
  /// Optimize the schedule of the SCoP @p S.
1398
  bool runOnScop(Scop &S) override;
1399
1400
  /// Print the new schedule for the SCoP @p S.
1401
  void printScop(raw_ostream &OS, Scop &S) const override;
1402
1403
  /// Register all analyses and transformation required.
1404
  void getAnalysisUsage(AnalysisUsage &AU) const override;
1405
1406
  /// Release the internal memory.
1407
49
  void releaseMemory() override {
1408
49
    isl_schedule_free(LastSchedule);
1409
49
    LastSchedule = nullptr;
1410
49
  }
1411
1412
private:
1413
  isl_schedule *LastSchedule = nullptr;
1414
};
1415
1416
} // namespace
1417
1418
char IslScheduleOptimizer::ID = 0;
1419
1420
/// Collect statistics for the schedule tree.
1421
///
1422
/// @param Schedule The schedule tree to analyze. If not a schedule tree it is
1423
/// ignored.
1424
/// @param Version  The version of the schedule tree that is analyzed.
1425
///                 0 for the original schedule tree before any transformation.
1426
///                 1 for the schedule tree after isl's rescheduling.
1427
///                 2 for the schedule tree after optimizations are applied
1428
///                 (tiling, pattern matching)
1429
36
static void walkScheduleTreeForStatistics(isl::schedule Schedule, int Version) {
1430
36
  auto Root = Schedule.get_root();
1431
36
  if (!Root)
1432
0
    return;
1433
36
1434
36
  isl_schedule_node_foreach_descendant_top_down(
1435
36
      Root.get(),
1436
364
      [](__isl_keep isl_schedule_node *nodeptr, void *user) -> isl_bool {
1437
364
        isl::schedule_node Node = isl::manage(isl_schedule_node_copy(nodeptr));
1438
364
        int Version = *static_cast<int *>(user);
1439
364
1440
364
        switch (isl_schedule_node_get_type(Node.get())) {
1441
364
        case isl_schedule_node_band: {
1442
100
          NumBands[Version]++;
1443
100
          if (isl_schedule_node_band_get_permutable(Node.get()) ==
1444
100
              isl_bool_true)
1445
45
            NumPermutable[Version]++;
1446
100
1447
100
          int CountMembers = isl_schedule_node_band_n_member(Node.get());
1448
100
          NumBandMembers[Version] += CountMembers;
1449
279
          for (int i = 0; i < CountMembers; 
i += 1179
) {
1450
179
            if (Node.band_member_get_coincident(i))
1451
74
              NumCoincident[Version]++;
1452
179
          }
1453
100
          break;
1454
364
        }
1455
364
1456
364
        case isl_schedule_node_filter:
1457
58
          NumFilters[Version]++;
1458
58
          break;
1459
364
1460
364
        case isl_schedule_node_extension:
1461
12
          NumExtension[Version]++;
1462
12
          break;
1463
364
1464
364
        default:
1465
194
          break;
1466
364
        }
1467
364
1468
364
        return isl_bool_true;
1469
364
      },
1470
36
      &Version);
1471
36
}
1472
1473
12
bool IslScheduleOptimizer::runOnScop(Scop &S) {
1474
12
  // Skip SCoPs in case they're already optimised by PPCGCodeGeneration
1475
12
  if (S.isToBeSkipped())
1476
0
    return false;
1477
12
1478
12
  // Skip empty SCoPs but still allow code generation as it will delete the
1479
12
  // loops present but not needed.
1480
12
  if (S.getSize() == 0) {
1481
0
    S.markAsOptimized();
1482
0
    return false;
1483
0
  }
1484
12
1485
12
  const Dependences &D =
1486
12
      getAnalysis<DependenceInfo>().getDependences(Dependences::AL_Statement);
1487
12
1488
12
  if (D.getSharedIslCtx() != S.getSharedIslCtx()) {
1489
0
    DEBUG(dbgs() << "DependenceInfo for another SCoP/isl_ctx\n");
1490
0
    return false;
1491
0
  }
1492
12
1493
12
  if (!D.hasValidDependences())
1494
0
    return false;
1495
12
1496
12
  isl_schedule_free(LastSchedule);
1497
12
  LastSchedule = nullptr;
1498
12
1499
12
  // Build input data.
1500
12
  int ValidityKinds =
1501
12
      Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
1502
12
  int ProximityKinds;
1503
12
1504
12
  if (OptimizeDeps == "all")
1505
12
    ProximityKinds =
1506
12
        Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
1507
0
  else if (OptimizeDeps == "raw")
1508
0
    ProximityKinds = Dependences::TYPE_RAW;
1509
0
  else {
1510
0
    errs() << "Do not know how to optimize for '" << OptimizeDeps << "'"
1511
0
           << " Falling back to optimizing all dependences.\n";
1512
0
    ProximityKinds =
1513
0
        Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
1514
0
  }
1515
12
1516
12
  isl::union_set Domain = S.getDomains();
1517
12
1518
12
  if (!Domain)
1519
0
    return false;
1520
12
1521
12
  ScopsProcessed++;
1522
12
  walkScheduleTreeForStatistics(S.getScheduleTree(), 0);
1523
12
1524
12
  isl::union_map Validity = give(D.getDependences(ValidityKinds));
1525
12
  isl::union_map Proximity = give(D.getDependences(ProximityKinds));
1526
12
1527
12
  // Simplify the dependences by removing the constraints introduced by the
1528
12
  // domains. This can speed up the scheduling time significantly, as large
1529
12
  // constant coefficients will be removed from the dependences. The
1530
12
  // introduction of some additional dependences reduces the possible
1531
12
  // transformations, but in most cases, such transformation do not seem to be
1532
12
  // interesting anyway. In some cases this option may stop the scheduler to
1533
12
  // find any schedule.
1534
12
  if (SimplifyDeps == "yes") {
1535
12
    Validity = Validity.gist_domain(Domain);
1536
12
    Validity = Validity.gist_range(Domain);
1537
12
    Proximity = Proximity.gist_domain(Domain);
1538
12
    Proximity = Proximity.gist_range(Domain);
1539
12
  } else 
if (0
SimplifyDeps != "no"0
) {
1540
0
    errs() << "warning: Option -polly-opt-simplify-deps should either be 'yes' "
1541
0
              "or 'no'. Falling back to default: 'yes'\n";
1542
0
  }
1543
12
1544
12
  DEBUG(dbgs() << "\n\nCompute schedule from: ");
1545
12
  DEBUG(dbgs() << "Domain := " << Domain << ";\n");
1546
12
  DEBUG(dbgs() << "Proximity := " << Proximity << ";\n");
1547
12
  DEBUG(dbgs() << "Validity := " << Validity << ";\n");
1548
12
1549
12
  unsigned IslSerializeSCCs;
1550
12
1551
12
  if (FusionStrategy == "max") {
1552
0
    IslSerializeSCCs = 0;
1553
12
  } else if (FusionStrategy == "min") {
1554
12
    IslSerializeSCCs = 1;
1555
12
  } else {
1556
0
    errs() << "warning: Unknown fusion strategy. Falling back to maximal "
1557
0
              "fusion.\n";
1558
0
    IslSerializeSCCs = 0;
1559
0
  }
1560
12
1561
12
  int IslMaximizeBands;
1562
12
1563
12
  if (MaximizeBandDepth == "yes") {
1564
12
    IslMaximizeBands = 1;
1565
12
  } else 
if (0
MaximizeBandDepth == "no"0
) {
1566
0
    IslMaximizeBands = 0;
1567
0
  } else {
1568
0
    errs() << "warning: Option -polly-opt-maximize-bands should either be 'yes'"
1569
0
              " or 'no'. Falling back to default: 'yes'\n";
1570
0
    IslMaximizeBands = 1;
1571
0
  }
1572
12
1573
12
  int IslOuterCoincidence;
1574
12
1575
12
  if (OuterCoincidence == "yes") {
1576
0
    IslOuterCoincidence = 1;
1577
12
  } else if (OuterCoincidence == "no") {
1578
12
    IslOuterCoincidence = 0;
1579
12
  } else {
1580
0
    errs() << "warning: Option -polly-opt-outer-coincidence should either be "
1581
0
              "'yes' or 'no'. Falling back to default: 'no'\n";
1582
0
    IslOuterCoincidence = 0;
1583
0
  }
1584
12
1585
12
  isl_ctx *Ctx = S.getIslCtx().get();
1586
12
1587
12
  isl_options_set_schedule_outer_coincidence(Ctx, IslOuterCoincidence);
1588
12
  isl_options_set_schedule_serialize_sccs(Ctx, IslSerializeSCCs);
1589
12
  isl_options_set_schedule_maximize_band_depth(Ctx, IslMaximizeBands);
1590
12
  isl_options_set_schedule_max_constant_term(Ctx, MaxConstantTerm);
1591
12
  isl_options_set_schedule_max_coefficient(Ctx, MaxCoefficient);
1592
12
  isl_options_set_tile_scale_tile_loops(Ctx, 0);
1593
12
1594
12
  auto OnErrorStatus = isl_options_get_on_error(Ctx);
1595
12
  isl_options_set_on_error(Ctx, ISL_ON_ERROR_CONTINUE);
1596
12
1597
12
  auto SC = isl::schedule_constraints::on_domain(Domain);
1598
12
  SC = SC.set_proximity(Proximity);
1599
12
  SC = SC.set_validity(Validity);
1600
12
  SC = SC.set_coincidence(Validity);
1601
12
  auto Schedule = SC.compute_schedule();
1602
12
  isl_options_set_on_error(Ctx, OnErrorStatus);
1603
12
1604
12
  walkScheduleTreeForStatistics(Schedule, 1);
1605
12
1606
12
  // In cases the scheduler is not able to optimize the code, we just do not
1607
12
  // touch the schedule.
1608
12
  if (!Schedule)
1609
0
    return false;
1610
12
1611
12
  ScopsRescheduled++;
1612
12
1613
12
  DEBUG({
1614
12
    auto *P = isl_printer_to_str(Ctx);
1615
12
    P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK);
1616
12
    P = isl_printer_print_schedule(P, Schedule.get());
1617
12
    auto *str = isl_printer_get_str(P);
1618
12
    dbgs() << "NewScheduleTree: \n" << str << "\n";
1619
12
    free(str);
1620
12
    isl_printer_free(P);
1621
12
  });
1622
12
1623
12
  Function &F = S.getFunction();
1624
12
  auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1625
12
  const OptimizerAdditionalInfoTy OAI = {TTI, const_cast<Dependences *>(&D)};
1626
12
  auto NewSchedule = ScheduleTreeOptimizer::optimizeSchedule(Schedule, &OAI);
1627
12
  walkScheduleTreeForStatistics(NewSchedule, 2);
1628
12
1629
12
  if (!ScheduleTreeOptimizer::isProfitableSchedule(S, NewSchedule))
1630
1
    return false;
1631
11
1632
11
  auto ScopStats = S.getStatistics();
1633
11
  ScopsOptimized++;
1634
11
  NumAffineLoopsOptimized += ScopStats.NumAffineLoops;
1635
11
  NumBoxedLoopsOptimized += ScopStats.NumBoxedLoops;
1636
11
1637
11
  S.setScheduleTree(NewSchedule);
1638
11
  S.markAsOptimized();
1639
11
1640
11
  if (OptimizedScops)
1641
1
    errs() << S;
1642
12
1643
12
  return false;
1644
12
}
1645
1646
8
void IslScheduleOptimizer::printScop(raw_ostream &OS, Scop &) const {
1647
8
  isl_printer *p;
1648
8
  char *ScheduleStr;
1649
8
1650
8
  OS << "Calculated schedule:\n";
1651
8
1652
8
  if (!LastSchedule) {
1653
8
    OS << "n/a\n";
1654
8
    return;
1655
8
  }
1656
0
1657
0
  p = isl_printer_to_str(isl_schedule_get_ctx(LastSchedule));
1658
0
  p = isl_printer_print_schedule(p, LastSchedule);
1659
0
  ScheduleStr = isl_printer_get_str(p);
1660
0
  isl_printer_free(p);
1661
0
1662
0
  OS << ScheduleStr << "\n";
1663
0
}
1664
1665
12
void IslScheduleOptimizer::getAnalysisUsage(AnalysisUsage &AU) const {
1666
12
  ScopPass::getAnalysisUsage(AU);
1667
12
  AU.addRequired<DependenceInfo>();
1668
12
  AU.addRequired<TargetTransformInfoWrapperPass>();
1669
12
1670
12
  AU.addPreserved<DependenceInfo>();
1671
12
}
1672
1673
0
Pass *polly::createIslScheduleOptimizerPass() {
1674
0
  return new IslScheduleOptimizer();
1675
0
}
1676
1677
17.2k
INITIALIZE_PASS_BEGIN(IslScheduleOptimizer, "polly-opt-isl",
1678
17.2k
                      "Polly - Optimize schedule of SCoP", false, false);
1679
17.2k
INITIALIZE_PASS_DEPENDENCY(DependenceInfo);
1680
17.2k
INITIALIZE_PASS_DEPENDENCY(ScopInfoRegionPass);
1681
17.2k
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass);
1682
17.2k
INITIALIZE_PASS_END(IslScheduleOptimizer, "polly-opt-isl",
1683
                    "Polly - Optimize schedule of SCoP", false, false)